From 8cbddf284b89dfd8b2ddc10e01551e337fd47c8b Mon Sep 17 00:00:00 2001 From: davidjumani Date: Thu, 17 Sep 2020 14:09:14 +0530 Subject: [PATCH 001/117] Adding nodeid param in scalekubecluster --- .../apache/cloudstack/api/ApiConstants.java | 1 + .../cluster/KubernetesClusterManagerImpl.java | 46 ++++++++++++------- .../KubernetesClusterActionWorker.java | 4 ++ ...esClusterResourceModifierActionWorker.java | 1 + .../KubernetesClusterScaleWorker.java | 39 ++++++++++++---- .../dao/KubernetesClusterVmMapDao.java | 1 + .../dao/KubernetesClusterVmMapDaoImpl.java | 9 ++++ .../cluster/ScaleKubernetesClusterCmd.java | 15 ++++++ 8 files changed, 90 insertions(+), 26 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 0087fee33402..842fd4fbce26 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -818,6 +818,7 @@ public class ApiConstants { public static final String KUBERNETES_VERSION_ID = "kubernetesversionid"; public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname"; public static final String MASTER_NODES = "masternodes"; + public static final String NODE_IDS = "nodeids"; public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion"; public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid"; public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize"; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 8f124f1e1fa5..0a6c233cb6f2 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -845,6 +845,7 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd final Long kubernetesClusterId = cmd.getId(); final Long serviceOfferingId = cmd.getServiceOfferingId(); final Long clusterSize = cmd.getClusterSize(); + final List nodeIds = cmd.getNodeIds(); if (kubernetesClusterId == null || kubernetesClusterId < 1L) { throw new InvalidParameterValueException("Invalid Kubernetes cluster ID"); } @@ -856,14 +857,26 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd if (zone == null) { logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName())); } + if (nodeIds != null) { + if (clusterSize != null || serviceOfferingId != null) { + throw new InvalidParameterValueException("nodeids can not be passed along with clustersize or service offering"); + } + List nodes = kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(kubernetesCluster.getId(), nodeIds); + // TODO : Ensure the vm is not the master node + nodeIds.stream().forEach(x -> LOGGER.info(String.format("Node: %d", x))); + if (nodes == null || nodes.size() != nodeIds.size()) { + nodes.stream().forEach(x -> LOGGER.info(String.format("NodeMap: %d", x.vmId))); + throw new InvalidParameterValueException("Invalid node ids"); + } + } else { + if (serviceOfferingId == null && clusterSize == null) { + throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled, 
either a new service offering or a new cluster size or nodeids to be removed must be passed", kubernetesCluster.getUuid())); + } + } Account caller = CallContext.current().getCallingAccount(); accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); - if (serviceOfferingId == null && clusterSize == null) { - throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled, either a new service offering or a new cluster size must be passed", kubernetesCluster.getName())); - } - final KubernetesSupportedVersion clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); if (clusterVersion == null) { throw new CloudRuntimeException(String.format("Invalid Kubernetes version associated with Kubernetes cluster : %s", kubernetesCluster.getName())); @@ -1227,8 +1240,9 @@ public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws Clou } validateKubernetesClusterScaleParameters(cmd); KubernetesClusterScaleWorker scaleWorker = - new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()), - serviceOfferingDao.findById(cmd.getServiceOfferingId()), cmd.getClusterSize(), this); + new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()), + serviceOfferingDao.findById(cmd.getServiceOfferingId()), cmd.getClusterSize(), + cmd.getNodeIds(), this); scaleWorker = ComponentContext.inject(scaleWorker); return scaleWorker.scaleCluster(); } @@ -1522,16 +1536,16 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] { - KubernetesServiceEnabled, - KubernetesClusterHyperVTemplateName, - KubernetesClusterKVMTemplateName, - KubernetesClusterVMwareTemplateName, - KubernetesClusterXenserverTemplateName, - KubernetesClusterNetworkOffering, - KubernetesClusterStartTimeout, - KubernetesClusterScaleTimeout, - KubernetesClusterUpgradeTimeout, - KubernetesClusterExperimentalFeaturesEnabled + KubernetesServiceEnabled, + KubernetesClusterHyperVTemplateName, + KubernetesClusterKVMTemplateName, + KubernetesClusterVMwareTemplateName, + KubernetesClusterXenserverTemplateName, + KubernetesClusterNetworkOffering, + KubernetesClusterStartTimeout, + KubernetesClusterScaleTimeout, + KubernetesClusterUpgradeTimeout, + KubernetesClusterExperimentalFeaturesEnabled }; } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index e5c811878ced..ad5ea45bf493 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -359,6 +359,10 @@ protected List getKubernetesClusterVMMaps() { return clusterVMs; } + protected List getKubernetesClusterVMMapsForNodes(List nodeIds) { + return kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(kubernetesCluster.getId(), nodeIds); + } + protected List getKubernetesClusterVMs() { List vmList = new ArrayList<>(); List clusterVMs = getKubernetesClusterVMMaps(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index b715f09d7b88..9458dc47ed89 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -294,6 +294,7 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f ResourceUnavailableException, InsufficientCapacityException { List nodes = new ArrayList<>(); for (int i = offset + 1; i <= nodeCount; i++) { + // TODO : Get sensible naming / rename them UserVm vm = createKubernetesNode(publicIpAddress, i); addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); startKubernetesVM(vm); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 1fce00ba81d5..a5a9505cbda0 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -19,6 +19,7 @@ import java.io.File; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -65,6 +66,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif private ServiceOffering serviceOffering; private Long clusterSize; + private List nodeIds; private KubernetesCluster.State originalState; private Network network; private long scaleTimeoutTime; @@ -72,11 +74,18 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, final ServiceOffering serviceOffering, final Long clusterSize, + final List nodeIds, final KubernetesClusterManagerImpl clusterManager) { super(kubernetesCluster, clusterManager); this.serviceOffering = serviceOffering; - this.clusterSize = clusterSize; + this.nodeIds = nodeIds; this.originalState = kubernetesCluster.getState(); + if (this.nodeIds != null) { + this.clusterSize = kubernetesCluster.getNodeCount() - this.nodeIds.size(); + } else { + this.clusterSize = clusterSize; + } + } protected void init() { @@ -302,16 +311,11 @@ private void scaleKubernetesClusterOffering() throws CloudRuntimeException { kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering); } - private void scaleDownKubernetesClusterSize() throws CloudRuntimeException { - if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { - stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleDownRequested); - } - final List originalVmList = getKubernetesClusterVMMaps(); - int i = originalVmList.size() - 1; + private void removeNodesFromCluster(List vmMaps) throws CloudRuntimeException { List removedVmIds = new ArrayList<>(); - while (i >= kubernetesCluster.getMasterNodeCount() + clusterSize) { - KubernetesClusterVmMapVO vmMapVO = originalVmList.get(i); + for (KubernetesClusterVmMapVO vmMapVO : vmMaps) { UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); + 
LOGGER.info(String.format("Removing vm : %s", userVM.getUuid())); if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, failed to remove Kubernetes node: %s running on VM : %s", kubernetesCluster.getName(), userVM.getHostName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } @@ -333,8 +337,8 @@ private void scaleDownKubernetesClusterSize() throws CloudRuntimeException { if (System.currentTimeMillis() > scaleTimeoutTime) { logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster : %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } - i--; } + // Scale network rules to update firewall rule try { scaleKubernetesClusterNetworkRules(null, removedVmIds); @@ -343,6 +347,21 @@ } } + private void scaleDownKubernetesClusterSize() throws CloudRuntimeException { + if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleDownRequested); + } + List<KubernetesClusterVmMapVO> vmList; + if (this.nodeIds != null) { + vmList = getKubernetesClusterVMMapsForNodes(this.nodeIds); + } else { + vmList = getKubernetesClusterVMMaps(); + vmList = vmList.subList((int) (kubernetesCluster.getMasterNodeCount() + clusterSize), vmList.size()); + } + Collections.reverse(vmList); + removeNodesFromCluster(vmList); + } + private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRuntimeException { if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java index 8b08dd37d553..42061cde1f0f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java @@ -23,4 +23,5 @@ public interface KubernetesClusterVmMapDao extends GenericDao<KubernetesClusterVmMapVO, Long> { public List<KubernetesClusterVmMapVO> listByClusterId(long clusterId); + public List<KubernetesClusterVmMapVO> listByClusterIdAndVmIdsIn(long clusterId, List<Long> vmIds); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java index 0b86b2c1a622..0f6ebfa6909a 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java @@ -34,6 +34,7 @@ public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase listByClusterId(long clusterId) { sc.setParameters("clusterId", clusterId); return listBy(sc, null); } + + @Override + public List<KubernetesClusterVmMapVO> listByClusterIdAndVmIdsIn(long clusterId, List<Long> vmIds) { + SearchCriteria<KubernetesClusterVmMapVO> sc = clusterIdSearch.create(); + sc.setParameters("clusterId", clusterId); + 
sc.setParameters("vmIdsIN", vmIds.toArray()); + return listBy(sc); + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java index 574d8a70395e..2d83ee222ba8 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.api.command.user.kubernetes.cluster; +import java.util.List; + import javax.inject.Inject; import org.apache.cloudstack.acl.RoleType; @@ -30,6 +32,7 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.KubernetesClusterResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; import org.apache.log4j.Logger; @@ -71,6 +74,14 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { description = "number of Kubernetes cluster nodes") private Long clusterSize; + @Parameter(name = ApiConstants.NODE_IDS, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType = UserVmResponse.class, + description = "the IDs of the nodes to be removed") + private List nodeIds; + + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -87,6 +98,10 @@ public Long getClusterSize() { return clusterSize; } + public List getNodeIds() { + return nodeIds; + } + @Override public String getEventType() { return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_SCALE; From 53a94627109a31366a58c1107479da0e6d253343 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Mon, 21 Sep 2020 11:49:50 +0530 Subject: [PATCH 002/117] Changing cks nomenclature --- ...tesClusterResourceModifierActionWorker.java | 18 ++++-------------- .../KubernetesClusterStartWorker.java | 10 ++++------ 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 9458dc47ed89..bef19ac2e831 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -294,8 +294,7 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f ResourceUnavailableException, InsufficientCapacityException { List nodes = new ArrayList<>(); for (int i = offset + 1; i <= nodeCount; i++) { - // TODO : Get sensible naming / rename them - UserVm vm = createKubernetesNode(publicIpAddress, i); + UserVm vm = createKubernetesNode(publicIpAddress); addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); startKubernetesVM(vm); vm = 
userVmDao.findById(vm.getId()); @@ -315,7 +314,7 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f return provisionKubernetesClusterNodeVms(nodeCount, 0, publicIpAddress); } - protected UserVm createKubernetesNode(String joinIp, int nodeInstance) throws ManagementServerException, + protected UserVm createKubernetesNode(String joinIp) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { UserVm nodeVm = null; DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); @@ -329,7 +328,8 @@ protected UserVm createKubernetesNode(String joinIp, int nodeInstance) throws Ma if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, nodeInstance)); + String suffix = Long.toHexString(System.currentTimeMillis()); + String hostName = String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, suffix); String k8sNodeConfig = null; try { k8sNodeConfig = getKubernetesNodeConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); @@ -513,14 +513,4 @@ protected String getKubernetesClusterNodeNamePrefix() { } return prefix; } - - protected String getKubernetesClusterNodeAvailableName(final String hostName) { - String name = hostName; - int suffix = 1; - while (vmInstanceDao.findVMByHostName(name) != null) { - name = String.format("%s-%d", hostName, suffix); - suffix++; - } - return name; - } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 855c264d6906..ea09a3077e7f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -195,11 +195,8 @@ private UserVm createKubernetesMaster(final Network network, String serverIp) th if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = kubernetesClusterNodeNamePrefix + "-master"; - if (kubernetesCluster.getMasterNodeCount() > 1) { - hostName += "-1"; - } - hostName = getKubernetesClusterNodeAvailableName(hostName); + String suffix = Long.toHexString(System.currentTimeMillis()); + String hostName = String.format("%s-master-%s", kubernetesClusterNodeNamePrefix, suffix); boolean haSupported = isKubernetesVersionSupportsHA(); String k8sMasterConfig = null; try { @@ -254,7 +251,8 @@ private UserVm createKubernetesAdditionalMaster(final String joinIp, final int a if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-master-%d", kubernetesClusterNodeNamePrefix, additionalMasterNodeInstance + 1)); + String suffix = Long.toHexString(System.currentTimeMillis()); + String hostName = String.format("%s-master-%s", kubernetesClusterNodeNamePrefix, suffix); String k8sMasterConfig = null; try { k8sMasterConfig = getKubernetesAdditionalMasterConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); From b81e9ee75bf5e7cfa7ebb6f91e8bca5f9a515581 Mon Sep 17 
00:00:00 2001 From: davidjumani Date: Thu, 1 Oct 2020 11:48:17 +0530 Subject: [PATCH 003/117] Adding isMaster in KubernetesClusterVmMap --- .../cluster/KubernetesClusterVmMap.java | 1 + .../cluster/KubernetesClusterVmMapVO.java | 48 +++++++++++-------- .../KubernetesClusterActionWorker.java | 4 +- ...esClusterResourceModifierActionWorker.java | 2 +- .../KubernetesClusterStartWorker.java | 4 +- 5 files changed, 35 insertions(+), 24 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java index c7399202348f..b20cf0451a6d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java @@ -27,4 +27,5 @@ public interface KubernetesClusterVmMap { long getId(); long getClusterId(); long getVmId(); + boolean isMaster(); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java index edb06e79534a..1f5856b26de4 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java @@ -28,6 +28,30 @@ @Table(name = "kubernetes_cluster_vm_map") public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + long id; + + @Column(name = "cluster_id") + long clusterId; + + @Column(name = "vm_id") + long vmId; + + @Column(name = "is_master") + boolean isMaster; + + public KubernetesClusterVmMapVO() { + } + + public KubernetesClusterVmMapVO(long clusterId, long vmId, boolean isMaster) { + this.vmId = vmId; + this.clusterId = clusterId; + this.isMaster = isMaster; + } + + @Override public long getId() { return id; @@ -36,11 +60,9 @@ public long getId() { @Override public long getClusterId() { return clusterId; - } public void setClusterId(long clusterId) { - this.clusterId = clusterId; } @@ -50,27 +72,15 @@ public long getVmId() { } public void setVmId(long vmId) { - this.vmId = vmId; } - @Id - @GeneratedValue(strategy = GenerationType.IDENTITY) - @Column(name = "id") - long id; - - @Column(name = "cluster_id") - long clusterId; - - @Column(name = "vm_id") - long vmId; - - public KubernetesClusterVmMapVO() { - + @Override + public boolean isMaster() { + return isMaster; } - public KubernetesClusterVmMapVO(long clusterId, long vmId) { - this.vmId = vmId; - this.clusterId = clusterId; + public void setMaster(boolean isMaster) { + this.isMaster = isMaster; } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index ad5ea45bf493..c391e218169b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -220,11 +220,11 @@ protected File getManagementServerSshPublicKeyFile() { return new File(keyFile); } - protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId) { + protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId, boolean isMaster) { return Transaction.execute(new TransactionCallback() { @Override public KubernetesClusterVmMapVO doInTransaction(TransactionStatus status) { - KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId); + KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId, isMaster); kubernetesClusterVmMapDao.persist(newClusterVmMap); return newClusterVmMap; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index bef19ac2e831..8bae1eda5aa3 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -295,7 +295,7 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f List nodes = new ArrayList<>(); for (int i = offset + 1; i <= nodeCount; i++) { UserVm vm = createKubernetesNode(publicIpAddress); - addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); + addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), false); startKubernetesVM(vm); vm = userVmDao.findById(vm.getId()); if (vm == null) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index ea09a3077e7f..8b13386ae1f7 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -274,7 +274,7 @@ private UserVm provisionKubernetesClusterMasterVm(final Network network, final S ManagementServerException, InsufficientCapacityException, ResourceUnavailableException { UserVm k8sMasterVM = null; k8sMasterVM = createKubernetesMaster(network, publicIpAddress); - addKubernetesClusterVm(kubernetesCluster.getId(), k8sMasterVM.getId()); + addKubernetesClusterVm(kubernetesCluster.getId(), k8sMasterVM.getId(), true); startKubernetesVM(k8sMasterVM); k8sMasterVM = userVmDao.findById(k8sMasterVM.getId()); if (k8sMasterVM == null) { @@ -293,7 +293,7 @@ private List provisionKubernetesClusterAdditionalMasterVms(final String for (int i = 1; i < kubernetesCluster.getMasterNodeCount(); i++) { UserVm vm = null; vm = createKubernetesAdditionalMaster(publicIpAddress, i); - addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); + addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), true); startKubernetesVM(vm); vm = 
userVmDao.findById(vm.getId()); if (vm == null) { From 52211651e4a21b92b9716ccebb43d6fb2e0346b6 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Thu, 1 Oct 2020 11:57:39 +0530 Subject: [PATCH 004/117] Ensuring we dont remove all masters --- .../main/resources/META-INF/db/schema-41510to41600.sql | 1 + .../cluster/KubernetesClusterManagerImpl.java | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index 0588c6dc1a90..23e15a21fa4f 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -19,3 +19,4 @@ -- Schema upgrade from 4.15.1.0 to 4.16.0.0 --; +ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `is_master` tinyint(1) NOT NULL DEFAULT '0'; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 0a6c233cb6f2..29de8457f941 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -862,12 +862,16 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd throw new InvalidParameterValueException("nodeids can not be passed along with clustersize or service offering"); } List nodes = kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(kubernetesCluster.getId(), nodeIds); - // TODO : Ensure the vm is not the master node - nodeIds.stream().forEach(x -> LOGGER.info(String.format("Node: %d", x))); + // Do all the nodes exist ? 
if (nodes == null || nodes.size() != nodeIds.size()) { - nodes.stream().forEach(x -> LOGGER.info(String.format("NodeMap: %d", x.vmId))); throw new InvalidParameterValueException("Invalid node ids"); } + // Ensure there's always a master + long mastersToRemove = nodes.stream().filter(x -> x.isMaster()).count(); + if (mastersToRemove >= kubernetesCluster.getMasterNodeCount()) { + throw new InvalidParameterValueException("Can not remove all masters from a cluster"); + } + } else { if (serviceOfferingId == null && clusterSize == null) { throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled, either a new service offering or a new cluster size or nodeids to be removed must be passed", kubernetesCluster.getUuid())); From 83b3bd787bc60bc3aa247778819c76c2c02178c3 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 20 Oct 2020 12:01:23 +0530 Subject: [PATCH 005/117] Deploying the autoscaler --- .../java/com/cloud/user/AccountService.java | 2 + .../apache/cloudstack/api/ApiConstants.java | 3 + .../META-INF/db/schema-41510to41600.sql | 4 + .../cluster/KubernetesClusterManagerImpl.java | 169 +++++++++++++++--- .../cluster/KubernetesClusterService.java | 1 + .../cluster/KubernetesClusterVO.java | 33 ++++ .../KubernetesClusterScaleWorker.java | 150 ++++++++++++++-- .../KubernetesClusterStartWorker.java | 4 + .../cluster/ScaleKubernetesClusterCmd.java | 33 +++- .../response/KubernetesClusterResponse.java | 21 +++ .../main/resources/conf/k8s-master-add.yml | 75 ++++++++ .../src/main/resources/conf/k8s-master.yml | 75 ++++++++ .../src/main/resources/conf/k8s-node.yml | 75 ++++++++ .../management/MockAccountManager.java | 5 + .../util/create-kubernetes-binaries-iso.sh | 8 + .../com/cloud/user/AccountManagerImpl.java | 4 + .../cloud/user/MockAccountManagerImpl.java | 5 + 17 files changed, 628 insertions(+), 39 deletions(-) diff --git a/api/src/main/java/com/cloud/user/AccountService.java b/api/src/main/java/com/cloud/user/AccountService.java index 4e3733bb5a49..98b1618a8da8 100644 --- a/api/src/main/java/com/cloud/user/AccountService.java +++ b/api/src/main/java/com/cloud/user/AccountService.java @@ -121,4 +121,6 @@ UserAccount createUserAccount(String userName, String password, String firstName UserAccount getUserAccountById(Long userId); public Map getKeys(GetUserKeysCmd cmd); + + public Map getKeys(Long userId); } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 842fd4fbce26..029d8cde11c9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -823,6 +823,9 @@ public class ApiConstants { public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid"; public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize"; public static final String SUPPORTS_HA = "supportsha"; + public static final String AUTOSCALING_ENABLED = "autoscaling_enabled"; + public static final String MIN_SIZE = "minsize"; + public static final String MAX_SIZE = "maxsize"; public static final String BOOT_TYPE = "boottype"; public static final String BOOT_MODE = "bootmode"; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index 23e15a21fa4f..28db9489c787 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ 
b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -19,4 +19,8 @@ -- Schema upgrade from 4.15.1.0 to 4.16.0.0 --; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyint(1) NOT NULL DEFAULT '0'; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `maxsize` bigint; + ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `is_master` tinyint(1) NOT NULL DEFAULT '0'; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 29de8457f941..5178b9476e2f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -26,18 +26,25 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.acl.Role; +import org.apache.cloudstack.acl.RoleService; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.acl.Rule; import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.acl.RolePermissionEntity.Permission; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiConstants.VMDetails; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -53,6 +60,7 @@ import org.apache.cloudstack.api.response.KubernetesClusterResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; @@ -133,7 +141,11 @@ import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountService; +import com.cloud.user.AccountVO; import com.cloud.user.SSHKeyPairVO; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.SSHKeyPairDao; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; @@ -169,6 +181,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne ScheduledExecutorService _gcExecutor; ScheduledExecutorService _stateScanner; + Account kubeadmin; + @Inject public KubernetesClusterDao kubernetesClusterDao; @Inject @@ -194,6 +208,10 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne @Inject protected TemplateJoinDao templateJoinDao; @Inject + protected AccountDao accountDao; + @Inject + protected RoleService roleService; + @Inject protected AccountService accountService; @Inject protected AccountManager accountManager; @@ -226,6 +244,45 @@ public class KubernetesClusterManagerImpl extends ManagerBase 
implements Kuberne @Inject protected FirewallRulesDao firewallRulesDao; + private Role createKubeadminRole() { + Role kubeadminRole = roleService.createRole(KUBEADMIN_ACCOUNT_NAME, RoleType.Admin, "Default Kubeadmin role"); + roleService.createRolePermission(kubeadminRole, new Rule("listKubernetesClusters"), Permission.ALLOW, ""); + roleService.createRolePermission(kubeadminRole, new Rule("scaleKubernetesCluster"), Permission.ALLOW, ""); + roleService.createRolePermission(kubeadminRole, new Rule("*"), Permission.DENY, ""); + return kubeadminRole; + } + + private void init() { + + // Check and create Kubeadmin role + Role kubeadminRole = null; + List roles = roleService.findRolesByName(KUBEADMIN_ACCOUNT_NAME); + if (roles == null) { + kubeadminRole = createKubeadminRole(); + } else { + roles = roles.stream().filter(x -> x.getDescription() == "Default Kubeadmin role").collect(Collectors.toList()); + if (roles.size() != 1 ) { + kubeadminRole = createKubeadminRole(); + } else { + kubeadminRole = roles.get(0); + } + } + + // Check and create Kubeadmin account + if (accountManager.getActiveAccountByName(KUBEADMIN_ACCOUNT_NAME, 1L) != null) { + return; + } + + AccountVO kubeadmin = new AccountVO(); + kubeadmin.setAccountName(KUBEADMIN_ACCOUNT_NAME); + kubeadmin.setUuid(UUID.randomUUID().toString()); + kubeadmin.setState(Account.State.enabled); + kubeadmin.setDomainId(1); + kubeadmin.setType(RoleType.Admin.getAccountType()); + kubeadmin.setRoleId(kubeadminRole.getId()); + kubeadmin = accountDao.persist(kubeadmin); + } + private void logMessage(final Level logLevel, final String message, final Exception e) { if (logLevel == Level.WARN) { if (e != null) { @@ -640,6 +697,13 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes } } response.setVirtualMachines(vmResponses); + Boolean isAutoscalingEnabled = kubernetesCluster.isAutoscalingEnabled(); + LOGGER.warn("Autoscaling enabled : " + isAutoscalingEnabled); + if (isAutoscalingEnabled != null) { + response.setAutoscalingEnabled(isAutoscalingEnabled); + } + response.setMinSize(kubernetesCluster.getMinSize()); + response.setMaxSize(kubernetesCluster.getMaxSize()); return response; } @@ -846,17 +910,59 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd final Long serviceOfferingId = cmd.getServiceOfferingId(); final Long clusterSize = cmd.getClusterSize(); final List nodeIds = cmd.getNodeIds(); - if (kubernetesClusterId == null || kubernetesClusterId < 1L) { - throw new InvalidParameterValueException("Invalid Kubernetes cluster ID"); - } + final Boolean isAutoscalingEnabled = cmd.isAutoscalingEnabled(); + final Long minSize = cmd.getMinSize(); + final Long maxSize = cmd.getMaxSize(); + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId); if (kubernetesCluster == null || kubernetesCluster.getRemoved() != null) { throw new InvalidParameterValueException("Invalid Kubernetes cluster ID"); } + final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); if (zone == null) { logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName())); } + + if (serviceOfferingId == null && clusterSize == null && nodeIds == null && isAutoscalingEnabled == null) { + throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled, either a new service offering or a new cluster size or nodeids to be removed or autoscaling must be passed", kubernetesCluster.getUuid())); + 
} + + Account caller = CallContext.current().getCallingAccount(); + accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); + + final KubernetesSupportedVersion clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); + if (clusterVersion == null) { + throw new CloudRuntimeException(String.format("Invalid Kubernetes version associated with Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + } + + if (!(kubernetesCluster.getState().equals(KubernetesCluster.State.Created) || + kubernetesCluster.getState().equals(KubernetesCluster.State.Running) || + kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped))) { + throw new PermissionDeniedException(String.format("Kubernetes cluster ID: %s is in %s state", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString())); + } + + if (isAutoscalingEnabled != null && isAutoscalingEnabled) { + if (clusterSize != null || serviceOfferingId != null || nodeIds != null) { + throw new InvalidParameterValueException("autoscaling can not be passed along with nodeids or clustersize or service offering"); + } + + String csUrl = ApiServiceConfiguration.ApiServletPath.value(); + if (csUrl == null || csUrl.contains("localhost")) { + throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); + } + + if (minSize == null || maxSize == null) { + throw new InvalidParameterValueException("autoscaling requires minsize and maxsize to be passed"); + } + if (minSize < 1) { + throw new InvalidParameterValueException("minsize must be more than 1"); + } + if (maxSize <= minSize) { + throw new InvalidParameterValueException("maxsize must be greater than minsize"); + } + } + if (nodeIds != null) { if (clusterSize != null || serviceOfferingId != null) { throw new InvalidParameterValueException("nodeids can not be passed along with clustersize or service offering"); @@ -871,19 +977,6 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd if (mastersToRemove >= kubernetesCluster.getMasterNodeCount()) { throw new InvalidParameterValueException("Can not remove all masters from a cluster"); } - - } else { - if (serviceOfferingId == null && clusterSize == null) { - throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled, either a new service offering or a new cluster size or nodeids to be removed must be passed", kubernetesCluster.getUuid())); - } - } - - Account caller = CallContext.current().getCallingAccount(); - accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); - - final KubernetesSupportedVersion clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); - if (clusterVersion == null) { - throw new CloudRuntimeException(String.format("Invalid Kubernetes version associated with Kubernetes cluster : %s", kubernetesCluster.getName())); } ServiceOffering serviceOffering = null; @@ -916,12 +1009,6 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd } } - if (!(kubernetesCluster.getState().equals(KubernetesCluster.State.Created) || - kubernetesCluster.getState().equals(KubernetesCluster.State.Running) || - kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped))) { - throw new PermissionDeniedException(String.format("Kubernetes cluster : %s is in %s state", kubernetesCluster.getName(), 
kubernetesCluster.getState().toString())); - } - if (clusterSize != null) { if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) { // Cannot scale stopped cluster currently for cluster size throw new PermissionDeniedException(String.format("Kubernetes cluster : %s is in %s state", kubernetesCluster.getName(), kubernetesCluster.getState().toString())); @@ -1237,16 +1324,50 @@ public KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesC return response; } + private String[] createServiceAccount() { + // TODO : Maybe create a service account kubeadmin with restricted permissions and add the user to that instead + Account caller = CallContext.current().getCallingAccount(); + String username = caller.getAccountName() + "-kubeadmin"; + UserAccount kubeadmin = accountService.getActiveUserAccount(username, caller.getDomainId()); + String[] keys = null; + if (kubeadmin == null) { + User kube = accountService.createUser(username, "password", "kube", "admin", "kubeadmin", null, caller.getAccountName(), caller.getDomainId(), null); + keys = accountService.createApiKeyAndSecretKey(kube.getId()); + } else { + String apiKey = kubeadmin.getApiKey(); + String secretKey = kubeadmin.getSecretKey(); + if (Strings.isNullOrEmpty(apiKey) || Strings.isNullOrEmpty(secretKey)) { + keys = accountService.createApiKeyAndSecretKey(kubeadmin.getId()); + } else { + keys = new String[]{apiKey, secretKey}; + } + } + return keys; + } + @Override public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws CloudRuntimeException { if (!KubernetesServiceEnabled.value()) { logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } validateKubernetesClusterScaleParameters(cmd); + + Boolean isAutoscalingEnabled = cmd.isAutoscalingEnabled(); + String[] keys = null; + if (isAutoscalingEnabled != null && isAutoscalingEnabled) { + keys = createServiceAccount(); + } + KubernetesClusterScaleWorker scaleWorker = new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()), - serviceOfferingDao.findById(cmd.getServiceOfferingId()), cmd.getClusterSize(), - cmd.getNodeIds(), this); + serviceOfferingDao.findById(cmd.getServiceOfferingId()), + cmd.getClusterSize(), + cmd.getNodeIds(), + cmd.isAutoscalingEnabled(), + cmd.getMinSize(), + cmd.getMaxSize(), + keys, + this); scaleWorker = ComponentContext.inject(scaleWorker); return scaleWorker.scaleCluster(); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java index db5ab91b3d11..07939ddb101a 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java @@ -34,6 +34,7 @@ public interface KubernetesClusterService extends PluggableService, Configurable static final String MIN_KUBERNETES_VERSION_HA_SUPPORT = "1.16.0"; static final int MIN_KUBERNETES_CLUSTER_NODE_CPU = 2; static final int MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE = 2048; + static final String KUBEADMIN_ACCOUNT_NAME = "kubeadmin"; static final ConfigKey KubernetesServiceEnabled = new ConfigKey("Advanced", Boolean.class, "cloud.kubernetes.service.enabled", diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index 9ff0be335f37..9ae2e9fac480 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -93,6 +93,15 @@ public class KubernetesClusterVO implements KubernetesCluster { @Column(name = "endpoint") private String endpoint; + @Column(name = "autoscaling_enabled") + private boolean isAutoscalingEnabled; + + @Column(name = "minsize") + private Long minSize; + + @Column(name = "maxsize") + private Long maxSize; + @Column(name = GenericDao.CREATED_COLUMN) private Date created; @@ -303,6 +312,30 @@ public Date getCreated() { return created; } + public boolean isAutoscalingEnabled() { + return isAutoscalingEnabled; + } + + public void setAutoscalingEnabled(boolean isAutoscalingEnabled) { + this.isAutoscalingEnabled = isAutoscalingEnabled; + } + + public Long getMinSize() { + return minSize; + } + + public void setMinSize(Long minSize) { + this.minSize = minSize; + } + + public Long getMaxSize() { + return maxSize; + } + + public void setMaxSize(Long maxSize) { + this.maxSize = maxSize; + } + public KubernetesClusterVO() { this.uuid = UUID.randomUUID().toString(); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index a5a9505cbda0..8a44f3153fb1 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -25,6 +25,7 @@ import javax.inject.Inject; +import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.context.CallContext; import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Level; @@ -69,17 +70,29 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif private List nodeIds; private KubernetesCluster.State originalState; private Network network; + private Long minSize; + private Long maxSize; + private Boolean isAutoscalingEnabled; private long scaleTimeoutTime; + private String[] keys; public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, final ServiceOffering serviceOffering, final Long clusterSize, final List nodeIds, + final Boolean isAutoscalingEnabled, + final Long minSize, + final Long maxSize, + final String[] keys, final KubernetesClusterManagerImpl clusterManager) { super(kubernetesCluster, clusterManager); this.serviceOffering = serviceOffering; this.nodeIds = nodeIds; + this.isAutoscalingEnabled = isAutoscalingEnabled; + this.minSize = minSize; + this.maxSize = maxSize; this.originalState = kubernetesCluster.getState(); + this.keys = keys; if (this.nodeIds != null) { this.clusterSize = kubernetesCluster.getNodeCount() - this.nodeIds.size(); } else { @@ -158,31 +171,54 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f } } - private KubernetesClusterVO updateKubernetesClusterEntry(final long cores, final long memory, - final Long size, final Long serviceOfferingId) { + private KubernetesClusterVO 
updateKubernetesClusterEntry(final Long cores, final Long memory, + final Long size, final Long serviceOfferingId, + final Boolean autoscale, final Long minSize, final Long maxSize) { return Transaction.execute((TransactionCallback) status -> { KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId()); - updatedCluster.setCores(cores); - updatedCluster.setMemory(memory); + if (cores != null) { + updatedCluster.setCores(cores); + } + if (memory != null) { + updatedCluster.setMemory(memory); + } if (size != null) { updatedCluster.setNodeCount(size); } if (serviceOfferingId != null) { updatedCluster.setServiceOfferingId(serviceOfferingId); } - kubernetesClusterDao.persist(updatedCluster); + LOGGER.warn("GOT : " + autoscale + " - " + minSize + " - " + maxSize); + if (autoscale != null) { + LOGGER.warn("Updating autoscale : " + autoscale); + updatedCluster.setAutoscalingEnabled(autoscale); + LOGGER.warn("Updated autoscale : " + updatedCluster.isAutoscalingEnabled()); + } + updatedCluster.setMinSize(minSize); + updatedCluster.setMaxSize(maxSize); + updatedCluster = kubernetesClusterDao.persist(updatedCluster); + LOGGER.warn("Persisted autoscale : " + updatedCluster.isAutoscalingEnabled()); return updatedCluster; }); } private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException { - final ServiceOffering serviceOffering = newServiceOffering == null ? - serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering; + final ServiceOffering serviceOffering = newServiceOffering; final Long serviceOfferingId = newServiceOffering == null ? null : serviceOffering.getId(); - final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getMasterNodeCount()); - final long cores = serviceOffering.getCpu() * size; - final long memory = serviceOffering.getRamSize() * size; - KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId); + final Long size = newSize; + final Long cores = newServiceOffering == null ? null : serviceOffering.getCpu() * size; + final Long memory = newServiceOffering == null ? 
null : serviceOffering.getRamSize() * size; + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId, null, null, null); + if (kubernetesClusterVO == null) { + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", + kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + return kubernetesClusterVO; + } + + private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscale, final Long minSize, final Long maxSize) throws CloudRuntimeException { + LOGGER.warn("Changing cluster to : " + autoscale + " - " + minSize + " - " + maxSize); + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscale, minSize, maxSize); if (kubernetesClusterVO == null) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); @@ -417,6 +453,90 @@ private void scaleKubernetesClusterSize() throws CloudRuntimeException { kubernetesCluster = updateKubernetesClusterEntry(clusterSize, null); } + private boolean enableAutoScaleKubernetesCluster() throws CloudRuntimeException { + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + List clusterVMs = getKubernetesClusterVMMaps(); + if (CollectionUtils.isEmpty(clusterVMs)) { + return false; + } + + final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); + + String hostName = userVm.getHostName(); + if (!Strings.isNullOrEmpty(hostName)) { + hostName = hostName.toLowerCase(); + } + + try { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo kubectl -n kube-system create secret generic cloudstack-secret " + + "--from-literal=api-url='%s' " + + "--from-literal=api-key='%s' " + + "--from-literal=secret-key='%s'", ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]), + 10000, 10000, 60000); + if (!result.first()) { + return false; + } + // result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + // pkFile, null, String.format("sudo kubectl apply -f /opt/autoscaler/autoscaler.yaml"), + // 10000, 10000, 60000); + result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", kubernetesCluster.getUuid(), maxSize, minSize), + 10000, 10000, 60000); + if (!result.first()) { + return false; + } + updateKubernetesClusterEntry(true, minSize, maxSize); + } catch (Exception e) { + String msg = String.format("Failed to autoscale Kubernetes cluster: %s", kubernetesCluster.getName()); + LOGGER.warn(msg, e); + } + return true; + } + + private boolean disableAutoScaleKubernetesCluster() throws CloudRuntimeException { + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + List clusterVMs = getKubernetesClusterVMMaps(); + if (CollectionUtils.isEmpty(clusterVMs)) { + return false; + } + + final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); + + 
String hostName = userVm.getHostName(); + if (!Strings.isNullOrEmpty(hostName)) { + hostName = hostName.toLowerCase(); + } + + try { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, "sudo kubectl -n kube-system delete secret cloudstack-secret", + 10000, 10000, 60000); + if (!result.first()) { + return false; + } + result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"), + 10000, 10000, 60000); + if (!result.first()) { + return false; + } + updateKubernetesClusterEntry(false, null, null); + } catch (Exception e) { + String msg = String.format("Failed to autoscale Kubernetes cluster: %s", kubernetesCluster.getName()); + LOGGER.warn(msg, e); + } + return true; + } + public boolean scaleCluster() throws CloudRuntimeException { init(); if (LOGGER.isInfoEnabled()) { @@ -428,6 +548,14 @@ public boolean scaleCluster() throws CloudRuntimeException { if (existingServiceOffering == null) { logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getName())); } + + if (this.isAutoscalingEnabled != null) { + if (this.isAutoscalingEnabled) { + return enableAutoScaleKubernetesCluster(); + } else { + return disableAutoScaleKubernetesCluster(); + } + } final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId(); final boolean clusterSizeScalingNeeded = clusterSize != null && clusterSize != originalClusterSize; final long newVMRequired = clusterSize == null ? 0 : clusterSize - originalClusterSize; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 8b13386ae1f7..95ace6b1ee52 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -138,6 +138,7 @@ private String getKubernetesMasterConfig(final String masterIp, final String ser final String clusterToken = "{{ k8s_master.cluster.token }}"; final String clusterInitArgsKey = "{{ k8s_master.cluster.initargs }}"; final String ejectIsoKey = "{{ k8s.eject.iso }}"; + final String clusterId = "{{ k8s_cluster_id }}"; final List addresses = new ArrayList<>(); addresses.add(masterIp); if (!serverIp.equals(masterIp)) { @@ -173,6 +174,7 @@ private String getKubernetesMasterConfig(final String masterIp, final String ser initArgs += String.format(" --kubernetes-version=%s", getKubernetesClusterVersion().getSemanticVersion()); k8sMasterConfig = k8sMasterConfig.replace(clusterInitArgsKey, initArgs); k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + k8sMasterConfig = k8sMasterConfig.replace(clusterId, this.kubernetesCluster.getUuid()); return k8sMasterConfig; } @@ -222,6 +224,7 @@ private String getKubernetesAdditionalMasterConfig(final String joinIp, final bo final String sshPubKey = "{{ k8s.ssh.pub.key }}"; final String clusterHACertificateKey = "{{ k8s_master.cluster.ha.certificate.key }}"; final String ejectIsoKey = "{{ k8s.eject.iso }}"; + final String clusterId = "{{ 
k8s_cluster_id }}"; String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; String sshKeyPair = kubernetesCluster.getKeyPair(); if (!Strings.isNullOrEmpty(sshKeyPair)) { @@ -235,6 +238,7 @@ private String getKubernetesAdditionalMasterConfig(final String joinIp, final bo k8sMasterConfig = k8sMasterConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); k8sMasterConfig = k8sMasterConfig.replace(clusterHACertificateKey, KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + k8sMasterConfig = k8sMasterConfig.replace(clusterId, this.kubernetesCluster.getUuid()); return k8sMasterConfig; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java index 2d83ee222ba8..1b5ab5b9b45b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -61,17 +61,17 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// @Parameter(name = ApiConstants.ID, type = CommandType.UUID, required = true, - entityType = KubernetesClusterResponse.class, - description = "the ID of the Kubernetes cluster") + entityType = KubernetesClusterResponse.class, + description = "the ID of the Kubernetes cluster") private Long id; @ACL(accessType = SecurityChecker.AccessType.UseEntry) @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, - description = "the ID of the service offering for the virtual machines in the cluster.") + description = "the ID of the service offering for the virtual machines in the cluster.") private Long serviceOfferingId; @Parameter(name=ApiConstants.SIZE, type = CommandType.LONG, - description = "number of Kubernetes cluster nodes") + description = "number of Kubernetes cluster nodes") private Long clusterSize; @Parameter(name = ApiConstants.NODE_IDS, @@ -81,6 +81,19 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { description = "the IDs of the nodes to be removed") private List nodeIds; + @Parameter(name=ApiConstants.AUTOSCALING_ENABLED, type = CommandType.BOOLEAN, + description = "Whether autoscaling is enabled for the cluster") + private Boolean isAutoscalingEnabled; + + @Parameter(name=ApiConstants.MIN_SIZE, type = CommandType.LONG, + description = "Minimum size of the cluster") + private Long minSize; + + @Parameter(name=ApiConstants.MAX_SIZE, type = CommandType.LONG, + description = "Maximum size of the cluster") + private Long maxSize; + + // TODO : Get api keys too! 
///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -102,6 +115,18 @@ public List getNodeIds() { return nodeIds; } + public Boolean isAutoscalingEnabled() { + return isAutoscalingEnabled; + } + + public Long getMinSize() { + return minSize; + } + + public Long getMaxSize() { + return maxSize; + } + @Override public String getEventType() { return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_SCALE; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java index bb3f14f56891..724107cc96dc 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java @@ -140,6 +140,17 @@ public class KubernetesClusterResponse extends BaseResponse implements Controlle @SerializedName(ApiConstants.IP_ADDRESS_ID) @Param(description = "Public IP Address ID of the cluster") private String ipAddressId; + @SerializedName(ApiConstants.AUTOSCALING_ENABLED) + @Param(description = "Whether autoscaling is enabled for the cluster") + private boolean isAutoscalingEnabled; + + @SerializedName(ApiConstants.MIN_SIZE) + @Param(description = "Minimum size of the cluster") + private Long minSize; + + @SerializedName(ApiConstants.MAX_SIZE) + @Param(description = "Maximum size of the cluster") + private Long maxSize; public KubernetesClusterResponse() { } @@ -339,5 +350,15 @@ public void setIpAddress(String ipAddress) { public void setIpAddressId(String ipAddressId) { this.ipAddressId = ipAddressId; + public void setAutoscalingEnabled(boolean isAutoscalingEnabled) { + this.isAutoscalingEnabled = isAutoscalingEnabled; + } + + public void setMinSize(Long minSize) { + this.minSize = minSize; + } + + public void setMaxSize(Long maxSize) { + this.maxSize = maxSize; } } diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml index 787ea97491ce..608326eaa44c 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml @@ -125,6 +125,11 @@ write-files: done <<< "$output" setup_complete=true fi + if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then + mkdir -p /opt/autoscaler + cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + sed 's//{{ k8s_cluster_id }}/g' /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler.yaml + fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then eject "${iso_drive_path}" @@ -201,6 +206,76 @@ write-files: sudo touch /home/core/success echo "true" > /home/core/success + - path: /opt/bin/autoscale-kube-cluster + permissions: 0700 + owner: root:root + content: | + #! /bin/bash + function usage() { + cat << USAGE + Usage: ./autoscale-kube-cluster [OPTIONS]... + Enables autoscaling for the kubernetes cluster. 
+ Arguments: + -i, --id string ID of the cluster + -e, --enable Enables autoscaling + -d, --disable Disables autoscaling + -M, --maxsize number Maximum size of the cluster + -m, --minsize number Minimum size of the cluster + Other arguments: + -h, --help Display this help message and exit + Examples: + ./autoscale-kube-cluster -e -M 3 -m 1 + ./autoscale-kube-cluster -d + USAGE + exit 0 + } + ID="" + ENABLE="" + MINSIZE="" + MAXSIZE="" + while [ -n "$1" ]; do + case "$1" in + -h | --help) + usage + ;; + -i | --id) + ID=$2 + shift 2 + ;; + -e | --enable) + ENABLE="true" + shift 1 + ;; + -d | --enable) + ENABLE="false" + shift 1 + ;; + -M | --maxsize) + MAXSIZE=$2 + shift 2 + ;; + -m | --minsize) + MINSIZE=$2 + shift 2 + ;; + -*|*) + echo "ERROR: no such option $1. -h or --help for help" + exit 1 + ;; + esac + done + if [ $ENABLE == "true" ] ; then + if [ -e /opt/autoscaler/autoscaler.yaml ]; then + sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml + kubectl apply -f /opt/autoscaler/autoscaler_now.yaml + exit 0 + fi + echo "Cluster does not support automated autoscaling. Please deploy the autoscaler manually" + exit 1 + else + kubectl delete deployment -n kube-system cluster-autoscaler + fi + coreos: units: - name: docker.service diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml index 14828578ed8c..8cf34c36c6eb 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml @@ -147,6 +147,11 @@ write-files: fi mkdir -p "${K8S_CONFIG_SCRIPTS_COPY_DIR}" cp ${BINARIES_DIR}/*.yaml "${K8S_CONFIG_SCRIPTS_COPY_DIR}" + if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then + mkdir -p /opt/autoscaler + cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + sed 's//{{ k8s_cluster_id }}/g' /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler.yaml + fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then eject "${iso_drive_path}" @@ -258,6 +263,76 @@ write-files: sudo touch /home/core/success echo "true" > /home/core/success + - path: /opt/bin/autoscale-kube-cluster + permissions: 0700 + owner: root:root + content: | + #! /bin/bash + function usage() { + cat << USAGE + Usage: ./autoscale-kube-cluster [OPTIONS]... + Enables autoscaling for the kubernetes cluster. + Arguments: + -i, --id string ID of the cluster + -e, --enable Enables autoscaling + -d, --disable Disables autoscaling + -M, --maxsize number Maximum size of the cluster + -m, --minsize number Minimum size of the cluster + Other arguments: + -h, --help Display this help message and exit + Examples: + ./autoscale-kube-cluster -e -M 3 -m 1 + ./autoscale-kube-cluster -d + USAGE + exit 0 + } + ID="" + ENABLE="" + MINSIZE="" + MAXSIZE="" + while [ -n "$1" ]; do + case "$1" in + -h | --help) + usage + ;; + -i | --id) + ID=$2 + shift 2 + ;; + -e | --enable) + ENABLE="true" + shift 1 + ;; + -d | --enable) + ENABLE="false" + shift 1 + ;; + -M | --maxsize) + MAXSIZE=$2 + shift 2 + ;; + -m | --minsize) + MINSIZE=$2 + shift 2 + ;; + -*|*) + echo "ERROR: no such option $1. 
-h or --help for help" + exit 1 + ;; + esac + done + if [ $ENABLE == "true" ] ; then + if [ -e /opt/autoscaler/autoscaler.yaml ]; then + sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml + kubectl apply -f /opt/autoscaler/autoscaler_now.yaml + exit 0 + fi + echo "Cluster does not support automated autoscaling. Please deploy the autoscaler manually" + exit 1 + else + kubectl delete deployment -n kube-system cluster-autoscaler + fi + coreos: units: - name: docker.service diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml index d2f5454a669d..8040c8a054be 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml @@ -125,6 +125,11 @@ write-files: done <<< "$output" setup_complete=true fi + if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then + mkdir -p /opt/autoscaler + cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + sed 's//{{ k8s_cluster_id }}/g' /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler.yaml + fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then eject "${iso_drive_path}" @@ -201,6 +206,76 @@ write-files: sudo touch /home/core/success echo "true" > /home/core/success + - path: /opt/bin/autoscale-kube-cluster + permissions: 0700 + owner: root:root + content: | + #! /bin/bash + function usage() { + cat << USAGE + Usage: ./autoscale-kube-cluster [OPTIONS]... + Enables autoscaling for the kubernetes cluster. + Arguments: + -i, --id string ID of the cluster + -e, --enable Enables autoscaling + -d, --disable Disables autoscaling + -M, --maxsize number Maximum size of the cluster + -m, --minsize number Minimum size of the cluster + Other arguments: + -h, --help Display this help message and exit + Examples: + ./autoscale-kube-cluster -e -M 3 -m 1 + ./autoscale-kube-cluster -d + USAGE + exit 0 + } + ID="" + ENABLE="" + MINSIZE="" + MAXSIZE="" + while [ -n "$1" ]; do + case "$1" in + -h | --help) + usage + ;; + -i | --id) + ID=$2 + shift 2 + ;; + -e | --enable) + ENABLE="true" + shift 1 + ;; + -d | --enable) + ENABLE="false" + shift 1 + ;; + -M | --maxsize) + MAXSIZE=$2 + shift 2 + ;; + -m | --minsize) + MINSIZE=$2 + shift 2 + ;; + -*|*) + echo "ERROR: no such option $1. -h or --help for help" + exit 1 + ;; + esac + done + if [ $ENABLE == "true" ] ; then + if [ -e /opt/autoscaler/autoscaler.yaml ]; then + sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml + kubectl apply -f /opt/autoscaler/autoscaler_now.yaml + exit 0 + fi + echo "Cluster does not support automated autoscaling. 
Please deploy the autoscaler manually" + exit 1 + else + kubectl delete deployment -n kube-system cluster-autoscaler + fi + coreos: units: - name: docker.service diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java index 68ff2e803789..5cd90c930089 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java @@ -457,6 +457,11 @@ public Map getKeys(GetUserKeysCmd cmd){ return null; } + @Override + public Map getKeys(Long userId) { + return null; + } + @Override public void checkAccess(User user, ControlledEntity entity) throws PermissionDeniedException { diff --git a/scripts/util/create-kubernetes-binaries-iso.sh b/scripts/util/create-kubernetes-binaries-iso.sh index e8ad0e573662..b9449329c9a6 100755 --- a/scripts/util/create-kubernetes-binaries-iso.sh +++ b/scripts/util/create-kubernetes-binaries-iso.sh @@ -74,6 +74,11 @@ echo "Downloading dashboard config ${DASHBORAD_CONFIG_URL}" dashboard_conf_file="${working_dir}/dashboard.yaml" curl -sSL ${DASHBORAD_CONFIG_URL} -o ${dashboard_conf_file} +AUTOSCALER_URL="https://github.com/shapeblue/autoscaler/blob/add-acs/cluster-autoscaler/cloudprovider/cloudstack/examples/cluster-autoscaler-standard.yaml" +echo "Downloading kubernetes cluster autoscaler ${AUTOSCALER_URL}" +autoscaler_conf_file="${working_dir}/autoscaler.yaml" +curl -sSL ${AUTOSCALER_URL} -o ${autoscaler_conf_file} + echo "Fetching k8s docker images..." docker -v if [ $? -ne 0 ]; then @@ -99,6 +104,9 @@ do output=`printf "%s\n" ${output} ${images}` done +# Don't forget about the autoscaler image ! 
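+# Appending it to the image list below means it is pulled and packed into the binaries ISO
+# together with the Kubernetes system images.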
+autoscaler_image="davidjumani/cluster-autoscaler:latest" +output=`printf "%s\n" ${output} ${autoscaler_image}` while read -r line; do echo "Downloading docker image $line ---" sudo docker pull "$line" diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index a20090cf7c14..37d49a685730 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -2431,7 +2431,11 @@ public Pair findUserByApiKey(String apiKey) { @Override public Map getKeys(GetUserKeysCmd cmd) { final long userId = cmd.getID(); + return getKeys(userId); + } + @Override + public Map getKeys(Long userId) { User user = getActiveUser(userId); if (user == null) { throw new InvalidParameterValueException("Unable to find user by id"); diff --git a/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java b/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java index ea6287d2bcc1..7916007c4065 100644 --- a/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java +++ b/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java @@ -425,6 +425,11 @@ public Map getKeys(GetUserKeysCmd cmd) { return null; } + @Override + public Map getKeys(Long userId) { + return null; + } + @Override public void checkAccess(User user, ControlledEntity entity) throws PermissionDeniedException { From ef65329c99946331b882caac60a335f633a92248 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 20 Oct 2020 16:43:03 +0530 Subject: [PATCH 006/117] Adding public ip to listKubernetesClusterResponse --- .../api/response/KubernetesClusterResponse.java | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java index 724107cc96dc..8358ba061a3e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java @@ -152,6 +152,14 @@ public class KubernetesClusterResponse extends BaseResponse implements Controlle @Param(description = "Maximum size of the cluster") private Long maxSize; + @SerializedName(ApiConstants.IP_ADDRESS) + @Param(description = "Public IP Address of the cluster") + private String ipAddress; + + @SerializedName(ApiConstants.IP_ADDRESS_ID) + @Param(description = "Public IP Address ID of the cluster") + private String ipAddressId; + public KubernetesClusterResponse() { } @@ -361,4 +369,12 @@ public void setMinSize(Long minSize) { public void setMaxSize(Long maxSize) { this.maxSize = maxSize; } + + public void setIpAddress(String ipAddress) { + this.ipAddress = ipAddress; + } + + public void setIpAddressId(String ipAddressId) { + this.ipAddressId = ipAddressId; + } } From 74539e2d63e2d0e049b8a67a003fd368a825929b Mon Sep 17 00:00:00 2001 From: davidjumani Date: Wed, 21 Oct 2020 15:29:09 +0530 Subject: [PATCH 007/117] Adding api keys during cluster creation --- .../apache/cloudstack/api/ApiConstants.java | 2 +- .../META-INF/db/schema-41510to41600.sql | 4 +- .../kubernetes/cluster/KubernetesCluster.java | 2 + .../cluster/KubernetesClusterManagerImpl.java | 83 ++-------- .../cluster/KubernetesClusterVO.java | 10 +- 
.../cluster/KubernetesClusterVmMapVO.java | 12 +- .../KubernetesClusterActionWorker.java | 35 +++++ .../KubernetesClusterScaleWorker.java | 147 +++++++----------- .../KubernetesClusterStartWorker.java | 9 ++ 9 files changed, 127 insertions(+), 177 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 029d8cde11c9..ba797196fdc3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -823,7 +823,7 @@ public class ApiConstants { public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid"; public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize"; public static final String SUPPORTS_HA = "supportsha"; - public static final String AUTOSCALING_ENABLED = "autoscaling_enabled"; + public static final String AUTOSCALING_ENABLED = "autoscalingenabled"; public static final String MIN_SIZE = "minsize"; public static final String MAX_SIZE = "maxsize"; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index 28db9489c787..15a1a4df5536 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -19,8 +19,8 @@ -- Schema upgrade from 4.15.1.0 to 4.16.0.0 --; -ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyint(1) NOT NULL DEFAULT '0'; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0; ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint; ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `maxsize` bigint; -ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `is_master` tinyint(1) NOT NULL DEFAULT '0'; +ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `is_master` tinyint(1) unsigned NOT NULL DEFAULT 0; \ No newline at end of file diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java index aef304a03d36..c87ae1df9849 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java @@ -37,6 +37,7 @@ enum Event { StopRequested, DestroyRequested, RecoveryRequested, + AutoscaleRequested, ScaleUpRequested, ScaleDownRequested, UpgradeRequested, @@ -81,6 +82,7 @@ enum State { s_fsm.addTransition(State.Running, Event.FaultsDetected, State.Alert); + s_fsm.addTransition(State.Running, Event.AutoscaleRequested, State.Scaling); s_fsm.addTransition(State.Running, Event.ScaleUpRequested, State.Scaling); s_fsm.addTransition(State.Running, Event.ScaleDownRequested, State.Scaling); s_fsm.addTransition(State.Scaling, Event.OperationSucceeded, State.Running); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 5178b9476e2f..faf46bd766e1 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -26,25 +26,18 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.ControlledEntity; -import org.apache.cloudstack.acl.Role; -import org.apache.cloudstack.acl.RoleService; -import org.apache.cloudstack.acl.RoleType; -import org.apache.cloudstack.acl.Rule; import org.apache.cloudstack.acl.SecurityChecker; -import org.apache.cloudstack.acl.RolePermissionEntity.Permission; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiConstants.VMDetails; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -141,7 +134,6 @@ import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountService; -import com.cloud.user.AccountVO; import com.cloud.user.SSHKeyPairVO; import com.cloud.user.User; import com.cloud.user.UserAccount; @@ -210,8 +202,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne @Inject protected AccountDao accountDao; @Inject - protected RoleService roleService; - @Inject protected AccountService accountService; @Inject protected AccountManager accountManager; @@ -244,45 +234,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne @Inject protected FirewallRulesDao firewallRulesDao; - private Role createKubeadminRole() { - Role kubeadminRole = roleService.createRole(KUBEADMIN_ACCOUNT_NAME, RoleType.Admin, "Default Kubeadmin role"); - roleService.createRolePermission(kubeadminRole, new Rule("listKubernetesClusters"), Permission.ALLOW, ""); - roleService.createRolePermission(kubeadminRole, new Rule("scaleKubernetesCluster"), Permission.ALLOW, ""); - roleService.createRolePermission(kubeadminRole, new Rule("*"), Permission.DENY, ""); - return kubeadminRole; - } - - private void init() { - - // Check and create Kubeadmin role - Role kubeadminRole = null; - List roles = roleService.findRolesByName(KUBEADMIN_ACCOUNT_NAME); - if (roles == null) { - kubeadminRole = createKubeadminRole(); - } else { - roles = roles.stream().filter(x -> x.getDescription() == "Default Kubeadmin role").collect(Collectors.toList()); - if (roles.size() != 1 ) { - kubeadminRole = createKubeadminRole(); - } else { - kubeadminRole = roles.get(0); - } - } - - // Check and create Kubeadmin account - if (accountManager.getActiveAccountByName(KUBEADMIN_ACCOUNT_NAME, 1L) != null) { - return; - } - - AccountVO kubeadmin = new AccountVO(); - kubeadmin.setAccountName(KUBEADMIN_ACCOUNT_NAME); - kubeadmin.setUuid(UUID.randomUUID().toString()); - kubeadmin.setState(Account.State.enabled); - kubeadmin.setDomainId(1); - kubeadmin.setType(RoleType.Admin.getAccountType()); - kubeadmin.setRoleId(kubeadminRole.getId()); - kubeadmin = accountDao.persist(kubeadmin); - } - private void logMessage(final Level logLevel, final String message, final Exception e) { if (logLevel == Level.WARN) { if (e != null) { @@ -697,8 +648,7 @@ public 
KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes } } response.setVirtualMachines(vmResponses); - Boolean isAutoscalingEnabled = kubernetesCluster.isAutoscalingEnabled(); - LOGGER.warn("Autoscaling enabled : " + isAutoscalingEnabled); + Boolean isAutoscalingEnabled = kubernetesCluster.getAutoscalingEnabled(); if (isAutoscalingEnabled != null) { response.setAutoscalingEnabled(isAutoscalingEnabled); } @@ -947,11 +897,6 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd throw new InvalidParameterValueException("autoscaling can not be passed along with nodeids or clustersize or service offering"); } - String csUrl = ApiServiceConfiguration.ApiServletPath.value(); - if (csUrl == null || csUrl.contains("localhost")) { - throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); - } - if (minSize == null || maxSize == null) { throw new InvalidParameterValueException("autoscaling requires minsize and maxsize to be passed"); } @@ -1106,6 +1051,12 @@ public KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } + // Need this for cloudstack-kubernetes-provider && autoscaler + String csUrl = ApiServiceConfiguration.ApiServletPath.value(); + if (csUrl == null || csUrl.contains("localhost")) { + throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); + } + validateKubernetesClusterCreateParameters(cmd); final DataCenter zone = dataCenterDao.findById(cmd.getZoneId()); @@ -1191,14 +1142,18 @@ public boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate if (zone == null) { logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName())); } - KubernetesClusterStartWorker startWorker = - new KubernetesClusterStartWorker(kubernetesCluster, this); - startWorker = ComponentContext.inject(startWorker); if (onCreate) { // Start for Kubernetes cluster in 'Created' state + String[] keys = getServiceUserKeys(); + KubernetesClusterStartWorker startWorker = + new KubernetesClusterStartWorker(kubernetesCluster, this, keys); + startWorker = ComponentContext.inject(startWorker); return startWorker.startKubernetesClusterOnCreate(); } else { // Start for Kubernetes cluster in 'Stopped' state. 
Resources are already provisioned, just need to be started + KubernetesClusterStartWorker startWorker = + new KubernetesClusterStartWorker(kubernetesCluster, this); + startWorker = ComponentContext.inject(startWorker); return startWorker.startStoppedKubernetesCluster(); } } @@ -1324,8 +1279,7 @@ public KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesC return response; } - private String[] createServiceAccount() { - // TODO : Maybe create a service account kubeadmin with restricted permissions and add the user to that instead + private String[] getServiceUserKeys() { Account caller = CallContext.current().getCallingAccount(); String username = caller.getAccountName() + "-kubeadmin"; UserAccount kubeadmin = accountService.getActiveUserAccount(username, caller.getDomainId()); @@ -1352,12 +1306,6 @@ public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws Clou } validateKubernetesClusterScaleParameters(cmd); - Boolean isAutoscalingEnabled = cmd.isAutoscalingEnabled(); - String[] keys = null; - if (isAutoscalingEnabled != null && isAutoscalingEnabled) { - keys = createServiceAccount(); - } - KubernetesClusterScaleWorker scaleWorker = new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()), serviceOfferingDao.findById(cmd.getServiceOfferingId()), @@ -1366,7 +1314,6 @@ public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws Clou cmd.isAutoscalingEnabled(), cmd.getMinSize(), cmd.getMaxSize(), - keys, this); scaleWorker = ComponentContext.inject(scaleWorker); return scaleWorker.scaleCluster(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index 9ae2e9fac480..365fdf63f625 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -94,7 +94,7 @@ public class KubernetesClusterVO implements KubernetesCluster { private String endpoint; @Column(name = "autoscaling_enabled") - private boolean isAutoscalingEnabled; + private boolean autoscalingEnabled; @Column(name = "minsize") private Long minSize; @@ -312,12 +312,12 @@ public Date getCreated() { return created; } - public boolean isAutoscalingEnabled() { - return isAutoscalingEnabled; + public boolean getAutoscalingEnabled() { + return autoscalingEnabled; } - public void setAutoscalingEnabled(boolean isAutoscalingEnabled) { - this.isAutoscalingEnabled = isAutoscalingEnabled; + public void setAutoscalingEnabled(boolean enabled) { + this.autoscalingEnabled = enabled; } public Long getMinSize() { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java index 1f5856b26de4..abbd90a949a9 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java @@ -40,15 +40,15 @@ public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap { long vmId; @Column(name = "is_master") - boolean isMaster; + boolean master; public KubernetesClusterVmMapVO() { } - public 
KubernetesClusterVmMapVO(long clusterId, long vmId, boolean isMaster) { + public KubernetesClusterVmMapVO(long clusterId, long vmId, boolean master) { this.vmId = vmId; this.clusterId = clusterId; - this.isMaster = isMaster; + this.master = master; } @@ -77,10 +77,10 @@ public void setVmId(long vmId) { @Override public boolean isMaster() { - return isMaster; + return master; } - public void setMaster(boolean isMaster) { - this.isMaster = isMaster; + public void setMaster(boolean master) { + this.master = master; } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index c391e218169b..dded8f4c588d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -28,6 +28,7 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.ca.CAManager; +import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.commons.collections.CollectionUtils; @@ -70,6 +71,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; +import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.UserVmService; import com.cloud.vm.dao.UserVmDao; import com.google.common.base.Strings; @@ -384,4 +386,37 @@ protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Eve return false; } } + + protected boolean createSecret(String[] keys) { + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + List clusterVMs = getKubernetesClusterVMMaps(); + if (CollectionUtils.isEmpty(clusterVMs)) { + return false; + } + + final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); + + String hostName = userVm.getHostName(); + if (!Strings.isNullOrEmpty(hostName)) { + hostName = hostName.toLowerCase(); + } + + try { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo kubectl -n kube-system create secret generic cloudstack-secret " + + "--from-literal=api-url='%s' " + + "--from-literal=api-key='%s' " + + "--from-literal=secret-key='%s'", ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]), + 10000, 10000, 60000); + return result.first(); + } catch (Exception e) { + String msg = String.format("Failed to add cloudstack-secret to Kubernetes cluster: %s", kubernetesCluster.getName()); + LOGGER.warn(msg, e); + } + return true; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 8a44f3153fb1..58be477874a1 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -25,7 +25,6 @@ import javax.inject.Inject; -import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.context.CallContext; import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Level; @@ -52,6 +51,7 @@ import com.cloud.utils.Pair; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.UserVmVO; @@ -74,7 +74,6 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif private Long maxSize; private Boolean isAutoscalingEnabled; private long scaleTimeoutTime; - private String[] keys; public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, final ServiceOffering serviceOffering, @@ -83,7 +82,6 @@ public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, final Boolean isAutoscalingEnabled, final Long minSize, final Long maxSize, - final String[] keys, final KubernetesClusterManagerImpl clusterManager) { super(kubernetesCluster, clusterManager); this.serviceOffering = serviceOffering; @@ -92,7 +90,6 @@ public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, this.minSize = minSize; this.maxSize = maxSize; this.originalState = kubernetesCluster.getState(); - this.keys = keys; if (this.nodeIds != null) { this.clusterSize = kubernetesCluster.getNodeCount() - this.nodeIds.size(); } else { @@ -172,33 +169,31 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f } private KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, - final Long size, final Long serviceOfferingId, - final Boolean autoscale, final Long minSize, final Long maxSize) { - return Transaction.execute((TransactionCallback) status -> { - KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId()); - if (cores != null) { - updatedCluster.setCores(cores); - } - if (memory != null) { - updatedCluster.setMemory(memory); - } - if (size != null) { - updatedCluster.setNodeCount(size); - } - if (serviceOfferingId != null) { - updatedCluster.setServiceOfferingId(serviceOfferingId); - } - LOGGER.warn("GOT : " + autoscale + " - " + minSize + " - " + maxSize); - if (autoscale != null) { - LOGGER.warn("Updating autoscale : " + autoscale); - updatedCluster.setAutoscalingEnabled(autoscale); - LOGGER.warn("Updated autoscale : " + updatedCluster.isAutoscalingEnabled()); + final Long size, final Long serviceOfferingId, final Boolean autoscale, final Long minSize, final Long maxSize) { + return Transaction.execute(new TransactionCallback() { + @Override + public KubernetesClusterVO doInTransaction(TransactionStatus status) { + KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(); + if (cores != null) { + updatedCluster.setCores(cores); + } + if (memory != null) { + updatedCluster.setMemory(memory); + } + if (size != null) { + updatedCluster.setNodeCount(size); + } + if (serviceOfferingId != null) { + updatedCluster.setServiceOfferingId(serviceOfferingId); + } + if (autoscale != null) { + 
updatedCluster.setAutoscalingEnabled(autoscale.booleanValue()); + } + updatedCluster.setMinSize(minSize); + updatedCluster.setMaxSize(maxSize); + kubernetesClusterDao.update(kubernetesCluster.getId(), updatedCluster); + return updatedCluster; } - updatedCluster.setMinSize(minSize); - updatedCluster.setMaxSize(maxSize); - updatedCluster = kubernetesClusterDao.persist(updatedCluster); - LOGGER.warn("Persisted autoscale : " + updatedCluster.isAutoscalingEnabled()); - return updatedCluster; }); } @@ -217,7 +212,6 @@ private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, fin } private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscale, final Long minSize, final Long maxSize) throws CloudRuntimeException { - LOGGER.warn("Changing cluster to : " + autoscale + " - " + minSize + " - " + maxSize); KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscale, minSize, maxSize); if (kubernetesClusterVO == null) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", @@ -453,52 +447,11 @@ private void scaleKubernetesClusterSize() throws CloudRuntimeException { kubernetesCluster = updateKubernetesClusterEntry(clusterSize, null); } - private boolean enableAutoScaleKubernetesCluster() throws CloudRuntimeException { - File pkFile = getManagementServerSshPublicKeyFile(); - Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); - publicIpAddress = publicIpSshPort.first(); - sshPort = publicIpSshPort.second(); - - List clusterVMs = getKubernetesClusterVMMaps(); - if (CollectionUtils.isEmpty(clusterVMs)) { - return false; - } - - final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); - - String hostName = userVm.getHostName(); - if (!Strings.isNullOrEmpty(hostName)) { - hostName = hostName.toLowerCase(); - } - - try { - Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo kubectl -n kube-system create secret generic cloudstack-secret " + - "--from-literal=api-url='%s' " + - "--from-literal=api-key='%s' " + - "--from-literal=secret-key='%s'", ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]), - 10000, 10000, 60000); - if (!result.first()) { - return false; - } - // result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - // pkFile, null, String.format("sudo kubectl apply -f /opt/autoscaler/autoscaler.yaml"), - // 10000, 10000, 60000); - result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", kubernetesCluster.getUuid(), maxSize, minSize), - 10000, 10000, 60000); - if (!result.first()) { - return false; - } - updateKubernetesClusterEntry(true, minSize, maxSize); - } catch (Exception e) { - String msg = String.format("Failed to autoscale Kubernetes cluster: %s", kubernetesCluster.getName()); - LOGGER.warn(msg, e); + private boolean autoscaleCluster(boolean enable) { + if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.AutoscaleRequested); } - return true; - } - private boolean disableAutoScaleKubernetesCluster() throws CloudRuntimeException { File pkFile = getManagementServerSshPublicKeyFile(); Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); publicIpAddress = publicIpSshPort.first(); @@ -517,24 
+470,32 @@ private boolean disableAutoScaleKubernetesCluster() throws CloudRuntimeException } try { - Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, "sudo kubectl -n kube-system delete secret cloudstack-secret", - 10000, 10000, 60000); - if (!result.first()) { - return false; - } - result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"), - 10000, 10000, 60000); - if (!result.first()) { - return false; + if (enable) { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", kubernetesCluster.getUuid(), maxSize, minSize), + 10000, 10000, 60000); + if (!result.first()) { + return false; + } + updateKubernetesClusterEntry(true, minSize, maxSize); + } else { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"), + 10000, 10000, 60000); + if (!result.first()) { + return false; + } + updateKubernetesClusterEntry(false, null, null); } - updateKubernetesClusterEntry(false, null, null); + return true; } catch (Exception e) { - String msg = String.format("Failed to autoscale Kubernetes cluster: %s", kubernetesCluster.getName()); - LOGGER.warn(msg, e); + String msg = String.format("Failed to autoscale Kubernetes cluster: %s : %s", kubernetesCluster.getName(), e.getMessage()); + logAndThrow(Level.ERROR, msg); + return false; + } finally { + // Deploying the autoscaler might fail but it can be deployed manually too, so no need to go to an alert state + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); } - return true; } public boolean scaleCluster() throws CloudRuntimeException { @@ -550,11 +511,7 @@ public boolean scaleCluster() throws CloudRuntimeException { } if (this.isAutoscalingEnabled != null) { - if (this.isAutoscalingEnabled) { - return enableAutoScaleKubernetesCluster(); - } else { - return disableAutoScaleKubernetesCluster(); - } + return autoscaleCluster(this.isAutoscalingEnabled); } final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId(); final boolean clusterSizeScalingNeeded = clusterSize != null && clusterSize != originalClusterSize; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 95ace6b1ee52..714463b355bc 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -77,11 +77,17 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker { private KubernetesSupportedVersion kubernetesClusterVersion; + private String[] keys; public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) { super(kubernetesCluster, clusterManager); } + public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager, 
final String[]keys) { + super(kubernetesCluster, clusterManager); + this.keys = keys; + } + public KubernetesSupportedVersion getKubernetesClusterVersion() { if (kubernetesClusterVersion == null) { kubernetesClusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); @@ -567,6 +573,9 @@ public boolean startKubernetesClusterOnCreate() { if (!isKubernetesClusterDashboardServiceRunning(true, startTimeoutTime)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } + if (!createSecret(keys)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); + } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); return true; } From 9121b1e613c52139da74c84e8cc5d7e22282589b Mon Sep 17 00:00:00 2001 From: davidjumani Date: Thu, 22 Oct 2020 15:57:53 +0530 Subject: [PATCH 008/117] Cleanup --- .../cluster/KubernetesClusterManagerImpl.java | 14 ++++++++------ .../KubernetesClusterActionWorker.java | 1 + 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index faf46bd766e1..e79bdf6b0f0d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -26,6 +26,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -137,8 +138,9 @@ import com.cloud.user.SSHKeyPairVO; import com.cloud.user.User; import com.cloud.user.UserAccount; -import com.cloud.user.dao.AccountDao; +import com.cloud.user.UserVO; import com.cloud.user.dao.SSHKeyPairDao; +import com.cloud.user.dao.UserDao; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.ComponentContext; @@ -173,8 +175,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne ScheduledExecutorService _gcExecutor; ScheduledExecutorService _stateScanner; - Account kubeadmin; - @Inject public KubernetesClusterDao kubernetesClusterDao; @Inject @@ -200,7 +200,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne @Inject protected TemplateJoinDao templateJoinDao; @Inject - protected AccountDao accountDao; + protected UserDao userDao; @Inject protected AccountService accountService; @Inject @@ -1281,11 +1281,13 @@ public KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesC private String[] getServiceUserKeys() { Account caller = CallContext.current().getCallingAccount(); - String username = caller.getAccountName() + "-kubeadmin"; + String username = caller.getAccountName() + "-" + KUBEADMIN_ACCOUNT_NAME; UserAccount kubeadmin = accountService.getActiveUserAccount(username, caller.getDomainId()); String[] keys = 
null; if (kubeadmin == null) { - User kube = accountService.createUser(username, "password", "kube", "admin", "kubeadmin", null, caller.getAccountName(), caller.getDomainId(), null); + User kube = userDao.persist(new UserVO(caller.getAccountId(), username, UUID.randomUUID().toString(), "kube", "admin", "kubeadmin", + null, UUID.randomUUID().toString(), User.Source.UNKNOWN)); + // User kube = accountService.createUser(username, "password", "kube", "admin", "kubeadmin", null, caller.getAccountName(), caller.getDomainId(), null); keys = accountService.createApiKeyAndSecretKey(kube.getId()); } else { String apiKey = kubeadmin.getApiKey(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index dded8f4c588d..b5ca0977e6ed 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -388,6 +388,7 @@ protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Eve } protected boolean createSecret(String[] keys) { + // TODO : Make this into a secret file. Maybe add something in the yamls for it File pkFile = getManagementServerSshPublicKeyFile(); Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); publicIpAddress = publicIpSshPort.first(); From 39651680cf1d97151865207135798d303f6ab247 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Fri, 23 Oct 2020 09:54:36 +0530 Subject: [PATCH 009/117] Deploy keys as a secret file --- .../kubernetes/cluster/KubernetesCluster.java | 3 + .../cluster/KubernetesClusterVO.java | 17 ++- .../KubernetesClusterActionWorker.java | 36 ----- ...esClusterResourceModifierActionWorker.java | 127 ++++++++++++++++++ .../KubernetesClusterScaleWorker.java | 94 +------------ .../KubernetesClusterStartWorker.java | 3 +- .../cluster/ScaleKubernetesClusterCmd.java | 2 - .../main/resources/conf/k8s-master-add.yml | 55 ++++++++ .../src/main/resources/conf/k8s-master.yml | 55 ++++++++ .../src/main/resources/conf/k8s-node.yml | 55 ++++++++ 10 files changed, 313 insertions(+), 134 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java index c87ae1df9849..a06424d283b2 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java @@ -133,4 +133,7 @@ enum State { @Override State getState(); Date getCreated(); + boolean getAutoscalingEnabled(); + Long getMinSize(); + Long getMaxSize(); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index 365fdf63f625..805f44d4db93 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -312,6 +312,7 @@ public Date getCreated() { return created; } + @Override public boolean getAutoscalingEnabled() { return autoscalingEnabled; } @@ -320,6 +321,7 @@ public void setAutoscalingEnabled(boolean enabled) { this.autoscalingEnabled = enabled; } + @Override public Long getMinSize() { return minSize; } @@ -328,6 +330,7 @@ public void setMinSize(Long minSize) { this.minSize = minSize; } + @Override public Long getMaxSize() { return maxSize; } @@ -341,8 +344,8 @@ public KubernetesClusterVO() { } public KubernetesClusterVO(String name, String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId, - long networkId, long domainId, long accountId, long masterNodeCount, long nodeCount, State state, - String keyPair, long cores, long memory, Long nodeRootDiskSize, String endpoint) { + long networkId, long domainId, long accountId, long masterNodeCount, long nodeCount, State state, String keyPair, long cores, + long memory, Long nodeRootDiskSize, String endpoint) { this.uuid = UUID.randomUUID().toString(); this.name = name; this.description = description; @@ -366,6 +369,16 @@ public KubernetesClusterVO(String name, String description, long zoneId, long ku this.checkForGc = false; } + public KubernetesClusterVO(String name, String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId, + long networkId, long domainId, long accountId, long masterNodeCount, long nodeCount, State state, String keyPair, long cores, + long memory, Long nodeRootDiskSize, String endpoint, boolean autoscalingEnabled, Long minSize, Long maxSize) { + this(name, description, zoneId, kubernetesVersionId, serviceOfferingId, templateId, networkId, domainId, accountId, masterNodeCount, + nodeCount, state, keyPair, cores, memory, nodeRootDiskSize, endpoint); + this.autoscalingEnabled = autoscalingEnabled; + this.minSize = minSize; + this.maxSize = maxSize; + } + @Override public Class getEntityType() { return KubernetesCluster.class; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index b5ca0977e6ed..c391e218169b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.ca.CAManager; -import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.commons.collections.CollectionUtils; @@ -71,7 +70,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; -import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.UserVmService; import com.cloud.vm.dao.UserVmDao; import com.google.common.base.Strings; @@ -386,38 +384,4 @@ protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Eve return false; } } - - protected boolean 
createSecret(String[] keys) { - // TODO : Make this into a secret file. Maybe add something in the yamls for it - File pkFile = getManagementServerSshPublicKeyFile(); - Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); - publicIpAddress = publicIpSshPort.first(); - sshPort = publicIpSshPort.second(); - - List clusterVMs = getKubernetesClusterVMMaps(); - if (CollectionUtils.isEmpty(clusterVMs)) { - return false; - } - - final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); - - String hostName = userVm.getHostName(); - if (!Strings.isNullOrEmpty(hostName)) { - hostName = hostName.toLowerCase(); - } - - try { - Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo kubectl -n kube-system create secret generic cloudstack-secret " + - "--from-literal=api-url='%s' " + - "--from-literal=api-key='%s' " + - "--from-literal=secret-key='%s'", ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]), - 10000, 10000, 60000); - return result.first(); - } catch (Exception e) { - String msg = String.format("Failed to add cloudstack-secret to Kubernetes cluster: %s", kubernetesCluster.getName()); - LOGGER.warn(msg, e); - } - return true; - } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 8bae1eda5aa3..3036dda7bfcf 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -17,6 +17,7 @@ package com.cloud.kubernetes.cluster.actionworkers; +import java.io.File; import java.io.IOException; import java.lang.reflect.Field; import java.util.ArrayList; @@ -31,6 +32,7 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd; import org.apache.cloudstack.api.command.user.vm.StartVMCmd; +import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Level; @@ -55,6 +57,8 @@ import com.cloud.kubernetes.cluster.KubernetesCluster; import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.KubernetesClusterVO; +import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil; import com.cloud.network.IpAddress; import com.cloud.network.Network; @@ -77,11 +81,14 @@ import com.cloud.utils.StringUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionCallbackWithException; import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExecutionException; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; +import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.Nic; import com.cloud.vm.UserVmManager; import com.cloud.vm.VirtualMachine; 
@@ -513,4 +520,124 @@ protected String getKubernetesClusterNodeNamePrefix() { } return prefix; } + + protected boolean createSecret(String[] keys) { + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + List clusterVMs = getKubernetesClusterVMMaps(); + if (CollectionUtils.isEmpty(clusterVMs)) { + return false; + } + + final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); + + String hostName = userVm.getHostName(); + if (!Strings.isNullOrEmpty(hostName)) { + hostName = hostName.toLowerCase(); + } + + try { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo /opt/bin/deploy-cloudstack-secret -u '%s' -k '%s' -s '%s'", + ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]), + 10000, 10000, 60000); + return result.first(); + } catch (Exception e) { + String msg = String.format("Failed to add cloudstack-secret to Kubernetes cluster: %s", kubernetesCluster.getName()); + LOGGER.warn(msg, e); + } + return true; + } + + protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, + final Long size, final Long serviceOfferingId, final Boolean autoscale, final Long minSize, final Long maxSize) { + return Transaction.execute(new TransactionCallback() { + @Override + public KubernetesClusterVO doInTransaction(TransactionStatus status) { + KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(); + if (cores != null) { + updatedCluster.setCores(cores); + } + if (memory != null) { + updatedCluster.setMemory(memory); + } + if (size != null) { + updatedCluster.setNodeCount(size); + } + if (serviceOfferingId != null) { + updatedCluster.setServiceOfferingId(serviceOfferingId); + } + if (autoscale != null) { + updatedCluster.setAutoscalingEnabled(autoscale.booleanValue()); + } + updatedCluster.setMinSize(minSize); + updatedCluster.setMaxSize(maxSize); + kubernetesClusterDao.update(kubernetesCluster.getId(), updatedCluster); + return updatedCluster; + } + }); + } + + private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscale, final Long minSize, final Long maxSize) throws CloudRuntimeException { + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscale, minSize, maxSize); + if (kubernetesClusterVO == null) { + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", + kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + return kubernetesClusterVO; + } + + protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { + if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.AutoscaleRequested); + } + + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + List clusterVMs = getKubernetesClusterVMMaps(); + if (CollectionUtils.isEmpty(clusterVMs)) { + return false; + } + + final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); + + String hostName = userVm.getHostName(); + if (!Strings.isNullOrEmpty(hostName)) { + hostName = 
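// Hedged sketch: one way to double-check, from the management server, that the node-side
// deploy-cloudstack-secret helper created the kube-system secret. The kubectl verification command
// below is not part of this patch; the sshExecute call mirrors the usage in the worker above and is
// assumed to return Pair<Boolean, String>.

import java.io.File;
import com.cloud.utils.Pair;
import com.cloud.utils.ssh.SshHelper;

public class CloudStackSecretCheck {
    public static boolean secretPresent(String publicIpAddress, int sshPort, String user, File sshKeyFile) {
        try {
            Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, user, sshKeyFile, null,
                    "sudo kubectl -n kube-system get secret cloudstack-secret -o name", 10000, 10000, 60000);
            // result.second() should contain "secret/cloudstack-secret" when the helper succeeded
            return result.first() != null && result.first();
        } catch (Exception e) {
            return false; // treat any SSH or kubectl failure as "not present"
        }
    }
}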
hostName.toLowerCase(); + } + + try { + if (enable) { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", kubernetesCluster.getUuid(), maxSize, minSize), + 10000, 10000, 60000); + if (!result.first()) { + return false; + } + updateKubernetesClusterEntry(true, minSize, maxSize); + } else { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"), + 10000, 10000, 60000); + if (!result.first()) { + return false; + } + updateKubernetesClusterEntry(false, null, null); + } + return true; + } catch (Exception e) { + String msg = String.format("Failed to autoscale Kubernetes cluster: %s : %s", kubernetesCluster.getName(), e.getMessage()); + logAndThrow(Level.ERROR, msg); + return false; + } finally { + // Deploying the autoscaler might fail but it can be deployed manually too, so no need to go to an alert state + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + } + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 58be477874a1..dc37989ad14c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -49,9 +49,6 @@ import com.cloud.offering.ServiceOffering; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.TransactionCallback; -import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.UserVmVO; @@ -168,35 +165,6 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f } } - private KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, - final Long size, final Long serviceOfferingId, final Boolean autoscale, final Long minSize, final Long maxSize) { - return Transaction.execute(new TransactionCallback() { - @Override - public KubernetesClusterVO doInTransaction(TransactionStatus status) { - KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(); - if (cores != null) { - updatedCluster.setCores(cores); - } - if (memory != null) { - updatedCluster.setMemory(memory); - } - if (size != null) { - updatedCluster.setNodeCount(size); - } - if (serviceOfferingId != null) { - updatedCluster.setServiceOfferingId(serviceOfferingId); - } - if (autoscale != null) { - updatedCluster.setAutoscalingEnabled(autoscale.booleanValue()); - } - updatedCluster.setMinSize(minSize); - updatedCluster.setMaxSize(maxSize); - kubernetesClusterDao.update(kubernetesCluster.getId(), updatedCluster); - return updatedCluster; - } - }); - } - private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException { final ServiceOffering serviceOffering = newServiceOffering; final Long serviceOfferingId = newServiceOffering == null ? 
null : serviceOffering.getId(); @@ -211,15 +179,6 @@ private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, fin return kubernetesClusterVO; } - private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscale, final Long minSize, final Long maxSize) throws CloudRuntimeException { - KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscale, minSize, maxSize); - if (kubernetesClusterVO == null) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", - kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); - } - return kubernetesClusterVO; - } - private boolean removeKubernetesClusterNode(final String ipAddress, final int port, final UserVm userVm, final int retries, final int waitDuration) { File pkFile = getManagementServerSshPublicKeyFile(); int retryCounter = 0; @@ -447,57 +406,6 @@ private void scaleKubernetesClusterSize() throws CloudRuntimeException { kubernetesCluster = updateKubernetesClusterEntry(clusterSize, null); } - private boolean autoscaleCluster(boolean enable) { - if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { - stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.AutoscaleRequested); - } - - File pkFile = getManagementServerSshPublicKeyFile(); - Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); - publicIpAddress = publicIpSshPort.first(); - sshPort = publicIpSshPort.second(); - - List clusterVMs = getKubernetesClusterVMMaps(); - if (CollectionUtils.isEmpty(clusterVMs)) { - return false; - } - - final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); - - String hostName = userVm.getHostName(); - if (!Strings.isNullOrEmpty(hostName)) { - hostName = hostName.toLowerCase(); - } - - try { - if (enable) { - Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", kubernetesCluster.getUuid(), maxSize, minSize), - 10000, 10000, 60000); - if (!result.first()) { - return false; - } - updateKubernetesClusterEntry(true, minSize, maxSize); - } else { - Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"), - 10000, 10000, 60000); - if (!result.first()) { - return false; - } - updateKubernetesClusterEntry(false, null, null); - } - return true; - } catch (Exception e) { - String msg = String.format("Failed to autoscale Kubernetes cluster: %s : %s", kubernetesCluster.getName(), e.getMessage()); - logAndThrow(Level.ERROR, msg); - return false; - } finally { - // Deploying the autoscaler might fail but it can be deployed manually too, so no need to go to an alert state - stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); - } - } - public boolean scaleCluster() throws CloudRuntimeException { init(); if (LOGGER.isInfoEnabled()) { @@ -511,7 +419,7 @@ public boolean scaleCluster() throws CloudRuntimeException { } if (this.isAutoscalingEnabled != null) { - return autoscaleCluster(this.isAutoscalingEnabled); + return autoscaleCluster(this.isAutoscalingEnabled, minSize, maxSize); } final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId(); final boolean clusterSizeScalingNeeded = clusterSize != null 
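// Hedged client-side sketch: the autoscaling branch above is reached by calling the existing
// scaleKubernetesCluster API with the new parameters (autoscalingenabled, minsize, maxsize), the same
// names the Marvin test in this series uses. The endpoint, cluster UUID and the unsigned GET are
// placeholders; real calls are signed with an API key/secret or routed through an authenticated session.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class AutoscaleRequestSketch {
    public static void main(String[] args) throws Exception {
        String endpoint = "http://mgmt.example.com:8080/client/api"; // placeholder management server
        String clusterId = "REPLACE-WITH-CLUSTER-UUID";              // placeholder
        String query = "command=scaleKubernetesCluster&response=json"
                + "&id=" + clusterId
                + "&autoscalingenabled=true&minsize=1&maxsize=5";
        HttpRequest request = HttpRequest.newBuilder(URI.create(endpoint + "?" + query)).GET().build();
        HttpResponse<String> response = HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + ": " + response.body());
    }
}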
&& clusterSize != originalClusterSize; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 714463b355bc..bcd5722034c7 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -83,7 +83,8 @@ public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, f super(kubernetesCluster, clusterManager); } - public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager, final String[]keys) { + public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager, + final String[]keys) { super(kubernetesCluster, clusterManager); this.keys = keys; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java index 1b5ab5b9b45b..dec42b53b03f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -93,8 +93,6 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { description = "Maximum size of the cluster") private Long maxSize; - // TODO : Get api keys too! - ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml index 608326eaa44c..924840661ecd 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml @@ -276,6 +276,61 @@ write-files: kubectl delete deployment -n kube-system cluster-autoscaler fi + - path: /opt/bin/deploy-cloudstack-secret + permissions: 0700 + owner: root:root + content: | + #! /bin/bash + function usage() { + cat << USAGE + Usage: ./deploy-cloudstack-secret [OPTIONS]... + Enables autoscaling for the kubernetes cluster. + Arguments: + -u, --url string ID of the cluster + -k, --key string Enables autoscaling + -s, --secret string Disables autoscaling + Other arguments: + -h, --help Display this help message and exit + Examples: + ./deploy-cloudstack-secret -u http://localhost:8080 -k abcd -s efgh + USAGE + exit 0 + } + API_URL="" + API_KEY="" + SECRET_KEY="" + while [ -n "$1" ]; do + case "$1" in + -h | --help) + usage + ;; + -u | --url) + API_URL=$2 + shift 2 + ;; + -k | --key) + API_KEY=$2 + shift 2 + ;; + -s | --secret) + SECRET_KEY=$2 + shift 2 + ;; + -*|*) + echo "ERROR: no such option $1. 
-h or --help for help" + exit 1 + ;; + esac + done + cat > /opt/autoscaler/cloud-config < /opt/autoscaler/cloud-config < /opt/autoscaler/cloud-config < Date: Fri, 23 Oct 2020 10:42:43 +0530 Subject: [PATCH 010/117] Cleanup --- .../cluster/KubernetesClusterManagerImpl.java | 7 +++---- ...rnetesClusterResourceModifierActionWorker.java | 15 +++++++-------- .../KubernetesClusterScaleWorker.java | 12 +++++++----- .../KubernetesClusterStartWorker.java | 4 ---- .../cluster/ScaleKubernetesClusterCmd.java | 4 ++-- .../src/main/resources/conf/k8s-master-add.yml | 9 ++++----- .../src/main/resources/conf/k8s-master.yml | 9 ++++----- .../src/main/resources/conf/k8s-node.yml | 9 ++++----- scripts/util/create-kubernetes-binaries-iso.sh | 2 +- 9 files changed, 32 insertions(+), 39 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index e79bdf6b0f0d..22ee03d99ed4 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -901,10 +901,10 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd throw new InvalidParameterValueException("autoscaling requires minsize and maxsize to be passed"); } if (minSize < 1) { - throw new InvalidParameterValueException("minsize must be more than 1"); + throw new InvalidParameterValueException("minsize must be at least than 1"); } if (maxSize <= minSize) { - throw new InvalidParameterValueException("maxsize must be greater than minsize"); + throw new InvalidParameterValueException("maxsize must be greater than or equal to minsize"); } } @@ -1285,9 +1285,8 @@ private String[] getServiceUserKeys() { UserAccount kubeadmin = accountService.getActiveUserAccount(username, caller.getDomainId()); String[] keys = null; if (kubeadmin == null) { - User kube = userDao.persist(new UserVO(caller.getAccountId(), username, UUID.randomUUID().toString(), "kube", "admin", "kubeadmin", + User kube = userDao.persist(new UserVO(caller.getAccountId(), username, UUID.randomUUID().toString(), caller.getAccountName(), KUBEADMIN_ACCOUNT_NAME, "kubeadmin", null, UUID.randomUUID().toString(), User.Source.UNKNOWN)); - // User kube = accountService.createUser(username, "password", "kube", "admin", "kubeadmin", null, caller.getAccountName(), caller.getDomainId(), null); keys = accountService.createApiKeyAndSecretKey(kube.getId()); } else { String apiKey = kubeadmin.getApiKey(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 3036dda7bfcf..2733821faf3f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -553,11 +553,11 @@ protected boolean createSecret(String[] keys) { } protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final 
Long memory, - final Long size, final Long serviceOfferingId, final Boolean autoscale, final Long minSize, final Long maxSize) { + final Long size, final Long serviceOfferingId, final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) { return Transaction.execute(new TransactionCallback() { @Override public KubernetesClusterVO doInTransaction(TransactionStatus status) { - KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(); + KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId()); if (cores != null) { updatedCluster.setCores(cores); } @@ -570,19 +570,18 @@ public KubernetesClusterVO doInTransaction(TransactionStatus status) { if (serviceOfferingId != null) { updatedCluster.setServiceOfferingId(serviceOfferingId); } - if (autoscale != null) { - updatedCluster.setAutoscalingEnabled(autoscale.booleanValue()); + if (autoscaleEnabled != null) { + updatedCluster.setAutoscalingEnabled(autoscaleEnabled.booleanValue()); } updatedCluster.setMinSize(minSize); updatedCluster.setMaxSize(maxSize); - kubernetesClusterDao.update(kubernetesCluster.getId(), updatedCluster); - return updatedCluster; + return kubernetesClusterDao.persist(updatedCluster); } }); } - private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscale, final Long minSize, final Long maxSize) throws CloudRuntimeException { - KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscale, minSize, maxSize); + private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) throws CloudRuntimeException { + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscaleEnabled, minSize, maxSize); if (kubernetesClusterVO == null) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index dc37989ad14c..80211d3749e8 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -166,12 +166,14 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f } private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException { - final ServiceOffering serviceOffering = newServiceOffering; + final ServiceOffering serviceOffering = newServiceOffering == null ? + serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering; final Long serviceOfferingId = newServiceOffering == null ? null : serviceOffering.getId(); - final Long size = newSize; - final Long cores = newServiceOffering == null ? null : serviceOffering.getCpu() * size; - final Long memory = newServiceOffering == null ? 
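// Worked example for the recomputation introduced just below: the cluster's cores and memory totals
// are derived from the service offering multiplied by the total node count (workers plus control-plane
// nodes). The offering size and node counts here are illustrative, not taken from this patch.

public class ClusterCapacitySketch {
    public static void main(String[] args) {
        int offeringCpu = 2;          // CPUs per node in the service offering
        int offeringRamMb = 2048;     // RAM per node in MB
        long masterNodeCount = 1;
        Long newSize = 3L;            // requested worker count passed to the scale API

        long size = newSize + masterNodeCount;          // 4 nodes in total
        long cores = (long) offeringCpu * size;         // 8 cores recorded on the cluster row
        long memoryMb = (long) offeringRamMb * size;    // 8192 MB recorded on the cluster row
        System.out.printf("nodes=%d cores=%d memoryMb=%d%n", size, cores, memoryMb);
    }
}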
null : serviceOffering.getRamSize() * size; - KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId, null, null, null); + final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getMasterNodeCount()); + final long cores = serviceOffering.getCpu() * size; + final long memory = serviceOffering.getRamSize() * size; + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId, + kubernetesCluster.getAutoscalingEnabled(), kubernetesCluster.getMinSize(), kubernetesCluster.getMaxSize()); if (kubernetesClusterVO == null) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index bcd5722034c7..89b2feb1b317 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -145,7 +145,6 @@ private String getKubernetesMasterConfig(final String masterIp, final String ser final String clusterToken = "{{ k8s_master.cluster.token }}"; final String clusterInitArgsKey = "{{ k8s_master.cluster.initargs }}"; final String ejectIsoKey = "{{ k8s.eject.iso }}"; - final String clusterId = "{{ k8s_cluster_id }}"; final List addresses = new ArrayList<>(); addresses.add(masterIp); if (!serverIp.equals(masterIp)) { @@ -181,7 +180,6 @@ private String getKubernetesMasterConfig(final String masterIp, final String ser initArgs += String.format(" --kubernetes-version=%s", getKubernetesClusterVersion().getSemanticVersion()); k8sMasterConfig = k8sMasterConfig.replace(clusterInitArgsKey, initArgs); k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); - k8sMasterConfig = k8sMasterConfig.replace(clusterId, this.kubernetesCluster.getUuid()); return k8sMasterConfig; } @@ -231,7 +229,6 @@ private String getKubernetesAdditionalMasterConfig(final String joinIp, final bo final String sshPubKey = "{{ k8s.ssh.pub.key }}"; final String clusterHACertificateKey = "{{ k8s_master.cluster.ha.certificate.key }}"; final String ejectIsoKey = "{{ k8s.eject.iso }}"; - final String clusterId = "{{ k8s_cluster_id }}"; String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; String sshKeyPair = kubernetesCluster.getKeyPair(); if (!Strings.isNullOrEmpty(sshKeyPair)) { @@ -245,7 +242,6 @@ private String getKubernetesAdditionalMasterConfig(final String joinIp, final bo k8sMasterConfig = k8sMasterConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); k8sMasterConfig = k8sMasterConfig.replace(clusterHACertificateKey, KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); - k8sMasterConfig = k8sMasterConfig.replace(clusterId, this.kubernetesCluster.getUuid()); return k8sMasterConfig; } diff --git 
a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java index dec42b53b03f..11b74441bbde 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -86,11 +86,11 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { private Boolean isAutoscalingEnabled; @Parameter(name=ApiConstants.MIN_SIZE, type = CommandType.LONG, - description = "Minimum size of the cluster") + description = "Minimum number of worker nodes in the cluster") private Long minSize; @Parameter(name=ApiConstants.MAX_SIZE, type = CommandType.LONG, - description = "Maximum size of the cluster") + description = "Maximum number of worker nodes in the cluster") private Long maxSize; ///////////////////////////////////////////////////// diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml index 924840661ecd..bda3aefbe1e6 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml @@ -128,7 +128,6 @@ write-files: if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then mkdir -p /opt/autoscaler cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml - sed 's//{{ k8s_cluster_id }}/g' /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler.yaml fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then @@ -265,7 +264,7 @@ write-files: esac done if [ $ENABLE == "true" ] ; then - if [ -e /opt/autoscaler/autoscaler.yaml ]; then + if [ -e /opt/autoscaler/autoscaler_tmpl.yaml ]; then sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml kubectl apply -f /opt/autoscaler/autoscaler_now.yaml exit 0 @@ -322,14 +321,14 @@ write-files: ;; esac done - cat > /opt/autoscaler/cloud-config < /tmp/cloud-config </{{ k8s_cluster_id }}/g' /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler.yaml fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then @@ -322,7 +321,7 @@ write-files: esac done if [ $ENABLE == "true" ] ; then - if [ -e /opt/autoscaler/autoscaler.yaml ]; then + if [ -e /opt/autoscaler/autoscaler_tmpl.yaml ]; then sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml kubectl apply -f /opt/autoscaler/autoscaler_now.yaml exit 0 @@ -379,14 +378,14 @@ write-files: ;; esac done - cat > /opt/autoscaler/cloud-config < /tmp/cloud-config </{{ k8s_cluster_id }}/g' /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler.yaml fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then @@ -265,7 +264,7 @@ write-files: esac done if [ $ENABLE == "true" ] ; then - if [ -e /opt/autoscaler/autoscaler.yaml ]; then + if [ -e 
/opt/autoscaler/autoscaler_tmpl.yaml ]; then sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml kubectl apply -f /opt/autoscaler/autoscaler_now.yaml exit 0 @@ -322,14 +321,14 @@ write-files: ;; esac done - cat > /opt/autoscaler/cloud-config < /tmp/cloud-config < Date: Sun, 25 Oct 2020 20:07:08 +0530 Subject: [PATCH 011/117] Adding smoke tests --- .../smoke/test_kubernetes_clusters.py | 93 ++++++++++++++++++- tools/marvin/marvin/config/test_data.py | 2 +- 2 files changed, 91 insertions(+), 4 deletions(-) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index f2f0471cfbf4..a4b9412b82ff 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -30,6 +30,7 @@ deleteKubernetesCluster, upgradeKubernetesCluster, scaleKubernetesCluster, + getKubernetesClusterConfig, destroyVirtualMachine, deleteNetwork) from marvin.cloudstackException import CloudstackAPIException @@ -47,7 +48,8 @@ from nose.plugins.attrib import attr from marvin.lib.decoratorGenerators import skipTestIf -import time +from kubernetes import client, config +import time, io, yaml _multiprocess_shared_ = True @@ -359,7 +361,7 @@ def test_02_invalid_upgrade_kubernetes_cluster(self): try: k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1.id) - self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % kubernetes_version_1.id) + self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kubernetes_version_1.id) self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. Must be an error.") except Exception as e: @@ -543,6 +545,36 @@ def test_09_delete_kubernetes_ha_cluster(self): return + @attr(tags=["advanced", "smoke"], required_hardware="true") + @skipTestIf("hypervisorNotSupported") + def test_10_deploy_and_autoscale_kubernetes_cluster(self): + """Test to deploy a new Kubernetes cluster and check for failure while tying to autoscale it + + # Validate the following: + # 1. scaleKubernetesCluster should return valid info for the cluster when it is autoscaled + # 2. 
cluster-autoscaler pod should be running + """ + if self.setup_failed == True: + self.fail("Setup incomplete") + global k8s_cluster + k8s_cluster = self.getValidKubernetesCluster(1, 1, True) + + self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) + + try: + k8s_cluster = self.autoscaleKubernetesCluster(k8s_cluster.id, 1, 2) + self.verifyKubernetesClusterAutocale(k8s_cluster, 1, 2) + + up = self.waitForAutoscalerPodInRunningState(k8s_cluster.id) + self.assertTrue(up, "Autoscaler pod failed to run") + self.debug("Kubernetes cluster with ID: %s has autoscaler running" % k8s_cluster.id) + self.deleteKubernetesClusterAndVerify(k8s_cluster.id) + except Exception as e: + self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) + self.fail("Failed to autoscale Kubernetes cluster due to: %s" % e) + + return + def listKubernetesCluster(self, cluster_id = None): listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd() if cluster_id != None: @@ -601,12 +633,52 @@ def scaleKubernetesCluster(self, cluster_id, size): response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd) return response - def getValidKubernetesCluster(self, size=1, master_nodes=1): + def autoscaleKubernetesCluster(self, cluster_id, minsize, maxsize): + scaleKubernetesClusterCmd = scaleKubernetesCluster.scaleKubernetesClusterCmd() + scaleKubernetesClusterCmd.id = cluster_id + scaleKubernetesClusterCmd.autoscalingenabled = True + scaleKubernetesClusterCmd.minsize = minsize + scaleKubernetesClusterCmd.maxsize = maxsize + response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd) + return response + + def fetchKubernetesClusterConfig(self, cluster_id): + getKubernetesClusterConfigCmd = getKubernetesClusterConfig.getKubernetesClusterConfigCmd() + getKubernetesClusterConfigCmd.id = cluster_id + response = self.apiclient.getKubernetesClusterConfig(getKubernetesClusterConfigCmd) + return response + + def waitForAutoscalerPodInRunningState(self, cluster_id, retries=5, interval=60): + k8s_config = self.fetchKubernetesClusterConfig(cluster_id) + cfg = io.StringIO(k8s_config.configdata) + cfg = yaml.load(cfg) + # Adding this so we don't get certificate exceptions + cfg['clusters'][0]['cluster']['insecure-skip-tls-verify']=True + config.load_kube_config_from_dict(cfg) + v1 = client.CoreV1Api() + + while retries > 0: + time.sleep(interval) + pods = v1.list_pod_for_all_namespaces(watch=False, label_selector="app=cluster-autoscaler").items + if len(pods) == 0 : + self.debug("Autoscaler pod still not up") + continue + pod = pods[0] + if pod.status.phase == 'Running' : + self.debug("Autoscaler pod %s up and running!" % pod.metadata.name) + return True + self.debug("Autoscaler pod %s up but not running on retry %d. 
State is : %s" %(pod.metadata.name, retries, pod.status.phase)) + retries = retries - 1 + return False + + def getValidKubernetesCluster(self, size=1, master_nodes=1, autoscaling=False): cluster = k8s_cluster version = self.kubernetes_version_2 if master_nodes != 1: version = self.kubernetes_version_3 valid = True + if autoscaling: + version = self.kubernetes_version_4 if cluster == None: valid = False self.debug("No existing cluster available, k8s_cluster: %s" % cluster) @@ -715,6 +787,21 @@ def verifyKubernetesClusterScale(self, cluster_response, size=1, master_nodes=1) self.verifyKubernetesClusterState(cluster_response, 'Running') self.verifyKubernetesClusterSize(cluster_response, size, master_nodes) + def verifyKubernetesClusterAutocale(self, cluster_response, minsize, maxsize): + """Check if Kubernetes cluster state and node sizes are valid after upgrade""" + + self.verifyKubernetesClusterState(cluster_response, 'Running') + self.assertEqual( + cluster_response.minsize, + minsize, + "Check KubernetesCluster minsize {}, {}".format(cluster_response.minsize, minsize) + ) + self.assertEqual( + cluster_response.maxsize, + maxsize, + "Check KubernetesCluster maxsize {}, {}".format(cluster_response.maxsize, maxsize) + ) + def stopAndVerifyKubernetesCluster(self, cluster_id): """Stop Kubernetes cluster and check if it is really stopped""" diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index 436c656509d9..106d5a51debd 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -2036,7 +2036,7 @@ }, "1.16.3": { "semanticversion": "1.16.3", - "url": "http://download.cloudstack.org/cks/setup-1.16.3.iso", + "url": "http://10.5.1.128/cks/binaries-iso/as-1.16.3.iso", "mincpunumber": 2, "minmemory": 2048 } From 3e145dd075915b60d78b14847608ab355c0884a0 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 27 Oct 2020 12:49:35 +0530 Subject: [PATCH 012/117] Allowing for upgrades --- .../cluster/KubernetesClusterManagerImpl.java | 24 ++-- .../KubernetesClusterActionWorker.java | 72 +++++++++- ...esClusterResourceModifierActionWorker.java | 39 +----- .../KubernetesClusterScaleWorker.java | 3 + .../KubernetesClusterStartWorker.java | 17 ++- .../KubernetesClusterUpgradeWorker.java | 27 ++-- .../version/KubernetesVersionManagerImpl.java | 4 +- .../main/resources/conf/k8s-master-add.yml | 125 ------------------ .../src/main/resources/conf/k8s-master.yml | 125 ------------------ .../src/main/resources/conf/k8s-node.yml | 125 ------------------ .../resources/script/autoscale-kube-cluster | 65 +++++++++ .../resources/script/deploy-cloudstack-secret | 51 +++++++ .../src/main/resources/script/try-autoscaling | 6 + .../resources/script/upgrade-kubernetes.sh | 6 + .../util/create-kubernetes-binaries-iso.sh | 1 + 15 files changed, 251 insertions(+), 439 deletions(-) create mode 100755 plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster create mode 100755 plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret create mode 100755 plugins/integrations/kubernetes-service/src/main/resources/script/try-autoscaling mode change 100644 => 100755 plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 22ee03d99ed4..39e7bd5ae708 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -1144,7 +1144,8 @@ public boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate } if (onCreate) { // Start for Kubernetes cluster in 'Created' state - String[] keys = getServiceUserKeys(); + Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId()); + String[] keys = getServiceUserKeys(owner); KubernetesClusterStartWorker startWorker = new KubernetesClusterStartWorker(kubernetesCluster, this, keys); startWorker = ComponentContext.inject(startWorker); @@ -1279,14 +1280,16 @@ public KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesC return response; } - private String[] getServiceUserKeys() { - Account caller = CallContext.current().getCallingAccount(); - String username = caller.getAccountName() + "-" + KUBEADMIN_ACCOUNT_NAME; - UserAccount kubeadmin = accountService.getActiveUserAccount(username, caller.getDomainId()); + private String[] getServiceUserKeys(Account owner) { + if (owner == null) { + owner = CallContext.current().getCallingAccount(); + } + String username = owner.getAccountName() + "-" + KUBEADMIN_ACCOUNT_NAME; + UserAccount kubeadmin = accountService.getActiveUserAccount(username, owner.getDomainId()); String[] keys = null; if (kubeadmin == null) { - User kube = userDao.persist(new UserVO(caller.getAccountId(), username, UUID.randomUUID().toString(), caller.getAccountName(), KUBEADMIN_ACCOUNT_NAME, "kubeadmin", - null, UUID.randomUUID().toString(), User.Source.UNKNOWN)); + User kube = userDao.persist(new UserVO(owner.getAccountId(), username, UUID.randomUUID().toString(), owner.getAccountName(), + KUBEADMIN_ACCOUNT_NAME, "kubeadmin", null, UUID.randomUUID().toString(), User.Source.UNKNOWN)); keys = accountService.createApiKeyAndSecretKey(kube.getId()); } else { String apiKey = kubeadmin.getApiKey(); @@ -1326,9 +1329,12 @@ public boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } validateKubernetesClusterUpgradeParameters(cmd); + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId()); + Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId()); + String[] keys = getServiceUserKeys(owner); KubernetesClusterUpgradeWorker upgradeWorker = - new KubernetesClusterUpgradeWorker(kubernetesClusterDao.findById(cmd.getId()), - kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), this); + new KubernetesClusterUpgradeWorker(kubernetesClusterDao.findById(cmd.getId()), + kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), this, keys); upgradeWorker = ComponentContext.inject(upgradeWorker); return upgradeWorker.upgradeCluster(); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index c391e218169b..8cda5cda7b15 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -17,7 +17,9 @@ package com.cloud.kubernetes.cluster.actionworkers; +import java.io.BufferedWriter; import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -28,6 +30,7 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.ca.CAManager; +import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.commons.collections.CollectionUtils; @@ -70,6 +73,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; +import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.UserVmService; import com.cloud.vm.dao.UserVmDao; import com.google.common.base.Strings; @@ -127,6 +131,11 @@ public class KubernetesClusterActionWorker { protected String publicIpAddress; protected int sshPort; + protected final String autoscaleScriptFilename = "autoscale-kube-cluster"; + protected final String deploySecretsScriptFilename = "deploy-cloudstack-secret"; + protected File autoscaleScriptFile; + protected File deploySecretsScriptFile; + protected KubernetesClusterActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) { this.kubernetesCluster = kubernetesCluster; this.kubernetesClusterDao = clusterManager.kubernetesClusterDao; @@ -178,7 +187,7 @@ protected void logMessage(final Level logLevel, final String message, final Exce } protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final String message, final KubernetesCluster kubernetesCluster, - final List clusterVMs, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException { + final List clusterVMs, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException { logMessage(logLevel, message, e); stateTransitTo(kubernetesCluster.getId(), event); detachIsoKubernetesVMs(clusterVMs); @@ -384,4 +393,65 @@ protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Eve return false; } } + + protected boolean createCloudStackSecret(String[] keys) { + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + List clusterVMs = getKubernetesClusterVMMaps(); + if (CollectionUtils.isEmpty(clusterVMs)) { + return false; + } + + final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); + + String hostName = userVm.getHostName(); + if (!Strings.isNullOrEmpty(hostName)) { + hostName = hostName.toLowerCase(); + } + + try { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo /opt/bin/deploy-cloudstack-secret -u '%s' -k '%s' -s '%s'", + ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]), + 10000, 10000, 60000); + return result.first(); + } catch (Exception e) { + String msg = String.format("Failed to add cloudstack-secret to Kubernetes cluster: %s", 
kubernetesCluster.getName()); + LOGGER.warn(msg, e); + } + return true; + } + + protected File retrieveScriptFile(String filename) { + File file = null; + try { + String data = readResourceFile("/script/" + filename); + file = File.createTempFile(filename, ".sh"); + BufferedWriter writer = new BufferedWriter(new FileWriter(file)); + writer.write(data); + writer.close(); + } catch (IOException e) { + logAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to prepare upgrade script %s", kubernetesCluster.getUuid(), filename), e); + } + return file; + } + + protected void copyAutoscalerScripts(final UserVm vm, final int index) throws Exception { + // TODO: This might be a bad way to do it. Better to fetch the pf rules and try + int nodeSshPort = sshPort == 22 ? sshPort : sshPort + index; + String nodeAddress = (index > 0 && sshPort == 22) ? vm.getPrivateIpAddress() : publicIpAddress; + SshHelper.scpTo(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + "~/", autoscaleScriptFile.getAbsolutePath(), "0755"); + SshHelper.scpTo(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + "~/", deploySecretsScriptFile.getAbsolutePath(), "0755"); + String cmdStr = String.format("sudo mv ~/%s /opt/bin/%s", autoscaleScriptFile.getName(), autoscaleScriptFilename); + SshHelper.sshExecute(publicIpAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + cmdStr, 10000, 10000, 10 * 60 * 1000); + cmdStr = String.format("sudo mv ~/%s /opt/bin/%s", deploySecretsScriptFile.getName(), deploySecretsScriptFilename); + SshHelper.sshExecute(publicIpAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + cmdStr, 10000, 10000, 10 * 60 * 1000); + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 2733821faf3f..b6e6caca4fe0 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd; import org.apache.cloudstack.api.command.user.vm.StartVMCmd; -import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Level; @@ -521,37 +520,6 @@ protected String getKubernetesClusterNodeNamePrefix() { return prefix; } - protected boolean createSecret(String[] keys) { - File pkFile = getManagementServerSshPublicKeyFile(); - Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); - publicIpAddress = publicIpSshPort.first(); - sshPort = publicIpSshPort.second(); - - List clusterVMs = getKubernetesClusterVMMaps(); - if (CollectionUtils.isEmpty(clusterVMs)) { - return false; - } - - final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); - - String hostName = userVm.getHostName(); - if (!Strings.isNullOrEmpty(hostName)) { - hostName = hostName.toLowerCase(); - } - - try { - Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, 
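// Sketch of the node-address selection used by copyAutoscalerScripts above, for both network modes.
// The 2222 start port is only an example of CLUSTER_NODES_DEFAULT_START_SSH_PORT and the IPs are
// placeholders, not values from this patch.

public class NodeEndpointSketch {
    static String endpointFor(int index, String publicIp, String privateIp, int sshPort) {
        int nodeSshPort = sshPort == 22 ? sshPort : sshPort + index;              // port-forwarded clusters step the port per node
        String nodeAddress = (index > 0 && sshPort == 22) ? privateIp : publicIp; // direct-SSH clusters hop via private IPs after node 0
        return nodeAddress + ":" + nodeSshPort;
    }

    public static void main(String[] args) {
        // NAT'd / isolated network: every node is reached on the public IP, one forwarded port per node
        System.out.println(endpointFor(0, "203.0.113.10", "10.1.1.10", 2222));  // 203.0.113.10:2222
        System.out.println(endpointFor(1, "203.0.113.10", "10.1.1.11", 2222));  // 203.0.113.10:2223
        // Shared / direct network: node 0 via the public address on port 22, later nodes via private IPs
        System.out.println(endpointFor(0, "203.0.113.10", "10.1.1.10", 22));    // 203.0.113.10:22
        System.out.println(endpointFor(1, "203.0.113.10", "10.1.1.11", 22));    // 10.1.1.11:22
    }
}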
null, String.format("sudo /opt/bin/deploy-cloudstack-secret -u '%s' -k '%s' -s '%s'", - ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]), - 10000, 10000, 60000); - return result.first(); - } catch (Exception e) { - String msg = String.format("Failed to add cloudstack-secret to Kubernetes cluster: %s", kubernetesCluster.getName()); - LOGGER.warn(msg, e); - } - return true; - } - protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, final Long size, final Long serviceOfferingId, final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) { return Transaction.execute(new TransactionCallback() { @@ -613,11 +581,12 @@ protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { try { if (enable) { + String data = readResourceFile("/script/try-autoscaling"); Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", kubernetesCluster.getUuid(), maxSize, minSize), + pkFile, null, String.format(data, kubernetesCluster.getUuid(), maxSize, minSize), 10000, 10000, 60000); if (!result.first()) { - return false; + throw new CloudRuntimeException(result.second()); } updateKubernetesClusterEntry(true, minSize, maxSize); } else { @@ -625,7 +594,7 @@ protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"), 10000, 10000, 60000); if (!result.first()) { - return false; + throw new CloudRuntimeException(result.second()); } updateKubernetesClusterEntry(false, null, null); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 80211d3749e8..e198a1498de4 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -134,6 +134,7 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f throw new ManagementServerException(String.format("No source NAT IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); } + // TODO : Remove indiv rules per vm // Remove existing SSH firewall rules FirewallRule firewallRule = removeSshFirewallRule(publicIp); if (firewallRule == null) { @@ -141,6 +142,7 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f } int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd(); final int scaledTotalNodeCount = clusterSize == null ? 
(int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getMasterNodeCount()); + // TODO : Provision indiv rules per vm // Provision new SSH firewall rules try { provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1); @@ -159,6 +161,7 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f } try { + // TODO : Provision indiv rules per vm provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, existingFirewallRuleSourcePortEnd + 1); } catch (ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 89b2feb1b317..fc1caca25721 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -395,6 +395,7 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl } try { + // TODO : Create indiv fw rules for each vm int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMs.size() - 1; provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); if (LOGGER.isInfoEnabled()) { @@ -570,13 +571,27 @@ public boolean startKubernetesClusterOnCreate() { if (!isKubernetesClusterDashboardServiceRunning(true, startTimeoutTime)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } - if (!createSecret(keys)) { + retrieveScriptFiles(); + for (int i = 0; i < clusterVMs.size(); ++i) { + UserVm vm = clusterVMs.get(i); + try { + copyAutoscalerScripts(vm, i); + } catch (Exception e) { + throw new CloudRuntimeException(e); + } + } + if (!createCloudStackSecret(keys)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); return true; } + private void retrieveScriptFiles() { + autoscaleScriptFile = retrieveScriptFile(autoscaleScriptFilename); + deploySecretsScriptFile = retrieveScriptFile(deploySecretsScriptFilename); + } + public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { init(); if (LOGGER.isInfoEnabled()) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index 957adea6f774..b8c43c1b018f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -17,10 +17,7 @@ package com.cloud.kubernetes.cluster.actionworkers; -import java.io.BufferedWriter; import java.io.File; -import java.io.FileWriter; -import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -45,26 +42,24 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke private List clusterVMs = new ArrayList<>(); private KubernetesSupportedVersion upgradeVersion; + private final String upgradeScriptFilename = "upgrade-kubernetes.sh"; private File upgradeScriptFile; private long upgradeTimeoutTime; + private String[] keys; public KubernetesClusterUpgradeWorker(final KubernetesCluster kubernetesCluster, final KubernetesSupportedVersion upgradeVersion, - final KubernetesClusterManagerImpl clusterManager) { + final KubernetesClusterManagerImpl clusterManager, + final String[] keys) { super(kubernetesCluster, clusterManager); this.upgradeVersion = upgradeVersion; + this.keys = keys; } - private void retrieveUpgradeScriptFile() { - try { - String upgradeScriptData = readResourceFile("/script/upgrade-kubernetes.sh"); - upgradeScriptFile = File.createTempFile("upgrade-kuberntes", ".sh"); - BufferedWriter upgradeScriptFileWriter = new BufferedWriter(new FileWriter(upgradeScriptFile)); - upgradeScriptFileWriter.write(upgradeScriptData); - upgradeScriptFileWriter.close(); - } catch (IOException e) { - logAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to prepare upgrade script", kubernetesCluster.getName()), e); - } + private void retrieveScriptFiles() { + upgradeScriptFile = retrieveScriptFile(upgradeScriptFilename); + autoscaleScriptFile = retrieveScriptFile(autoscaleScriptFilename); + deploySecretsScriptFile = retrieveScriptFile(deploySecretsScriptFilename); } private Pair runInstallScriptOnVM(final UserVm vm, final int index) throws Exception { @@ -110,6 +105,8 @@ private void upgradeKubernetesClusterNodes() { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, upgrade action timed out", kubernetesCluster.getName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } try { + copyAutoscalerScripts(vm, i); + createCloudStackSecret(keys); result = runInstallScriptOnVM(vm, i); } catch (Exception e) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to upgrade Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); @@ -151,7 +148,7 @@ public boolean upgradeCluster() throws CloudRuntimeException { if (CollectionUtils.isEmpty(clusterVMs)) { logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster : %s, unable to retrieve VMs for cluster", kubernetesCluster.getName())); } - retrieveUpgradeScriptFile(); + retrieveScriptFiles(); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.UpgradeRequested); attachIsoKubernetesVMs(clusterVMs, upgradeVersion); upgradeKubernetesClusterNodes(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index 72a1c3794871..cd1ee4834634 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -214,9 +214,7 @@ public static int compareSemanticVersions(String v1, String v2) throws IllegalAr */ public static boolean canUpgradeKubernetesVersion(final String currentVersion, final String upgradeVersion) throws IllegalArgumentException { int versionDiff = compareSemanticVersions(upgradeVersion, currentVersion); - if (versionDiff == 0) { - throw new IllegalArgumentException(String.format("Kubernetes clusters can not be upgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion)); - } else if (versionDiff < 0) { + if (versionDiff < 0) { throw new IllegalArgumentException(String.format("Kubernetes clusters can not be downgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion)); } String[] thisParts = currentVersion.split("\\."); diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml index bda3aefbe1e6..2654854e4605 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml @@ -205,131 +205,6 @@ write-files: sudo touch /home/core/success echo "true" > /home/core/success - - path: /opt/bin/autoscale-kube-cluster - permissions: 0700 - owner: root:root - content: | - #! /bin/bash - function usage() { - cat << USAGE - Usage: ./autoscale-kube-cluster [OPTIONS]... - Enables autoscaling for the kubernetes cluster. - Arguments: - -i, --id string ID of the cluster - -e, --enable Enables autoscaling - -d, --disable Disables autoscaling - -M, --maxsize number Maximum size of the cluster - -m, --minsize number Minimum size of the cluster - Other arguments: - -h, --help Display this help message and exit - Examples: - ./autoscale-kube-cluster -e -M 3 -m 1 - ./autoscale-kube-cluster -d - USAGE - exit 0 - } - ID="" - ENABLE="" - MINSIZE="" - MAXSIZE="" - while [ -n "$1" ]; do - case "$1" in - -h | --help) - usage - ;; - -i | --id) - ID=$2 - shift 2 - ;; - -e | --enable) - ENABLE="true" - shift 1 - ;; - -d | --enable) - ENABLE="false" - shift 1 - ;; - -M | --maxsize) - MAXSIZE=$2 - shift 2 - ;; - -m | --minsize) - MINSIZE=$2 - shift 2 - ;; - -*|*) - echo "ERROR: no such option $1. -h or --help for help" - exit 1 - ;; - esac - done - if [ $ENABLE == "true" ] ; then - if [ -e /opt/autoscaler/autoscaler_tmpl.yaml ]; then - sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml - kubectl apply -f /opt/autoscaler/autoscaler_now.yaml - exit 0 - fi - echo "Cluster does not support automated autoscaling. Please deploy the autoscaler manually" - exit 1 - else - kubectl delete deployment -n kube-system cluster-autoscaler - fi - - - path: /opt/bin/deploy-cloudstack-secret - permissions: 0700 - owner: root:root - content: | - #! /bin/bash - function usage() { - cat << USAGE - Usage: ./deploy-cloudstack-secret [OPTIONS]... - Enables autoscaling for the kubernetes cluster. 
- Arguments: - -u, --url string ID of the cluster - -k, --key string Enables autoscaling - -s, --secret string Disables autoscaling - Other arguments: - -h, --help Display this help message and exit - Examples: - ./deploy-cloudstack-secret -u http://localhost:8080 -k abcd -s efgh - USAGE - exit 0 - } - API_URL="" - API_KEY="" - SECRET_KEY="" - while [ -n "$1" ]; do - case "$1" in - -h | --help) - usage - ;; - -u | --url) - API_URL=$2 - shift 2 - ;; - -k | --key) - API_KEY=$2 - shift 2 - ;; - -s | --secret) - SECRET_KEY=$2 - shift 2 - ;; - -*|*) - echo "ERROR: no such option $1. -h or --help for help" - exit 1 - ;; - esac - done - cat > /tmp/cloud-config < /home/core/success - - path: /opt/bin/autoscale-kube-cluster - permissions: 0700 - owner: root:root - content: | - #! /bin/bash - function usage() { - cat << USAGE - Usage: ./autoscale-kube-cluster [OPTIONS]... - Enables autoscaling for the kubernetes cluster. - Arguments: - -i, --id string ID of the cluster - -e, --enable Enables autoscaling - -d, --disable Disables autoscaling - -M, --maxsize number Maximum size of the cluster - -m, --minsize number Minimum size of the cluster - Other arguments: - -h, --help Display this help message and exit - Examples: - ./autoscale-kube-cluster -e -M 3 -m 1 - ./autoscale-kube-cluster -d - USAGE - exit 0 - } - ID="" - ENABLE="" - MINSIZE="" - MAXSIZE="" - while [ -n "$1" ]; do - case "$1" in - -h | --help) - usage - ;; - -i | --id) - ID=$2 - shift 2 - ;; - -e | --enable) - ENABLE="true" - shift 1 - ;; - -d | --enable) - ENABLE="false" - shift 1 - ;; - -M | --maxsize) - MAXSIZE=$2 - shift 2 - ;; - -m | --minsize) - MINSIZE=$2 - shift 2 - ;; - -*|*) - echo "ERROR: no such option $1. -h or --help for help" - exit 1 - ;; - esac - done - if [ $ENABLE == "true" ] ; then - if [ -e /opt/autoscaler/autoscaler_tmpl.yaml ]; then - sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml - kubectl apply -f /opt/autoscaler/autoscaler_now.yaml - exit 0 - fi - echo "Cluster does not support automated autoscaling. Please deploy the autoscaler manually" - exit 1 - else - kubectl delete deployment -n kube-system cluster-autoscaler - fi - - - path: /opt/bin/deploy-cloudstack-secret - permissions: 0700 - owner: root:root - content: | - #! /bin/bash - function usage() { - cat << USAGE - Usage: ./deploy-cloudstack-secret [OPTIONS]... - Enables autoscaling for the kubernetes cluster. - Arguments: - -u, --url string ID of the cluster - -k, --key string Enables autoscaling - -s, --secret string Disables autoscaling - Other arguments: - -h, --help Display this help message and exit - Examples: - ./deploy-cloudstack-secret -u http://localhost:8080 -k abcd -s efgh - USAGE - exit 0 - } - API_URL="" - API_KEY="" - SECRET_KEY="" - while [ -n "$1" ]; do - case "$1" in - -h | --help) - usage - ;; - -u | --url) - API_URL=$2 - shift 2 - ;; - -k | --key) - API_KEY=$2 - shift 2 - ;; - -s | --secret) - SECRET_KEY=$2 - shift 2 - ;; - -*|*) - echo "ERROR: no such option $1. -h or --help for help" - exit 1 - ;; - esac - done - cat > /tmp/cloud-config < /home/core/success - - path: /opt/bin/autoscale-kube-cluster - permissions: 0700 - owner: root:root - content: | - #! /bin/bash - function usage() { - cat << USAGE - Usage: ./autoscale-kube-cluster [OPTIONS]... - Enables autoscaling for the kubernetes cluster. 
- Arguments: - -i, --id string ID of the cluster - -e, --enable Enables autoscaling - -d, --disable Disables autoscaling - -M, --maxsize number Maximum size of the cluster - -m, --minsize number Minimum size of the cluster - Other arguments: - -h, --help Display this help message and exit - Examples: - ./autoscale-kube-cluster -e -M 3 -m 1 - ./autoscale-kube-cluster -d - USAGE - exit 0 - } - ID="" - ENABLE="" - MINSIZE="" - MAXSIZE="" - while [ -n "$1" ]; do - case "$1" in - -h | --help) - usage - ;; - -i | --id) - ID=$2 - shift 2 - ;; - -e | --enable) - ENABLE="true" - shift 1 - ;; - -d | --enable) - ENABLE="false" - shift 1 - ;; - -M | --maxsize) - MAXSIZE=$2 - shift 2 - ;; - -m | --minsize) - MINSIZE=$2 - shift 2 - ;; - -*|*) - echo "ERROR: no such option $1. -h or --help for help" - exit 1 - ;; - esac - done - if [ $ENABLE == "true" ] ; then - if [ -e /opt/autoscaler/autoscaler_tmpl.yaml ]; then - sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml - kubectl apply -f /opt/autoscaler/autoscaler_now.yaml - exit 0 - fi - echo "Cluster does not support automated autoscaling. Please deploy the autoscaler manually" - exit 1 - else - kubectl delete deployment -n kube-system cluster-autoscaler - fi - - - path: /opt/bin/deploy-cloudstack-secret - permissions: 0700 - owner: root:root - content: | - #! /bin/bash - function usage() { - cat << USAGE - Usage: ./deploy-cloudstack-secret [OPTIONS]... - Enables autoscaling for the kubernetes cluster. - Arguments: - -u, --url string ID of the cluster - -k, --key string Enables autoscaling - -s, --secret string Disables autoscaling - Other arguments: - -h, --help Display this help message and exit - Examples: - ./deploy-cloudstack-secret -u http://localhost:8080 -k abcd -s efgh - USAGE - exit 0 - } - API_URL="" - API_KEY="" - SECRET_KEY="" - while [ -n "$1" ]; do - case "$1" in - -h | --help) - usage - ;; - -u | --url) - API_URL=$2 - shift 2 - ;; - -k | --key) - API_KEY=$2 - shift 2 - ;; - -s | --secret) - SECRET_KEY=$2 - shift 2 - ;; - -*|*) - echo "ERROR: no such option $1. -h or --help for help" - exit 1 - ;; - esac - done - cat > /tmp/cloud-config </$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml + kubectl apply -f /opt/autoscaler/autoscaler_now.yaml + exit 0 + fi + echo "Cluster does not support automated autoscaling. Please upgrade your cluster to a supported version or deploy the autoscaler manually" + exit 1 +else + kubectl delete deployment -n kube-system cluster-autoscaler +fi diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret new file mode 100755 index 000000000000..e734e0436bbf --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret @@ -0,0 +1,51 @@ +#! /bin/bash +function usage() { + cat << USAGE +Usage: ./deploy-cloudstack-secret [OPTIONS]... +Enables autoscaling for the kubernetes cluster. 
+Arguments: + -u, --url string URL of the CloudStack management server API + -k, --key string API key of the account + -s, --secret string Secret key of the account +Other arguments: + -h, --help Display this help message and exit +Examples: + ./deploy-cloudstack-secret -u http://localhost:8080 -k abcd -s efgh +USAGE + exit 0 +} +API_URL="" +API_KEY="" +SECRET_KEY="" +while [ -n "$1" ]; do + case "$1" in + -h | --help) + usage + ;; + -u | --url) + API_URL=$2 + shift 2 + ;; + -k | --key) + API_KEY=$2 + shift 2 + ;; + -s | --secret) + SECRET_KEY=$2 + shift 2 + ;; + -*|*) + echo "ERROR: no such option $1. -h or --help for help" + exit 1 + ;; + esac +done +cat > /tmp/cloud-config < Date: Wed, 28 Oct 2020 14:05:40 +0530 Subject: [PATCH 013/117] Ensure there's always a node during scale down --- .../kubernetes/cluster/KubernetesClusterManagerImpl.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 39e7bd5ae708..f5fd3a514379 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -922,6 +922,12 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd if (mastersToRemove >= kubernetesCluster.getMasterNodeCount()) { throw new InvalidParameterValueException("Can not remove all masters from a cluster"); } + + // Ensure there's always a node + long nodesToRemove = nodes.stream().filter(x -> !x.isMaster()).count(); + if (nodesToRemove >= kubernetesCluster.getNodeCount()) { + throw new InvalidParameterValueException("Can not remove all nodes from a cluster"); + } } ServiceOffering serviceOffering = null; From 1f23c7eb3fd8fa54344f8c8afb23574d043b2baa Mon Sep 17 00:00:00 2001 From: davidjumani Date: Wed, 28 Oct 2020 14:05:55 +0530 Subject: [PATCH 014/117] Tweaking nw rules while scaling --- ...esClusterResourceModifierActionWorker.java | 11 +++++++ .../KubernetesClusterScaleWorker.java | 29 +++++++------------ .../KubernetesClusterStartWorker.java | 10 +++---- 3 files changed, 27 insertions(+), 23 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index b6e6caca4fe0..7d7c60aaa988 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -491,6 +491,17 @@ protected void removePortForwardingRules(final IpAddress publicIp, final Network } } + protected void removePortForwardingRules(final IpAddress publicIp, final Network network, final Account account, int startPort, int endPort) + throws ResourceUnavailableException { + List pfRules = portForwardingRulesDao.listByNetwork(network.getId()); + for (PortForwardingRuleVO pfRule : pfRules) { + if (startPort <= pfRule.getSourcePortStart() && pfRule.getSourcePortStart() <= endPort) {
portForwardingRulesDao.remove(pfRule.getId()); + } + } + rulesService.applyPortForwardingRules(publicIp.getId(), account); + } + protected void removeLoadBalancingRule(final IpAddress publicIp, final Network network, final Account account, final int port) throws ResourceUnavailableException { List rules = loadBalancerDao.listByIpAddress(publicIp.getId()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index e198a1498de4..df38612ff699 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.stream.Collectors; import javax.inject.Inject; @@ -116,13 +117,12 @@ private void logTransitStateToFailedIfNeededAndThrow(final Level logLevel, final /** * Scale network rules for an existing Kubernetes cluster while scaling it * Open up firewall for SSH access from port NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n. - * Also remove port forwarding rules for removed virtual machines and create port-forwarding rule + * Also remove port forwarding rules for all virtual machines and re-create port-forwarding rule * to forward public IP traffic to all node VMs' private IP. * @param clusterVMIds - * @param removedVMIds * @throws ManagementServerException */ - private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, final List removedVMIds) throws ManagementServerException { + private void scaleKubernetesClusterNetworkRules(final List clusterVMIds) throws ManagementServerException { if (!Network.GuestType.Isolated.equals(network.getGuestType())) { if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); @@ -134,7 +134,6 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f throw new ManagementServerException(String.format("No source NAT IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); } - // TODO : Remove indiv rules per vm // Remove existing SSH firewall rules FirewallRule firewallRule = removeSshFirewallRule(publicIp); if (firewallRule == null) { @@ -142,10 +141,10 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f } int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd(); final int scaledTotalNodeCount = clusterSize == null ? 
(int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getMasterNodeCount()); - // TODO : Provision indiv rules per vm + int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; // Provision new SSH firewall rules try { - provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1); + provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster ID: %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1, publicIp.getAddress().addr(), kubernetesCluster.getUuid())); @@ -155,14 +154,13 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f } try { - removePortForwardingRules(publicIp, network, owner, removedVMIds); + removePortForwardingRules(publicIp, network, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, existingFirewallRuleSourcePortEnd); } catch (ResourceUnavailableException e) { throw new ManagementServerException(String.format("Failed to remove SSH port forwarding rules for removed VMs for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } try { - // TODO : Provision indiv rules per vm - provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, existingFirewallRuleSourcePortEnd + 1); + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT); } catch (ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } @@ -306,15 +304,12 @@ private void scaleKubernetesClusterOffering() throws CloudRuntimeException { } private void removeNodesFromCluster(List vmMaps) throws CloudRuntimeException { - List removedVmIds = new ArrayList<>(); for (KubernetesClusterVmMapVO vmMapVO : vmMaps) { UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); LOGGER.info(String.format("Remving vm : %s", userVM.getUuid())); if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, failed to remove Kubernetes node: %s running on VM : %s", kubernetesCluster.getName(), userVM.getHostName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } - // For removing port-forwarding network rules - removedVmIds.add(userVM.getId()); try { UserVm vm = userVmService.destroyVm(userVM.getId(), true); if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { @@ -335,7 +330,8 @@ private void removeNodesFromCluster(List vmMaps) throw // Scale network rules to update firewall rule try { - scaleKubernetesClusterNetworkRules(null, removedVmIds); + List clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + scaleKubernetesClusterNetworkRules(clusterVMIds); } catch (ManagementServerException e) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); } @@ -361,18 
+357,15 @@ private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRun stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested); } List clusterVMs = new ArrayList<>(); - List clusterVMIds = new ArrayList<>(); try { clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to provision node VM in the cluster", kubernetesCluster.getName()), e); } attachIsoKubernetesVMs(clusterVMs); - for (UserVm vm : clusterVMs) { - clusterVMIds.add(vm.getId()); - } try { - scaleKubernetesClusterNetworkRules(clusterVMIds, null); + List clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + scaleKubernetesClusterNetworkRules(clusterVMIds); } catch (ManagementServerException e) { logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), e); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index fc1caca25721..65cd4af06adf 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -383,7 +383,7 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl throw new ManagementServerException(String.format("No source NAT IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); } - + // Firewall rule for API access for master node VMs try { provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); if (LOGGER.isInfoEnabled()) { @@ -394,8 +394,8 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } + // Firewall rule for SSH access on each node VM try { - // TODO : Create indiv fw rules for each vm int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMs.size() - 1; provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); if (LOGGER.isInfoEnabled()) { @@ -575,10 +575,10 @@ public boolean startKubernetesClusterOnCreate() { for (int i = 0; i < clusterVMs.size(); ++i) { UserVm vm = clusterVMs.get(i); try { - copyAutoscalerScripts(vm, i); - } catch (Exception e) { + copyAutoscalerScripts(vm, i); + } catch (Exception e) { throw new CloudRuntimeException(e); - } + } } if (!createCloudStackSecret(keys)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); From fa4d064980c04028733095899bc0fdc5b5b1f638 Mon Sep 17 00:00:00 2001 From: davidjumani Date:
Wed, 28 Oct 2020 14:43:55 +0530 Subject: [PATCH 015/117] Cleanup --- .../cluster/KubernetesClusterManagerImpl.java | 18 ++++++++++-------- .../KubernetesClusterActionWorker.java | 3 +-- .../KubernetesClusterScaleWorker.java | 2 +- .../KubernetesClusterStartWorker.java | 10 ++++------ .../response/KubernetesClusterResponse.java | 19 +++---------------- .../util/create-kubernetes-binaries-iso.sh | 2 +- 6 files changed, 20 insertions(+), 34 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index f5fd3a514379..63ca907049a4 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -864,6 +864,10 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd final Long minSize = cmd.getMinSize(); final Long maxSize = cmd.getMaxSize(); + if (kubernetesClusterId == null || kubernetesClusterId < 1L) { + throw new InvalidParameterValueException("Invalid Kubernetes cluster ID"); + } + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId); if (kubernetesCluster == null || kubernetesCluster.getRemoved() != null) { throw new InvalidParameterValueException("Invalid Kubernetes cluster ID"); @@ -883,13 +887,13 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd final KubernetesSupportedVersion clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); if (clusterVersion == null) { - throw new CloudRuntimeException(String.format("Invalid Kubernetes version associated with Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + throw new CloudRuntimeException(String.format("Invalid Kubernetes version associated with Kubernetes cluster : %s", kubernetesCluster.getName())); } if (!(kubernetesCluster.getState().equals(KubernetesCluster.State.Created) || kubernetesCluster.getState().equals(KubernetesCluster.State.Running) || kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped))) { - throw new PermissionDeniedException(String.format("Kubernetes cluster ID: %s is in %s state", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString())); + throw new PermissionDeniedException(String.format("Kubernetes cluster %s is in %s state and can not be scaled", kubernetesCluster.getName(), kubernetesCluster.getState().toString())); } if (isAutoscalingEnabled != null && isAutoscalingEnabled) { @@ -1148,19 +1152,17 @@ public boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate if (zone == null) { logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName())); } + KubernetesClusterStartWorker startWorker = + new KubernetesClusterStartWorker(kubernetesCluster, this); + startWorker = ComponentContext.inject(startWorker); if (onCreate) { // Start for Kubernetes cluster in 'Created' state Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId()); String[] keys = getServiceUserKeys(owner); - KubernetesClusterStartWorker startWorker = - new KubernetesClusterStartWorker(kubernetesCluster, this, keys); - startWorker = ComponentContext.inject(startWorker); + 
startWorker.setKeys(keys); return startWorker.startKubernetesClusterOnCreate(); } else { // Start for Kubernetes cluster in 'Stopped' state. Resources are already provisioned, just need to be started - KubernetesClusterStartWorker startWorker = - new KubernetesClusterStartWorker(kubernetesCluster, this); - startWorker = ComponentContext.inject(startWorker); return startWorker.startStoppedKubernetesCluster(); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 8cda5cda7b15..b3577a7945fd 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -422,7 +422,7 @@ protected boolean createCloudStackSecret(String[] keys) { String msg = String.format("Failed to add cloudstack-secret to Kubernetes cluster: %s", kubernetesCluster.getName()); LOGGER.warn(msg, e); } - return true; + return false; } protected File retrieveScriptFile(String filename) { @@ -440,7 +440,6 @@ protected File retrieveScriptFile(String filename) { } protected void copyAutoscalerScripts(final UserVm vm, final int index) throws Exception { - // TODO: This might be a bad way to do it. Better to fetch the pf rules and try int nodeSshPort = sshPort == 22 ? sshPort : sshPort + index; String nodeAddress = (index > 0 && sshPort == 22) ? vm.getPrivateIpAddress() : publicIpAddress; SshHelper.scpTo(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index df38612ff699..380e3eddec8e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -306,7 +306,7 @@ private void scaleKubernetesClusterOffering() throws CloudRuntimeException { private void removeNodesFromCluster(List vmMaps) throws CloudRuntimeException { for (KubernetesClusterVmMapVO vmMapVO : vmMaps) { UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); - LOGGER.info(String.format("Remving vm : %s", userVM.getUuid())); + LOGGER.info(String.format("Removing vm : %s from cluster %s", userVM.getDisplayName(), kubernetesCluster.getName())); if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, failed to remove Kubernetes node: %s running on VM : %s", kubernetesCluster.getName(), userVM.getHostName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 
65cd4af06adf..8c2c2d023b57 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -83,12 +83,6 @@ public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, f super(kubernetesCluster, clusterManager); } - public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager, - final String[]keys) { - super(kubernetesCluster, clusterManager); - this.keys = keys; - } - public KubernetesSupportedVersion getKubernetesClusterVersion() { if (kubernetesClusterVersion == null) { kubernetesClusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); @@ -668,4 +662,8 @@ public boolean reconcileAlertCluster() { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); return true; } + + public void setKeys(String[] keys) { + this.keys = keys; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java index 8358ba061a3e..f8a054333829 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java @@ -140,6 +140,7 @@ public class KubernetesClusterResponse extends BaseResponse implements Controlle @SerializedName(ApiConstants.IP_ADDRESS_ID) @Param(description = "Public IP Address ID of the cluster") private String ipAddressId; + @SerializedName(ApiConstants.AUTOSCALING_ENABLED) @Param(description = "Whether autoscaling is enabled for the cluster") private boolean isAutoscalingEnabled; @@ -152,14 +153,6 @@ public class KubernetesClusterResponse extends BaseResponse implements Controlle @Param(description = "Maximum size of the cluster") private Long maxSize; - @SerializedName(ApiConstants.IP_ADDRESS) - @Param(description = "Public IP Address of the cluster") - private String ipAddress; - - @SerializedName(ApiConstants.IP_ADDRESS_ID) - @Param(description = "Public IP Address ID of the cluster") - private String ipAddressId; - public KubernetesClusterResponse() { } @@ -358,6 +351,8 @@ public void setIpAddress(String ipAddress) { public void setIpAddressId(String ipAddressId) { this.ipAddressId = ipAddressId; + } + public void setAutoscalingEnabled(boolean isAutoscalingEnabled) { this.isAutoscalingEnabled = isAutoscalingEnabled; } @@ -369,12 +364,4 @@ public void setMinSize(Long minSize) { public void setMaxSize(Long maxSize) { this.maxSize = maxSize; } - - public void setIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - } - - public void setIpAddressId(String ipAddressId) { - this.ipAddressId = ipAddressId; - } } diff --git a/scripts/util/create-kubernetes-binaries-iso.sh b/scripts/util/create-kubernetes-binaries-iso.sh index ab40ea18d965..8fec56e03de4 100755 --- a/scripts/util/create-kubernetes-binaries-iso.sh +++ b/scripts/util/create-kubernetes-binaries-iso.sh @@ -75,7 +75,7 @@ dashboard_conf_file="${working_dir}/dashboard.yaml" curl -sSL ${DASHBORAD_CONFIG_URL} -o ${dashboard_conf_file} # TODO : Change the url once merged 
-AUTOSCALER_URL="https://github.com/shapeblue/autoscaler/blob/add-acs/cluster-autoscaler/cloudprovider/cloudstack/examples/cluster-autoscaler-standard.yaml" +AUTOSCALER_URL="https://raw.githubusercontent.com/shapeblue/autoscaler/add-acs/cluster-autoscaler/cloudprovider/cloudstack/examples/cluster-autoscaler-standard.yaml" echo "Downloading kubernetes cluster autoscaler ${AUTOSCALER_URL}" autoscaler_conf_file="${working_dir}/autoscaler.yaml" curl -sSL ${AUTOSCALER_URL} -o ${autoscaler_conf_file} From cd136db317840ae4730366ad518b7b907dbcdf8b Mon Sep 17 00:00:00 2001 From: davidjumani Date: Thu, 29 Oct 2020 10:11:10 +0530 Subject: [PATCH 016/117] Enhancing auytoscaler deployment --- .../cluster/KubernetesClusterManagerImpl.java | 34 +++++++++++----- .../KubernetesClusterActionWorker.java | 32 ++++++++------- ...esClusterResourceModifierActionWorker.java | 40 ++++++++++--------- .../KubernetesClusterScaleWorker.java | 20 +++++----- .../KubernetesClusterStartWorker.java | 20 +++------- .../KubernetesClusterUpgradeWorker.java | 21 +++++----- .../version/KubernetesVersionManagerImpl.java | 6 +++ .../resources/script/autoscale-kube-cluster | 15 ++++++- .../src/main/resources/script/try-autoscaling | 6 --- 9 files changed, 106 insertions(+), 88 deletions(-) delete mode 100755 plugins/integrations/kubernetes-service/src/main/resources/script/try-autoscaling diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 63ca907049a4..85fb0e549d29 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -648,10 +648,7 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes } } response.setVirtualMachines(vmResponses); - Boolean isAutoscalingEnabled = kubernetesCluster.getAutoscalingEnabled(); - if (isAutoscalingEnabled != null) { - response.setAutoscalingEnabled(isAutoscalingEnabled); - } + response.setAutoscalingEnabled(kubernetesCluster.getAutoscalingEnabled()); response.setMinSize(kubernetesCluster.getMinSize()); response.setMaxSize(kubernetesCluster.getMaxSize()); return response; @@ -879,7 +876,7 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd } if (serviceOfferingId == null && clusterSize == null && nodeIds == null && isAutoscalingEnabled == null) { - throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled, either a new service offering or a new cluster size or nodeids to be removed or autoscaling must be passed", kubernetesCluster.getUuid())); + throw new InvalidParameterValueException(String.format("Kubernetes cluster %s cannot be scaled, either service offering or cluster size or nodeids to be removed or autoscaling must be passed", kubernetesCluster.getName())); } Account caller = CallContext.current().getCallingAccount(); @@ -901,6 +898,11 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd throw new InvalidParameterValueException("autoscaling can not be passed along with nodeids or clustersize or service offering"); } + if (!KubernetesVersionManagerImpl.versionSupportsAutoscaling(clusterVersion)) { + throw new InvalidParameterValueException(String.format("Autoscaling 
requires Kubernetes Version %s or above", + KubernetesVersionManagerImpl.MINIMUN_AUTOSCALER_SUPPORTED_VERSION )); + } + if (minSize == null || maxSize == null) { throw new InvalidParameterValueException("autoscaling requires minsize and maxsize to be passed"); } @@ -926,7 +928,6 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd if (mastersToRemove >= kubernetesCluster.getMasterNodeCount()) { throw new InvalidParameterValueException("Can not remove all masters from a cluster"); } - // Ensure there's always a node long nodesToRemove = nodes.stream().filter(x -> !x.isMaster()).count(); if (nodesToRemove >= kubernetesCluster.getNodeCount()) { @@ -1014,8 +1015,8 @@ private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesCluster } KubernetesSupportedVersionVO clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); if (clusterVersion == null || clusterVersion.getRemoved() != null) { - throw new InvalidParameterValueException(String.format("Invalid Kubernetes version associated with cluster ID: %s", - kubernetesCluster.getUuid())); + throw new InvalidParameterValueException(String.format("Invalid Kubernetes version associated with cluster : %s", + kubernetesCluster.getName())); } final ServiceOffering serviceOffering = serviceOfferingDao.findByIdIncludingRemoved(kubernetesCluster.getServiceOfferingId()); if (serviceOffering == null) { @@ -1061,7 +1062,7 @@ public KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } - // Need this for cloudstack-kubernetes-provider && autoscaler + // Need this for the autoscaler && cloudstack-kubernetes-provider String csUrl = ApiServiceConfiguration.ApiServletPath.value(); if (csUrl == null || csUrl.contains("localhost")) { throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); @@ -1318,6 +1319,9 @@ public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws Clou } validateKubernetesClusterScaleParameters(cmd); + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId()); + Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId()); + String[] keys = getServiceUserKeys(owner); KubernetesClusterScaleWorker scaleWorker = new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()), serviceOfferingDao.findById(cmd.getServiceOfferingId()), @@ -1327,6 +1331,7 @@ public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws Clou cmd.getMinSize(), cmd.getMaxSize(), this); + scaleWorker.setKeys(keys); scaleWorker = ComponentContext.inject(scaleWorker); return scaleWorker.scaleCluster(); } @@ -1336,6 +1341,13 @@ public boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws if (!KubernetesServiceEnabled.value()) { logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } + + // Need this for the autoscaler && cloudstack-kubernetes-provider + String csUrl = ApiServiceConfiguration.ApiServletPath.value(); + if (csUrl == null || csUrl.contains("localhost")) { + throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); + } + validateKubernetesClusterUpgradeParameters(cmd); KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId()); Account owner = 
accountService.getActiveAccountById(kubernetesCluster.getAccountId()); @@ -1546,8 +1558,8 @@ boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualM // check cluster is running at desired capacity include master nodes as well if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster ID: %s while expected %d VMs to be in state: %s", - clusterVMs.size(), kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount(), state.toString())); + LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster %s while expected %d VMs to be in state: %s", + clusterVMs.size(), kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount(), state.toString())); } return false; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index b3577a7945fd..60f28b1d3747 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -136,6 +136,8 @@ public class KubernetesClusterActionWorker { protected File autoscaleScriptFile; protected File deploySecretsScriptFile; + protected String[] keys; + protected KubernetesClusterActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) { this.kubernetesCluster = kubernetesCluster; this.kubernetesClusterDao = clusterManager.kubernetesClusterDao; @@ -405,13 +407,6 @@ protected boolean createCloudStackSecret(String[] keys) { return false; } - final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); - - String hostName = userVm.getHostName(); - if (!Strings.isNullOrEmpty(hostName)) { - hostName = hostName.toLowerCase(); - } - try { Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, pkFile, null, String.format("sudo /opt/bin/deploy-cloudstack-secret -u '%s' -k '%s' -s '%s'", @@ -434,23 +429,30 @@ protected File retrieveScriptFile(String filename) { writer.write(data); writer.close(); } catch (IOException e) { - logAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to prepare upgrade script %s", kubernetesCluster.getUuid(), filename), e); + logAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster %s, unable to prepare upgrade script %s", kubernetesCluster.getName(), filename), e); } return file; } - protected void copyAutoscalerScripts(final UserVm vm, final int index) throws Exception { - int nodeSshPort = sshPort == 22 ? sshPort : sshPort + index; - String nodeAddress = (index > 0 && sshPort == 22) ? 
vm.getPrivateIpAddress() : publicIpAddress; - SshHelper.scpTo(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + protected void retrieveScriptFiles() { + autoscaleScriptFile = retrieveScriptFile(autoscaleScriptFilename); + deploySecretsScriptFile = retrieveScriptFile(deploySecretsScriptFilename); + } + + protected void copyAutoscalerScripts(String nodeAddress, final int sshPort) throws Exception { + SshHelper.scpTo(nodeAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, "~/", autoscaleScriptFile.getAbsolutePath(), "0755"); - SshHelper.scpTo(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + SshHelper.scpTo(nodeAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, "~/", deploySecretsScriptFile.getAbsolutePath(), "0755"); String cmdStr = String.format("sudo mv ~/%s /opt/bin/%s", autoscaleScriptFile.getName(), autoscaleScriptFilename); - SshHelper.sshExecute(publicIpAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, cmdStr, 10000, 10000, 10 * 60 * 1000); cmdStr = String.format("sudo mv ~/%s /opt/bin/%s", deploySecretsScriptFile.getName(), deploySecretsScriptFilename); - SshHelper.sshExecute(publicIpAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, cmdStr, 10000, 10000, 10 * 60 * 1000); } + + public void setKeys(String[] keys) { + this.keys = keys; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 7d7c60aaa988..77db075eb0b9 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -562,8 +562,8 @@ public KubernetesClusterVO doInTransaction(TransactionStatus status) { private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) throws CloudRuntimeException { KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscaleEnabled, minSize, maxSize); if (kubernetesClusterVO == null) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", - kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster", + kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } return kubernetesClusterVO; } @@ -578,26 +578,30 @@ protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { publicIpAddress = publicIpSshPort.first(); sshPort = publicIpSshPort.second(); - List clusterVMs = getKubernetesClusterVMMaps(); - if (CollectionUtils.isEmpty(clusterVMs)) { - return false; - } - - final UserVm userVm = userVmDao.findById(clusterVMs.get(0).getVmId()); - - String hostName = userVm.getHostName(); - if (!Strings.isNullOrEmpty(hostName)) { - 
hostName = hostName.toLowerCase(); - } - try { if (enable) { - String data = readResourceFile("/script/try-autoscaling"); + String command = String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", + kubernetesCluster.getUuid(), maxSize, minSize); Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format(data, kubernetesCluster.getUuid(), maxSize, minSize), - 10000, 10000, 60000); + pkFile, null, command, 10000, 10000, 60000); + + // Maybe the file isn't present. Try and copy it if (!result.first()) { - throw new CloudRuntimeException(result.second()); + logMessage(Level.INFO, "Autoscaling files missing. Adding them now", null); + retrieveScriptFiles(); + copyAutoscalerScripts(publicIpAddress, sshPort); + + if (!createCloudStackSecret(keys)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", + kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + + // If at first you don't succeed ... + result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, command, 10000, 10000, 60000); + if (!result.first()) { + throw new CloudRuntimeException(result.second()); + } } updateKubernetesClusterEntry(true, minSize, maxSize); } else { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 380e3eddec8e..b558cc2db808 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -146,8 +146,8 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds) t try { provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster ID: %s", - CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1, publicIp.getAddress().addr(), kubernetesCluster.getUuid())); + LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster %s", + CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1, publicIp.getAddress().addr(), kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { throw new ManagementServerException(String.format("Failed to activate SSH firewall rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -176,8 +176,8 @@ private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, fin KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId, kubernetesCluster.getAutoscalingEnabled(), kubernetesCluster.getMinSize(), kubernetesCluster.getMaxSize()); if (kubernetesClusterVO == null) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", - kubernetesCluster.getUuid()), kubernetesCluster.getId(), 
KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster", + kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } return kubernetesClusterVO; } @@ -313,18 +313,16 @@ private void removeNodesFromCluster(List vmMaps) throw try { UserVm vm = userVmService.destroyVm(userVM.getId(), true); if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to expunge VM '%s'." - , kubernetesCluster.getUuid() - , vm.getInstanceName()), - kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to expunge VM '%s'." + , kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } catch (ResourceUnavailableException e) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to remove VM ID: %s" - , kubernetesCluster.getUuid() , userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to remove VM ID: %s", + kubernetesCluster.getName() , userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); } kubernetesClusterVmMapDao.expunge(vmMapVO.getId()); if (System.currentTimeMillis() > scaleTimeoutTime) { - logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster : %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 8c2c2d023b57..583d215509c5 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -77,7 +77,6 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker { private KubernetesSupportedVersion kubernetesClusterVersion; - private String[] keys; public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) { super(kubernetesCluster, clusterManager); @@ -381,8 +380,8 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl try { provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster ID: %s", - CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getUuid())); + 
LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster %s", + CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -567,25 +566,20 @@ public boolean startKubernetesClusterOnCreate() { } retrieveScriptFiles(); for (int i = 0; i < clusterVMs.size(); ++i) { - UserVm vm = clusterVMs.get(i); try { - copyAutoscalerScripts(vm, i); + copyAutoscalerScripts(publicIpAddress, CLUSTER_NODES_DEFAULT_START_SSH_PORT + i); } catch (Exception e) { throw new CloudRuntimeException(e); } } if (!createCloudStackSecret(keys)) { - logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", + kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); return true; } - private void retrieveScriptFiles() { - autoscaleScriptFile = retrieveScriptFile(autoscaleScriptFilename); - deploySecretsScriptFile = retrieveScriptFile(deploySecretsScriptFilename); - } - public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { init(); if (LOGGER.isInfoEnabled()) { @@ -662,8 +656,4 @@ public boolean reconcileAlertCluster() { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); return true; } - - public void setKeys(String[] keys) { - this.keys = keys; - } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index b8c43c1b018f..9bc142de788f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -45,7 +45,6 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke private final String upgradeScriptFilename = "upgrade-kubernetes.sh"; private File upgradeScriptFile; private long upgradeTimeoutTime; - private String[] keys; public KubernetesClusterUpgradeWorker(final KubernetesCluster kubernetesCluster, final KubernetesSupportedVersion upgradeVersion, @@ -56,10 +55,9 @@ public KubernetesClusterUpgradeWorker(final KubernetesCluster kubernetesCluster, this.keys = keys; } - private void retrieveScriptFiles() { + protected void retrieveScriptFiles() { + super.retrieveScriptFiles(); upgradeScriptFile = retrieveScriptFile(upgradeScriptFilename); - autoscaleScriptFile = retrieveScriptFile(autoscaleScriptFilename); - deploySecretsScriptFile = retrieveScriptFile(deploySecretsScriptFilename); } private Pair runInstallScriptOnVM(final UserVm vm, final int index) throws Exception { @@ -88,8 +86,8 @@ private void upgradeKubernetesClusterNodes() { } result = null; if (LOGGER.isInfoEnabled()) 
{ - LOGGER.info(String.format("Upgrading node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s", - vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); + LOGGER.info(String.format("Upgrading node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", + vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); } try { result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, @@ -105,8 +103,11 @@ private void upgradeKubernetesClusterNodes() { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, upgrade action timed out", kubernetesCluster.getName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } try { - copyAutoscalerScripts(vm, i); - createCloudStackSecret(keys); + copyAutoscalerScripts(publicIpAddress, CLUSTER_NODES_DEFAULT_START_SSH_PORT + i); + if (!createCloudStackSecret(keys)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", + kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); + } result = runInstallScriptOnVM(vm, i); } catch (Exception e) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to upgrade Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); @@ -126,8 +127,8 @@ private void upgradeKubernetesClusterNodes() { } } if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Successfully upgraded node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s", - vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); + LOGGER.info(String.format("Successfully upgraded node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", + vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); } } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index cd1ee4834634..41ef095c7ab6 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -78,6 +78,8 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne @Inject private TemplateApiService templateService; + public static final String MINIMUN_AUTOSCALER_SUPPORTED_VERSION = "1.15.0"; + private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion) { KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse(); response.setObjectName("kubernetessupportedversion"); @@ -202,6 +204,10 @@ public static int compareSemanticVersions(String v1, String v2) throws IllegalAr return 0; } + public static boolean versionSupportsAutoscaling(KubernetesSupportedVersion clusterVersion) { + return 
clusterVersion.getSemanticVersion().compareTo(MINIMUN_AUTOSCALER_SUPPORTED_VERSION) >= 0; + } + /** * Returns a boolean value whether Kubernetes cluster upgrade can be carried from a given currentVersion to upgradeVersion * Kubernetes clusters can only be upgraded from one MINOR version to the next MINOR version, or between PATCH versions of the same MINOR. diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster b/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster index 2549a33bb567..8d234b394617 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster @@ -57,9 +57,20 @@ if [ $ENABLE == "true" ] ; then sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml kubectl apply -f /opt/autoscaler/autoscaler_now.yaml exit 0 + else + mkdir -p /opt/autoscaler + AUTOSCALER_URL="https://raw.githubusercontent.com/shapeblue/autoscaler/add-acs/cluster-autoscaler/cloudprovider/cloudstack/examples/cluster-autoscaler-standard.yaml" + autoscaler_conf_file="/opt/autoscaler/autoscaler_tmpl.yaml" + curl -sSL ${AUTOSCALER_URL} -o ${autoscaler_conf_file} + if [ $? -ne 0 ]; then + echo "Unable to connect to the internet to download the autoscaler deployment and image" + exit 1 + else + sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml + kubectl apply -f /opt/autoscaler/autoscaler_now.yaml + exit 0 + fi fi - echo "Cluster does not support automated autoscaling. Please upgrade your cluster to a supported version or deploy the autoscaler manually" - exit 1 else kubectl delete deployment -n kube-system cluster-autoscaler fi diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/try-autoscaling b/plugins/integrations/kubernetes-service/src/main/resources/script/try-autoscaling deleted file mode 100755 index 20a9450bf60a..000000000000 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/try-autoscaling +++ /dev/null @@ -1,6 +0,0 @@ -if [ -e /opt/bin/autoscale-kube-cluster ]; then - sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d - exit 0 -fi -echo 'Cluster does not support automated autoscaling. 
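One caveat on the versionSupportsAutoscaling() check above: String.compareTo() orders semantic versions lexicographically, so a hypothetical "1.9.0" sorts above "1.15.0" and would be reported as supporting autoscaling. The same class already exposes compareSemanticVersions(), so a safer sketch (assuming that helper keeps its current contract of throwing on unparsable input) would be:

    public static boolean versionSupportsAutoscaling(KubernetesSupportedVersion clusterVersion) {
        try {
            // compare major/minor/patch numerically rather than lexicographically
            return compareSemanticVersions(clusterVersion.getSemanticVersion(),
                    MINIMUN_AUTOSCALER_SUPPORTED_VERSION) >= 0;
        } catch (IllegalArgumentException e) {
            return false; // unparsable version string: treat as not supporting autoscaling
        }
    }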
Please upgrade your cluster to a supported version or deploy the autoscaler manually' -exit 1 From 4fa20f4e8c74860de74ab643436e5f8512fc2967 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Thu, 29 Oct 2020 13:13:27 +0530 Subject: [PATCH 017/117] Fixing tests --- test/integration/smoke/test_kubernetes_clusters.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index a4b9412b82ff..d76072522955 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -75,8 +75,13 @@ def setUpClass(cls): cls.kubernetes_version_ids = [] if cls.hypervisorNotSupported == False: - cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient, - name="cloud.kubernetes.service.enabled")[0].value + cls.endpoint_url = Configurations.list(cls.apiclient, name="endpointe.url")[0].value + if "localhost" in cls.endpoint_url: + endpoint_url = "http://%s:%d/client/api " %(cls.mgtSvrDetails["mgtSvrIp"], cls.mgtSvrDetails["port"]) + cls.debug("Setting endpointe.url to %s" %(endpoint_url)) + Configurations.update(cls.apiclient, "endpointe.url", endpoint_url) + + cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient, name="cloud.kubernetes.service.enabled")[0].value if cls.initial_configuration_cks_enabled not in ["true", True]: cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server") Configurations.update(cls.apiclient, From 71d1ee102f5093b75ebcbbe0613c0e4f2b643c20 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Thu, 29 Oct 2020 13:13:42 +0530 Subject: [PATCH 018/117] Refactoring --- .../cluster/KubernetesClusterManagerImpl.java | 25 ++++++++++--------- ...esClusterResourceModifierActionWorker.java | 1 - 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 85fb0e549d29..cb5217274ded 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -654,7 +654,16 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes return response; } + private void validateEndpointUrl() { + String csUrl = ApiServiceConfiguration.ApiServletPath.value(); + if (csUrl == null || csUrl.contains("localhost")) { + throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); + } + } + private void validateKubernetesClusterCreateParameters(final CreateKubernetesClusterCmd cmd) throws CloudRuntimeException { + validateEndpointUrl(); + final String name = cmd.getName(); final Long zoneId = cmd.getZoneId(); final Long kubernetesVersionId = cmd.getKubernetesVersionId(); @@ -903,6 +912,8 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd KubernetesVersionManagerImpl.MINIMUN_AUTOSCALER_SUPPORTED_VERSION )); } + validateEndpointUrl(); + if (minSize == null || maxSize == null) { throw new InvalidParameterValueException("autoscaling requires minsize and maxsize to be passed"); } @@ -985,6 +996,8 @@ private void 
validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd } private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesClusterCmd cmd) { + validateEndpointUrl(); + // Validate parameters final Long kubernetesClusterId = cmd.getId(); final Long upgradeVersionId = cmd.getKubernetesVersionId(); @@ -1062,12 +1075,6 @@ public KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } - // Need this for the autoscaler && cloudstack-kubernetes-provider - String csUrl = ApiServiceConfiguration.ApiServletPath.value(); - if (csUrl == null || csUrl.contains("localhost")) { - throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); - } - validateKubernetesClusterCreateParameters(cmd); final DataCenter zone = dataCenterDao.findById(cmd.getZoneId()); @@ -1342,12 +1349,6 @@ public boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } - // Need this for the autoscaler && cloudstack-kubernetes-provider - String csUrl = ApiServiceConfiguration.ApiServletPath.value(); - if (csUrl == null || csUrl.contains("localhost")) { - throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); - } - validateKubernetesClusterUpgradeParameters(cmd); KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId()); Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 77db075eb0b9..a7c5b4a609e1 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -57,7 +57,6 @@ import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; import com.cloud.kubernetes.cluster.KubernetesClusterVO; -import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil; import com.cloud.network.IpAddress; import com.cloud.network.Network; From 4a08b1e20fbe969ce00d654c0c32ff163d12f361 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Thu, 29 Oct 2020 16:26:53 +0530 Subject: [PATCH 019/117] Adding logs for upgrade failure --- .../actionworkers/KubernetesClusterUpgradeWorker.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index 9bc142de788f..66ab130b4d54 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -113,7 +113,11 @@ private void upgradeKubernetesClusterNodes() { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to upgrade Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); } if (!result.first()) { - logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to upgrade Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); + String message = String.format("Failed to upgrade Kubernetes cluster : %s, unable to upgrade Kubernetes node on VM : %s", + kubernetesCluster.getName(), vm.getDisplayName()); + String messageWithLogs = String.format("%s. Logs :\n%s", message, result.second()); + logMessage(Level.ERROR, messageWithLogs, null); + logTransitStateDetachIsoAndThrow(Level.ERROR, message, kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } if (System.currentTimeMillis() > upgradeTimeoutTime) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, upgrade action timed out", kubernetesCluster.getName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); From 0a67da858cd4a69d12571976558974f79d60dbde Mon Sep 17 00:00:00 2001 From: davidjumani Date: Thu, 29 Oct 2020 17:15:21 +0530 Subject: [PATCH 020/117] More cleanup --- .../cluster/actionworkers/KubernetesClusterActionWorker.java | 5 ----- .../cluster/actionworkers/KubernetesClusterScaleWorker.java | 3 +-- tools/marvin/marvin/config/test_data.py | 2 +- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 60f28b1d3747..66af46cfb7aa 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -402,11 +402,6 @@ protected boolean createCloudStackSecret(String[] keys) { publicIpAddress = publicIpSshPort.first(); sshPort = publicIpSshPort.second(); - List clusterVMs = getKubernetesClusterVMMaps(); - if (CollectionUtils.isEmpty(clusterVMs)) { - return false; - } - try { Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, pkFile, null, String.format("sudo /opt/bin/deploy-cloudstack-secret -u '%s' -k '%s' -s '%s'", diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index b558cc2db808..3401a161de3b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -140,14 +140,13 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds) t throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned"); } int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd(); - final int scaledTotalNodeCount = clusterSize == null ? (int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getMasterNodeCount()); int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; // Provision new SSH firewall rules try { provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster %s", - CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1, publicIp.getAddress().addr(), kubernetesCluster.getName())); + CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { throw new ManagementServerException(String.format("Failed to activate SSH firewall rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index 106d5a51debd..60f0f1cc27d3 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -2036,7 +2036,7 @@ }, "1.16.3": { "semanticversion": "1.16.3", - "url": "http://10.5.1.128/cks/binaries-iso/as-1.16.3.iso", + "url": "http://sbjenkins-stagingrepo.jenkins.lon/cks/binaries-iso/as-1.16.3.iso", "mincpunumber": 2, "minmemory": 2048 } From 0860d1e57e1c6ccf9a2dac1d62af3be083d0a319 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Sun, 1 Nov 2020 12:00:44 +0530 Subject: [PATCH 021/117] Limiting max cluster size to global setting KubernetesMaxClusterSize --- .../cluster/KubernetesClusterManagerImpl.java | 19 ++++++++++++++++--- .../cluster/KubernetesClusterService.java | 6 ++++++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index cb5217274ded..78f11e011182 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -673,6 +673,7 @@ private void validateKubernetesClusterCreateParameters(final CreateKubernetesClu final String sshKeyPair = cmd.getSSHKeyPairName(); final Long masterNodeCount = cmd.getMasterNodes(); final Long clusterSize = cmd.getClusterSize(); + final long totalNodeCount = masterNodeCount + clusterSize; final String dockerRegistryUserName = cmd.getDockerRegistryUserName(); final String dockerRegistryPassword = cmd.getDockerRegistryPassword(); final String dockerRegistryUrl = cmd.getDockerRegistryUrl(); @@ -684,14 +685,20 @@ private void validateKubernetesClusterCreateParameters(final CreateKubernetesClu throw new InvalidParameterValueException("Invalid name for the Kubernetes cluster 
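A quick worked example for the firewall change in the "More cleanup" patch above: the end port is now derived from the actual VM map rather than the requested cluster size, so with CLUSTER_NODES_DEFAULT_START_SSH_PORT = 2222 and three cluster VMs the rule spans 2222 + 3 - 1 = 2224, i.e. ports 2222-2224, which stays correct even when clusterSize is null because only nodes are being removed.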
name:" + name); } - if (masterNodeCount < 1 || masterNodeCount > 100) { + if (masterNodeCount < 1) { throw new InvalidParameterValueException("Invalid cluster master nodes count: " + masterNodeCount); } - if (clusterSize < 1 || clusterSize > 100) { + if (clusterSize < 1) { throw new InvalidParameterValueException("Invalid cluster size: " + clusterSize); } + int maxClusterSize = KubernetesMaxClusterSize.valueIn(owner.getId()); + if (totalNodeCount > maxClusterSize) { + throw new InvalidParameterValueException( + String.format("Maximum cluster size can not exceed %d. Please contact your administrator", maxClusterSize)); + } + DataCenter zone = dataCenterDao.findById(zoneId); if (zone == null) { throw new InvalidParameterValueException("Unable to find zone by ID: " + zoneId); @@ -923,6 +930,11 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd if (maxSize <= minSize) { throw new InvalidParameterValueException("maxsize must be greater than or equal to minsize"); } + int maxClusterSize = KubernetesMaxClusterSize.valueIn(kubernetesCluster.getAccountId()); + if (maxSize + kubernetesCluster.getMasterNodeCount() > maxClusterSize) { + throw new InvalidParameterValueException( + String.format("Maximum cluster size can not exceed %d. Please contact your administrator", maxClusterSize)); + } } if (nodeIds != null) { @@ -1645,7 +1657,8 @@ public ConfigKey[] getConfigKeys() { KubernetesClusterStartTimeout, KubernetesClusterScaleTimeout, KubernetesClusterUpgradeTimeout, - KubernetesClusterExperimentalFeaturesEnabled + KubernetesClusterExperimentalFeaturesEnabled, + KubernetesMaxClusterSize }; } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java index 07939ddb101a..fa560b53bc5b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java @@ -86,6 +86,12 @@ public interface KubernetesClusterService extends PluggableService, Configurable "false", "Indicates whether experimental feature for Kubernetes cluster such as Docker private registry are enabled or not", true); + static final ConfigKey KubernetesMaxClusterSize = new ConfigKey("Advanced", Integer.class, + "cloud.kubernetes.cluster.max.size", + "10", + "Maximum size of the kubernetes cluster.", + true, ConfigKey.Scope.Account); + KubernetesCluster findById(final Long id); From 15ccfbfd76cdeda95a3b045a995dd2f73c9ecf26 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 2 Nov 2020 12:31:47 +0530 Subject: [PATCH 022/117] CKS CoreOS EOL update (#68) * prevent NPE during ssh key reset * Initial Commit - Debian template as replacement for CoreOS * Support for cks nodes to use systemvm template * Allow usage of systemvm template by other accounts during k8s cluster deployment * Refactor tests + allow user to use SystemVM template during scaling * Fix additional master config file * remove redundant lines * Formatted code * temporary checksum update * change guest os id for xenserver * Adding public ip to listKubernetesClusterResponse * Guestos id for vmware * Cleaned up code * Wait for ssh to become available before attaching ISO and template changes * debug logs * commit addresses systemvm iso not getting attached during bootup * Cleanup + set vmware 
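Since the new KubernetesMaxClusterSize setting above is account-scoped, both the create and the autoscaling validations read it with valueIn(accountId) and compare it against the full node count (master nodes plus worker nodes). A minimal sketch of that shared guard, assuming the ConfigKey is registered exactly as declared in KubernetesClusterService (the helper name below is illustrative):

    // Sketch: common cluster-size guard used by the create and autoscale validations
    private void validateClusterSizeLimit(long masterNodeCount, long workerNodeCount, long accountId) {
        int maxClusterSize = KubernetesMaxClusterSize.valueIn(accountId);
        if (masterNodeCount + workerNodeCount > maxClusterSize) {
            throw new InvalidParameterValueException(String.format(
                    "Maximum cluster size can not exceed %d. Please contact your administrator", maxClusterSize));
        }
    }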
systemvm template deploy_as_is to 1 * fix for systemvm template - deploy as is field * code cleanup * Additional details to support deployment on shared network * Support for shared networks Co-authored-by: Pearl Dsilva Co-authored-by: davidjumani --- .../main/java/com/cloud/vm/UserVmService.java | 2 +- .../cloud/vm/VirtualMachineManagerImpl.java | 2 +- .../upgrade/dao/Upgrade41510to41600.java | 12 +++ .../src/main/java/com/cloud/vm/UserVmVO.java | 11 +++ .../META-INF/db/schema-41510to41600.sql | 4 + .../wrapper/LibvirtStartCommandWrapper.java | 31 ++++--- .../cluster/KubernetesClusterManagerImpl.java | 87 ++---------------- .../cluster/KubernetesClusterService.java | 20 ----- .../KubernetesClusterActionWorker.java | 26 +++++- ...esClusterResourceModifierActionWorker.java | 20 ++--- .../KubernetesClusterScaleWorker.java | 10 ++- .../KubernetesClusterStartWorker.java | 12 ++- .../KubernetesClusterUpgradeWorker.java | 2 +- .../cluster/utils/KubernetesClusterUtil.java | 10 +-- .../main/resources/conf/k8s-master-add.yml | 89 +++++++++---------- .../src/main/resources/conf/k8s-master.yml | 83 +++++++++-------- .../src/main/resources/conf/k8s-node.yml | 84 +++++++++-------- .../consoleproxy/ConsoleProxyManagerImpl.java | 1 - .../network/as/AutoScaleManagerImpl.java | 2 +- .../main/java/com/cloud/vm/UserVmManager.java | 4 + .../java/com/cloud/vm/UserVmManagerImpl.java | 69 +++++++++++--- .../debian/opt/cloud/bin/setup/CKSNode.sh | 61 +++++++++++++ .../debian/opt/cloud/bin/setup/bootstrap.sh | 1 + .../opt/cloud/bin/setup/cloud-early-config | 1 - systemvm/debian/opt/cloud/bin/setup/common.sh | 2 +- .../debian/opt/cloud/bin/setup/postinit.sh | 15 +++- .../smoke/test_kubernetes_clusters.py | 59 ------------ .../systemvmtemplate/http/preseed.cfg | 4 +- .../systemvmtemplate/scripts/cleanup.sh | 3 +- .../scripts/configure_conntrack.sh | 2 - .../scripts/configure_systemvm_services.sh | 21 +++++ .../scripts/install_systemvm_packages.sh | 25 ++++-- .../appliance/systemvmtemplate/template.json | 2 +- tools/marvin/marvin/config/test_data.py | 42 +-------- 34 files changed, 422 insertions(+), 397 deletions(-) create mode 100755 systemvm/debian/opt/cloud/bin/setup/CKSNode.sh diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index 56a6dfd25a27..d9a1eb86be7a 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -379,7 +379,7 @@ UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffe String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, - Map templateOvfPropertiesMap) + Map templateOvfPropertiesMap, String type) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException; diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index f1ab9cd1ff95..dd5a280385bc 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ 
-1675,7 +1675,7 @@ protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachinePr final UserVmVO userVm = _userVmDao.findById(vm.getId()); if (vm.getType() == VirtualMachine.Type.User) { - if (userVm != null){ + if (userVm != null) { userVm.setPowerState(PowerState.PowerOff); _userVmDao.update(userVm.getId(), userVm); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index 231d1e9ab638..666ec289290f 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -169,6 +169,7 @@ private void updateSystemVmTemplates(final Connection conn) { LOG.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e); } + updateVMwareSystemvVMTemplateField(conn, NewTemplateNameList.get(Hypervisor.HypervisorType.VMware)); // update template ID of system Vms try (PreparedStatement update_templ_id_pstmt = conn .prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? and removed is NULL");) { @@ -225,6 +226,7 @@ private void updateSystemVmTemplates(final Connection conn) { throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " + hypervisorAndTemplateName.getKey().toString(), e); } + updateVMwareSystemvVMTemplateField(conn, NewTemplateNameList.get(Hypervisor.HypervisorType.VMware)); } } } catch (final SQLException e) { @@ -235,6 +237,16 @@ private void updateSystemVmTemplates(final Connection conn) { LOG.debug("Updating System Vm Template IDs Complete"); } + private void updateVMwareSystemvVMTemplateField(final Connection conn, String templateName) { + try (PreparedStatement update_templ_vmware_pstmt = conn + .prepareStatement("UPDATE `cloud`.`vm_template` SET deploy_as_is = 1 WHERE name = '"+ templateName +"' AND removed is null order by id desc limit 1");) { + update_templ_vmware_pstmt.executeUpdate(); + } catch (final SQLException e) { + LOG.error("updateSystemVmTemplates:Exception while updating 'deploy_as_is' for VMWare hypervisor type : " + e.getMessage()); + throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating deploy_as_is for VMware hypervisor type ", e); + } + } + @Override public InputStream[] getCleanupScripts() { final String scriptFile = "META-INF/db/schema-41510to41600-cleanup.sql"; diff --git a/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java b/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java index e3950340469a..5d9399d7b2a5 100644 --- a/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java @@ -48,6 +48,9 @@ public class UserVmVO extends VMInstanceVO implements UserVm { @Column(name = "update_parameters", updatable = true) protected boolean updateParameters = true; + @Column(name = "user_vm_type", updatable = true) + private String userVmType; + transient String password; @Override @@ -125,4 +128,12 @@ public void setUpdateParameters(boolean updateParameters) { public boolean isUpdateParameters() { return updateParameters; } + + public String getUserVmType() { + return userVmType; + } + + public void 
setUserVmType(String userVmType) { + this.userVmType = userVmType; + } } diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index 15a1a4df5536..1bac92f96d09 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -19,6 +19,10 @@ -- Schema upgrade from 4.15.1.0 to 4.16.0.0 --; +ALTER TABLE `cloud`.`user_vm` ADD COLUMN `user_vm_type` varchar(255) DEFAULT "UserVM" COMMENT 'Defines the type of UserVM'; + +UPDATE `cloud`.`vm_template` set deploy_as_is = 1 where id = 8; + ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0; ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint; ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `maxsize` bigint; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java index dbb9571cea31..54ffe17f68c6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java @@ -88,14 +88,7 @@ public Answer execute(final StartCommand command, final LibvirtComputingResource libvirtComputingResource.applyDefaultNetworkRules(conn, vmSpec, false); // pass cmdline info to system vms - if (vmSpec.getType() != VirtualMachine.Type.User) { - String controlIp = null; - for (final NicTO nic : vmSpec.getNics()) { - if (nic.getType() == TrafficType.Control) { - controlIp = nic.getIp(); - break; - } - } + if (vmSpec.getType() != VirtualMachine.Type.User || (vmSpec.getBootArgs() != null && vmSpec.getBootArgs().contains("CKSNode"))) { // try to patch and SSH into the systemvm for up to 5 minutes for (int count = 0; count < 10; count++) { // wait and try passCmdLine for 30 seconds at most for CLOUDSTACK-2823 @@ -104,12 +97,22 @@ public Answer execute(final StartCommand command, final LibvirtComputingResource } } - final VirtualRoutingResource virtRouterResource = libvirtComputingResource.getVirtRouterResource(); - // check if the router is up? - for (int count = 0; count < 60; count++) { - final boolean result = virtRouterResource.connect(controlIp, 1, 5000); - if (result) { - break; + if (vmSpec.getType() != VirtualMachine.Type.User) { + String controlIp = null; + for (final NicTO nic : vmSpec.getNics()) { + if (nic.getType() == TrafficType.Control) { + controlIp = nic.getIp(); + break; + } + } + + final VirtualRoutingResource virtRouterResource = libvirtComputingResource.getVirtRouterResource(); + // check if the router is up? 
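Two small notes on the template and VM-type plumbing above. First, the user_vm.user_vm_type column defaults to "UserVM", so existing rows are unaffected and only VMs created through the new createAdvancedVirtualMachine(..., type) parameter get tagged as CKS nodes. Second, on KVM the wrapper now reuses the system-VM cmdline patching for those nodes; the decision reduces to a predicate along these lines (condensed from the hunk above):

    // Patch via cmdline when the VM is a system VM, or a user VM tagged as a CKS node in its boot args
    boolean patchViaCmdLine = vmSpec.getType() != VirtualMachine.Type.User
            || (vmSpec.getBootArgs() != null && vmSpec.getBootArgs().contains("CKSNode"));

Note that the accompanying SQL update keys deploy_as_is on vm_template id = 8, which assumes the VMware system VM template holds that fixed id, whereas the Java upgrade path matches the template by name.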
+ for (int count = 0; count < 60; count++) { + final boolean result = virtRouterResource.connect(controlIp, 1, 5000); + if (result) { + break; + } } } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 78f11e011182..d15a1b6f2ea9 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -269,60 +269,6 @@ private void logAndThrow(final Level logLevel, final String message, final Excep logTransitStateAndThrow(logLevel, message, null, null, ex); } - private boolean isKubernetesServiceTemplateConfigured(DataCenter zone) { - // Check Kubernetes VM template for zone - boolean isHyperVAvailable = false; - boolean isKVMAvailable = false; - boolean isVMwareAvailable = false; - boolean isXenserverAvailable = false; - List clusters = clusterDao.listByZoneId(zone.getId()); - for (ClusterVO clusterVO : clusters) { - if (Hypervisor.HypervisorType.Hyperv.equals(clusterVO.getHypervisorType())) { - isHyperVAvailable = true; - } - if (Hypervisor.HypervisorType.KVM.equals(clusterVO.getHypervisorType())) { - isKVMAvailable = true; - } - if (Hypervisor.HypervisorType.VMware.equals(clusterVO.getHypervisorType())) { - isVMwareAvailable = true; - } - if (Hypervisor.HypervisorType.XenServer.equals(clusterVO.getHypervisorType())) { - isXenserverAvailable = true; - } - } - List> templatePairs = new ArrayList<>(); - if (isHyperVAvailable) { - templatePairs.add(new Pair<>(KubernetesClusterHyperVTemplateName.key(), KubernetesClusterHyperVTemplateName.value())); - } - if (isKVMAvailable) { - templatePairs.add(new Pair<>(KubernetesClusterKVMTemplateName.key(), KubernetesClusterKVMTemplateName.value())); - } - if (isVMwareAvailable) { - templatePairs.add(new Pair<>(KubernetesClusterVMwareTemplateName.key(), KubernetesClusterVMwareTemplateName.value())); - } - if (isXenserverAvailable) { - templatePairs.add(new Pair<>(KubernetesClusterXenserverTemplateName.key(), KubernetesClusterXenserverTemplateName.value())); - } - for (Pair templatePair : templatePairs) { - String templateKey = templatePair.first(); - String templateName = templatePair.second(); - if (Strings.isNullOrEmpty(templateName)) { - LOGGER.warn(String.format("Global setting %s is empty. 
Template name need to be specified for Kubernetes service to function", templateKey)); - return false; - } - final VMTemplateVO template = templateDao.findValidByTemplateName(templateName); - if (template == null) { - LOGGER.warn(String.format("Unable to find the template %s to be used for provisioning Kubernetes cluster nodes", templateName)); - return false; - } - if (CollectionUtils.isEmpty(templateJoinDao.newTemplateView(template, zone.getId(), true))) { - LOGGER.warn(String.format("The template ID: %s, name: %s is not available for use in zone ID: %s provisioning Kubernetes cluster nodes", template.getUuid(), templateName, zone.getUuid())); - return false; - } - } - return true; - } - private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) { // Check network offering String networkOfferingName = KubernetesClusterNetworkOffering.value(); @@ -370,9 +316,6 @@ private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) { } private boolean isKubernetesServiceConfigured(DataCenter zone) { - if (!isKubernetesServiceTemplateConfigured(zone)) { - return false; - } if (!isKubernetesServiceNetworkOfferingConfigured(zone)) { return false; } @@ -392,23 +335,12 @@ private IpAddress getSourceNatIp(Network network) { return null; } - private VMTemplateVO getKubernetesServiceTemplate(Hypervisor.HypervisorType hypervisorType) { - String templateName = null; - switch (hypervisorType) { - case Hyperv: - templateName = KubernetesClusterHyperVTemplateName.value(); - break; - case KVM: - templateName = KubernetesClusterKVMTemplateName.value(); - break; - case VMware: - templateName = KubernetesClusterVMwareTemplateName.value(); - break; - case XenServer: - templateName = KubernetesClusterXenserverTemplateName.value(); - break; + private VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType) { + VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType); + if (template == null) { + throw new CloudRuntimeException("Not able to find the System templates or not downloaded in zone " + dataCenter.getId()); } - return templateDao.findValidByTemplateName(templateName); + return template; } private boolean validateIsolatedNetwork(Network network, int clusterTotalNodeCount) { @@ -478,7 +410,7 @@ private boolean validateServiceOffering(final ServiceOffering serviceOffering, f throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for creating clusters, service offering ID: %s", serviceOffering.getUuid())); } if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) { - throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM", serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE)); + throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes cluster template needs minimum %d vCPUs and %d MB RAM", serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE)); } if (serviceOffering.getCpu() < version.getMinimumCpu()) { throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes version ID: %s needs minimum %d 
vCPUs", serviceOffering.getUuid(), version.getUuid(), version.getMinimumCpu())); @@ -629,6 +561,7 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes response.setIpAddressId(ipAddresses.get(0).getUuid()); } } + List vmResponses = new ArrayList(); List vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); ResponseView respView = ResponseView.Restricted; @@ -1108,7 +1041,7 @@ public KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) } final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)masterNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId()); - final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(deployDestination.getCluster().getHypervisorType()); + final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, deployDestination.getCluster().getHypervisorType()); final long cores = serviceOffering.getCpu() * (masterNodeCount + clusterSize); final long memory = serviceOffering.getRamSize() * (masterNodeCount + clusterSize); @@ -1649,10 +1582,6 @@ public String getConfigComponentName() { public ConfigKey[] getConfigKeys() { return new ConfigKey[] { KubernetesServiceEnabled, - KubernetesClusterHyperVTemplateName, - KubernetesClusterKVMTemplateName, - KubernetesClusterVMwareTemplateName, - KubernetesClusterXenserverTemplateName, KubernetesClusterNetworkOffering, KubernetesClusterStartTimeout, KubernetesClusterScaleTimeout, diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java index fa560b53bc5b..138889a2fb37 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java @@ -41,26 +41,6 @@ public interface KubernetesClusterService extends PluggableService, Configurable "false", "Indicates whether Kubernetes Service plugin is enabled or not. 
Management server restart needed on change", false); - static final ConfigKey KubernetesClusterHyperVTemplateName = new ConfigKey("Advanced", String.class, - "cloud.kubernetes.cluster.template.name.hyperv", - "Kubernetes-Service-Template-HyperV", - "Name of the template to be used for creating Kubernetes cluster nodes on HyperV", - true); - static final ConfigKey KubernetesClusterKVMTemplateName = new ConfigKey("Advanced", String.class, - "cloud.kubernetes.cluster.template.name.kvm", - "Kubernetes-Service-Template-KVM", - "Name of the template to be used for creating Kubernetes cluster nodes on KVM", - true); - static final ConfigKey KubernetesClusterVMwareTemplateName = new ConfigKey("Advanced", String.class, - "cloud.kubernetes.cluster.template.name.vmware", - "Kubernetes-Service-Template-VMware", - "Name of the template to be used for creating Kubernetes cluster nodes on VMware", - true); - static final ConfigKey KubernetesClusterXenserverTemplateName = new ConfigKey("Advanced", String.class, - "cloud.kubernetes.cluster.template.name.xenserver", - "Kubernetes-Service-Template-Xenserver", - "Name of the template to be used for creating Kubernetes cluster nodes on Xenserver", - true); static final ConfigKey KubernetesClusterNetworkOffering = new ConfigKey("Advanced", String.class, "cloud.kubernetes.cluster.network.offering", "DefaultNetworkOfferingforKubernetesService", diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 66af46cfb7aa..ad07a004f70e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -25,6 +25,7 @@ import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; import javax.inject.Inject; @@ -58,6 +59,7 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.Storage; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.LaunchPermissionDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.template.TemplateApiService; import com.cloud.template.VirtualMachineTemplate; @@ -75,12 +77,13 @@ import com.cloud.utils.fsm.StateMachine2; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.UserVmService; +import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.dao.UserVmDao; import com.google.common.base.Strings; public class KubernetesClusterActionWorker { - public static final String CLUSTER_NODE_VM_USER = "core"; + public static final String CLUSTER_NODE_VM_USER = "root"; public static final int CLUSTER_API_PORT = 6443; public static final int CLUSTER_NODES_DEFAULT_START_SSH_PORT = 2222; @@ -118,6 +121,10 @@ public class KubernetesClusterActionWorker { protected UserVmService userVmService; @Inject protected VlanDao vlanDao; + @Inject + protected VirtualMachineManager itMgr; + @Inject + protected LaunchPermissionDao launchPermissionDao; protected KubernetesClusterDao kubernetesClusterDao; protected KubernetesClusterVmMapDao kubernetesClusterVmMapDao; @@ -199,11 +206,19 @@ protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final Stri throw new CloudRuntimeException(message, e); } + protected void 
deleteTemplateLaunchPermission() { + if (clusterTemplate != null && owner != null) { + LOGGER.info("Revoking launch permission for systemVM template"); + launchPermissionDao.removePermissions(clusterTemplate.getId(), Collections.singletonList(owner.getId())); + } + } + protected void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException { logMessage(logLevel, message, e); if (kubernetesClusterId != null && event != null) { stateTransitTo(kubernetesClusterId, event); } + deleteTemplateLaunchPermission(); if (e == null) { throw new CloudRuntimeException(message); } @@ -268,6 +283,13 @@ protected String getMasterVmPrivateIp() { return ip; } + private boolean containsMasterNode(List clusterVMs) { + List nodeNames = clusterVMs.stream().map(vm -> vm.getHostName()).collect(Collectors.toList()); + boolean present = false; + present = nodeNames.stream().anyMatch(s -> s.contains("master")); + return present; + } + protected Pair getKubernetesClusterServerIpSshPort(UserVm masterVm) { int port = CLUSTER_NODES_DEFAULT_START_SSH_PORT; KubernetesClusterDetailsVO detail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS); @@ -306,6 +328,7 @@ protected Pair getKubernetesClusterServerIpSshPort(UserVm maste } protected void attachIsoKubernetesVMs(List clusterVMs, final KubernetesSupportedVersion kubernetesSupportedVersion) throws CloudRuntimeException { + //final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; KubernetesSupportedVersion version = kubernetesSupportedVersion; if (kubernetesSupportedVersion == null) { version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); @@ -328,6 +351,7 @@ protected void attachIsoKubernetesVMs(List clusterVMs, final KubernetesS if (!iso.getState().equals(VirtualMachineTemplate.State.Active)) { logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster : %s. 
Binaries ISO not active.", kubernetesCluster.getName()), kubernetesCluster.getId(), failedEvent); } + for (UserVm vm : clusterVMs) { try { templateService.attachIso(iso.getId(), vm.getId()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index a7c5b4a609e1..e6805ac79744 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -47,7 +47,7 @@ import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.exception.ManagementServerException; import com.cloud.exception.NetworkRuleConflictException; -import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -72,6 +72,7 @@ import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.offering.ServiceOffering; import com.cloud.resource.ResourceManager; +import com.cloud.storage.dao.LaunchPermissionDao; import com.cloud.user.Account; import com.cloud.user.SSHKeyPairVO; import com.cloud.uservm.UserVm; @@ -83,7 +84,6 @@ import com.cloud.utils.db.TransactionCallbackWithException; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.exception.ExecutionException; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; import com.cloud.utils.ssh.SshHelper; @@ -123,6 +123,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu protected VMInstanceDao vmInstanceDao; @Inject protected UserVmManager userVmManager; + @Inject + protected LaunchPermissionDao launchPermissionDao; protected String kubernetesClusterNodeNamePrefix; @@ -177,9 +179,9 @@ private String getKubernetesNodeConfig(final String joinIp, final boolean ejectI if (!Strings.isNullOrEmpty(dockerUserName) && !Strings.isNullOrEmpty(dockerPassword)) { // do write file for /.docker/config.json through the code instead of k8s-node.yml as we can no make a section // optional or conditionally applied - String dockerConfigString = "write-files:\n" + + String dockerConfigString = "write_files:\n" + " - path: /.docker/config.json\n" + - " owner: core:core\n" + + " owner: root:root\n" + " permissions: '0644'\n" + " content: |\n" + " {\n" + @@ -190,7 +192,7 @@ private String getKubernetesNodeConfig(final String joinIp, final boolean ejectI " }\n" + " }\n" + " }"; - k8sNodeConfig = k8sNodeConfig.replace("write-files:", dockerConfigString); + k8sNodeConfig = k8sNodeConfig.replace("write_files:", dockerConfigString); final String dockerUrlKey = "{{docker.url}}"; final String dockerAuthKey = "{{docker.secret}}"; final String dockerEmailKey = "{{docker.email}}"; @@ -280,12 +282,11 @@ protected void startKubernetesVM(final UserVm vm) throws ManagementServerExcepti Field f = startVm.getClass().getDeclaredField("id"); f.setAccessible(true); f.set(startVm, vm.getId()); - userVmService.startVirtualMachine(startVm); + itMgr.advanceStart(vm.getUuid(), null, null); if (LOGGER.isInfoEnabled()) 
{ LOGGER.info(String.format("Started VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); } - } catch (IllegalAccessException | NoSuchFieldException | ExecutionException | - ResourceUnavailableException | ResourceAllocationException | InsufficientCapacityException ex) { + } catch (IllegalAccessException | NoSuchFieldException | OperationTimedoutException | ResourceUnavailableException | InsufficientCapacityException ex) { throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster : %s", kubernetesCluster.getName()), ex); } @@ -345,7 +346,7 @@ protected UserVm createKubernetesNode(String joinIp) throws ManagementServerExce nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), - null, addrs, null, null, null, customParameterMap, null, null, null, null); + null, addrs, null, null, null, customParameterMap, null, null, null, null, String.valueOf(UserVmManager.UserVmType.CKSNode)); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Created node VM : %s, %s in the Kubernetes cluster : %s", hostName, nodeVm.getUuid(), kubernetesCluster.getName())); } @@ -424,7 +425,6 @@ protected void provisionSshPortForwardingRules(IpAddress publicIp, Network netwo final Ip vmIp = new Ip(vmNic.getIPv4Address()); final long vmIdFinal = vmId; final int srcPortFinal = firewallRuleSourcePortStart + i; - PortForwardingRuleVO pfRule = Transaction.execute(new TransactionCallbackWithException() { @Override public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 3401a161de3b..8833907bc92d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -48,6 +48,7 @@ import com.cloud.network.Network; import com.cloud.network.rules.FirewallRule; import com.cloud.offering.ServiceOffering; +import com.cloud.storage.LaunchPermissionVO; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; @@ -192,13 +193,13 @@ private boolean removeKubernetesClusterNode(final String ipAddress, final int po retryCounter++; try { Pair result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), + pkFile, null, String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); if (!result.first()) { LOGGER.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); } else { result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo kubectl delete node %s", hostName), + pkFile, null, String.format("sudo /opt/bin/kubectl delete node %s", hostName), 10000, 
10000, 30000); if (result.first()) { return true; @@ -354,23 +355,26 @@ private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRun stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested); } List clusterVMs = new ArrayList<>(); + LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId()); + launchPermissionDao.persist(launchPermission); try { clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to provision node VM in the cluster", kubernetesCluster.getName()), e); } - attachIsoKubernetesVMs(clusterVMs); try { List clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); scaleKubernetesClusterNetworkRules(clusterVMIds); } catch (ManagementServerException e) { logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), e); } + attachIsoKubernetesVMs(clusterVMs); KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId()); kubernetesClusterVO.setNodeCount(clusterSize); boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesClusterVO, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, scaleTimeoutTime, 15000); detachIsoKubernetesVMs(clusterVMs); + deleteTemplateLaunchPermission(); if (!readyNodesCountValid) { // Scaling failed logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling unsuccessful for Kubernetes cluster : %s as it does not have desired number of nodes in ready state", kubernetesCluster.getName())); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 583d215509c5..d14a054339da 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -60,6 +60,7 @@ import com.cloud.network.addr.PublicIp; import com.cloud.network.rules.LoadBalancer; import com.cloud.offering.ServiceOffering; +import com.cloud.storage.LaunchPermissionVO; import com.cloud.user.Account; import com.cloud.user.SSHKeyPairVO; import com.cloud.uservm.UserVm; @@ -71,6 +72,7 @@ import com.cloud.vm.Nic; import com.cloud.vm.ReservationContext; import com.cloud.vm.ReservationContextImpl; +import com.cloud.vm.UserVmManager; import com.cloud.vm.VirtualMachine; import com.google.common.base.Strings; @@ -208,7 +210,7 @@ private UserVm createKubernetesMaster(final Network network, String serverIp) th masterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), - 
requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null); + requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, String.valueOf(UserVmManager.UserVmType.CKSNode)); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster : %s", masterVm.getUuid(), hostName, kubernetesCluster.getName())); } @@ -263,7 +265,7 @@ private UserVm createKubernetesAdditionalMaster(final String joinIp, final int a additionalMasterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), - null, addrs, null, null, null, customParameterMap, null, null, null, null); + null, addrs, null, null, null, customParameterMap, null, null, null, null, String.valueOf(UserVmManager.UserVmType.CKSNode)); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Created master VM ID : %s, %s in the Kubernetes cluster : %s", additionalMasterVm.getUuid(), hostName, kubernetesCluster.getName())); } @@ -499,6 +501,10 @@ public boolean startKubernetesClusterOnCreate() { (Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getMasterNodeCount() > 1)) { // Shared network, single-master cluster won't have an IP yet logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s as no public IP found for the cluster" , kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); } + // Allow account creating the kubernetes cluster to access systemVM template + LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId()); + launchPermissionDao.persist(launchPermission); + List clusterVMs = new ArrayList<>(); UserVm k8sMasterVM = null; try { @@ -577,6 +583,8 @@ public boolean startKubernetesClusterOnCreate() { kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + // remove launch permissions + deleteTemplateLaunchPermission(); return true; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index 66ab130b4d54..5371e4742594 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -91,7 +91,7 @@ private void upgradeKubernetesClusterNodes() { } try { result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, - String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), + String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); } catch (Exception e) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to drain Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, 
KubernetesCluster.Event.OperationFailed, e); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index b06cc00c9229..abb9fbf662d7 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -41,7 +41,7 @@ public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kuber String user, File sshKeyFile, String nodeName) throws Exception { Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - String.format("sudo kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()), + String.format("sudo /opt/bin/kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()), 10000, 10000, 20000); if (result.first() && nodeName.equals(result.second().trim())) { return true; @@ -102,7 +102,7 @@ public static boolean uncordonKubernetesClusterNode(final KubernetesCluster kube Pair result = null; try { result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - String.format("sudo kubectl uncordon %s", hostName), + String.format("sudo /opt/bin/kubectl uncordon %s", hostName), 10000, 10000, 30000); if (result.first()) { return true; @@ -125,9 +125,9 @@ public static boolean isKubernetesClusterAddOnServiceRunning(final KubernetesClu final int port, final String user, final File sshKeyFile, final String namespace, String serviceName) { try { - String cmd = "sudo kubectl get pods --all-namespaces"; + String cmd = "sudo /opt/bin/kubectl get pods --all-namespaces"; if (!Strings.isNullOrEmpty(namespace)) { - cmd = String.format("sudo kubectl get pods --namespace=%s", namespace); + cmd = String.format("sudo /opt/bin/kubectl get pods --namespace=%s", namespace); } Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, cmd, @@ -203,7 +203,7 @@ public static int getKubernetesClusterReadyNodesCount(final KubernetesCluster ku final int port, final String user, final File sshKeyFile) throws Exception { Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - "sudo kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", + "sudo /opt/bin/kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", 10000, 10000, 20000); if (result.first()) { return Integer.parseInt(result.second().trim().replace("\"", "")); diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml index 2654854e4605..9e395a3f5673 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml @@ -20,14 +20,14 @@ ssh_authorized_keys: {{ k8s.ssh.pub.key }} -write-files: +write_files: - path: /opt/bin/setup-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e - if [[ -f "/home/core/success" ]]; then + if [[ -f "/home/debian/success" ]]; then echo "Already provisioned!" 
exit 0 fi @@ -96,7 +96,7 @@ write-files: mkdir -p /opt/bin cd /opt/bin - cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin + cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} . chmod +x {kubeadm,kubelet,kubectl} sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service @@ -179,14 +179,14 @@ write-files: fi - path: /opt/bin/deploy-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e - if [[ -f "/home/core/success" ]]; then - echo "Already provisioned!" - exit 0 + if [[ -f "/home/debian/success" ]]; then + echo "Already provisioned!" + exit 0 fi if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then @@ -196,46 +196,43 @@ write-files: modprobe ip_vs modprobe ip_vs_wrr modprobe ip_vs_sh - modprobe nf_conntrack_ipv4 + modprobe nf_conntrack if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then export PATH=$PATH:/opt/bin fi kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --control-plane --certificate-key {{ k8s_master.cluster.ha.certificate.key }} --discovery-token-unsafe-skip-ca-verification - sudo touch /home/core/success - echo "true" > /home/core/success - -coreos: - units: - - name: docker.service - command: start - enable: true - - - name: setup-kube-system.service - command: start - content: | - [Unit] - Requires=docker.service - After=docker.service - - [Service] - Type=simple - StartLimitInterval=0 - ExecStart=/opt/bin/setup-kube-system - - - name: deploy-kube-system.service - command: start - content: | - [Unit] - After=setup-kube-system.service - - [Service] - Type=simple - StartLimitInterval=0 - Restart=on-failure - ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version - ExecStart=/opt/bin/deploy-kube-system - - update: - group: stable - reboot-strategy: off + sudo touch /home/debian/success + echo "true" > /home/debian/success + + - path: /etc/systemd/system/setup-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + Type=simple + StartLimitInterval=0 + ExecStart=/opt/bin/setup-kube-system + + - path: /etc/systemd/system/deploy-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + After=setup-kube-system.service + + [Service] + Type=simple + StartLimitInterval=0 + Restart=on-failure + ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version + ExecStart=/opt/bin/deploy-kube-system + +runcmd: + - [ systemctl, start, setup-kube-system ] + - [ systemctl, start, deploy-kube-system ] + diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml index c9f1e6150d13..d17adc664057 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml @@ -20,7 +20,7 @@ ssh_authorized_keys: {{ k8s.ssh.pub.key }} -write-files: +write_files: - path: /etc/conf.d/nfs permissions: '0644' content: | @@ -42,12 +42,12 @@ write-files: {{ k8s_master.apiserver.key }} - path: /opt/bin/setup-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e - if [[ -f "/home/core/success" ]]; then + if [[ -f "/home/debian/success" ]]; then echo "Already provisioned!" 
exit 0 fi @@ -116,7 +116,7 @@ write-files: mkdir -p /opt/bin cd /opt/bin - cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin + cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} . chmod +x {kubeadm,kubelet,kubectl} sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service @@ -218,12 +218,12 @@ write-files: done - path: /opt/bin/deploy-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e - if [[ -f "/home/core/success" ]]; then + if [[ -f "/home/debian/success" ]]; then echo "Already provisioned!" exit 0 fi @@ -259,40 +259,37 @@ write-files: kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true - sudo touch /home/core/success - echo "true" > /home/core/success - -coreos: - units: - - name: docker.service - command: start - enable: true - - - name: setup-kube-system.service - command: start - content: | - [Unit] - Requires=docker.service - After=docker.service - - [Service] - Type=simple - StartLimitInterval=0 - ExecStart=/opt/bin/setup-kube-system - - - name: deploy-kube-system.service - command: start - content: | - [Unit] - After=setup-kube-system.service - - [Service] - Type=simple - StartLimitInterval=0 - Restart=on-failure - ExecStartPre=/usr/bin/curl -k https://127.0.0.1:6443/version - ExecStart=/opt/bin/deploy-kube-system - - update: - group: stable - reboot-strategy: off + sudo touch /home/debian/success + echo "true" > /home/debian/success + + - path: /etc/systemd/system/setup-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + Type=simple + StartLimitInterval=0 + ExecStart=/opt/bin/setup-kube-system + + - path: /etc/systemd/system/deploy-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + After=setup-kube-system.service + + [Service] + Type=simple + StartLimitInterval=0 + Restart=on-failure + ExecStartPre=/usr/bin/curl -k https://127.0.0.1:6443/version + ExecStart=/opt/bin/deploy-kube-system + +runcmd: + - [ systemctl, start, setup-kube-system ] + - [ systemctl, start, deploy-kube-system ] + diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml index 98be095bff48..74cd18b5ea7d 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml @@ -20,14 +20,14 @@ ssh_authorized_keys: {{ k8s.ssh.pub.key }} -write-files: +write_files: - path: /opt/bin/setup-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e - if [[ -f "/home/core/success" ]]; then + if [[ -f "/home/debian/success" ]]; then echo "Already provisioned!" exit 0 fi @@ -96,7 +96,7 @@ write-files: mkdir -p /opt/bin cd /opt/bin - cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin + cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} . 
chmod +x {kubeadm,kubelet,kubectl} sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service @@ -179,12 +179,12 @@ write-files: fi - path: /opt/bin/deploy-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e - if [[ -f "/home/core/success" ]]; then + if [[ -f "/home/debian/success" ]]; then echo "Already provisioned!" exit 0 fi @@ -196,46 +196,42 @@ write-files: modprobe ip_vs modprobe ip_vs_wrr modprobe ip_vs_sh - modprobe nf_conntrack_ipv4 + modprobe nf_conntrack if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then export PATH=$PATH:/opt/bin fi kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --discovery-token-unsafe-skip-ca-verification - sudo touch /home/core/success - echo "true" > /home/core/success - -coreos: - units: - - name: docker.service - command: start - enable: true - - - name: setup-kube-system.service - command: start - content: | - [Unit] - Requires=docker.service - After=docker.service - - [Service] - Type=simple - StartLimitInterval=0 - ExecStart=/opt/bin/setup-kube-system - - - name: deploy-kube-system.service - command: start - content: | - [Unit] - After=setup-kube-system.service - - [Service] - Type=simple - StartLimitInterval=0 - Restart=on-failure - ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version - ExecStart=/opt/bin/deploy-kube-system - - update: - group: stable - reboot-strategy: off + sudo touch /home/debian/success + echo "true" > /home/debian/success + + - path: /etc/systemd/system/setup-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + Type=simple + StartLimitInterval=0 + ExecStart=/opt/bin/setup-kube-system + + - path: /etc/systemd/system/deploy-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + After=setup-kube-system.service + + [Service] + Type=simple + StartLimitInterval=0 + Restart=on-failure + ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version + ExecStart=/opt/bin/deploy-kube-system + +runcmd: + - [ systemctl, start, setup-kube-system ] + - [ systemctl, start, deploy-kube-system ] diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index b0eac2bcf448..2949ad92f71f 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -1483,7 +1483,6 @@ public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile prof if(profile.getHypervisorType() == HypervisorType.Hyperv) { controlNic = managementNic; } - CheckSshCommand check = new CheckSshCommand(profile.getInstanceName(), controlNic.getIPv4Address(), 3922); cmds.addCommand("checkSsh", check); diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index c71054951619..f0f2b3a5a1fd 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -1336,7 +1336,7 @@ private long createNewVM(AutoScaleVmGroupVO asGroup) { } else { vm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), 
"autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), - null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null, null); + null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null, null, String.valueOf(UserVmManager.UserVmType.AutoScaleVM)); } } diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java index e8f709729c1e..3fb81228b68a 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManager.java +++ b/server/src/main/java/com/cloud/vm/UserVmManager.java @@ -55,6 +55,10 @@ public interface UserVmManager extends UserVmService { static final int MAX_USER_DATA_LENGTH_BYTES = 2048; + public static enum UserVmType { + UserVM, AutoScaleVM, CKSNode + } + /** * @param hostId get all of the virtual machines that belong to one host. * @return collection of VirtualMachine. diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 286fe2ec903f..2cc0ec618afe 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -3229,7 +3229,7 @@ public UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOff return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap, - dataDiskTemplateToDiskOfferingMap, userVmOVFProperties); + dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, null); } @@ -3340,7 +3340,7 @@ public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, Service return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap, - userVmOVFProperties); + userVmOVFProperties, null); } @Override @@ -3349,7 +3349,7 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIps, Boolean displayvm, String keyboard, List affinityGroupIdList, Map customParametrs, String customId, Map> dhcpOptionsMap, Map dataDiskTemplateToDiskOfferingMap, - Map userVmOVFPropertiesMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, + Map userVmOVFPropertiesMap, String type) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException { Account caller = CallContext.current().getCallingAccount(); @@ -3401,7 +3401,7 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, null, group, httpmethod, userData, sshKeyPair, hypervisor, caller, requestedIps, 
defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap, - dataDiskTemplateToDiskOfferingMap, userVmOVFPropertiesMap); + dataDiskTemplateToDiskOfferingMap, userVmOVFPropertiesMap, type); } private NetworkVO getNetworkToAddToNetworkList(VirtualMachineTemplate template, Account owner, HypervisorType hypervisor, @@ -3520,7 +3520,7 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe String sshKeyPair, HypervisorType hypervisor, Account caller, Map requestedIps, IpAddresses defaultIps, Boolean isDisplayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map datadiskTemplateToDiskOfferringMap, - Map userVmOVFPropertiesMap) throws InsufficientCapacityException, ResourceUnavailableException, + Map userVmOVFPropertiesMap, String type) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException { _accountMgr.checkAccess(caller, null, true, owner); @@ -3695,7 +3695,7 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe } } - if (template.getTemplateType().equals(TemplateType.SYSTEM)) { + if (template.getTemplateType().equals(TemplateType.SYSTEM) && !String.valueOf(UserVmType.CKSNode).equals(type)) { throw new InvalidParameterValueException("Unable to use system template " + template.getId() + " to deploy a user vm"); } List listZoneTemplate = _templateZoneDao.listByZoneTemplate(zone.getId(), template.getId()); @@ -3875,7 +3875,7 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe UserVmVO vm = commitUserVm(zone, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, caller, isDisplayVm, keyboard, accountId, userId, offering, isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap, - datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap); + datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, type); // Assign instance to the group try { @@ -3977,7 +3977,7 @@ private UserVmVO commitUserVm(final boolean isImport, final DataCenter zone, fin final long accountId, final long userId, final ServiceOffering offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap> networkNicMap, final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map customParameters, final Map> extraDhcpOptionMap, final Map dataDiskTemplateToDiskOfferingMap, - final Map userVmOVFPropertiesMap, final VirtualMachine.PowerState powerState) throws InsufficientCapacityException { + final Map userVmOVFPropertiesMap, final VirtualMachine.PowerState powerState, String type) throws InsufficientCapacityException { return Transaction.execute(new TransactionCallbackWithException() { @Override public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCapacityException { @@ -4063,6 +4063,7 @@ public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCap } } + vm.setUserVmType(type); _vmDao.persist(vm); for (String key : customParameters.keySet()) { if (key.equalsIgnoreCase(VmDetailConstants.CPU_NUMBER) || @@ -4164,13 +4165,13 @@ private UserVmVO commitUserVm(final DataCenter zone, final VirtualMachineTemplat final long accountId, final long userId, final ServiceOfferingVO offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap> networkNicMap, final long 
id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map customParameters, final Map> extraDhcpOptionMap, final Map dataDiskTemplateToDiskOfferingMap, - Map userVmOVFPropertiesMap) throws InsufficientCapacityException { + Map userVmOVFPropertiesMap, String type) throws InsufficientCapacityException { return commitUserVm(false, zone, null, null, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, caller, isDisplayVm, keyboard, accountId, userId, offering, isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap, - userVmOVFPropertiesMap, null); + userVmOVFPropertiesMap, null, type); } public void validateRootDiskResize(final HypervisorType hypervisorType, Long rootDiskSize, VMTemplateVO templateVO, UserVmVO vm, final Map customParameters) throws InvalidParameterValueException @@ -4477,12 +4478,54 @@ private UserVm startVirtualMachine(long vmId, Long podId, Long clusterId, Long h return vm; } + private void addUserVMCmdlineArgs(Long vmId, VirtualMachineProfile profile, DeployDestination dest, StringBuilder buf) { + UserVmVO k8sVM = _vmDao.findById(vmId); + buf.append(" template=domP"); + buf.append(" name=").append(profile.getHostName()); + buf.append(" type=").append(k8sVM.getUserVmType()); + for (NicProfile nic : profile.getNics()) { + int deviceId = nic.getDeviceId(); + if (nic.getIPv4Address() == null) { + buf.append(" eth").append(deviceId).append("ip=").append("0.0.0.0"); + buf.append(" eth").append(deviceId).append("mask=").append("0.0.0.0"); + } else { + buf.append(" eth").append(deviceId).append("ip=").append(nic.getIPv4Address()); + buf.append(" eth").append(deviceId).append("mask=").append(nic.getIPv4Netmask()); + } + + if (nic.isDefaultNic()) { + buf.append(" gateway=").append(nic.getIPv4Gateway()); + } + + if (nic.getTrafficType() == TrafficType.Management) { + String mgmt_cidr = _configDao.getValue(Config.ManagementNetwork.key()); + if (NetUtils.isValidIp4Cidr(mgmt_cidr)) { + buf.append(" mgmtcidr=").append(mgmt_cidr); + } + buf.append(" localgw=").append(dest.getPod().getGateway()); + } + } + DataCenterVO dc = _dcDao.findById(profile.getVirtualMachine().getDataCenterId()); + buf.append(" internaldns1=").append(dc.getInternalDns1()); + if (dc.getInternalDns2() != null) { + buf.append(" internaldns2=").append(dc.getInternalDns2()); + } + buf.append(" dns1=").append(dc.getDns1()); + if (dc.getDns2() != null) { + buf.append(" dns2=").append(dc.getDns2()); + } + s_logger.info("cmdline details: "+ buf.toString()); + } + @Override public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { UserVmVO vm = _vmDao.findById(profile.getId()); Map details = userVmDetailsDao.listDetailsKeyPairs(vm.getId()); vm.setDetails(details); - + StringBuilder buf = profile.getBootArgsBuilder(); + if (String.valueOf(UserVmType.CKSNode).equals(vm.getUserVmType())) { + addUserVMCmdlineArgs(vm.getId(), profile, dest, buf); + } // add userdata info into vm profile Nic defaultNic = _networkModel.getDefaultNic(vm.getId()); if(defaultNic != null) { @@ -5325,7 +5368,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE } vm = createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner, name, displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, 
displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(), - cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties); + cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, null); } } // check if this templateId has a child ISO @@ -7355,7 +7398,7 @@ public UserVm importVM(final DataCenter zone, final Host host, final VirtualMach null, null, userData, caller, isDisplayVm, keyboard, accountId, userId, serviceOffering, template.getFormat().equals(ImageFormat.ISO), sshPublicKey, null, id, instanceName, uuidName, hypervisorType, customParameters, - null, null, null, powerState); + null, null, null, powerState, null); } @Override diff --git a/systemvm/debian/opt/cloud/bin/setup/CKSNode.sh b/systemvm/debian/opt/cloud/bin/setup/CKSNode.sh new file mode 100755 index 000000000000..bd79e885096a --- /dev/null +++ b/systemvm/debian/opt/cloud/bin/setup/CKSNode.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +. 
/opt/cloud/bin/setup/common.sh + +setup_k8s_node() { + log_it "Setting up k8s node" + + # set default ssh port and restart sshd service + sed -i 's/3922/22/g' /etc/ssh/sshd_config + + swapoff -a + sudo sed -i '/ swap / s/^/#/' /etc/fstab + log_it "Swap disabled" + + log_it "Setting up interfaces" + setup_common eth0 + setup_system_rfc1918_internal + + log_it "Setting up entry in hosts" + sed -i /$NAME/d /etc/hosts + echo "$ETH0_IP $NAME" >> /etc/hosts + + public_ip=`getPublicIp` + echo "$public_ip $NAME" >> /etc/hosts + + echo "export PATH='$PATH:/opt/bin/'">> ~/.bashrc + + disable_rpfilter + enable_fwding 1 + enable_irqbalance 0 + setup_ntp + dhclient + + rm -f /etc/logrotate.d/cloud + + log_it "Starting cloud-init services" + systemctl enable --now --no-block containerd + systemctl enable --now --no-block docker.socket + systemctl enable --now --no-block docker.service + systemctl enable --now --no-block cloud-init + systemctl enable --now --no-block cloud-config + systemctl enable --now --no-block cloud-final +} + +setup_k8s_node \ No newline at end of file diff --git a/systemvm/debian/opt/cloud/bin/setup/bootstrap.sh b/systemvm/debian/opt/cloud/bin/setup/bootstrap.sh index 769078ea8f09..2335d649a950 100755 --- a/systemvm/debian/opt/cloud/bin/setup/bootstrap.sh +++ b/systemvm/debian/opt/cloud/bin/setup/bootstrap.sh @@ -173,6 +173,7 @@ patch_systemvm() { patch() { local PATCH_MOUNT=/media/cdrom local logfile="/var/log/patchsystemvm.log" + if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ] && [ -f ${PATCH_MOUNT}/agent.zip ] && [ -f /var/cache/cloud/patch.required ] then echo "Patching systemvm for cloud service with mount=$PATCH_MOUNT for type=$TYPE" >> $logfile diff --git a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config index 02593a37affb..58302ef3a82c 100755 --- a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config +++ b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config @@ -61,7 +61,6 @@ patch() { [ -f ${md5file} ] && oldmd5=$(cat ${md5file}) local newmd5= [ -f ${patchfile} ] && newmd5=$(md5sum ${patchfile} | awk '{print $1}') - log_it "Scripts checksum detected: oldmd5=$oldmd5 newmd5=$newmd5" if [ "$oldmd5" != "$newmd5" ] && [ -f ${patchfile} ] && [ "$newmd5" != "" ] then diff --git a/systemvm/debian/opt/cloud/bin/setup/common.sh b/systemvm/debian/opt/cloud/bin/setup/common.sh index e24642fc6035..9b406b1e2b25 100755 --- a/systemvm/debian/opt/cloud/bin/setup/common.sh +++ b/systemvm/debian/opt/cloud/bin/setup/common.sh @@ -543,7 +543,7 @@ setup_system_rfc1918_internal() { public_ip=`getPublicIp` echo "$public_ip" | grep -E "^((127\.)|(10\.)|(172\.1[6-9]\.)|(172\.2[0-9]\.)|(172\.3[0-1]\.)|(192\.168\.))" if [ "$?" == "0" ]; then - log_it "Not setting up route of RFC1918 space to $LOCAL_GW befause $public_ip is RFC1918." + log_it "Not setting up route of RFC1918 space to $LOCAL_GW because $public_ip is RFC1918." 
else log_it "Setting up route of RFC1918 space to $LOCAL_GW" # Setup general route for RFC 1918 space, as otherwise it will be sent to diff --git a/systemvm/debian/opt/cloud/bin/setup/postinit.sh b/systemvm/debian/opt/cloud/bin/setup/postinit.sh index 5e7e4c01a228..0ebd73a23ca6 100755 --- a/systemvm/debian/opt/cloud/bin/setup/postinit.sh +++ b/systemvm/debian/opt/cloud/bin/setup/postinit.sh @@ -18,8 +18,17 @@ # # This scripts before ssh.service but after cloud-early-config +log_it() { + echo "$(date) $@" >> /var/log/cloud.log + log_action_msg "$@" +} + # Eject cdrom if any -eject || true +CMDLINE=/var/cache/cloud/cmdline +export TYPE=$(grep -Po 'type=\K[a-zA-Z]*' $CMDLINE) +if [ "$TYPE" != "CKSNode" ]; then + eject || true +fi # Restart journald for setting changes to apply systemctl restart systemd-journald @@ -33,6 +42,10 @@ then fi fi +if [ "$TYPE" == "CKSNode" ]; then + pkill -9 dhclient +fi + [ ! -f /var/cache/cloud/enabled_svcs ] && touch /var/cache/cloud/enabled_svcs for svc in $(cat /var/cache/cloud/enabled_svcs) do diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index d76072522955..31a772f3d117 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -65,7 +65,6 @@ def setUpClass(cls): cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) cls.hypervisor = cls.testClient.getHypervisorInfo() cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ - cls.cks_template_name_key = "cloud.kubernetes.cluster.template.name." + cls.hypervisor.lower() cls.hypervisorNotSupported = False if cls.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: @@ -89,8 +88,6 @@ def setUpClass(cls): "true") cls.restartServer() - cls.cks_template = None - cls.initial_configuration_cks_template_name = None cls.cks_service_offering = None if cls.setup_failed == False: @@ -127,20 +124,6 @@ def setUpClass(cls): (cls.services["cks_kubernetes_versions"]["1.16.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.3"]["url"], e)) if cls.setup_failed == False: - cls.cks_template = cls.getKubernetesTemplate() - if cls.cks_template == FAILED: - assert False, "getKubernetesTemplate() failed to return template for hypervisor %s" % cls.hypervisor - cls.setup_failed = True - else: - cls._cleanup.append(cls.cks_template) - - if cls.setup_failed == False: - cls.initial_configuration_cks_template_name = Configurations.list(cls.apiclient, - name=cls.cks_template_name_key)[0].value - Configurations.update(cls.apiclient, - cls.cks_template_name_key, - cls.cks_template.name) - cks_offering_data = cls.services["cks_service_offering"] cks_offering_data["name"] = 'CKS-Instance-' + random_gen() cls.cks_service_offering = ServiceOffering.create( @@ -168,13 +151,6 @@ def tearDownClass(cls): version_delete_failed = True cls.debug("Error: Exception during cleanup for added Kubernetes supported versions: %s" % e) try: - # Restore original CKS template - if cls.cks_template != None: - cls.cks_template.delete(cls.apiclient) - if cls.hypervisorNotSupported == False and cls.initial_configuration_cks_template_name != None: - Configurations.update(cls.apiclient, - cls.cks_template_name_key, - cls.initial_configuration_cks_template_name) # Restore CKS enabled if cls.initial_configuration_cks_enabled not in ["true", True]: cls.debug("Restoring Kubernetes Service enabled value") @@ -224,41 +200,6 @@ def isManagementUp(cls): except Exception: return False - @classmethod - def 
getKubernetesTemplate(cls, cks_templates=None): - - if cks_templates is None: - cks_templates = cls.services["cks_templates"] - - hypervisor = cls.hypervisor.lower() - - if hypervisor not in cks_templates.keys(): - cls.debug("Provided hypervisor has no CKS template") - return FAILED - - cks_template = cks_templates[hypervisor] - - cmd = listTemplates.listTemplatesCmd() - cmd.name = cks_template['name'] - cmd.templatefilter = 'all' - cmd.zoneid = cls.zone.id - cmd.hypervisor = hypervisor - templates = cls.apiclient.listTemplates(cmd) - - if validateList(templates)[0] != PASS: - details = None - if hypervisor in ["vmware"]: - details = [{"keyboard": "us"}] - template = Template.register(cls.apiclient, cks_template, zoneid=cls.zone.id, hypervisor=hypervisor.lower(), randomize_name=False, details=details) - template.download(cls.apiclient) - return template - - for template in templates: - if template.isready and template.ispublic: - return Template(template.__dict__) - - return FAILED - @classmethod def waitForKubernetesSupportedVersionIsoReadyState(cls, version_id, retries=30, interval=60): """Check if Kubernetes supported version ISO is in Ready state""" diff --git a/tools/appliance/systemvmtemplate/http/preseed.cfg b/tools/appliance/systemvmtemplate/http/preseed.cfg index ce51f746c300..0a07e97b15dd 100644 --- a/tools/appliance/systemvmtemplate/http/preseed.cfg +++ b/tools/appliance/systemvmtemplate/http/preseed.cfg @@ -56,13 +56,13 @@ d-i partman-auto/disk string /dev/vda d-i partman-auto/method string regular d-i partman-auto/expert_recipe string \ boot-root :: \ - 100 60 100 ext2 \ + 512 60 512 ext2 \ $primary{ } $bootable{ } \ method{ format } format{ } \ use_filesystem{ } filesystem{ ext2 } \ mountpoint{ /boot } \ . \ - 2240 40 2500 ext4 \ + 5000 40 10000 ext4 \ method{ format } format{ } \ use_filesystem{ } filesystem{ ext4 } \ mountpoint{ / } \ diff --git a/tools/appliance/systemvmtemplate/scripts/cleanup.sh b/tools/appliance/systemvmtemplate/scripts/cleanup.sh index 8f2408a325a3..ab0ceb628611 100644 --- a/tools/appliance/systemvmtemplate/scripts/cleanup.sh +++ b/tools/appliance/systemvmtemplate/scripts/cleanup.sh @@ -17,11 +17,10 @@ # under the License. 
set -e -set -x function cleanup_apt() { export DEBIAN_FRONTEND=noninteractive - apt-get -y remove --purge dictionaries-common busybox isc-dhcp-client isc-dhcp-common \ + apt-get -y remove --purge dictionaries-common busybox \ task-english task-ssh-server tasksel tasksel-data laptop-detect wamerican sharutils \ nano util-linux-locales krb5-locales diff --git a/tools/appliance/systemvmtemplate/scripts/configure_conntrack.sh b/tools/appliance/systemvmtemplate/scripts/configure_conntrack.sh index 7202717d73b5..63016a98d003 100644 --- a/tools/appliance/systemvmtemplate/scripts/configure_conntrack.sh +++ b/tools/appliance/systemvmtemplate/scripts/configure_conntrack.sh @@ -34,8 +34,6 @@ function load_conntrack_modules() { grep nf_conntrack_ipv4 /etc/modules && return cat >> /etc/modules << EOF -nf_conntrack_ipv4 -nf_conntrack_ipv6 nf_conntrack nf_conntrack_ftp nf_conntrack_pptp diff --git a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh index 516085705afa..72ea34ac6156 100644 --- a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh +++ b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh @@ -124,6 +124,27 @@ function configure_services() { systemctl disable hyperv-daemons.hv-vss-daemon.service systemctl disable qemu-guest-agent + # Disable container services + systemctl disable containerd + systemctl disable docker.service + systemctl stop docker.service + systemctl disable docker.socket + systemctl stop docker.socket + + # Disable cloud init by default +cat < /etc/cloud/cloud.cfg.d/cloudstack.cfg +datasource_list: ['CloudStack'] +datasource: + CloudStack: + max_wait: 120 + timeout: 50 +EOF + + sed -i 's/\(disable_root: \)\(.*\)/\1false/' /etc/cloud/cloud.cfg + touch /etc/cloud/cloud-init.disabled + systemctl stop cloud-init + systemctl disable cloud-init + configure_apache2 configure_strongswan configure_issue diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index 2e788f3ced59..1cbdfea56308 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -35,6 +35,12 @@ function debconf_packages() { echo "libc6 libraries/restart-without-asking boolean false" | debconf-set-selections } +function apt_clean() { + apt-get -y autoremove --purge + apt-get clean + apt-get autoclean +} + function install_packages() { export DEBIAN_FRONTEND=noninteractive export DEBIAN_PRIORITY=critical @@ -70,20 +76,29 @@ function install_packages() { radvd \ sharutils genisoimage aria2 \ strongswan libcharon-extra-plugins libstrongswan-extra-plugins strongswan-charon strongswan-starter \ - virt-what open-vm-tools qemu-guest-agent hyperv-daemons - - apt-get -y autoremove --purge - apt-get clean - apt-get autoclean + virt-what open-vm-tools qemu-guest-agent hyperv-daemons \ + apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common cloud-init + apt_clean ${apt_get} install links + curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - + apt-key fingerprint 0EBFCD88 + #32 bit architecture support for vhd-util: not required for 32 bit template if [ "${arch}" != "i386" ]; then dpkg --add-architecture i386 apt-get update ${apt_get} install libuuid1:i386 libc6:i386 + + add-apt-repository \ + "deb [arch=amd64] 
https://download.docker.com/linux/debian \ + $(lsb_release -cs) \ + stable" + apt-get update + ${apt_get} install docker-ce docker-ce-cli containerd.io fi + apt_clean install_vhd_util # Install xenserver guest utilities as debian repos don't have it diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index 3de676bc7b6e..ec9894a76207 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -33,7 +33,7 @@ [ "-smp", "1" ] ], "format": "qcow2", - "disk_size": 2500, + "disk_size": 10000, "disk_interface": "virtio", "net_device": "virtio-net", "iso_url": "https://download.cloudstack.org/systemvm/debian/debian-10.7.0-amd64-netinst.iso", diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index 60f0f1cc27d3..d3791cc201d4 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -2018,64 +2018,30 @@ "cks_kubernetes_versions": { "1.14.9": { "semanticversion": "1.14.9", - "url": "http://download.cloudstack.org/cks/setup-1.14.9.iso", + "url": "http://sbjenkins-stagingrepo.jenkins.lon/flatcar/setup-v1.14.9.iso", "mincpunumber": 2, "minmemory": 2048 }, "1.15.0": { "semanticversion": "1.15.0", - "url": "http://download.cloudstack.org/cks/setup-1.15.0.iso", + "url": "http://sbjenkins-stagingrepo.jenkins.lon/flatcar/setup-v1.15.0.iso", "mincpunumber": 2, "minmemory": 2048 }, "1.16.0": { "semanticversion": "1.16.0", - "url": "http://download.cloudstack.org/cks/setup-1.16.0.iso", + "url": "http://sbjenkins-stagingrepo.jenkins.lon/flatcar/setup-v1.16.0.iso", "mincpunumber": 2, "minmemory": 2048 }, "1.16.3": { "semanticversion": "1.16.3", + #"url": "http://sbjenkins-stagingrepo.jenkins.lon/flatcar/setup-v1.16.3.iso", "url": "http://sbjenkins-stagingrepo.jenkins.lon/cks/binaries-iso/as-1.16.3.iso", "mincpunumber": 2, "minmemory": 2048 } }, - "cks_templates": { - "kvm": { - "name": "Kubernetes-Service-Template-kvm", - "displaytext": "Kubernetes-Service-Template kvm", - "format": "qcow2", - "hypervisor": "kvm", - "ostype": "CoreOS", - "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-kvm.qcow2.bz2", - "requireshvm": "True", - "ispublic": "True", - "isextractable": "True" - }, - "xenserver": { - "name": "Kubernetes-Service-Template-xen", - "displaytext": "Kubernetes-Service-Template xen", - "format": "vhd", - "hypervisor": "xenserver", - "ostype": "CoreOS", - "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-xen.vhd.bz2", - "requireshvm": "True", - "ispublic": "True", - "isextractable": "True" - }, - "vmware": { - "name": "Kubernetes-Service-Template-vmware", - "displaytext": "Kubernetes-Service-Template vmware", - "format": "ova", - "hypervisor": "vmware", - "ostype": "CoreOS", - "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-vmware.ova", - "requireshvm": "True", - "ispublic": "True", - "details": [{"keyboard":"us","nicAdapter":"Vmxnet3","rootDiskController":"pvscsi"}] - } - }, "cks_service_offering": { "name": "CKS-Instance", "displaytext": "CKS Instance", From e66c085ea38ca9e8c47c1176c1f120c7c6984a2e Mon Sep 17 00:00:00 2001 From: davidjumani Date: Mon, 2 Nov 2020 13:00:20 +0530 Subject: [PATCH 023/117] Adding verbosity to upgrade script --- .../src/main/resources/script/upgrade-kubernetes.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh index b5000da0e081..96e07a92f119 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh @@ -108,17 +108,17 @@ if [ -d "$BINARIES_DIR" ]; then if [ "${IS_MAIN_MASTER}" == 'true' ]; then set +e - kubeadm upgrade apply ${UPGRADE_VERSION} -y + kubeadm --v=5 upgrade apply ${UPGRADE_VERSION} -y retval=$? set -e if [ $retval -ne 0 ]; then - kubeadm upgrade apply ${UPGRADE_VERSION} --ignore-preflight-errors=CoreDNSUnsupportedPlugins -y + kubeadm --v=5 upgrade apply ${UPGRADE_VERSION} --ignore-preflight-errors=CoreDNSUnsupportedPlugins -y fi else if [ "${IS_OLD_VERSION}" == 'true' ]; then - kubeadm upgrade node config --kubelet-version ${UPGRADE_VERSION} + kubeadm --v=5 upgrade node config --kubelet-version ${UPGRADE_VERSION} else - kubeadm upgrade node + kubeadm --v=5 upgrade node fi fi From 200e01c7bdf153bec769a8662a1957844530730a Mon Sep 17 00:00:00 2001 From: davidjumani Date: Mon, 2 Nov 2020 13:02:20 +0530 Subject: [PATCH 024/117] Adding supportsautoscaling to supported version response --- .../java/org/apache/cloudstack/api/ApiConstants.java | 1 + .../version/KubernetesVersionManagerImpl.java | 9 +++------ .../response/KubernetesSupportedVersionResponse.java | 12 ++++++++++++ 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index ba797196fdc3..0d91e8d6e12c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -823,6 +823,7 @@ public class ApiConstants { public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid"; public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize"; public static final String SUPPORTS_HA = "supportsha"; + public static final String SUPPORTS_AUTOSCALING = "supportsautoscaling"; public static final String AUTOSCALING_ENABLED = "autoscalingenabled"; public static final String MIN_SIZE = "minsize"; public static final String MAX_SIZE = "maxsize"; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index 41ef095c7ab6..9e58cf395d5d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -96,12 +96,9 @@ private KubernetesSupportedVersionResponse createKubernetesSupportedVersionRespo response.setZoneId(zone.getUuid()); response.setZoneName(zone.getName()); } - if (compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(), - KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT)>=0) { - response.setSupportsHA(true); - } else { - response.setSupportsHA(false); - } + response.setSupportsHA(compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(), + KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT)>=0); + 
response.setSupportsAutoscaling(versionSupportsAutoscaling(kubernetesSupportedVersion)); TemplateJoinVO template = templateJoinDao.findById(kubernetesSupportedVersion.getIsoId()); if (template != null) { response.setIsoId(template.getUuid()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java index 4deb50d4a0b5..c9a1b53f4509 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java @@ -64,6 +64,10 @@ public class KubernetesSupportedVersionResponse extends BaseResponse { @Param(description = "whether Kubernetes supported version supports HA, multi-master") private Boolean supportsHA; + @SerializedName(ApiConstants.SUPPORTS_AUTOSCALING) + @Param(description = "whether Kubernetes supported version supports Autoscaling") + private Boolean supportsAutoscaling; + @SerializedName(ApiConstants.STATE) @Param(description = "the enabled or disabled state of the Kubernetes supported version") private String state; @@ -171,4 +175,12 @@ public Integer getMinimumRamSize() { public void setMinimumRamSize(Integer minimumRamSize) { this.minimumRamSize = minimumRamSize; } + + public Boolean getSupportsAutoscaling() { + return supportsAutoscaling; + } + + public void setSupportsAutoscaling(Boolean supportsAutoscaling) { + this.supportsAutoscaling = supportsAutoscaling; + } } From ca15896f691a2005217aba8bd92c87de2b0c4117 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Mon, 2 Nov 2020 13:02:52 +0530 Subject: [PATCH 025/117] Refactor --- .../cluster/KubernetesClusterManagerImpl.java | 14 +++-- .../KubernetesClusterStartWorker.java | 53 +++++++++++-------- .../KubernetesClusterUpgradeWorker.java | 6 +-- .../smoke/test_kubernetes_clusters.py | 2 +- 4 files changed, 41 insertions(+), 34 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index d15a1b6f2ea9..d905b0dcc54e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -842,9 +842,10 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd throw new PermissionDeniedException(String.format("Kubernetes cluster %s is in %s state and can not be scaled", kubernetesCluster.getName(), kubernetesCluster.getState().toString())); } + int maxClusterSize = KubernetesMaxClusterSize.valueIn(kubernetesCluster.getAccountId()); if (isAutoscalingEnabled != null && isAutoscalingEnabled) { if (clusterSize != null || serviceOfferingId != null || nodeIds != null) { - throw new InvalidParameterValueException("autoscaling can not be passed along with nodeids or clustersize or service offering"); + throw new InvalidParameterValueException("Autoscaling can not be passed along with nodeids or clustersize or service offering"); } if (!KubernetesVersionManagerImpl.versionSupportsAutoscaling(clusterVersion)) { @@ -855,15 
+856,14 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd validateEndpointUrl(); if (minSize == null || maxSize == null) { - throw new InvalidParameterValueException("autoscaling requires minsize and maxsize to be passed"); + throw new InvalidParameterValueException("Autoscaling requires minsize and maxsize to be passed"); } if (minSize < 1) { - throw new InvalidParameterValueException("minsize must be at least than 1"); + throw new InvalidParameterValueException("Minsize must be at least than 1"); } if (maxSize <= minSize) { - throw new InvalidParameterValueException("maxsize must be greater than or equal to minsize"); + throw new InvalidParameterValueException("Maxsize must be greater than or equal to minsize"); } - int maxClusterSize = KubernetesMaxClusterSize.valueIn(kubernetesCluster.getAccountId()); if (maxSize + kubernetesCluster.getMasterNodeCount() > maxClusterSize) { throw new InvalidParameterValueException( String.format("Maximum cluster size can not exceed %d. Please contact your administrator", maxClusterSize)); @@ -928,6 +928,10 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd if (clusterSize < 1) { throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled for size, %d", kubernetesCluster.getName(), clusterSize)); } + if (clusterSize + kubernetesCluster.getMasterNodeCount() > maxClusterSize) { + throw new InvalidParameterValueException( + String.format("Maximum cluster size can not exceed %d. Please contact your administrator", maxClusterSize)); + } if (clusterSize > kubernetesCluster.getNodeCount()) { // Upscale VMTemplateVO template = templateDao.findById(kubernetesCluster.getTemplateId()); if (template == null) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index d14a054339da..53b3c9300049 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -378,6 +378,25 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl throw new ManagementServerException(String.format("No source NAT IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); } + + createFirewallRules(publicIp, clusterVMIds); + + // Load balancer rule fo API access for master node VMs + try { + provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT); + } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) { + throw new ManagementServerException(String.format("Failed to provision load balancer rule for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + + // Port forwarding rule fo SSH access on each node VM + try { + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT); + } catch (ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + + private void 
createFirewallRules(IpAddress publicIp, List clusterVMIds) throws ManagementServerException { // Firewall rule fo API access for master node VMs try { provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); @@ -391,7 +410,7 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl // Firewall rule fo SSH access on each node VM try { - int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMs.size() - 1; + int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); @@ -399,20 +418,6 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } - - // Load balancer rule fo API access for master node VMs - try { - provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT); - } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) { - throw new ManagementServerException(String.format("Failed to provision load balancer rule for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); - } - - // Port forwarding rule fo SSH access on each node VM - try { - provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT); - } catch (ResourceUnavailableException | NetworkRuleConflictException e) { - throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); - } } private void startKubernetesClusterVMs() { @@ -571,6 +576,16 @@ public boolean startKubernetesClusterOnCreate() { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } retrieveScriptFiles(); + copyAutoscalerScriptsToNodes(publicIpAddress, clusterVMs); + if (!createCloudStackSecret(keys)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", + kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); + } + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + return true; + } + + private void copyAutoscalerScriptsToNodes(String publicIpAddress, List clusterVMs) { for (int i = 0; i < clusterVMs.size(); ++i) { try { copyAutoscalerScripts(publicIpAddress, CLUSTER_NODES_DEFAULT_START_SSH_PORT + i); @@ -578,14 +593,6 @@ public boolean startKubernetesClusterOnCreate() { throw new CloudRuntimeException(e); } } - if (!createCloudStackSecret(keys)) { - logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", - kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); - } - stateTransitTo(kubernetesCluster.getId(), 
KubernetesCluster.Event.OperationSucceeded); - // remove launch permissions - deleteTemplateLaunchPermission(); - return true; } public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index 5371e4742594..e7eb3c2aa921 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -113,11 +113,7 @@ private void upgradeKubernetesClusterNodes() { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to upgrade Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); } if (!result.first()) { - String message = String.format("Failed to upgrade Kubernetes cluster : %s, unable to upgrade Kubernetes node on VM : %s", - kubernetesCluster.getName(), vm.getDisplayName()); - String messageWithLogs = String.format("%s. Logs :\n%s", message, result.second()); - logMessage(Level.ERROR, messageWithLogs, null); - logTransitStateDetachIsoAndThrow(Level.ERROR, message, kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); + logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to upgrade Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } if (System.currentTimeMillis() > upgradeTimeoutTime) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, upgrade action timed out", kubernetesCluster.getName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 31a772f3d117..f81b6980500d 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -159,7 +159,7 @@ def tearDownClass(cls): "false") cls.restartServer() - cleanup_resources(cls.apiclient, cls._cleanup) + cleanup_resources(cls.apiclient, reversed(cls._cleanup)) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) if version_delete_failed == True: From 586f2613c3d9c8b8ce4b207bb3fc2c15236daffd Mon Sep 17 00:00:00 2001 From: davidjumani Date: Mon, 2 Nov 2020 14:57:35 +0530 Subject: [PATCH 026/117] Adding licensing headers --- .../resources/script/autoscale-kube-cluster | 19 ++++++++++++++++++- .../resources/script/deploy-cloudstack-secret | 19 ++++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster b/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster index 8d234b394617..fe31829a869d 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster +++ 
b/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster @@ -1,4 +1,21 @@ -#! /bin/bash +#!/bin/bash -e +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + function usage() { cat << USAGE Usage: ./autoscale-kube-cluster [OPTIONS]... diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret index e734e0436bbf..9084b2c016df 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret @@ -1,4 +1,21 @@ -#! /bin/bash +#!/bin/bash -e +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + function usage() { cat << USAGE Usage: ./deploy-cloudstack-secret [OPTIONS]... From e8ab782368b97ff1e06a5bb07ccf4e2ba89377f8 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Mon, 2 Nov 2020 17:08:13 +0530 Subject: [PATCH 027/117] Adding absolute path to kubectl --- .../src/main/resources/conf/k8s-master.yml | 14 +++++++------- .../main/resources/script/autoscale-kube-cluster | 6 +++--- .../main/resources/script/deploy-cloudstack-secret | 2 +- .../main/resources/script/upgrade-kubernetes.sh | 4 ++-- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml index d17adc664057..25f773f6d38f 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml @@ -247,17 +247,17 @@ write_files: if [ -d "$K8S_CONFIG_SCRIPTS_COPY_DIR" ]; then ### Network, dashboard configs available offline ### echo "Offline configs are available!" 
- kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml - kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml + /opt/bin/kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml + /opt/bin/kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml rm -rf "${K8S_CONFIG_SCRIPTS_COPY_DIR}" else - kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" - kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml + /opt/bin/kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(/opt/bin/kubectl version | base64 | tr -d '\n')" + /opt/bin/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml fi - kubectl create rolebinding admin-binding --role=admin --user=admin || true - kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true - kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true + /opt/bin/kubectl create rolebinding admin-binding --role=admin --user=admin || true + /opt/bin/kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true + /opt/bin/kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true sudo touch /home/debian/success echo "true" > /home/debian/success diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster b/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster index fe31829a869d..4f2254fdc051 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster @@ -72,7 +72,7 @@ done if [ $ENABLE == "true" ] ; then if [ -e /opt/autoscaler/autoscaler_tmpl.yaml ]; then sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml - kubectl apply -f /opt/autoscaler/autoscaler_now.yaml + /opt/bin/kubectl apply -f /opt/autoscaler/autoscaler_now.yaml exit 0 else mkdir -p /opt/autoscaler @@ -84,10 +84,10 @@ if [ $ENABLE == "true" ] ; then exit 1 else sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml - kubectl apply -f /opt/autoscaler/autoscaler_now.yaml + /opt/bin/kubectl apply -f /opt/autoscaler/autoscaler_now.yaml exit 0 fi fi else - kubectl delete deployment -n kube-system cluster-autoscaler + /opt/bin/kubectl delete deployment -n kube-system cluster-autoscaler fi diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret index 9084b2c016df..2c46de7aa982 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret @@ -64,5 +64,5 @@ api-key = $API_KEY secret-key = $SECRET_KEY EOF # Create secret if not exists -kubectl -n kube-system get secret cloudstack-secret || kubectl -n kube-system create secret generic cloudstack-secret --from-file=/tmp/cloud-config +/opt/bin/kubectl -n 
kube-system get secret cloudstack-secret || /opt/bin/kubectl -n kube-system create secret generic cloudstack-secret --from-file=/tmp/cloud-config rm /tmp/cloud-config diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh index 96e07a92f119..dd7703b466bb 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh @@ -128,8 +128,8 @@ if [ -d "$BINARIES_DIR" ]; then systemctl restart kubelet if [ "${IS_MAIN_MASTER}" == 'true' ]; then - kubectl apply -f ${BINARIES_DIR}/network.yaml - kubectl apply -f ${BINARIES_DIR}/dashboard.yaml + /opt/bin/kubectl apply -f ${BINARIES_DIR}/network.yaml + /opt/bin/kubectl apply -f ${BINARIES_DIR}/dashboard.yaml fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" From 7f5e3297c83918e8c56d141512d3706520b530fb Mon Sep 17 00:00:00 2001 From: davidjumani Date: Mon, 2 Nov 2020 17:10:00 +0530 Subject: [PATCH 028/117] Removing sudo for kubectl commands --- .../actionworkers/KubernetesClusterScaleWorker.java | 4 ++-- .../actionworkers/KubernetesClusterUpgradeWorker.java | 2 +- .../cluster/utils/KubernetesClusterUtil.java | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 8833907bc92d..2e9b57cfb3e0 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -193,13 +193,13 @@ private boolean removeKubernetesClusterNode(final String ipAddress, final int po retryCounter++; try { Pair result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), + pkFile, null, String.format("/opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); if (!result.first()) { LOGGER.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); } else { result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo /opt/bin/kubectl delete node %s", hostName), + pkFile, null, String.format("/opt/bin/kubectl delete node %s", hostName), 10000, 10000, 30000); if (result.first()) { return true; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index e7eb3c2aa921..ac8f5b49df41 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -91,7 +91,7 @@ private void 
upgradeKubernetesClusterNodes() { } try { result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, - String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), + String.format("/opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); } catch (Exception e) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to drain Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index abb9fbf662d7..1c1c3056fe36 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -41,7 +41,7 @@ public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kuber String user, File sshKeyFile, String nodeName) throws Exception { Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - String.format("sudo /opt/bin/kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()), + String.format("/opt/bin/kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()), 10000, 10000, 20000); if (result.first() && nodeName.equals(result.second().trim())) { return true; @@ -102,7 +102,7 @@ public static boolean uncordonKubernetesClusterNode(final KubernetesCluster kube Pair result = null; try { result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - String.format("sudo /opt/bin/kubectl uncordon %s", hostName), + String.format("/opt/bin/kubectl uncordon %s", hostName), 10000, 10000, 30000); if (result.first()) { return true; @@ -125,9 +125,9 @@ public static boolean isKubernetesClusterAddOnServiceRunning(final KubernetesClu final int port, final String user, final File sshKeyFile, final String namespace, String serviceName) { try { - String cmd = "sudo /opt/bin/kubectl get pods --all-namespaces"; + String cmd = "/opt/bin/kubectl get pods --all-namespaces"; if (!Strings.isNullOrEmpty(namespace)) { - cmd = String.format("sudo /opt/bin/kubectl get pods --namespace=%s", namespace); + cmd = String.format("/opt/bin/kubectl get pods --namespace=%s", namespace); } Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, cmd, @@ -203,7 +203,7 @@ public static int getKubernetesClusterReadyNodesCount(final KubernetesCluster ku final int port, final String user, final File sshKeyFile) throws Exception { Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - "sudo /opt/bin/kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", + "/opt/bin/kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", 10000, 10000, 20000); if (result.first()) { return Integer.parseInt(result.second().trim().replace("\"", "")); From 2e69c17dc862042fc28bb5cc177f25f4ece895c1 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 3 Nov 2020 11:09:38 +0530 Subject: [PATCH 029/117] refactor code and test --- .../resources/META-INF/db/schema-41510to41600.sql | 2 ++ 
.../cloud/network/as/AutoScaleManagerImpl.java | 2 +- .../src/main/java/com/cloud/vm/UserVmManager.java | 2 +- .../integration/smoke/test_kubernetes_clusters.py | 15 ++++++++++----- tools/marvin/marvin/config/test_data.py | 7 +++---- 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index 1bac92f96d09..83b0a6f70ef4 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -23,6 +23,8 @@ ALTER TABLE `cloud`.`user_vm` ADD COLUMN `user_vm_type` varchar(255) DEFAULT "Us UPDATE `cloud`.`vm_template` set deploy_as_is = 1 where id = 8; +DELETE FROM `cloud`.`configuration` WHERE name IN ("cloud.kubernetes.cluster.template.name.kvm", "cloud.kubernetes.cluster.template.name.vmware", "cloud.kubernetes.cluster.template.name.xenserver", "cloud.kubernetes.cluster.template.name.hyperv"); + ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0; ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint; ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `maxsize` bigint; diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index f0f2b3a5a1fd..60ddbb5be1e5 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -1336,7 +1336,7 @@ private long createNewVM(AutoScaleVmGroupVO asGroup) { } else { vm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), - null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null, null, String.valueOf(UserVmManager.UserVmType.AutoScaleVM)); + null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null, null, null); } } diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java index 3fb81228b68a..0775e973e1a7 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManager.java +++ b/server/src/main/java/com/cloud/vm/UserVmManager.java @@ -56,7 +56,7 @@ public interface UserVmManager extends UserVmService { static final int MAX_USER_DATA_LENGTH_BYTES = 2048; public static enum UserVmType { - UserVM, AutoScaleVM, CKSNode + CKSNode } /** diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index f81b6980500d..40c113ada637 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -326,19 +326,21 @@ def test_03_deploy_and_upgrade_kubernetes_cluster(self): if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster() + k8s_cluster = self.getValidKubernetesCluster(1, 1, self.kubernetes_version_3) + time.sleep(self.services["sleep"]) self.debug("Upgrading Kubernetes cluster with ID: %s" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_3.id) + k8s_cluster = 
self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_4.id) except Exception as e: self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e) self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_3.id) - + self.debug("Deleting cluster : "+ k8s_cluster.id) + self.deleteKubernetesClusterAndVerify(k8s_cluster.id) return @attr(tags=["advanced", "smoke"], required_hardware="true") @@ -617,9 +619,12 @@ def waitForAutoscalerPodInRunningState(self, cluster_id, retries=5, interval=60) retries = retries - 1 return False - def getValidKubernetesCluster(self, size=1, master_nodes=1, autoscaling=False): + def getValidKubernetesCluster(self, size=1, master_nodes=1, version={}, autoscaling=False): cluster = k8s_cluster - version = self.kubernetes_version_2 + if not version: + version = self.kubernetes_version_2 + else: + version = self.kubernetes_version_3 if master_nodes != 1: version = self.kubernetes_version_3 valid = True diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index d3791cc201d4..7059070f881d 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -2018,25 +2018,24 @@ "cks_kubernetes_versions": { "1.14.9": { "semanticversion": "1.14.9", - "url": "http://sbjenkins-stagingrepo.jenkins.lon/flatcar/setup-v1.14.9.iso", + "url": "http://download.cloudstack.org/cks/setup-1.14.9.iso", "mincpunumber": 2, "minmemory": 2048 }, "1.15.0": { "semanticversion": "1.15.0", - "url": "http://sbjenkins-stagingrepo.jenkins.lon/flatcar/setup-v1.15.0.iso", + "url": "http://download.cloudstack.org/cks/setup-1.15.0.iso", "mincpunumber": 2, "minmemory": 2048 }, "1.16.0": { "semanticversion": "1.16.0", - "url": "http://sbjenkins-stagingrepo.jenkins.lon/flatcar/setup-v1.16.0.iso", + "url": "http://download.cloudstack.org/cks/setup-1.16.0.iso", "mincpunumber": 2, "minmemory": 2048 }, "1.16.3": { "semanticversion": "1.16.3", - #"url": "http://sbjenkins-stagingrepo.jenkins.lon/flatcar/setup-v1.16.3.iso", "url": "http://sbjenkins-stagingrepo.jenkins.lon/cks/binaries-iso/as-1.16.3.iso", "mincpunumber": 2, "minmemory": 2048 From e53af8db96264d4f83950d97e52ea27897c53671 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 3 Nov 2020 11:33:55 +0530 Subject: [PATCH 030/117] Refactoring tests --- .../smoke/test_kubernetes_clusters.py | 48 +++++++------------ 1 file changed, 17 insertions(+), 31 deletions(-) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 40c113ada637..fbbbc72a472f 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -92,32 +92,24 @@ def setUpClass(cls): if cls.setup_failed == False: try: - cls.kubernetes_version_1 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.14.9"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_1.id) - except Exception as e: - cls.setup_failed = True - cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % - (cls.services["cks_kubernetes_versions"]["1.14.9"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.14.9"]["url"], e)) - if cls.setup_failed == False: - try: - cls.kubernetes_version_2 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.15.0"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_2.id) + 
cls.kubernetes_version_1_15_0 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.15.0"]) + cls.kubernetes_version_ids.append(cls.kubernetes_version_1_15_0.id) except Exception as e: cls.setup_failed = True cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % (cls.services["cks_kubernetes_versions"]["1.15.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.15.0"]["url"], e)) if cls.setup_failed == False: try: - cls.kubernetes_version_3 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.0"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_3.id) + cls.kubernetes_version_1_16_0 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.0"]) + cls.kubernetes_version_ids.append(cls.kubernetes_version_1_16_0.id) except Exception as e: cls.setup_failed = True cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % (cls.services["cks_kubernetes_versions"]["1.16.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.0"]["url"], e)) if cls.setup_failed == False: try: - cls.kubernetes_version_4 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.3"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_4.id) + cls.kubernetes_version_1_16_3 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.3"]) + cls.kubernetes_version_ids.append(cls.kubernetes_version_1_16_3.id) except Exception as e: cls.setup_failed = True cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % @@ -306,8 +298,8 @@ def test_02_invalid_upgrade_kubernetes_cluster(self): self.debug("Upgrading Kubernetes cluster with ID: %s to a lower version" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1.id) - self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kubernetes_version_1.id) + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_15_0.id) + self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kubernetes_version_1_15_0.id) self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. 
Must be an error.") except Exception as e: @@ -326,19 +318,19 @@ def test_03_deploy_and_upgrade_kubernetes_cluster(self): if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster(1, 1, self.kubernetes_version_3) + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_0) time.sleep(self.services["sleep"]) self.debug("Upgrading Kubernetes cluster with ID: %s" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_4.id) + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_16_3.id) except Exception as e: self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e) - self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_3.id) + self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_16_3.id) self.debug("Deleting cluster : "+ k8s_cluster.id) self.deleteKubernetesClusterAndVerify(k8s_cluster.id) return @@ -418,7 +410,7 @@ def test_06_deploy_invalid_kubernetes_ha_cluster(self): self.debug("Creating for Kubernetes cluster with name %s" % name) try: - cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_2.id, 1, 2) + cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_1_15_0.id, 1, 2) self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % cluster_response.id) self.deleteKubernetesClusterAndVerify(cluster_response.id, False, True) self.fail("HA Kubernetes cluster deployed with Kubernetes supported version below version 1.16.0. Must be an error.") @@ -461,12 +453,12 @@ def test_08_deploy_and_upgrade_kubernetes_ha_cluster(self): self.debug("Upgrading HA Kubernetes cluster with ID: %s" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_4.id) + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_16_3.id) except Exception as e: self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e) - self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_4.id) + self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_16_3.id) self.debug("Kubernetes cluster with ID: %s successfully upgraded" % k8s_cluster.id) @@ -505,7 +497,7 @@ def test_10_deploy_and_autoscale_kubernetes_cluster(self): if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster(1, 1, True) + k8s_cluster = self.getValidKubernetesCluster(version=self.self.kubernetes_version_1_16_3.id) self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) @@ -619,17 +611,11 @@ def waitForAutoscalerPodInRunningState(self, cluster_id, retries=5, interval=60) retries = retries - 1 return False - def getValidKubernetesCluster(self, size=1, master_nodes=1, version={}, autoscaling=False): + def getValidKubernetesCluster(self, size=1, master_nodes=1, version={}): cluster = k8s_cluster if not version: - version = self.kubernetes_version_2 - else: - version = self.kubernetes_version_3 - if master_nodes != 1: - version = self.kubernetes_version_3 + version = self.kubernetes_version_1_16_0 valid = True - if autoscaling: - version = self.kubernetes_version_4 if cluster == None: valid = False self.debug("No 
existing cluster available, k8s_cluster: %s" % cluster) From 347428d5241a7f7e4a1a599d6fb2f24908e73806 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 3 Nov 2020 13:50:12 +0530 Subject: [PATCH 031/117] Fixes --- .../actionworkers/KubernetesClusterActionWorker.java | 10 +--------- .../KubernetesClusterResourceModifierActionWorker.java | 4 ++-- test/integration/smoke/test_kubernetes_clusters.py | 3 +-- tools/marvin/marvin/config/test_data.py | 6 ------ 4 files changed, 4 insertions(+), 19 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index ad07a004f70e..71e3cc8d454d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -25,7 +25,6 @@ import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; import javax.inject.Inject; @@ -283,13 +282,6 @@ protected String getMasterVmPrivateIp() { return ip; } - private boolean containsMasterNode(List clusterVMs) { - List nodeNames = clusterVMs.stream().map(vm -> vm.getHostName()).collect(Collectors.toList()); - boolean present = false; - present = nodeNames.stream().anyMatch(s -> s.contains("master")); - return present; - } - protected Pair getKubernetesClusterServerIpSshPort(UserVm masterVm) { int port = CLUSTER_NODES_DEFAULT_START_SSH_PORT; KubernetesClusterDetailsVO detail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS); @@ -428,7 +420,7 @@ protected boolean createCloudStackSecret(String[] keys) { try { Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo /opt/bin/deploy-cloudstack-secret -u '%s' -k '%s' -s '%s'", + pkFile, null, String.format("/opt/bin/deploy-cloudstack-secret -u '%s' -k '%s' -s '%s'", ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]), 10000, 10000, 60000); return result.first(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index e6805ac79744..d51ea0af2e70 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -579,7 +579,7 @@ protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { try { if (enable) { - String command = String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", + String command = String.format("/opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", kubernetesCluster.getUuid(), maxSize, minSize); Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, pkFile, null, command, 10000, 10000, 60000); @@ -605,7 +605,7 @@ protected boolean autoscaleCluster(boolean 
enable, Long minSize, Long maxSize) { updateKubernetesClusterEntry(true, minSize, maxSize); } else { Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"), + pkFile, null, String.format("/opt/bin/autoscale-kube-cluster -d"), 10000, 10000, 60000); if (!result.first()) { throw new CloudRuntimeException(result.second()); diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index fbbbc72a472f..dc24dad3688e 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -234,7 +234,6 @@ def addKubernetesSupportedVersion(cls, version_service): def deleteKubernetesSupportedVersion(cls, version_id): deleteKubernetesSupportedVersionCmd = deleteKubernetesSupportedVersion.deleteKubernetesSupportedVersionCmd() deleteKubernetesSupportedVersionCmd.id = version_id - deleteKubernetesSupportedVersionCmd.deleteiso = True cls.apiclient.deleteKubernetesSupportedVersion(deleteKubernetesSupportedVersionCmd) def setUp(self): @@ -497,7 +496,7 @@ def test_10_deploy_and_autoscale_kubernetes_cluster(self): if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster(version=self.self.kubernetes_version_1_16_3.id) + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_3) self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index 7059070f881d..21bafb2341ea 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -2016,12 +2016,6 @@ } }, "cks_kubernetes_versions": { - "1.14.9": { - "semanticversion": "1.14.9", - "url": "http://download.cloudstack.org/cks/setup-1.14.9.iso", - "mincpunumber": 2, - "minmemory": 2048 - }, "1.15.0": { "semanticversion": "1.15.0", "url": "http://download.cloudstack.org/cks/setup-1.15.0.iso", From 63454cecee7307f49f34952af4d9f487a6914126 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Wed, 4 Nov 2020 21:10:20 +0530 Subject: [PATCH 032/117] Upgrade path changes --- .../src/main/resources/META-INF/db/schema-41510to41600.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index 83b0a6f70ef4..ff0d80d0d66e 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -21,6 +21,7 @@ ALTER TABLE `cloud`.`user_vm` ADD COLUMN `user_vm_type` varchar(255) DEFAULT "UserVM" COMMENT 'Defines the type of UserVM'; +-- This is set, so as to ensure that the controller details from the ovf template are adhered to UPDATE `cloud`.`vm_template` set deploy_as_is = 1 where id = 8; DELETE FROM `cloud`.`configuration` WHERE name IN ("cloud.kubernetes.cluster.template.name.kvm", "cloud.kubernetes.cluster.template.name.vmware", "cloud.kubernetes.cluster.template.name.xenserver", "cloud.kubernetes.cluster.template.name.hyperv"); From 6cd04545e3f2802bb5c3f63771df6a820609e9e8 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 6 Nov 2020 15:30:07 +0530 Subject: [PATCH 033/117] Added core user for b/w compatibility + cks template fixes --- .../wrapper/LibvirtStartCommandWrapper.java | 3 ++- 
.../cluster/KubernetesClusterManagerImpl.java | 2 +- .../KubernetesClusterActionWorker.java | 15 ++++++++++++--- ...tesClusterResourceModifierActionWorker.java | 8 ++++---- .../KubernetesClusterScaleWorker.java | 4 ++-- .../KubernetesClusterStartWorker.java | 8 ++++---- .../KubernetesClusterUpgradeWorker.java | 2 +- .../cluster/utils/KubernetesClusterUtil.java | 10 +++++----- .../src/main/resources/conf/k8s-master-add.yml | 16 ++++++++++------ .../src/main/resources/conf/k8s-master.yml | 16 ++++++++++------ .../src/main/resources/conf/k8s-node.yml | 16 ++++++++++------ scripts/vm/hypervisor/kvm/patch.sh | 8 ++++++-- .../main/java/com/cloud/vm/UserVmManager.java | 4 +--- .../java/com/cloud/vm/UserVmManagerImpl.java | 4 ++-- .../cloud/bin/setup/{CKSNode.sh => cksnode.sh} | 18 ++++++++++++++---- .../debian/opt/cloud/bin/setup/postinit.sh | 6 +----- .../systemvmtemplate/http/preseed.cfg | 2 +- .../scripts/configure_systemvm_services.sh | 1 - tools/appliance/systemvmtemplate/template.json | 2 +- 19 files changed, 87 insertions(+), 58 deletions(-) rename systemvm/debian/opt/cloud/bin/setup/{CKSNode.sh => cksnode.sh} (77%) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java index 54ffe17f68c6..f151255d5cdc 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java @@ -40,6 +40,7 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; +import com.cloud.vm.UserVmManager; import com.cloud.vm.VirtualMachine; @ResourceWrapper(handles = StartCommand.class) @@ -88,7 +89,7 @@ public Answer execute(final StartCommand command, final LibvirtComputingResource libvirtComputingResource.applyDefaultNetworkRules(conn, vmSpec, false); // pass cmdline info to system vms - if (vmSpec.getType() != VirtualMachine.Type.User || (vmSpec.getBootArgs() != null && vmSpec.getBootArgs().contains("CKSNode"))) { + if (vmSpec.getType() != VirtualMachine.Type.User || (vmSpec.getBootArgs() != null && vmSpec.getBootArgs().contains(UserVmManager.CKS_NODE))) { // try to patch and SSH into the systemvm for up to 5 minutes for (int count = 0; count < 10; count++) { // wait and try passCmdLine for 30 seconds at most for CLOUDSTACK-2823 diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index d905b0dcc54e..86b6bdfc0aa3 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -335,7 +335,7 @@ private IpAddress getSourceNatIp(Network network) { return null; } - private VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType) { + public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType) { VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType); if (template == 
null) { throw new CloudRuntimeException("Not able to find the System templates or not downloaded in zone " + dataCenter.getId()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 71e3cc8d454d..a8cf798b11a5 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -38,8 +38,10 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; +import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.VlanDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.kubernetes.cluster.KubernetesCluster; import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; @@ -82,7 +84,7 @@ public class KubernetesClusterActionWorker { - public static final String CLUSTER_NODE_VM_USER = "root"; + public static final String CLUSTER_NODE_VM_USER = "core"; public static final int CLUSTER_API_PORT = 6443; public static final int CLUSTER_NODES_DEFAULT_START_SSH_PORT = 2222; @@ -141,6 +143,7 @@ public class KubernetesClusterActionWorker { protected final String deploySecretsScriptFilename = "deploy-cloudstack-secret"; protected File autoscaleScriptFile; protected File deploySecretsScriptFile; + protected KubernetesClusterManagerImpl manager; protected String[] keys; @@ -150,11 +153,17 @@ protected KubernetesClusterActionWorker(final KubernetesCluster kubernetesCluste this.kubernetesClusterDetailsDao = clusterManager.kubernetesClusterDetailsDao; this.kubernetesClusterVmMapDao = clusterManager.kubernetesClusterVmMapDao; this.kubernetesSupportedVersionDao = clusterManager.kubernetesSupportedVersionDao; + this.manager = clusterManager; } protected void init() { this.owner = accountDao.findById(kubernetesCluster.getAccountId()); - this.clusterTemplate = templateDao.findById(kubernetesCluster.getTemplateId()); + long zoneId = this.kubernetesCluster.getZoneId(); + long templateId = this.kubernetesCluster.getTemplateId(); + DataCenterVO dataCenterVO = dataCenterDao.findById(zoneId); + VMTemplateVO template = templateDao.findById(templateId); + Hypervisor.HypervisorType type = template.getHypervisorType(); + this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type); this.sshKeyFile = getManagementServerSshPublicKeyFile(); } @@ -420,7 +429,7 @@ protected boolean createCloudStackSecret(String[] keys) { try { Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("/opt/bin/deploy-cloudstack-secret -u '%s' -k '%s' -s '%s'", + pkFile, null, String.format("sudo /opt/bin/deploy-cloudstack-secret -u '%s' -k '%s' -s '%s'", ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]), 10000, 10000, 60000); return result.first(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index d51ea0af2e70..76323160cb25 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -148,7 +148,7 @@ private String getKubernetesNodeConfig(final String joinIp, final boolean ejectI if (!Strings.isNullOrEmpty(sshKeyPair)) { SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); if (sshkp != null) { - pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } k8sNodeConfig = k8sNodeConfig.replace(sshPubKey, pubKey); @@ -346,7 +346,7 @@ protected UserVm createKubernetesNode(String joinIp) throws ManagementServerExce nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), - null, addrs, null, null, null, customParameterMap, null, null, null, null, String.valueOf(UserVmManager.UserVmType.CKSNode)); + null, addrs, null, null, null, customParameterMap, null, null, null, null, UserVmManager.CKS_NODE); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Created node VM : %s, %s in the Kubernetes cluster : %s", hostName, nodeVm.getUuid(), kubernetesCluster.getName())); } @@ -579,7 +579,7 @@ protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { try { if (enable) { - String command = String.format("/opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", + String command = String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", kubernetesCluster.getUuid(), maxSize, minSize); Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, pkFile, null, command, 10000, 10000, 60000); @@ -605,7 +605,7 @@ protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { updateKubernetesClusterEntry(true, minSize, maxSize); } else { Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("/opt/bin/autoscale-kube-cluster -d"), + pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"), 10000, 10000, 60000); if (!result.first()) { throw new CloudRuntimeException(result.second()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 2e9b57cfb3e0..8833907bc92d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -193,13 +193,13 @@ private boolean removeKubernetesClusterNode(final String ipAddress, final int po retryCounter++; try { Pair result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("/opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), + pkFile, null, String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); if (!result.first()) { 
LOGGER.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); } else { result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("/opt/bin/kubectl delete node %s", hostName), + pkFile, null, String.format("sudo /opt/bin/kubectl delete node %s", hostName), 10000, 10000, 30000); if (result.first()) { return true; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 53b3c9300049..98aa18dae36e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -159,7 +159,7 @@ private String getKubernetesMasterConfig(final String masterIp, final String ser if (!Strings.isNullOrEmpty(sshKeyPair)) { SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); if (sshkp != null) { - pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey); @@ -210,7 +210,7 @@ private UserVm createKubernetesMaster(final Network network, String serverIp) th masterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), - requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, String.valueOf(UserVmManager.UserVmType.CKSNode)); + requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, UserVmManager.CKS_NODE); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster : %s", masterVm.getUuid(), hostName, kubernetesCluster.getName())); } @@ -229,7 +229,7 @@ private String getKubernetesAdditionalMasterConfig(final String joinIp, final bo if (!Strings.isNullOrEmpty(sshKeyPair)) { SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); if (sshkp != null) { - pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey); @@ -265,7 +265,7 @@ private UserVm createKubernetesAdditionalMaster(final String joinIp, final int a additionalMasterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), - null, addrs, null, null, null, customParameterMap, null, null, null, null, String.valueOf(UserVmManager.UserVmType.CKSNode)); + null, addrs, null, null, null, customParameterMap, null, null, null, null, UserVmManager.CKS_NODE); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Created master VM ID : %s, %s in the Kubernetes cluster : %s", additionalMasterVm.getUuid(), hostName, kubernetesCluster.getName())); } diff --git 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index ac8f5b49df41..e7eb3c2aa921 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -91,7 +91,7 @@ private void upgradeKubernetesClusterNodes() { } try { result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, - String.format("/opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), + String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); } catch (Exception e) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to drain Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index 1c1c3056fe36..abb9fbf662d7 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -41,7 +41,7 @@ public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kuber String user, File sshKeyFile, String nodeName) throws Exception { Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - String.format("/opt/bin/kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()), + String.format("sudo /opt/bin/kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()), 10000, 10000, 20000); if (result.first() && nodeName.equals(result.second().trim())) { return true; @@ -102,7 +102,7 @@ public static boolean uncordonKubernetesClusterNode(final KubernetesCluster kube Pair result = null; try { result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - String.format("/opt/bin/kubectl uncordon %s", hostName), + String.format("sudo /opt/bin/kubectl uncordon %s", hostName), 10000, 10000, 30000); if (result.first()) { return true; @@ -125,9 +125,9 @@ public static boolean isKubernetesClusterAddOnServiceRunning(final KubernetesClu final int port, final String user, final File sshKeyFile, final String namespace, String serviceName) { try { - String cmd = "/opt/bin/kubectl get pods --all-namespaces"; + String cmd = "sudo /opt/bin/kubectl get pods --all-namespaces"; if (!Strings.isNullOrEmpty(namespace)) { - cmd = String.format("/opt/bin/kubectl get pods --namespace=%s", namespace); + cmd = String.format("sudo /opt/bin/kubectl get pods --namespace=%s", namespace); } Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, cmd, @@ -203,7 +203,7 @@ public static int getKubernetesClusterReadyNodesCount(final KubernetesCluster ku final int port, final String user, final File 
sshKeyFile) throws Exception { Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - "/opt/bin/kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", + "sudo /opt/bin/kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", 10000, 10000, 20000); if (result.first()) { return Integer.parseInt(result.second().trim().replace("\"", "")); diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml index 9e395a3f5673..370c3bfaa32a 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml @@ -17,8 +17,12 @@ # under the License. --- -ssh_authorized_keys: - {{ k8s.ssh.pub.key }} +users: + - name: core + sudo: ALL=(ALL) NOPASSWD:ALL + shell: /bin/bash + ssh_authorized_keys: + {{ k8s.ssh.pub.key }} write_files: - path: /opt/bin/setup-kube-system @@ -27,7 +31,7 @@ write_files: content: | #!/bin/bash -e - if [[ -f "/home/debian/success" ]]; then + if [[ -f "/home/core/success" ]]; then echo "Already provisioned!" exit 0 fi @@ -184,7 +188,7 @@ write_files: content: | #!/bin/bash -e - if [[ -f "/home/debian/success" ]]; then + if [[ -f "/home/core/success" ]]; then echo "Already provisioned!" exit 0 fi @@ -202,8 +206,8 @@ write_files: fi kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --control-plane --certificate-key {{ k8s_master.cluster.ha.certificate.key }} --discovery-token-unsafe-skip-ca-verification - sudo touch /home/debian/success - echo "true" > /home/debian/success + sudo touch /home/core/success + echo "true" > /home/core/success - path: /etc/systemd/system/setup-kube-system.service permissions: '0755' diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml index 25f773f6d38f..661503958b2c 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml @@ -17,8 +17,12 @@ # under the License. --- -ssh_authorized_keys: - {{ k8s.ssh.pub.key }} +users: + - name: core + sudo: ALL=(ALL) NOPASSWD:ALL + shell: /bin/bash + ssh_authorized_keys: + {{ k8s.ssh.pub.key }} write_files: - path: /etc/conf.d/nfs @@ -47,7 +51,7 @@ write_files: content: | #!/bin/bash -e - if [[ -f "/home/debian/success" ]]; then + if [[ -f "/home/core/success" ]]; then echo "Already provisioned!" exit 0 fi @@ -223,7 +227,7 @@ write_files: content: | #!/bin/bash -e - if [[ -f "/home/debian/success" ]]; then + if [[ -f "/home/core/success" ]]; then echo "Already provisioned!" 
exit 0 fi @@ -259,8 +263,8 @@ write_files: /opt/bin/kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true /opt/bin/kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true - sudo touch /home/debian/success - echo "true" > /home/debian/success + sudo touch /home/core/success + echo "true" > /home/core/success - path: /etc/systemd/system/setup-kube-system.service permissions: '0755' diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml index 74cd18b5ea7d..9bb7f75fe72f 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml @@ -17,8 +17,12 @@ # under the License. --- -ssh_authorized_keys: - {{ k8s.ssh.pub.key }} +users: + - name: core + sudo: ALL=(ALL) NOPASSWD:ALL + shell: /bin/bash + ssh_authorized_keys: + {{ k8s.ssh.pub.key }} write_files: - path: /opt/bin/setup-kube-system @@ -27,7 +31,7 @@ write_files: content: | #!/bin/bash -e - if [[ -f "/home/debian/success" ]]; then + if [[ -f "/home/core/success" ]]; then echo "Already provisioned!" exit 0 fi @@ -184,7 +188,7 @@ write_files: content: | #!/bin/bash -e - if [[ -f "/home/debian/success" ]]; then + if [[ -f "/home/core/success" ]]; then echo "Already provisioned!" exit 0 fi @@ -202,8 +206,8 @@ write_files: fi kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --discovery-token-unsafe-skip-ca-verification - sudo touch /home/debian/success - echo "true" > /home/debian/success + sudo touch /home/core/success + echo "true" > /home/core/success - path: /etc/systemd/system/setup-kube-system.service permissions: '0755' diff --git a/scripts/vm/hypervisor/kvm/patch.sh b/scripts/vm/hypervisor/kvm/patch.sh index e7c79fd9a739..5c77fd6df462 100755 --- a/scripts/vm/hypervisor/kvm/patch.sh +++ b/scripts/vm/hypervisor/kvm/patch.sh @@ -25,6 +25,7 @@ while getopts "n:c:h" opt; do name=$OPTARG ;; c ) + bootargs=$OPTARG cmdline=$(echo $OPTARG | base64 -w 0) ;; h ) @@ -70,8 +71,11 @@ do sleep 0.1 done -# Write ssh public key -send_file $name "/root/.ssh/authorized_keys" $sshkey +# Write ssh public key - only for systemVMs. For CKS nodes, the userdata handles pushing of the ssh public keys +vmtype=$(echo $bootargs | grep -Po 'type=\K[a-zA-Z]*') +if [ $vmtype != 'cksnode' ]; then + send_file $name "/root/.ssh/authorized_keys" $sshkey +fi # Fix ssh public key permission virsh qemu-agent-command $name '{"execute":"guest-exec","arguments":{"path":"chmod","arg":["go-rwx","/root/.ssh/authorized_keys"]}}' > /dev/null diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java index 0775e973e1a7..1cfb1f6d1927 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManager.java +++ b/server/src/main/java/com/cloud/vm/UserVmManager.java @@ -55,9 +55,7 @@ public interface UserVmManager extends UserVmService { static final int MAX_USER_DATA_LENGTH_BYTES = 2048; - public static enum UserVmType { - CKSNode - } + public static final String CKS_NODE = "cksnode"; /** * @param hostId get all of the virtual machines that belong to one host. 
diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 2cc0ec618afe..6cd28929ebe5 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -3695,7 +3695,7 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe } } - if (template.getTemplateType().equals(TemplateType.SYSTEM) && !String.valueOf(UserVmType.CKSNode).equals(type)) { + if (template.getTemplateType().equals(TemplateType.SYSTEM) && !CKS_NODE.equals(type)) { throw new InvalidParameterValueException("Unable to use system template " + template.getId() + " to deploy a user vm"); } List listZoneTemplate = _templateZoneDao.listByZoneTemplate(zone.getId(), template.getId()); @@ -4523,7 +4523,7 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl Map details = userVmDetailsDao.listDetailsKeyPairs(vm.getId()); vm.setDetails(details); StringBuilder buf = profile.getBootArgsBuilder(); - if (String.valueOf(UserVmType.CKSNode).equals(vm.getUserVmType())) { + if (CKS_NODE.equals(vm.getUserVmType())) { addUserVMCmdlineArgs(vm.getId(), profile, dest, buf); } // add userdata info into vm profile diff --git a/systemvm/debian/opt/cloud/bin/setup/CKSNode.sh b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh similarity index 77% rename from systemvm/debian/opt/cloud/bin/setup/CKSNode.sh rename to systemvm/debian/opt/cloud/bin/setup/cksnode.sh index bd79e885096a..45361984be4a 100755 --- a/systemvm/debian/opt/cloud/bin/setup/CKSNode.sh +++ b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh @@ -24,6 +24,11 @@ setup_k8s_node() { # set default ssh port and restart sshd service sed -i 's/3922/22/g' /etc/ssh/sshd_config + # Prevent root login + > /root/.ssh/authorized_keys + passwd -l root + #sed -i 's#root:x:0:0:root:/root:/bin/bash#root:x:0:0:root:/root:/sbin/nologin#' /etc/passwd + swapoff -a sudo sed -i '/ swap / s/^/#/' /etc/fstab log_it "Swap disabled" @@ -45,7 +50,7 @@ setup_k8s_node() { enable_fwding 1 enable_irqbalance 0 setup_ntp - dhclient + dhclient -timeout 60 rm -f /etc/logrotate.d/cloud @@ -53,9 +58,14 @@ setup_k8s_node() { systemctl enable --now --no-block containerd systemctl enable --now --no-block docker.socket systemctl enable --now --no-block docker.service - systemctl enable --now --no-block cloud-init - systemctl enable --now --no-block cloud-config - systemctl enable --now --no-block cloud-final + if [ -f /home/core/success ]; then + systemctl stop cloud-init cloud-config cloud-final + systemctl disable cloud-init cloud-config cloud-final + else + systemctl start --no-block cloud-init + systemctl start --no-block cloud-config + systemctl start --no-block cloud-final + fi } setup_k8s_node \ No newline at end of file diff --git a/systemvm/debian/opt/cloud/bin/setup/postinit.sh b/systemvm/debian/opt/cloud/bin/setup/postinit.sh index 0ebd73a23ca6..a8dffeed71c4 100755 --- a/systemvm/debian/opt/cloud/bin/setup/postinit.sh +++ b/systemvm/debian/opt/cloud/bin/setup/postinit.sh @@ -26,7 +26,7 @@ log_it() { # Eject cdrom if any CMDLINE=/var/cache/cloud/cmdline export TYPE=$(grep -Po 'type=\K[a-zA-Z]*' $CMDLINE) -if [ "$TYPE" != "CKSNode" ]; then +if [ "$TYPE" != "cksnode" ]; then eject || true fi @@ -42,10 +42,6 @@ then fi fi -if [ "$TYPE" == "CKSNode" ]; then - pkill -9 dhclient -fi - [ ! 
-f /var/cache/cloud/enabled_svcs ] && touch /var/cache/cloud/enabled_svcs for svc in $(cat /var/cache/cloud/enabled_svcs) do diff --git a/tools/appliance/systemvmtemplate/http/preseed.cfg b/tools/appliance/systemvmtemplate/http/preseed.cfg index 0a07e97b15dd..7dfd74d15dbc 100644 --- a/tools/appliance/systemvmtemplate/http/preseed.cfg +++ b/tools/appliance/systemvmtemplate/http/preseed.cfg @@ -62,7 +62,7 @@ d-i partman-auto/expert_recipe string \ use_filesystem{ } filesystem{ ext2 } \ mountpoint{ /boot } \ . \ - 5000 40 10000 ext4 \ + 5500 40 6000 ext4 \ method{ format } format{ } \ use_filesystem{ } filesystem{ ext4 } \ mountpoint{ / } \ diff --git a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh index 72ea34ac6156..fb682bc94218 100644 --- a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh +++ b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh @@ -140,7 +140,6 @@ datasource: timeout: 50 EOF - sed -i 's/\(disable_root: \)\(.*\)/\1false/' /etc/cloud/cloud.cfg touch /etc/cloud/cloud-init.disabled systemctl stop cloud-init systemctl disable cloud-init diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index ec9894a76207..7e9b72197739 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -33,7 +33,7 @@ [ "-smp", "1" ] ], "format": "qcow2", - "disk_size": 10000, + "disk_size": 6000, "disk_interface": "virtio", "net_device": "virtio-net", "iso_url": "https://download.cloudstack.org/systemvm/debian/debian-10.7.0-amd64-netinst.iso", From 46250e52c03b3fd4b2f1de306a5d7fd87391e621 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Fri, 6 Nov 2020 16:22:37 +0530 Subject: [PATCH 034/117] Renaming db column --- .../src/main/resources/META-INF/db/schema-41510to41600.sql | 2 +- .../com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index ff0d80d0d66e..273819ccb3cd 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -30,4 +30,4 @@ ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyin ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint; ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `maxsize` bigint; -ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `is_master` tinyint(1) unsigned NOT NULL DEFAULT 0; \ No newline at end of file +ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `master` tinyint(1) unsigned NOT NULL DEFAULT 0; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java index abbd90a949a9..d9ed7ae119ac 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java @@ -39,7 +39,7 @@ public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap { @Column(name = "vm_id") long vmId; - 
@Column(name = "is_master") + @Column(name = "master") boolean master; public KubernetesClusterVmMapVO() { From 5f36ddb22f0276fda0edddc7ffd850f8b42d0a77 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Fri, 14 Feb 2020 20:32:23 +0000 Subject: [PATCH 035/117] systemvm: install haproxy 1.8 instead of 1.7 to support HTTP2 --- .../systemvmtemplate/scripts/install_systemvm_packages.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index 1cbdfea56308..ed58a0035615 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -71,7 +71,6 @@ function install_packages() { iptables-persistent \ libtcnative-1 libssl-dev libapr1-dev \ python-flask \ - haproxy \ haveged \ radvd \ sharutils genisoimage aria2 \ @@ -79,6 +78,12 @@ function install_packages() { virt-what open-vm-tools qemu-guest-agent hyperv-daemons \ apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common cloud-init + apt-get -q -y -t stretch-backports install nftables openjdk-11-jre-headless haproxy + + apt-get -y autoremove --purge + apt-get clean + apt-get autoclean + apt_clean ${apt_get} install links From a5fa98861b814dc314212da90fc538aab724d0fa Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Fri, 14 Feb 2020 20:31:34 +0000 Subject: [PATCH 036/117] systemvm: configurable root disk size --- .../com/cloud/vm/VirtualMachineManagerImpl.java | 14 ++++++++++++-- .../debian/opt/cloud/bin/setup/cloud-early-config | 4 ++++ tools/appliance/systemvmtemplate/http/preseed.cfg | 8 ++++---- .../scripts/install_systemvm_packages.sh | 2 +- 4 files changed, 21 insertions(+), 7 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index dd5a280385bc..b68f3ef2e2d7 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -387,6 +387,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac static final ConfigKey HaVmRestartHostUp = new ConfigKey("Advanced", Boolean.class, "ha.vm.restart.hostup", "true", "If an out-of-band stop of a VM is detected and its host is up, then power on the VM", true); + static final ConfigKey SystemVmRootDiskSize = new ConfigKey("Advanced", + Long.class, "systemvm.root.disk.size", "-1", + "root size (in GB) of systemvm and virtual routers", true); + ScheduledExecutorService _executor = null; private long _nodeId; @@ -435,6 +439,12 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmFinal, template, serviceOffering, null, null); + Long rootDiskSize = rootDiskOfferingInfo.getSize(); + if (vm.getType().isUsedBySystem() && SystemVmRootDiskSize.value() != null) { + rootDiskSize = SystemVmRootDiskSize.value(); + } + final Long rootDiskSizeFinal = rootDiskSize; + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) throws InsufficientCapacityException { @@ -460,7 +470,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws } else if 
(template.getFormat() == ImageFormat.BAREMETAL) { // Do nothing } else { - volumeMgr.allocateTemplatedVolumes(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), + volumeMgr.allocateTemplatedVolumes(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskSizeFinal, rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), template, vmFinal, owner); } @@ -4519,7 +4529,7 @@ public ConfigKey[] getConfigKeys() { return new ConfigKey[] {ClusterDeltaSyncInterval, StartRetry, VmDestroyForcestop, VmOpCancelInterval, VmOpCleanupInterval, VmOpCleanupWait, VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, HaVmRestartHostUp, - ResoureCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, + ResoureCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, SystemVmRootDiskSize, VmServiceOfferingMaxCPUCores, VmServiceOfferingMaxRAMSize }; } diff --git a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config index 58302ef3a82c..e4967d261c0d 100755 --- a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config +++ b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config @@ -88,6 +88,10 @@ start() { rm -f /root/.rnd echo "" > /root/.ssh/known_hosts + growpart /dev/vda 2 + growpart /dev/vda 6 + resize2fs /dev/vda6 + patch sync /opt/cloud/bin/setup/bootstrap.sh diff --git a/tools/appliance/systemvmtemplate/http/preseed.cfg b/tools/appliance/systemvmtemplate/http/preseed.cfg index 7dfd74d15dbc..ec070e6a05e9 100644 --- a/tools/appliance/systemvmtemplate/http/preseed.cfg +++ b/tools/appliance/systemvmtemplate/http/preseed.cfg @@ -62,13 +62,13 @@ d-i partman-auto/expert_recipe string \ use_filesystem{ } filesystem{ ext2 } \ mountpoint{ /boot } \ . \ - 5500 40 6000 ext4 \ + 256 1000 256 linux-swap \ + method{ swap } format{ } \ + . \ + 3256 40 3500 ext4 \ method{ format } format{ } \ use_filesystem{ } filesystem{ ext4 } \ mountpoint{ / } \ - . \ - 256 1000 256 linux-swap \ - method{ swap } format{ } \ . 
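The preseed and VirtualMachineManagerImpl hunks above shrink the template's baked-in root partition and introduce a global "systemvm.root.disk.size" setting (in GB, default -1) that overrides the disk offering's root size for system VMs only. As a minimal, self-contained sketch of that override decision, using illustrative names rather than the real CloudStack classes, and folding in the "> 0" guard a later commit in this series adds:

    // Illustrative sketch only; pickRootDiskSize and its parameters are made up
    // for this example and mirror the logic of the hunks above, not CloudStack APIs.
    public final class RootDiskSizeSketch {
        static Long pickRootDiskSize(boolean usedBySystem, Long offeringSizeGb, Long configuredGb) {
            // Operator override from systemvm.root.disk.size applies only to system VMs
            // and only when it is set to a positive value (default is -1, meaning unset).
            if (usedBySystem && configuredGb != null && configuredGb > 0L) {
                return configuredGb;
            }
            // Otherwise keep the offering / template-derived root disk size.
            return offeringSizeGb;
        }

        public static void main(String[] args) {
            System.out.println(pickRootDiskSize(true, 10L, 6L));   // 6  : override applies to a system VM
            System.out.println(pickRootDiskSize(true, 10L, -1L));  // 10 : default -1 leaves the offering size
            System.out.println(pickRootDiskSize(false, 10L, 6L));  // 10 : user VMs are unaffected
        }
    }

With the default of -1 the offering size is kept, so existing deployments keep their current root disk size unless an operator explicitly sets the new global setting.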
d-i partman-md/confirm boolean true diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index ed58a0035615..5c0f0e7ce502 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -75,7 +75,7 @@ function install_packages() { radvd \ sharutils genisoimage aria2 \ strongswan libcharon-extra-plugins libstrongswan-extra-plugins strongswan-charon strongswan-starter \ - virt-what open-vm-tools qemu-guest-agent hyperv-daemons \ + virt-what open-vm-tools qemu-guest-agent hyperv-daemons cloud-guest-utils \ apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common cloud-init apt-get -q -y -t stretch-backports install nftables openjdk-11-jre-headless haproxy From 7fc88572ed81706f26327bda9a7005dedaa30975 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Thu, 27 Feb 2020 09:21:45 +0000 Subject: [PATCH 037/117] systemvm: auto-grow partitions only when growpart is found --- systemvm/debian/opt/cloud/bin/setup/cloud-early-config | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config index e4967d261c0d..8ada7eaa6828 100755 --- a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config +++ b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config @@ -88,9 +88,11 @@ start() { rm -f /root/.rnd echo "" > /root/.ssh/known_hosts - growpart /dev/vda 2 - growpart /dev/vda 6 - resize2fs /dev/vda6 + if which growpart > /dev/null; then + growpart /dev/vda 2 + growpart /dev/vda 6 + resize2fs /dev/vda6 + fi patch sync From 59e11ea3b63185ed2c01273caa2b126b48eef54b Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Fri, 10 Jul 2020 11:40:33 +0000 Subject: [PATCH 038/117] add haproxy back --- .../systemvmtemplate/scripts/install_systemvm_packages.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index 5c0f0e7ce502..46790fb9046d 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -71,6 +71,7 @@ function install_packages() { iptables-persistent \ libtcnative-1 libssl-dev libapr1-dev \ python-flask \ + haproxy \ haveged \ radvd \ sharutils genisoimage aria2 \ From 01edb107d6f736dee06dd972d96a782fad6000a2 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 9 Nov 2020 12:44:52 +0530 Subject: [PATCH 039/117] grow root partition for cks node --- .../cluster/CreateKubernetesClusterCmd.java | 12 +++++++++++- systemvm/debian/opt/cloud/bin/setup/cksnode.sh | 2 +- .../debian/opt/cloud/bin/setup/cloud-early-config | 8 +++++--- systemvm/debian/opt/cloud/bin/setup/postinit.sh | 4 ++++ .../scripts/install_systemvm_packages.sh | 2 -- tools/appliance/systemvmtemplate/template.json | 2 +- 6 files changed, 22 insertions(+), 8 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java index 54e307c0c5b9..cf1759730abf 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.api.command.user.kubernetes.cluster; +import java.security.InvalidParameterException; + import javax.inject.Inject; import org.apache.cloudstack.acl.RoleType; @@ -55,6 +57,7 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { public static final Logger LOGGER = Logger.getLogger(CreateKubernetesClusterCmd.class.getName()); public static final String APINAME = "createKubernetesCluster"; + private static final Long DEFAULT_NODE_ROOT_DISK_SIZE = 8L; @Inject public KubernetesClusterService kubernetesClusterService; @@ -216,7 +219,14 @@ public String getDockerRegistryEmail() { } public Long getNodeRootDiskSize() { - return nodeRootDiskSize; + if (nodeRootDiskSize != null) { + if (nodeRootDiskSize < DEFAULT_NODE_ROOT_DISK_SIZE) { + throw new InvalidParameterException("Provided node root disk size is lesser than default size of " + DEFAULT_NODE_ROOT_DISK_SIZE +"GB"); + } + return nodeRootDiskSize; + } else { + return DEFAULT_NODE_ROOT_DISK_SIZE; + } } ///////////////////////////////////////////////////// diff --git a/systemvm/debian/opt/cloud/bin/setup/cksnode.sh b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh index 45361984be4a..b40de4124df7 100755 --- a/systemvm/debian/opt/cloud/bin/setup/cksnode.sh +++ b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh @@ -50,7 +50,7 @@ setup_k8s_node() { enable_fwding 1 enable_irqbalance 0 setup_ntp - dhclient -timeout 60 + dhclient -1 rm -f /etc/logrotate.d/cloud diff --git a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config index 8ada7eaa6828..917fa632cd42 100755 --- a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config +++ b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config @@ -89,9 +89,11 @@ start() { echo "" > /root/.ssh/known_hosts if which growpart > /dev/null; then - growpart /dev/vda 2 - growpart /dev/vda 6 - resize2fs /dev/vda6 + ROOT_MOUNT_POINT=`df -h / | tail -n 1 | cut -d' ' -f1` + ROOT_DISK=`echo $ROOT_MOUNT_POINT | sed 's/[0-9]*$//g'` + growpart $ROOT_DISK 2 + growpart $ROOT_DISK 6 + resize2fs $ROOT_MOUNT_POINT fi patch diff --git a/systemvm/debian/opt/cloud/bin/setup/postinit.sh b/systemvm/debian/opt/cloud/bin/setup/postinit.sh index a8dffeed71c4..04929302f513 100755 --- a/systemvm/debian/opt/cloud/bin/setup/postinit.sh +++ b/systemvm/debian/opt/cloud/bin/setup/postinit.sh @@ -42,6 +42,10 @@ then fi fi +if [ "$TYPE" == "cksnode" ]; then + pkill -9 dhclient +fi + [ ! 
-f /var/cache/cloud/enabled_svcs ] && touch /var/cache/cloud/enabled_svcs for svc in $(cat /var/cache/cloud/enabled_svcs) do diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index 46790fb9046d..7f9bd63a9b36 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -79,8 +79,6 @@ function install_packages() { virt-what open-vm-tools qemu-guest-agent hyperv-daemons cloud-guest-utils \ apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common cloud-init - apt-get -q -y -t stretch-backports install nftables openjdk-11-jre-headless haproxy - apt-get -y autoremove --purge apt-get clean apt-get autoclean diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index 7e9b72197739..db49d1d2c46a 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -33,7 +33,7 @@ [ "-smp", "1" ] ], "format": "qcow2", - "disk_size": 6000, + "disk_size": 4000, "disk_interface": "virtio", "net_device": "virtio-net", "iso_url": "https://download.cloudstack.org/systemvm/debian/debian-10.7.0-amd64-netinst.iso", From 785ac2cb56192163c01a30f2f55c10453a1d865c Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 10 Nov 2020 10:49:16 +0530 Subject: [PATCH 040/117] Updating api param doc --- .../user/kubernetes/cluster/CreateKubernetesClusterCmd.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java index cf1759730abf..f74b1db8a72f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java @@ -140,7 +140,7 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { private String dockerRegistryEmail; @Parameter(name = ApiConstants.NODE_ROOT_DISK_SIZE, type = CommandType.LONG, - description = "root disk size of root disk for each node") + description = "root disk size in GB for each node") private Long nodeRootDiskSize; ///////////////////////////////////////////////////// From 01142cde637d78ff4ba6e9941602aaa8cf8b27d6 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 10 Nov 2020 10:50:43 +0530 Subject: [PATCH 041/117] Refactoring tests --- .../smoke/test_kubernetes_clusters.py | 369 ++++++++---------- 1 file changed, 157 insertions(+), 212 deletions(-) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index dc24dad3688e..2440de06b7ae 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -90,14 +90,6 @@ def setUpClass(cls): cls.cks_service_offering = None - if cls.setup_failed == False: - try: - cls.kubernetes_version_1_15_0 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.15.0"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_1_15_0.id) - except 
Exception as e: - cls.setup_failed = True - cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % - (cls.services["cks_kubernetes_versions"]["1.15.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.15.0"]["url"], e)) if cls.setup_failed == False: try: cls.kubernetes_version_1_16_0 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.0"]) @@ -134,6 +126,9 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): + if k8s_cluster != None and k8s_cluster.id != None: + cls.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) + version_delete_failed = False # Delete added Kubernetes supported version for version_id in cls.kubernetes_version_ids: @@ -236,6 +231,76 @@ def deleteKubernetesSupportedVersion(cls, version_id): deleteKubernetesSupportedVersionCmd.id = version_id cls.apiclient.deleteKubernetesSupportedVersion(deleteKubernetesSupportedVersionCmd) + @classmethod + def listKubernetesCluster(cls, cluster_id = None): + listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd() + listKubernetesClustersCmd.listall = True + if cluster_id != None: + listKubernetesClustersCmd.id = cluster_id + clusterResponse = cls.apiclient.listKubernetesClusters(listKubernetesClustersCmd) + if cluster_id != None and clusterResponse != None: + return clusterResponse[0] + return clusterResponse + + @classmethod + def deleteKubernetesCluster(cls, cluster_id): + deleteKubernetesClusterCmd = deleteKubernetesCluster.deleteKubernetesClusterCmd() + deleteKubernetesClusterCmd.id = cluster_id + response = cls.apiclient.deleteKubernetesCluster(deleteKubernetesClusterCmd) + return response + + @classmethod + def stopKubernetesCluster(cls, cluster_id): + stopKubernetesClusterCmd = stopKubernetesCluster.stopKubernetesClusterCmd() + stopKubernetesClusterCmd.id = cluster_id + response = cls.apiclient.stopKubernetesCluster(stopKubernetesClusterCmd) + return response + + + @classmethod + def deleteKubernetesClusterAndVerify(cls, cluster_id, verify = True, forced = False): + """Delete Kubernetes cluster and check if it is really deleted""" + + forceDeleted = False + try: + delete_response = cls.deleteKubernetesCluster(cluster_id) + except Exception as e: + if forced: + cluster = cls.listKubernetesCluster(cluster_id) + if cluster != None: + if cluster.state in ['Starting', 'Running', 'Upgrading', 'Scaling']: + cls.stopKubernetesCluster(cluster_id) + cls.deleteKubernetesCluster(cluster_id) + else: + forceDeleted = True + for cluster_vm in cluster.virtualmachines: + cmd = destroyVirtualMachine.destroyVirtualMachineCmd() + cmd.id = cluster_vm.id + cmd.expunge = True + cls.apiclient.destroyVirtualMachine(cmd) + cmd = deleteNetwork.deleteNetworkCmd() + cmd.id = cluster.networkid + cmd.forced = True + cls.apiclient.deleteNetwork(cmd) + cls.dbclient.execute("update kubernetes_cluster set state='Destroyed', removed=now() where uuid = '%s';" % cluster.id) + else: + raise Exception("Error: Exception during delete cluster : %s" % e) + + if verify == True and forceDeleted == False: + cls.assertEqual( + delete_response.success, + True, + "Check KubernetesCluster delete response {}, {}".format(delete_response.success, True) + ) + + db_cluster_removed = cls.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0] + + cls.assertNotEqual( + db_cluster_removed, + None, + "KubernetesCluster not removed in DB, {}".format(db_cluster_removed) + ) + def setUp(self): self.services = 
self.testClient.getParsedTestDataConfig() self.apiclient = self.testClient.getApiClient() @@ -252,13 +317,15 @@ def tearDown(self): @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_01_deploy_kubernetes_cluster(self): + def test_01_basic_lifecycle_kubernetes_cluster(self): """Test to deploy a new Kubernetes cluster # Validate the following: # 1. createKubernetesCluster should return valid info for new cluster # 2. The Cloud Database contains the valid information # 3. stopKubernetesCluster should stop the cluster + # 4. startKubernetesCluster should start the cluster + # 4. deleteKubernetesCluster should delete the cluster """ if self.setup_failed == True: self.fail("Setup incomplete") @@ -266,9 +333,7 @@ def test_01_deploy_kubernetes_cluster(self): k8s_cluster = self.getValidKubernetesCluster() self.debug("Kubernetes cluster with ID: %s successfully deployed, now stopping it" % k8s_cluster.id) - self.stopAndVerifyKubernetesCluster(k8s_cluster.id) - self.debug("Kubernetes cluster with ID: %s successfully stopped, now starting it again" % k8s_cluster.id) try: @@ -278,66 +343,61 @@ def test_01_deploy_kubernetes_cluster(self): self.fail("Failed to start Kubernetes cluster due to: %s" % e) self.verifyKubernetesClusterState(k8s_cluster, 'Running') - + self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_02_invalid_upgrade_kubernetes_cluster(self): - """Test to check for failure while tying to upgrade a Kubernetes cluster to a lower version + def test_02_upgrade_kubernetes_cluster(self): + """Test to Upgrade a Kubernetes cluster to newer version # Validate the following: - # 1. upgradeKubernetesCluster should fail + # 1. upgradeKubernetesCluster should return valid info for the cluster """ if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster() + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_0) - self.debug("Upgrading Kubernetes cluster with ID: %s to a lower version" % k8s_cluster.id) + time.sleep(self.services["sleep"]) + self.debug("Upgrading Kubernetes cluster with ID: %s" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_15_0.id) - self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kubernetes_version_1_15_0.id) - self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) - self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. 
Must be an error.") + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_16_3.id) except Exception as e: - self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e) + self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) + self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e) + self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_16_3.id) return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_03_deploy_and_upgrade_kubernetes_cluster(self): - """Test to deploy a new Kubernetes cluster and upgrade it to newer version + def test_03_invalid_upgrade_kubernetes_cluster(self): + """Test to check for failure while tying to downgrade a Kubernetes cluster # Validate the following: - # 1. upgradeKubernetesCluster should return valid info for the cluster + # 1. upgradeKubernetesCluster should fail """ if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_0) - - time.sleep(self.services["sleep"]) - - self.debug("Upgrading Kubernetes cluster with ID: %s" % k8s_cluster.id) + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_3) + self.debug("Downgrading Kubernetes cluster with ID: %s to a lower version. This should fail!" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_16_3.id) - except Exception as e: + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_16_0.id) + self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kubernetes_version_1_16_0.id) self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) - self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e) - - self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_16_3.id) - self.debug("Deleting cluster : "+ k8s_cluster.id) - self.deleteKubernetesClusterAndVerify(k8s_cluster.id) + self.fail("Kubernetes cluster downgrade to a lower Kubernetes supported version. Must be an error.") + except Exception as e: + self.debug("Downgrading Kubernetes cluster check successful, API failure: %s" % e) return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_04_deploy_and_scale_kubernetes_cluster(self): - """Test to deploy a new Kubernetes cluster and check for failure while tying to scale it + def test_04_scale_kubernetes_cluster(self): + """Test to scale up and down a Kubernetes cluster # Validate the following: # 1. 
scaleKubernetesCluster should return valid info for the cluster when it is scaled up @@ -349,7 +409,6 @@ def test_04_deploy_and_scale_kubernetes_cluster(self): k8s_cluster = self.getValidKubernetesCluster() self.debug("Upscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) - try: k8s_cluster = self.scaleKubernetesCluster(k8s_cluster.id, 2) except Exception as e: @@ -357,7 +416,6 @@ def test_04_deploy_and_scale_kubernetes_cluster(self): self.fail("Failed to upscale Kubernetes cluster due to: %s" % e) self.verifyKubernetesClusterScale(k8s_cluster, 2) - self.debug("Kubernetes cluster with ID: %s successfully upscaled, now downscaling it" % k8s_cluster.id) try: @@ -367,60 +425,39 @@ def test_04_deploy_and_scale_kubernetes_cluster(self): self.fail("Failed to downscale Kubernetes cluster due to: %s" % e) self.verifyKubernetesClusterScale(k8s_cluster) - self.debug("Kubernetes cluster with ID: %s successfully downscaled" % k8s_cluster.id) - return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_05_delete_kubernetes_cluster(self): - """Test to delete an existing Kubernetes cluster + def test_05_autoscale_kubernetes_cluster(self): + """Test to enable autoscaling a Kubernetes cluster # Validate the following: - # 1. deleteKubernetesCluster should delete an existing Kubernetes cluster + # 1. scaleKubernetesCluster should return valid info for the cluster when it is autoscaled + # 2. cluster-autoscaler pod should be running """ if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster() - - self.debug("Deleting Kubernetes cluster with ID: %s" % k8s_cluster.id) - - self.deleteKubernetesClusterAndVerify(k8s_cluster.id) - - self.debug("Kubernetes cluster with ID: %s successfully deleted" % k8s_cluster.id) - - k8s_cluster = None - - return - - @attr(tags=["advanced", "smoke"], required_hardware="true") - @skipTestIf("hypervisorNotSupported") - def test_06_deploy_invalid_kubernetes_ha_cluster(self): - """Test to deploy an invalid HA Kubernetes cluster - - # Validate the following: - # 1. createKubernetesCluster should fail as version doesn't support HA - """ - if self.setup_failed == True: - self.fail("Setup incomplete") - name = 'testcluster-' + random_gen() - self.debug("Creating for Kubernetes cluster with name %s" % name) + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_3) + self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) try: - cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_1_15_0.id, 1, 2) - self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % cluster_response.id) - self.deleteKubernetesClusterAndVerify(cluster_response.id, False, True) - self.fail("HA Kubernetes cluster deployed with Kubernetes supported version below version 1.16.0. 
Must be an error.") - except CloudstackAPIException as e: - self.debug("HA Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e) + k8s_cluster = self.autoscaleKubernetesCluster(k8s_cluster.id, 1, 2) + self.verifyKubernetesClusterAutocale(k8s_cluster, 1, 2) + up = self.waitForAutoscalerPodInRunningState(k8s_cluster.id) + self.assertTrue(up, "Autoscaler pod failed to run") + self.debug("Kubernetes cluster with ID: %s has autoscaler running" % k8s_cluster.id) + except Exception as e: + self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) + self.fail("Failed to autoscale Kubernetes cluster due to: %s" % e) return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_07_deploy_kubernetes_ha_cluster(self): + def test_06_deploy_kubernetes_ha_cluster(self): """Test to deploy a new Kubernetes cluster # Validate the following: @@ -431,15 +468,13 @@ def test_07_deploy_kubernetes_ha_cluster(self): self.fail("Setup incomplete") global k8s_cluster k8s_cluster = self.getValidKubernetesCluster(1, 2) - self.debug("HA Kubernetes cluster with ID: %s successfully deployed" % k8s_cluster.id) - return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_08_deploy_and_upgrade_kubernetes_ha_cluster(self): - """Test to deploy a new HA Kubernetes cluster and upgrade it to newer version + def test_07_upgrade_kubernetes_ha_cluster(self): + """Test to upgrade a Kubernetes cluster to newer version # Validate the following: # 1. upgradeKubernetesCluster should return valid info for the cluster @@ -458,14 +493,12 @@ def test_08_deploy_and_upgrade_kubernetes_ha_cluster(self): self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e) self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_16_3.id) - self.debug("Kubernetes cluster with ID: %s successfully upgraded" % k8s_cluster.id) - return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_09_delete_kubernetes_ha_cluster(self): + def test_08_delete_kubernetes_ha_cluster(self): """Test to delete a HA Kubernetes cluster # Validate the following: @@ -477,52 +510,8 @@ def test_09_delete_kubernetes_ha_cluster(self): k8s_cluster = self.getValidKubernetesCluster(1, 2) self.debug("Deleting Kubernetes cluster with ID: %s" % k8s_cluster.id) - - self.deleteKubernetesClusterAndVerify(k8s_cluster.id) - - self.debug("Kubernetes cluster with ID: %s successfully deleted" % k8s_cluster.id) - - return - - @attr(tags=["advanced", "smoke"], required_hardware="true") - @skipTestIf("hypervisorNotSupported") - def test_10_deploy_and_autoscale_kubernetes_cluster(self): - """Test to deploy a new Kubernetes cluster and check for failure while tying to autoscale it - - # Validate the following: - # 1. scaleKubernetesCluster should return valid info for the cluster when it is autoscaled - # 2. 
cluster-autoscaler pod should be running - """ - if self.setup_failed == True: - self.fail("Setup incomplete") - global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_3) - - self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) - - try: - k8s_cluster = self.autoscaleKubernetesCluster(k8s_cluster.id, 1, 2) - self.verifyKubernetesClusterAutocale(k8s_cluster, 1, 2) - - up = self.waitForAutoscalerPodInRunningState(k8s_cluster.id) - self.assertTrue(up, "Autoscaler pod failed to run") - self.debug("Kubernetes cluster with ID: %s has autoscaler running" % k8s_cluster.id) - self.deleteKubernetesClusterAndVerify(k8s_cluster.id) - except Exception as e: - self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) - self.fail("Failed to autoscale Kubernetes cluster due to: %s" % e) - return - def listKubernetesCluster(self, cluster_id = None): - listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd() - if cluster_id != None: - listKubernetesClustersCmd.id = cluster_id - clusterResponse = self.apiclient.listKubernetesClusters(listKubernetesClustersCmd) - if cluster_id != None and clusterResponse != None: - return clusterResponse[0] - return clusterResponse - def createKubernetesCluster(self, name, version_id, size=1, master_nodes=1): createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd() createKubernetesClusterCmd.name = name @@ -540,24 +529,12 @@ def createKubernetesCluster(self, name, version_id, size=1, master_nodes=1): self.cleanup.append(clusterResponse) return clusterResponse - def stopKubernetesCluster(self, cluster_id): - stopKubernetesClusterCmd = stopKubernetesCluster.stopKubernetesClusterCmd() - stopKubernetesClusterCmd.id = cluster_id - response = self.apiclient.stopKubernetesCluster(stopKubernetesClusterCmd) - return response - def startKubernetesCluster(self, cluster_id): startKubernetesClusterCmd = startKubernetesCluster.startKubernetesClusterCmd() startKubernetesClusterCmd.id = cluster_id response = self.apiclient.startKubernetesCluster(startKubernetesClusterCmd) return response - def deleteKubernetesCluster(self, cluster_id): - deleteKubernetesClusterCmd = deleteKubernetesCluster.deleteKubernetesClusterCmd() - deleteKubernetesClusterCmd.id = cluster_id - response = self.apiclient.deleteKubernetesCluster(deleteKubernetesClusterCmd) - return response - def upgradeKubernetesCluster(self, cluster_id, version_id): upgradeKubernetesClusterCmd = upgradeKubernetesCluster.upgradeKubernetesClusterCmd() upgradeKubernetesClusterCmd.id = cluster_id @@ -612,39 +589,56 @@ def waitForAutoscalerPodInRunningState(self, cluster_id, retries=5, interval=60) def getValidKubernetesCluster(self, size=1, master_nodes=1, version={}): cluster = k8s_cluster - if not version: - version = self.kubernetes_version_1_16_0 - valid = True - if cluster == None: - valid = False + + # Does a cluster already exist ? + if cluster == None or cluster.id == None: + if not version: + version = self.kubernetes_version_1_16_0 self.debug("No existing cluster available, k8s_cluster: %s" % cluster) - if valid == True and cluster.id == None: - valid = False - self.debug("ID for existing cluster not found, k8s_cluster ID: %s" % cluster.id) - if valid == True: + return self.createNewKubernetesCluster(version, size, master_nodes) + + # Is the existing cluster what is needed ? 
+ valid = cluster.size == size and cluster.masternodes == master_nodes + if version: + # Check the version only if specified + valid = valid and cluster.kubernetesversionid == version.id + else: + version = self.kubernetes_version_1_16_0 + + if valid: cluster_id = cluster.id cluster = self.listKubernetesCluster(cluster_id) if cluster == None: - valid = False + # Looks like the cluster disappeared ! self.debug("Existing cluster, k8s_cluster ID: %s not returned by list API" % cluster_id) - if valid == True: + return self.createNewKubernetesCluster(version, size, master_nodes) + + if valid: try: self.verifyKubernetesCluster(cluster, cluster.name, None, size, master_nodes) self.debug("Existing Kubernetes cluster available with name %s" % cluster.name) + return cluster except AssertionError as error: - valid = False self.debug("Existing cluster failed verification due to %s, need to deploy a new one" % error) - if valid == False: - name = 'testcluster-' + random_gen() - self.debug("Creating for Kubernetes cluster with name %s" % name) - try: - self.deleteAllLeftoverClusters() - cluster = self.createKubernetesCluster(name, version.id, size, master_nodes) - self.verifyKubernetesCluster(cluster, name, version.id, size, master_nodes) - except Exception as ex: - self.fail("Kubernetes cluster deployment failed: %s" % ex) - except AssertionError as err: - self.fail("Kubernetes cluster deployment failed during cluster verification: %s" % err) + self.deleteKubernetesClusterAndVerify(cluster.id, False, True) + + # Can't have too many loose clusters running around + if cluster.id != None: + self.deleteKubernetesClusterAndVerify(cluster.id, False, True) + + self.debug("No valid cluster, need to deploy a new one") + return self.createNewKubernetesCluster(version, size, master_nodes) + + def createNewKubernetesCluster(self, version, size, master_nodes) : + name = 'testcluster-' + random_gen() + self.debug("Creating for Kubernetes cluster with name %s" % name) + try: + cluster = self.createKubernetesCluster(name, version.id, size, master_nodes) + self.verifyKubernetesCluster(cluster, name, version.id, size, master_nodes) + except Exception as ex: + self.fail("Kubernetes cluster deployment failed: %s" % ex) + except AssertionError as err: + self.fail("Kubernetes cluster deployment failed during cluster verification: %s" % err) return cluster def verifyKubernetesCluster(self, cluster_response, name, version_id=None, size=1, master_nodes=1): @@ -756,52 +750,3 @@ def stopAndVerifyKubernetesCluster(self, cluster_id): 'Stopped', "KubernetesCluster not stopped in DB, {}".format(db_cluster_state) ) - - def deleteKubernetesClusterAndVerify(self, cluster_id, verify = True, forced = False): - """Delete Kubernetes cluster and check if it is really deleted""" - - forceDeleted = False - try: - delete_response = self.deleteKubernetesCluster(cluster_id) - except Exception as e: - if forced: - cluster = self.listKubernetesCluster(cluster_id) - if cluster != None: - if cluster.state in ['Starting', 'Running', 'Upgrading', 'Scaling']: - self.stopKubernetesCluster(cluster_id) - self.deleteKubernetesCluster(cluster_id) - else: - forceDeleted = True - for cluster_vm in cluster.virtualmachines: - cmd = destroyVirtualMachine.destroyVirtualMachineCmd() - cmd.id = cluster_vm.id - cmd.expunge = True - self.apiclient.destroyVirtualMachine(cmd) - cmd = deleteNetwork.deleteNetworkCmd() - cmd.id = cluster.networkid - cmd.forced = True - self.apiclient.deleteNetwork(cmd) - self.dbclient.execute("update kubernetes_cluster set 
state='Destroyed', removed=now() where uuid = '%s';" % cluster.id) - else: - raise Exception("Error: Exception during delete cluster : %s" % e) - - if verify == True and forceDeleted == False: - self.assertEqual( - delete_response.success, - True, - "Check KubernetesCluster delete response {}, {}".format(delete_response.success, True) - ) - - db_cluster_removed = self.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0] - - self.assertNotEqual( - db_cluster_removed, - None, - "KubernetesCluster not removed in DB, {}".format(db_cluster_removed) - ) - - def deleteAllLeftoverClusters(self): - clusters = self.listKubernetesCluster() - if clusters != None: - for cluster in clusters: - self.deleteKubernetesClusterAndVerify(cluster.id, False, True) From 9843efc44b1ef21f8480ee5674b3388fc311622d Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 11 Nov 2020 10:14:41 +0530 Subject: [PATCH 042/117] fix port number passed on shared n/ws + optimize disk size --- .../actionworkers/KubernetesClusterActionWorker.java | 3 --- .../KubernetesClusterResourceModifierActionWorker.java | 2 +- .../actionworkers/KubernetesClusterStartWorker.java | 8 +++++--- .../actionworkers/KubernetesClusterUpgradeWorker.java | 4 +++- .../cluster/dao/KubernetesClusterVmMapDaoImpl.java | 4 +++- tools/appliance/systemvmtemplate/http/preseed.cfg | 4 ++-- tools/appliance/systemvmtemplate/template.json | 2 +- 7 files changed, 15 insertions(+), 12 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index a8cf798b11a5..3c76ab1df819 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -389,9 +389,6 @@ protected void detachIsoKubernetesVMs(List clusterVMs) { protected List getKubernetesClusterVMMaps() { List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); - if (!CollectionUtils.isEmpty(clusterVMs)) { - clusterVMs.sort((t1, t2) -> (int)((t1.getId() - t2.getId())/Math.abs(t1.getId() - t2.getId()))); - } return clusterVMs; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 76323160cb25..abc6a91e1e59 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -181,7 +181,7 @@ private String getKubernetesNodeConfig(final String joinIp, final boolean ejectI // optional or conditionally applied String dockerConfigString = "write_files:\n" + " - path: /.docker/config.json\n" + - " owner: root:root\n" + + " owner: core:core\n" + " permissions: '0644'\n" + " content: |\n" + " {\n" + diff --git 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 98aa18dae36e..92bf4f48f999 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -576,7 +576,7 @@ public boolean startKubernetesClusterOnCreate() { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } retrieveScriptFiles(); - copyAutoscalerScriptsToNodes(publicIpAddress, clusterVMs); + copyAutoscalerScriptsToNodes(publicIpAddress, sshPort, clusterVMs); if (!createCloudStackSecret(keys)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); @@ -585,10 +585,12 @@ public boolean startKubernetesClusterOnCreate() { return true; } - private void copyAutoscalerScriptsToNodes(String publicIpAddress, List clusterVMs) { + private void copyAutoscalerScriptsToNodes(String publicIpAddress, int sshPort, List clusterVMs) { for (int i = 0; i < clusterVMs.size(); ++i) { try { - copyAutoscalerScripts(publicIpAddress, CLUSTER_NODES_DEFAULT_START_SSH_PORT + i); + // Check for shared networks + int port = (sshPort == CLUSTER_NODES_DEFAULT_START_SSH_PORT) ? sshPort + i : sshPort; + copyAutoscalerScripts(publicIpAddress, port); } catch (Exception e) { throw new CloudRuntimeException(e); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index e7eb3c2aa921..1a99e621322c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -103,7 +103,9 @@ private void upgradeKubernetesClusterNodes() { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, upgrade action timed out", kubernetesCluster.getName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } try { - copyAutoscalerScripts(publicIpAddress, CLUSTER_NODES_DEFAULT_START_SSH_PORT + i); + // Check for shared networks + int port = (sshPort == CLUSTER_NODES_DEFAULT_START_SSH_PORT) ? 
sshPort + i : sshPort; + copyAutoscalerScripts(publicIpAddress, port); if (!createCloudStackSecret(keys)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java index 0f6ebfa6909a..c5a9ad47814c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java @@ -21,6 +21,7 @@ import org.springframework.stereotype.Component; import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -42,7 +43,8 @@ public KubernetesClusterVmMapDaoImpl() { public List listByClusterId(long clusterId) { SearchCriteria sc = clusterIdSearch.create(); sc.setParameters("clusterId", clusterId); - return listBy(sc, null); + Filter filter = new Filter(KubernetesClusterVmMapVO.class, "id", Boolean.TRUE, null, null); + return listBy(sc, filter); } @Override diff --git a/tools/appliance/systemvmtemplate/http/preseed.cfg b/tools/appliance/systemvmtemplate/http/preseed.cfg index ec070e6a05e9..e5f9740089b6 100644 --- a/tools/appliance/systemvmtemplate/http/preseed.cfg +++ b/tools/appliance/systemvmtemplate/http/preseed.cfg @@ -56,7 +56,7 @@ d-i partman-auto/disk string /dev/vda d-i partman-auto/method string regular d-i partman-auto/expert_recipe string \ boot-root :: \ - 512 60 512 ext2 \ + 216 60 216 ext2 \ $primary{ } $bootable{ } \ method{ format } format{ } \ use_filesystem{ } filesystem{ ext2 } \ @@ -65,7 +65,7 @@ d-i partman-auto/expert_recipe string \ 256 1000 256 linux-swap \ method{ swap } format{ } \ . 
\ - 3256 40 3500 ext4 \ + 2500 40 3000 ext4 \ method{ format } format{ } \ use_filesystem{ } filesystem{ ext4 } \ mountpoint{ / } \ diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index db49d1d2c46a..929e6d4af0d6 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -33,7 +33,7 @@ [ "-smp", "1" ] ], "format": "qcow2", - "disk_size": 4000, + "disk_size": 3000, "disk_interface": "virtio", "net_device": "virtio-net", "iso_url": "https://download.cloudstack.org/systemvm/debian/debian-10.7.0-amd64-netinst.iso", From 4a1cc89b426184166a996f162eb042192c10eea3 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Fri, 20 Nov 2020 10:58:13 +0530 Subject: [PATCH 043/117] Adding kubernetes as a dependency for marvin --- tools/marvin/setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/marvin/setup.py b/tools/marvin/setup.py index 275b6e3db858..ea90525163ea 100644 --- a/tools/marvin/setup.py +++ b/tools/marvin/setup.py @@ -57,7 +57,8 @@ "ipmisim >= 0.7", "pytz", "retries", - "PyCrypt" + "PyCrypt", + "kubernetes" ], py_modules=['marvin.marvinPlugin'], zip_safe=False, From 06f629967e2dd5cbfcc3fedb9d09ed3cda1f38e3 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 4 Feb 2021 18:09:26 +0530 Subject: [PATCH 044/117] Port UI changes --- .../cloud/vm/VirtualMachineManagerImpl.java | 2 +- .../upgrade/dao/Upgrade41510to41600.java | 32 ++-- ui/public/locales/en.json | 13 +- ui/src/components/view/ListView.vue | 4 + ui/src/components/view/ResourceView.vue | 23 +-- ui/src/config/section/compute.js | 9 +- ui/src/config/section/image.js | 2 +- ui/src/views/AutogenView.vue | 2 +- .../views/compute/CreateKubernetesCluster.vue | 5 +- ui/src/views/compute/KubernetesServiceTab.vue | 61 ++++++- .../views/compute/ScaleKubernetesCluster.vue | 162 +++++++++++++----- ui/src/views/network/PublicIpResource.vue | 73 +++++--- 12 files changed, 277 insertions(+), 111 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index b68f3ef2e2d7..881871d75926 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -440,7 +440,7 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmFinal, template, serviceOffering, null, null); Long rootDiskSize = rootDiskOfferingInfo.getSize(); - if (vm.getType().isUsedBySystem() && SystemVmRootDiskSize.value() != null) { + if (vm.getType().isUsedBySystem() && SystemVmRootDiskSize.value() != null && SystemVmRootDiskSize.value() > 0L) { rootDiskSize = SystemVmRootDiskSize.value(); } final Long rootDiskSizeFinal = rootDiskSize; diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index 666ec289290f..3be9483a923a 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -125,23 +125,23 @@ private void updateSystemVmTemplates(final Connection conn) { final Map newTemplateUrl = new HashMap() { { - put(Hypervisor.HypervisorType.KVM, 
"https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova"); - put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2"); - put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-hyperv.vhd.zip"); - put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-ovm.raw.bz2"); + put(Hypervisor.HypervisorType.KVM, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-kvm.qcow2.bz2"); + put(Hypervisor.HypervisorType.VMware, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-vmware.ova"); + put(Hypervisor.HypervisorType.XenServer, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-xen.vhd.bz2"); + put(Hypervisor.HypervisorType.Hyperv, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-hyperv.vhd.zip"); + put(Hypervisor.HypervisorType.LXC, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-kvm.qcow2.bz2"); + put(Hypervisor.HypervisorType.Ovm3, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-ovm.raw.bz22"); } }; final Map newTemplateChecksum = new HashMap() { { - put(Hypervisor.HypervisorType.KVM, "81b3e48bb934784a13555a43c5ef5ffb"); - put(Hypervisor.HypervisorType.XenServer, "1b178a5dbdbe090555515340144c6017"); - put(Hypervisor.HypervisorType.VMware, "e6a88e518c57d6f36c096c4204c3417f"); - put(Hypervisor.HypervisorType.Hyperv, "5c94da45337cf3e1910dcbe084d4b9ad"); - put(Hypervisor.HypervisorType.LXC, "81b3e48bb934784a13555a43c5ef5ffb"); - put(Hypervisor.HypervisorType.Ovm3, "875c5c65455fc06c4a012394410db375"); + put(Hypervisor.HypervisorType.KVM, "6e4ed6ba28b50e455ac6708963331013"); + put(Hypervisor.HypervisorType.XenServer, "4ba9fb9b749c5503e46396be999406c1"); + put(Hypervisor.HypervisorType.VMware, "dcb9d3b0e6b7fed9e65846b0c52a1964"); + put(Hypervisor.HypervisorType.Hyperv, "7c7716bf5f4f3ee2b708aa9a3162a8e4"); + put(Hypervisor.HypervisorType.LXC, "6e4ed6ba28b50e455ac6708963331013"); + put(Hypervisor.HypervisorType.Ovm3, "6f87e2945b8dbd400de5e5c9c788d593"); } }; @@ -169,7 +169,9 @@ private void updateSystemVmTemplates(final Connection conn) { LOG.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e); } - updateVMwareSystemvVMTemplateField(conn, NewTemplateNameList.get(Hypervisor.HypervisorType.VMware)); + if (hypervisorAndTemplateName.getKey() == Hypervisor.HypervisorType.VMware) { + updateVMwareSystemvVMTemplateField(conn, hypervisorAndTemplateName.getValue()); + } // update template ID of system Vms try (PreparedStatement update_templ_id_pstmt = conn .prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? 
and removed is NULL");) { @@ -226,7 +228,9 @@ private void updateSystemVmTemplates(final Connection conn) { throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " + hypervisorAndTemplateName.getKey().toString(), e); } - updateVMwareSystemvVMTemplateField(conn, NewTemplateNameList.get(Hypervisor.HypervisorType.VMware)); + if (hypervisorAndTemplateName.getKey() == Hypervisor.HypervisorType.VMware) { + updateVMwareSystemvVMTemplateField(conn, hypervisorAndTemplateName.getValue()); + } } } } catch (final SQLException e) { diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 5b1cc6a95178..31e7bd902020 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -113,6 +113,7 @@ "label.action.delete.network.processing": "Deleting Network....", "label.action.delete.nexusvswitch": "Delete Nexus 1000v", "label.action.delete.nic": "Remove NIC", +"label.action.delete.node": "Delete node", "label.action.delete.physical.network": "Delete physical network", "label.action.delete.pod": "Delete Pod", "label.action.delete.pod.processing": "Deleting Pod....", @@ -451,6 +452,7 @@ "label.author.name": "Author name", "label.autoscale": "AutoScale", "label.autoscale.configuration.wizard": "AutoScale Configuration Wizard", +"label.autoscalingenabled": "Autoscaling", "label.availability": "Availability", "label.availabilityzone": "Availability Zone", "label.available": "Available", @@ -543,6 +545,9 @@ "label.cisco.nexus1000v.password": "Nexus 1000v Password", "label.cisco.nexus1000v.username": "Nexus 1000v Username", "label.ciscovnmc.resource.details": "CiscoVNMC resource details", +"label.cks.cluster.autoscalingenabled": "Enable autoscaling on this cluster", +"label.cks.cluster.maxsize": "Maximum cluster size (Worker nodes)", +"label.cks.cluster.minsize": "Minimum cluster size (Worker nodes)", "label.cks.cluster.size": "Cluster size (Worker nodes)", "label.cleanup": "Clean up", "label.clear": "Clear", @@ -1294,6 +1299,7 @@ "label.maxproject": "Max. Projects", "label.maxpublicip": "Max. Public IPs", "label.maxsecondarystorage": "Max. Secondary Storage (GiB)", +"label.maxsize": "Maximum size", "label.maxsnapshot": "Max. Snapshots", "label.maxtemplate": "Max. Templates", "label.maxuservm": "Max. User VMs", @@ -1848,8 +1854,8 @@ "label.save.changes": "Save changes", "label.save.new.rule": "Save new Rule", "label.saving.processing": "Saving....", -"label.scale.vm": "Scale VM", "label.scale.up.policy": "SCALE UP POLICY", +"label.scale.vm": "Scale VM", "label.scaledown.policy": "ScaleDown Policy", "label.scaleup.policy": "ScaleUp Policy", "label.schedule": "Schedule", @@ -2048,6 +2054,7 @@ "label.summary": "Summary", "label.sunday": "Sunday", "label.supportedservices": "Supported Services", +"label.supportsautoscaling": "Supports Autoscaling", "label.supportsha": "Supports HA", "label.supportspublicaccess": "Supports Public Access", "label.supportsregionlevelvpc": "Supports Region Level VPC", @@ -2379,6 +2386,7 @@ "message.action.delete.iso.for.all.zones": "The ISO is used by all zones. 
Please confirm that you want to delete it from all zones.", "message.action.delete.network": "Please confirm that you want to delete this network.", "message.action.delete.nexusvswitch": "Please confirm that you want to delete this nexus 1000v", +"message.action.delete.node": "Please confirm that you want to delete this node.", "message.action.delete.physical.network": "Please confirm that you want to delete this physical network", "message.action.delete.pod": "Please confirm that you want to delete this pod.", "message.action.delete.primary.storage": "Please confirm that you want to delete this primary storage.", @@ -2428,6 +2436,7 @@ "message.action.revert.snapshot": "Please confirm that you want to revert the owning volume to this snapshot.", "message.action.router.health.checks": "Health checks result will be fetched from router.", "message.action.router.health.checks.disabled.warning": "Please enable router health checks.", +"message.action.scale.kubernetes.cluster.warning": "Please do not manually scale the cluster if cluster autoscaling is enabled", "message.action.secure.host": "This will restart the host agent and libvirtd process after applying new X509 certificates, please confirm?", "message.action.settings.warning.vm.running": "Please stop the virtual machine to access settings", "message.action.settings.warning.vm.started": "Virtual machine has been started. It needs to be stopped to access settings", @@ -2675,6 +2684,7 @@ "message.delete.vpn.connection": "Please confirm that you want to delete VPN connection", "message.delete.vpn.customer.gateway": "Please confirm that you want to delete this VPN Customer Gateway", "message.delete.vpn.gateway": "Please confirm that you want to delete this VPN Gateway", +"message.deleting.node": "Deleting Node", "message.deleting.vm": "Deleting VM", "message.desc.add.new.lb.sticky.rule": "Add new LB sticky rule", "message.desc.advanced.zone": "For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks and providing custom network offerings such as firewall, VPN, or load balancer support.", @@ -3117,6 +3127,7 @@ "message.success.delete": "Delete success", "message.success.delete.acl.rule": "Successfully removed ACL rule", "message.success.delete.backup.schedule": "Successfully deleted Configure VM backup schedule", +"message.success.delete.node": "Successfully Deleted Node", "message.success.delete.snapshot.policy": "Successfully deleted snapshot policy", "message.success.delete.static.route": "Successfully deleted static route", "message.success.delete.tag": "Successfully deleted tag", diff --git a/ui/src/components/view/ListView.vue b/ui/src/components/view/ListView.vue index 35f68b550660..f14546d3754f 100644 --- a/ui/src/components/view/ListView.vue +++ b/ui/src/components/view/ListView.vue @@ -230,6 +230,10 @@ + + + {{ record.autoscalingenabled ? 
'Enabled' : 'Disabled' }} + {{ $toLocaleDate(text) }} diff --git a/ui/src/components/view/ResourceView.vue b/ui/src/components/view/ResourceView.vue index 333f582ad0e5..4ab8bff2446c 100644 --- a/ui/src/components/view/ResourceView.vue +++ b/ui/src/components/view/ResourceView.vue @@ -130,27 +130,8 @@ export default { ) }, showTab (tab) { - if ('networkServiceFilter' in tab) { - if (this.resource && this.resource.virtualmachineid && !this.resource.vpcid && tab.name !== 'firewall') { - return false - } - if (this.resource && this.resource.virtualmachineid && this.resource.vpcid) { - return false - } - // dont display any option for source NAT IP of VPC - if (this.resource && this.resource.vpcid && !this.resource.issourcenat && tab.name !== 'firewall') { - return true - } - // display LB and PF options for isolated networks if static nat is disabled - if (this.resource && !this.resource.vpcid) { - if (!this.resource.isstaticnat) { - return true - } else if (tab.name === 'firewall') { - return true - } - } - return this.networkService && this.networkService.service && - tab.networkServiceFilter(this.networkService.service) + if (this.networkService && this.networkService.service && tab.networkServiceFilter) { + return tab.networkServiceFilter(this.networkService.service) } else if ('show' in tab) { return tab.show(this.resource, this.$route, this.$store.getters.userInfo) } else { diff --git a/ui/src/config/section/compute.js b/ui/src/config/section/compute.js index 89501406503f..d209d5a64916 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -400,15 +400,18 @@ export default { icon: kubernetes, docHelp: 'plugins/cloudstack-kubernetes-service.html', permission: ['listKubernetesClusters'], - columns: () => { + columns: (store) => { var fields = ['name', 'state', 'size', 'cpunumber', 'memory'] - if (['Admin', 'DomainAdmin'].includes(store.getters.userInfo.roletype)) { + if (['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) { fields.push('account') } + if (store.apis.scaleKubernetesCluster.params.filter(x => x.name === 'autoscalingenabled').length > 0) { + fields.splice(2, 0, 'autoscalingenabled') + } fields.push('zonename') return fields }, - details: ['name', 'description', 'zonename', 'kubernetesversionname', 'size', 'masternodes', 'cpunumber', 'memory', 'keypair', 'associatednetworkname', 'account', 'domain', 'zonename'], + details: ['name', 'description', 'zonename', 'kubernetesversionname', 'autoscalingenabled', 'minsize', 'maxsize', 'size', 'masternodes', 'cpunumber', 'memory', 'keypair', 'associatednetworkname', 'account', 'domain', 'zonename'], tabs: [{ name: 'k8s', component: () => import('@/views/compute/KubernetesServiceTab.vue') diff --git a/ui/src/config/section/image.js b/ui/src/config/section/image.js index 6f08b9e4bd4d..3838aec13b9e 100644 --- a/ui/src/config/section/image.js +++ b/ui/src/config/section/image.js @@ -310,7 +310,7 @@ export default { docHelp: 'plugins/cloudstack-kubernetes-service.html#kubernetes-supported-versions', permission: ['listKubernetesSupportedVersions'], columns: ['name', 'state', 'semanticversion', 'isostate', 'mincpunumber', 'minmemory', 'zonename'], - details: ['name', 'semanticversion', 'zoneid', 'zonename', 'isoid', 'isoname', 'isostate', 'mincpunumber', 'minmemory', 'supportsha', 'state'], + details: ['name', 'semanticversion', 'supportsautoscaling', 'zoneid', 'zonename', 'isoid', 'isoname', 'isostate', 'mincpunumber', 'minmemory', 'supportsha', 'state'], actions: [ { api: 
'addKubernetesSupportedVersion', diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index 157d7d7eb3c5..1136c44c98b4 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -547,7 +547,7 @@ export default { if (this.$route.meta.columns) { const columns = this.$route.meta.columns if (columns && typeof columns === 'function') { - this.columnKeys = columns() + this.columnKeys = columns(this.$store.getters) } else { this.columnKeys = columns } diff --git a/ui/src/views/compute/CreateKubernetesCluster.vue b/ui/src/views/compute/CreateKubernetesCluster.vue index 9759a5d90554..ac56b0432e44 100644 --- a/ui/src/views/compute/CreateKubernetesCluster.vue +++ b/ui/src/views/compute/CreateKubernetesCluster.vue @@ -131,10 +131,11 @@ @@ -107,6 +107,26 @@ + @@ -141,6 +161,7 @@ export default { Status }, mixins: [mixinDevice], + inject: ['parentFetchData'], props: { resource: { type: Object, @@ -220,6 +241,13 @@ export default { } }, mounted () { + if (this.$store.getters.apis.scaleKubernetesCluster.params.filter(x => x.name === 'nodeids').length > 0) { + this.vmColumns.push({ + title: this.$t('label.action'), + dataIndex: 'action', + scopedSlots: { customRender: 'action' } + }) + } this.handleFetchData() this.setCurrentTab() }, @@ -359,6 +387,35 @@ export default { elem.click() document.body.removeChild(elem) } + }, + deleteNode (node) { + const params = { + id: this.resource.id, + nodeids: node.id + } + api('scaleKubernetesCluster', params).then(json => { + const jobId = json.scalekubernetesclusterresponse.jobid + console.log(jobId) + this.$store.dispatch('AddAsyncJob', { + title: this.$t('label.action.delete.node'), + jobid: jobId, + description: node.name, + status: 'progress' + }) + this.$pollJob({ + jobId, + loadingMessage: `${this.$t('message.deleting.node')} ${node.name}`, + catchMessage: this.$t('error.fetching.async.job.result'), + successMessage: `${this.$t('message.success.delete.node')} ${node.name}`, + successMethod: () => { + this.parentFetchData() + } + }) + }).catch(error => { + this.$notifyError(error) + }).finally(() => { + this.parentFetchData() + }) } } } diff --git a/ui/src/views/compute/ScaleKubernetesCluster.vue b/ui/src/views/compute/ScaleKubernetesCluster.vue index 8ebc1fb29e89..39d0df340ce7 100644 --- a/ui/src/views/compute/ScaleKubernetesCluster.vue +++ b/ui/src/views/compute/ScaleKubernetesCluster.vue @@ -19,57 +19,113 @@
- +
- + - {{ $t('label.cks.cluster.size') }} - + {{ $t('label.cks.cluster.autoscalingenabled') }} + - - - - - {{ $t('label.serviceofferingid') }} - - - - - - - {{ opt.name || opt.description }} - - + - + + + + {{ $t('label.cks.cluster.minsize') }} + + + + + + + + + {{ $t('label.cks.cluster.maxsize') }} + + + + + + + + + + + {{ $t('label.serviceofferingid') }} + + + + + + + {{ opt.name || opt.description }} + + + + + + {{ $t('label.cks.cluster.size') }} + + + + + + +
{{ this.$t('label.cancel') }} {{ this.$t('label.ok') }} @@ -96,7 +152,11 @@ export default { serviceOfferingLoading: false, minCpu: 2, minMemory: 2048, - loading: false + loading: false, + originalSize: 1, + autoscalingenabled: null, + minsize: null, + maxsize: null } }, beforeCreate () { @@ -108,7 +168,14 @@ export default { }) }, created () { - this.originalSize = !this.isObjectEmpty(this.resource) ? this.resource.size : 1 + if (!this.isObjectEmpty(this.resource)) { + this.originalSize = this.resource.size + if (this.apiParams.autoscalingenabled) { + this.autoscalingenabled = this.resource.autoscalingenabled ? true : null + this.minsize = this.resource.minsize + this.maxsize = this.resource.maxsize + } + } }, mounted () { this.fetchData() @@ -179,12 +246,21 @@ export default { const params = { id: this.resource.id } + if (this.autoscalingenabled != null) { + params.autoscalingenabled = this.autoscalingenabled + } if (this.isValidValueForKey(values, 'size') && values.size > 0) { params.size = values.size } - if (this.isValidValueForKey(values, 'serviceofferingid') && this.arrayHasItems(this.serviceOfferings)) { + if (this.isValidValueForKey(values, 'serviceofferingid') && this.arrayHasItems(this.serviceOfferings) && this.autoscalingenabled == null) { params.serviceofferingid = this.serviceOfferings[values.serviceofferingid].id } + if (this.isValidValueForKey(values, 'minsize')) { + params.minsize = values.minsize + } + if (this.isValidValueForKey(values, 'maxsize')) { + params.maxsize = values.maxsize + } api('scaleKubernetesCluster', params).then(json => { const jobId = json.scalekubernetesclusterresponse.jobid this.$store.dispatch('AddAsyncJob', { diff --git a/ui/src/views/network/PublicIpResource.vue b/ui/src/views/network/PublicIpResource.vue index 84c62eb4c453..8aca91496f70 100644 --- a/ui/src/views/network/PublicIpResource.vue +++ b/ui/src/views/network/PublicIpResource.vue @@ -63,6 +63,10 @@ export default { tabs: [{ name: 'details', component: () => import('@/components/view/DetailsTab.vue') + }], + defaultTabs: [{ + name: 'details', + component: () => import('@/components/view/DetailsTab.vue') }] } }, @@ -97,39 +101,64 @@ export default { } this.loading = true - this.portFWRuleCount = await this.fetchPortFWRule() + await this.filterTabs() + await this.fetchAction() + this.loading = false + }, + async filterTabs () { + // VPC IPs with source nat have only VPN + if (this.resource && this.resource.vpcid && this.resource.issourcenat) { + this.tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'vpn')) + return + } + // VPC IPs with vpnenabled have only VPN + if (this.resource && this.resource.vpcid && this.resource.vpnenabled) { + this.tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'vpn')) + return + } + // VPC IPs with static nat have nothing + if (this.resource && this.resource.vpcid && this.resource.isstaticnat) { + return + } + if (this.resource && this.resource.vpcid) { + // VPC IPs don't have firewall + let tabs = this.$route.meta.tabs.filter(tab => tab.name !== 'firewall') - // disable load balancing rules only if port forwarding is enabled and - // network belongs to VPC - if (this.portFWRuleCount > 0 && this.resource.vpcid) { - this.tabs = this.$route.meta.tabs.filter(tab => tab.name !== 'loadbalancing') - } else { + this.portFWRuleCount = await this.fetchPortFWRule() this.loadBalancerRuleCount = await this.fetchLoadBalancerRule() - // for isolated networks, display both LB and PF - // for VPC they are mutually 
exclusive + // VPC IPs with PF only have PF + if (this.portFWRuleCount > 0) { + tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'portforwarding')) + } + + // VPC IPs with LB rules only have LB if (this.loadBalancerRuleCount > 0) { - this.tabs = - this.resource.vpcid ? this.$route.meta.tabs.filter(tab => tab.name !== 'portforwarding') : this.$route.meta.tabs - this.loading = false - } else { - this.tabs = this.$route.meta.tabs + tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'loadbalancing')) } + this.tabs = tabs + return } - await this.fetchAction() - this.loading = false - }, - fetchAction () { - this.actions = [] - if (this.$route.meta.actions) { - this.actions = this.$route.meta.actions + // Regular guest networks with Source Nat have everything + if (this.resource && !this.resource.vpcid && this.resource.issourcenat) { + this.tabs = this.$route.meta.tabs + return + } + // Regular guest networks with Static Nat only have Firewall + if (this.resource && !this.resource.vpcid && this.resource.isstaticnat) { + this.tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'firewall')) + return } - if (this.portFWRuleCount > 0 || this.loadBalancerRuleCount > 0) { - this.actions = this.actions.filter(action => action.api !== 'enableStaticNat') + // Regular guest networks have all tabs + if (this.resource && !this.resource.vpcid) { + this.tabs = this.$route.meta.tabs } }, + fetchAction () { + this.actions = this.$route.meta.actions || [] + }, fetchPortFWRule () { return new Promise((resolve, reject) => { api('listPortForwardingRules', { From 735b257ee2c3da425524d5541963cac696476f4c Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 5 Feb 2021 22:44:19 +0530 Subject: [PATCH 045/117] Fix versions --- .../com/cloud/upgrade/dao/Upgrade41510to41600.java | 12 ++++++------ .../scripts/install_systemvm_packages.sh | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index 3be9483a923a..cedf8fb2a76f 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -136,12 +136,12 @@ private void updateSystemVmTemplates(final Connection conn) { final Map newTemplateChecksum = new HashMap() { { - put(Hypervisor.HypervisorType.KVM, "6e4ed6ba28b50e455ac6708963331013"); - put(Hypervisor.HypervisorType.XenServer, "4ba9fb9b749c5503e46396be999406c1"); - put(Hypervisor.HypervisorType.VMware, "dcb9d3b0e6b7fed9e65846b0c52a1964"); - put(Hypervisor.HypervisorType.Hyperv, "7c7716bf5f4f3ee2b708aa9a3162a8e4"); - put(Hypervisor.HypervisorType.LXC, "6e4ed6ba28b50e455ac6708963331013"); - put(Hypervisor.HypervisorType.Ovm3, "6f87e2945b8dbd400de5e5c9c788d593"); + put(Hypervisor.HypervisorType.KVM, "0493097e888e7bb86c7118d23c5eeb67"); + put(Hypervisor.HypervisorType.XenServer, "b712722757fffcee0cfcdf63abdbbcff"); + put(Hypervisor.HypervisorType.VMware, "3d903f31df5324f21a730cab76706786"); + put(Hypervisor.HypervisorType.Hyperv, "54156ca498d5e96a6eb010c06d10ab55"); + put(Hypervisor.HypervisorType.LXC, "0493097e888e7bb86c7118d23c5eeb67"); + put(Hypervisor.HypervisorType.Ovm3, "e956da950052ad5e62098f3a366a6aa1"); } }; diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh 
b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index 7f9bd63a9b36..b4f63045ca52 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -100,7 +100,7 @@ function install_packages() { $(lsb_release -cs) \ stable" apt-get update - ${apt_get} install docker-ce docker-ce-cli containerd.io + ${apt_get} install docker-ce=5:19.03.10~3-0~debian-buster docker-ce-cli=5:19.03.10~3-0~debian-buster containerd.io=1.3.7-1 fi apt_clean From f5e866cb1f2a21e345f0dc1d6b6061960176d4df Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 17 Mar 2021 17:33:21 +0530 Subject: [PATCH 046/117] CKS deployment fix for Vmware - resize volume --- .../vmware/resource/VmwareResource.java | 9 ++++- ...esClusterResourceModifierActionWorker.java | 33 +++++++++++++++++++ .../KubernetesClusterStartWorker.java | 3 ++ .../java/com/cloud/vm/UserVmManagerImpl.java | 14 ++++++++ ui/public/locales/en.json | 3 ++ ui/src/views/compute/DeployVM.vue | 11 ++++++- .../wizard/ComputeOfferingSelection.vue | 10 ++++++ 7 files changed, 81 insertions(+), 2 deletions(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 44add8e662c2..27950d2a4da2 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -2072,7 +2072,14 @@ protected StartAnswer execute(StartCommand cmd) { // Setup ROOT/DATA disk devices // for (DiskTO vol : sortedDisks) { - if (vol.getType() == Volume.Type.ISO || deployAsIs && vol.getType() == Volume.Type.ROOT) { + if (vol.getType() == Volume.Type.ISO) { + continue; + } + + if (deployAsIs && vol.getType() == Volume.Type.ROOT) { + rootDiskTO = vol; + s_logger.info("root disk to path: "+ rootDiskTO.getPath()); + resizeRootDiskOnVMStart(vmMo, rootDiskTO, hyperHost, context); continue; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index abc6a91e1e59..efe9622584c8 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd; import org.apache.cloudstack.api.command.user.vm.StartVMCmd; +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Level; @@ -72,7 +73,11 @@ import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.offering.ServiceOffering; import com.cloud.resource.ResourceManager; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.LaunchPermissionDao; +import com.cloud.storage.VolumeApiService; +import 
com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.SSHKeyPairVO; import com.cloud.uservm.UserVm; @@ -125,6 +130,10 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu protected UserVmManager userVmManager; @Inject protected LaunchPermissionDao launchPermissionDao; + @Inject + protected VolumeApiService volumeService; + @Inject + protected VolumeDao volumeDao; protected String kubernetesClusterNodeNamePrefix; @@ -275,6 +284,29 @@ protected DeployDestination plan() throws InsufficientServerCapacityException { return plan(kubernetesCluster.getTotalNodeCount(), zone, offering); } + protected void resizeNodeVolume(final UserVm vm) throws ManagementServerException { + try { + if (vm.getHypervisorType() == Hypervisor.HypervisorType.VMware && templateDao.findById(vm.getTemplateId()).isDeployAsIs()) { + List vmVols = volumeDao.findByInstance(vm.getId()); + for (VolumeVO volumeVO : vmVols) { + if (volumeVO.getVolumeType() == Volume.Type.ROOT) { + ResizeVolumeCmd resizeVolumeCmd = new ResizeVolumeCmd(); + resizeVolumeCmd = ComponentContext.inject(resizeVolumeCmd); + Field f = resizeVolumeCmd.getClass().getDeclaredField("size"); + Field f1 = resizeVolumeCmd.getClass().getDeclaredField("id"); + f.setAccessible(true); + f1.setAccessible(true); + f1.set(resizeVolumeCmd, volumeVO.getId()); + f.set(resizeVolumeCmd, kubernetesCluster.getNodeRootDiskSize()); + volumeService.resizeVolume(resizeVolumeCmd); + } + } + } + } catch (IllegalAccessException | NoSuchFieldException e) { + throw new ManagementServerException(String.format("Failed to resize volume of VM in the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + protected void startKubernetesVM(final UserVm vm) throws ManagementServerException { try { StartVMCmd startVm = new StartVMCmd(); @@ -302,6 +334,7 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f for (int i = offset + 1; i <= nodeCount; i++) { UserVm vm = createKubernetesNode(publicIpAddress); addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), false); + resizeNodeVolume(vm); startKubernetesVM(vm); vm = userVmDao.findById(vm.getId()); if (vm == null) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 92bf4f48f999..293d01651d45 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -277,6 +277,7 @@ private UserVm provisionKubernetesClusterMasterVm(final Network network, final S UserVm k8sMasterVM = null; k8sMasterVM = createKubernetesMaster(network, publicIpAddress); addKubernetesClusterVm(kubernetesCluster.getId(), k8sMasterVM.getId(), true); + resizeNodeVolume(k8sMasterVM); startKubernetesVM(k8sMasterVM); k8sMasterVM = userVmDao.findById(k8sMasterVM.getId()); if (k8sMasterVM == null) { @@ -296,6 +297,7 @@ private List provisionKubernetesClusterAdditionalMasterVms(final String UserVm vm = null; vm = createKubernetesAdditionalMaster(publicIpAddress, i); addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), true); + resizeNodeVolume(vm); startKubernetesVM(vm); vm = userVmDao.findById(vm.getId()); if (vm == null) { @@ 
-427,6 +429,7 @@ private void startKubernetesClusterVMs() { logTransitStateAndThrow(Level.ERROR, String.format("Failed to start all VMs in Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } try { + resizeNodeVolume(vm); startKubernetesVM(vm); } catch (ManagementServerException ex) { LOGGER.warn(String.format("Failed to start VM : %s in Kubernetes cluster : %s due to ", vm.getDisplayName(), kubernetesCluster.getName()) + ex); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 6646a769e528..a11546464458 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -145,6 +145,8 @@ import com.cloud.agent.manager.Commands; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; +import com.cloud.api.query.dao.ServiceOfferingJoinDao; +import com.cloud.api.query.vo.ServiceOfferingJoinVO; import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; import com.cloud.configuration.Config; @@ -513,6 +515,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private UserVmDeployAsIsDetailsDao userVmDeployAsIsDetailsDao; @Inject private StorageManager storageMgr; + @Inject + private ServiceOfferingJoinDao serviceOfferingJoinDao; private ScheduledExecutorService _executor = null; private ScheduledExecutorService _vmIpFetchExecutor = null; @@ -5308,6 +5312,16 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE throw new InvalidParameterValueException("Unable to use template " + templateId); } + ServiceOfferingJoinVO svcOffering = serviceOfferingJoinDao.findById(serviceOfferingId); + + if (template.isDeployAsIs() && svcOffering != null && svcOffering.getRootDiskSize() != null && svcOffering.getRootDiskSize() > 0) { + throw new InvalidParameterValueException("Failed to deploy Virtual Machine as a service offering with root disk size specified cannot be used with a deploy as-is template"); + } + + if (template.isDeployAsIs() && cmd.getDetails().get("rootdisksize") != null) { + throw new InvalidParameterValueException("Overriding root disk size isn't supported for VMs deployed from defploy as-is templates"); + } + // Bootmode and boottype are not supported on VMWare dpeloy-as-is templates (since 4.15) if (template.isDeployAsIs() && (cmd.getBootMode() != null || cmd.getBootType() != null)) { throw new InvalidParameterValueException("Boot type and boot mode are not supported on VMware, as we honour what is defined in the template."); diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index e7a8b714e81e..606eaa40fd3c 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -1816,6 +1816,7 @@ "label.rolename": "Role", "label.roles": "Roles", "label.roletype": "Role Type", +"label.rootdisksize": "Root disk size (GB)", "label.root.certificate": "Root certificate", "label.root.disk.offering": "Root Disk Offering", "label.root.disk.size": "Root disk size (GB)", @@ -1995,6 +1996,7 @@ "label.start.rolling.maintenance": "Start Rolling Maintenance", "label.start.rolling.maintenance.payload": "Payload", "label.start.vlan": "Start VLAN", +"label.start.vm": "Start VM", "label.start.vxlan": "Start VXLAN", "label.startdate": "By date (start)", "label.startip": "Start IP", @@ -2690,6 +2692,7 @@ "message.delete.vpn.gateway": "Please confirm that you want to delete this 
VPN Gateway", "message.deleting.node": "Deleting Node", "message.deleting.vm": "Deleting VM", +"message.deployasis": "Selected template is Deploy As-Is i.e., the VM is deployed by importing an OVA with vApps directly into vCenter. Root disk(s) resize is allowed only on stopped VMs for such templates.", "message.desc.add.new.lb.sticky.rule": "Add new LB sticky rule", "message.desc.advanced.zone": "For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks and providing custom network offerings such as firewall, VPN, or load balancer support.", "message.desc.basic.zone": "Provide a single network where each VM instance is assigned an IP directly from the network. Guest isolation can be provided through layer-3 means such as security groups (IP address source filtering).", diff --git a/ui/src/views/compute/DeployVM.vue b/ui/src/views/compute/DeployVM.vue index bee2a386579b..f398abe1c762 100644 --- a/ui/src/views/compute/DeployVM.vue +++ b/ui/src/views/compute/DeployVM.vue @@ -105,7 +105,8 @@ @update-template-iso="updateFieldValue" /> {{ $t('label.override.rootdisk.size') }} - + +
{{ this.$t('message.deployasis') }}
+ + +
@@ -663,6 +668,7 @@ export default { podId: null, clusterId: null, zoneSelected: false, + startvm: true, vm: { name: null, zoneid: null, @@ -1419,6 +1425,9 @@ export default { if (values.hypervisor && values.hypervisor.length > 0) { deployVmData.hypervisor = values.hypervisor } + + deployVmData.startvm = values.startvm + // step 3: select service offering deployVmData.serviceofferingid = values.computeofferingid if (this.serviceOffering && this.serviceOffering.iscustomized) { diff --git a/ui/src/views/compute/wizard/ComputeOfferingSelection.vue b/ui/src/views/compute/wizard/ComputeOfferingSelection.vue index c372b821aef5..f1a6128872e8 100644 --- a/ui/src/views/compute/wizard/ComputeOfferingSelection.vue +++ b/ui/src/views/compute/wizard/ComputeOfferingSelection.vue @@ -63,6 +63,10 @@ export default { type: Array, default: () => [] }, + selectedTemplate: { + type: Object, + default: () => {} + }, rowCount: { type: Number, default: () => 0 @@ -161,6 +165,9 @@ export default { (item.iscustomized === true && maxMemory < this.minimumMemory))) { disabled = true } + if (this.selectedTemplate && this.selectedTemplate.hypervisor === 'VMware' && this.selectedTemplate.deployasis && item.rootdisksize) { + disabled = true + } return { key: item.id, name: item.name, @@ -238,6 +245,9 @@ export default { return { on: { click: () => { + if (record.disabled) { + return + } this.selectedRowKeys = [record.key] this.$emit('select-compute-item', record.key) } From 6a48ac8830d79bc7342fdc016542ff020a116383 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 23 Mar 2021 18:01:32 +0530 Subject: [PATCH 047/117] Update deploy as is field for templates registed < ACS 4.15 --- .../java/com/cloud/upgrade/dao/Upgrade41510to41600.java | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index 6a7e12cf4a99..d5e93a8eecd0 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -64,6 +64,7 @@ public InputStream[] getPrepareScripts() { @Override public void performDataMigration(Connection conn) { + updateVMwareSystemvVMTemplateField(conn, "systemvm-vmware-4.16.0"); } @Override @@ -169,9 +170,7 @@ public void updateSystemVmTemplates(final Connection conn) { LOG.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e); } - if (hypervisorAndTemplateName.getKey() == Hypervisor.HypervisorType.VMware) { - updateVMwareSystemvVMTemplateField(conn, hypervisorAndTemplateName.getValue()); - } + // update template ID of system Vms try (PreparedStatement update_templ_id_pstmt = conn .prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? 
and removed is NULL");) { @@ -228,9 +227,6 @@ public void updateSystemVmTemplates(final Connection conn) { throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " + hypervisorAndTemplateName.getKey().toString(), e); } - if (hypervisorAndTemplateName.getKey() == Hypervisor.HypervisorType.VMware) { - updateVMwareSystemvVMTemplateField(conn, hypervisorAndTemplateName.getValue()); - } } } } catch (final SQLException e) { From ce506892be726239fa2fbe3ad89289a2a9864455 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 25 Mar 2021 10:37:14 +0530 Subject: [PATCH 048/117] Allow vol snap for a disk brought up using sysvm template - only for cks nodes --- .../com/cloud/storage/VolumeApiServiceImpl.java | 14 ++++++++++++-- .../storage/snapshot/SnapshotManagerImpl.java | 8 +++++++- ui/public/locales/en.json | 3 --- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 6f362a7720f1..4e0b676682aa 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -2734,7 +2734,12 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, if (volume.getTemplateId() != null) { VMTemplateVO template = _templateDao.findById(volume.getTemplateId()); - if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) { + Long instanceId = volume.getInstanceId(); + UserVmVO userVmVO = null; + if (instanceId != null) { + userVmVO = _userVmDao.findById(instanceId); + } + if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()))) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); } } @@ -2791,7 +2796,12 @@ public Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName if (volume.getTemplateId() != null) { VMTemplateVO template = _templateDao.findById(volume.getTemplateId()); - if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) { + Long instanceId = volume.getInstanceId(); + UserVmVO userVmVO = null; + if (instanceId != null) { + userVmVO = _userVmDao.findById(instanceId); + } + if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()))) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); } } diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 630adde57dc9..346898b191bb 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -132,6 +132,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -836,7 +837,12 @@ public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, 
Account policy if (volume.getTemplateId() != null) { VMTemplateVO template = _templateDao.findById(volume.getTemplateId()); - if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) { + Long instanceId = volume.getInstanceId(); + UserVmVO userVmVO = null; + if (instanceId != null) { + userVmVO = _vmDao.findById(instanceId); + } + if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()))) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); } } diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 879c0d773503..a13561460ce4 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -2452,12 +2452,9 @@ "message.action.revert.snapshot": "Please confirm that you want to revert the owning volume to this snapshot.", "message.action.router.health.checks": "Health checks result will be fetched from router.", "message.action.router.health.checks.disabled.warning": "Please enable router health checks.", -<<<<<<< HEAD "message.action.scale.kubernetes.cluster.warning": "Please do not manually scale the cluster if cluster autoscaling is enabled", -======= "message.action.secondary.storage.read.only": "Please confirm that you want to make this secondary storage read only.", "message.action.secondary.storage.read.write": "Please confirm that you want to make this secondary storage read write.", ->>>>>>> 775de36688b18dad651af69179cd7776d6d2b791 "message.action.secure.host": "This will restart the host agent and libvirtd process after applying new X509 certificates, please confirm?", "message.action.settings.warning.vm.running": "Please stop the virtual machine to access settings", "message.action.settings.warning.vm.started": "Virtual machine has been started. 
It needs to be stopped to access settings", From 39ef51bbde633f03f97fa356b7acbd5d7162b921 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 26 Mar 2021 21:05:52 +0530 Subject: [PATCH 049/117] Update sysvm template + default to legacy iptables on cks nodes --- systemvm/debian/opt/cloud/bin/setup/cksnode.sh | 5 +++++ tools/appliance/systemvmtemplate/template.json | 4 ++-- .../com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/systemvm/debian/opt/cloud/bin/setup/cksnode.sh b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh index b40de4124df7..a864d188d009 100755 --- a/systemvm/debian/opt/cloud/bin/setup/cksnode.sh +++ b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh @@ -21,6 +21,11 @@ setup_k8s_node() { log_it "Setting up k8s node" + update-alternatives --set iptables /usr/sbin/iptables-legacy + update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy + update-alternatives --set arptables /usr/sbin/arptables-legacy + update-alternatives --set ebtables /usr/sbin/ebtables-legacy + # set default ssh port and restart sshd service sed -i 's/3922/22/g' /etc/ssh/sshd_config diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index 929e6d4af0d6..bbff56de69f4 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -36,8 +36,8 @@ "disk_size": 3000, "disk_interface": "virtio", "net_device": "virtio-net", - "iso_url": "https://download.cloudstack.org/systemvm/debian/debian-10.7.0-amd64-netinst.iso", - "iso_checksum": "cb6795ca61326e9fa58099898e53dc6c708f4b1473687fab5679f824adc78bbe1d543f3b4aed9e56613e7b150e27d6be317efc499e25a92efefed1ed623a90a6", + "iso_url": "https://cdimage.debian.org/debian-cd/10.8.0/amd64/iso-cd/debian-10.8.0-amd64-netinst.iso", + "iso_checksum": "934336d266535c91fcd12cd122c81f8261721efa117fdcb9a31615caa96c5c5ce3454ed5f28f1b25a7b1b5f44631fdfa78a93adb6445e2e2caaf6455ab344cf8", "iso_checksum_type": "sha512", "output_directory": "../dist", "http_directory": "http", diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index 5f61b31171ca..2aa3d0fd41eb 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -1564,7 +1564,7 @@ public void attachIso(String isoDatastorePath, ManagedObjectReference morDs, } public int detachIso(String isoDatastorePath) throws Exception { - return detachIso(isoDatastorePath, false); + return detachIso(isoDatastorePath, true); } public int detachIso(String isoDatastorePath, final boolean force) throws Exception { From 8b0554c779ec534512d6b99202c3bfd053c7dd26 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 31 Mar 2021 15:26:46 +0530 Subject: [PATCH 050/117] enable full clone for vmware for cks tests --- .../smoke/test_kubernetes_clusters.py | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 2440de06b7ae..c1399ea15707 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -38,6 +38,7 @@ from marvin.lib.base import (Template, ServiceOffering, Account, + StoragePool, Configurations) from marvin.lib.utils import (cleanup_resources, 
validateList, @@ -87,6 +88,20 @@ def setUpClass(cls): "cloud.kubernetes.service.enabled", "true") cls.restartServer() + if cls.hypervisor.lower() == 'vmware': + cls.create_full_clone = Configurations.list(cls.apiclient, name="vmware.create.full.clone")[0].value + if cls.create_full_clone not in ["true", True]: + Configurations.update(cls.apiclient, + "vmware.create.full.clone", + "true") + allStoragePools = StoragePool.list( + cls.apiclient + ) + for pool in allStoragePools: + Configurations.update(cls.apiclient, + storageid=pool.id, + name="vmware.create.full.clone", + value="true") cls.cks_service_offering = None @@ -146,6 +161,21 @@ def tearDownClass(cls): "false") cls.restartServer() + if cls.hypervisor.lower() == 'vmware': + cls.create_full_clone = Configurations.list(cls.apiclient, name="vmware.create.full.clone")[0].value + if cls.create_full_clone in ["true", True]: + Configurations.update(cls.apiclient, + "vmware.create.full.clone", + "false") + allStoragePools = StoragePool.list( + cls.apiclient + ) + for pool in allStoragePools: + Configurations.update(cls.apiclient, + storageid=pool.id, + name="vmware.create.full.clone", + value="false") + cleanup_resources(cls.apiclient, reversed(cls._cleanup)) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) From 289b6de6c2f2dd425cbd5802a9b002973ed357c4 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 31 Mar 2021 20:06:24 +0530 Subject: [PATCH 051/117] Update checksums for new system vm template --- .../upgrade/dao/Upgrade41510to41600.java | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index d5e93a8eecd0..551f9dde991c 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -126,23 +126,23 @@ public void updateSystemVmTemplates(final Connection conn) { final Map newTemplateUrl = new HashMap() { { - put(Hypervisor.HypervisorType.KVM, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.VMware, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-vmware.ova"); - put(Hypervisor.HypervisorType.XenServer, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-xen.vhd.bz2"); - put(Hypervisor.HypervisorType.Hyperv, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-hyperv.vhd.zip"); - put(Hypervisor.HypervisorType.LXC, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.Ovm3, "http://sbjenkins-stagingrepo.jenkins.lon/systemvmtemplate/custom/cks-debian/systemvmtemplate-ovm.raw.bz22"); + put(Hypervisor.HypervisorType.KVM, "http://sl-sb-repo.sofia.shapeblue.com/systemvmtemplate/custom/cks-debian/systemvmtemplate-kvm.qcow2.bz2"); + put(Hypervisor.HypervisorType.VMware, "http://sl-sb-repo.sofia.shapeblue.com/systemvmtemplate/custom/cks-debian/systemvmtemplate-vmware.ova"); + put(Hypervisor.HypervisorType.XenServer, "http://sl-sb-repo.sofia.shapeblue.com/systemvmtemplate/custom/cks-debian/systemvmtemplate-xen.vhd.bz2"); + put(Hypervisor.HypervisorType.Hyperv, 
"http://sl-sb-repo.sofia.shapeblue.com/systemvmtemplate/custom/cks-debian/systemvmtemplate-hyperv.vhd.zip"); + put(Hypervisor.HypervisorType.LXC, "http://sl-sb-repo.sofia.shapeblue.com/systemvmtemplate/custom/cks-debian/systemvmtemplate-kvm.qcow2.bz2"); + put(Hypervisor.HypervisorType.Ovm3, "http://sl-sb-repo.sofia.shapeblue.com/systemvmtemplate/custom/cks-debian/systemvmtemplate-ovm.raw.bz2"); } }; final Map newTemplateChecksum = new HashMap() { { - put(Hypervisor.HypervisorType.KVM, "6e4ed6ba28b50e455ac6708963331013"); - put(Hypervisor.HypervisorType.XenServer, "4ba9fb9b749c5503e46396be999406c1"); - put(Hypervisor.HypervisorType.VMware, "dcb9d3b0e6b7fed9e65846b0c52a1964"); - put(Hypervisor.HypervisorType.Hyperv, "7c7716bf5f4f3ee2b708aa9a3162a8e4"); - put(Hypervisor.HypervisorType.LXC, "6e4ed6ba28b50e455ac6708963331013"); - put(Hypervisor.HypervisorType.Ovm3, "6f87e2945b8dbd400de5e5c9c788d593"); + put(Hypervisor.HypervisorType.KVM, "810b13ad9c095f3a69cfd8285f720507"); + put(Hypervisor.HypervisorType.XenServer, "30a93ca4c80e929cfc8b806d6751a396"); + put(Hypervisor.HypervisorType.VMware, "31d386e8abf5234dbbe1cae048bab700"); + put(Hypervisor.HypervisorType.Hyperv, "474e4d4126cea4a0f55e925bace3457f"); + put(Hypervisor.HypervisorType.LXC, "810b13ad9c095f3a69cfd8285f720507"); + put(Hypervisor.HypervisorType.Ovm3, "6c06ec7c6c4b18ab7475d87a75b1edf1"); } }; From d49bab74b2f653c94182e6ae894fda2d692d2d84 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 10 Jun 2021 10:22:30 +0530 Subject: [PATCH 052/117] fix test and dependencies --- test/integration/smoke/test_kubernetes_clusters.py | 3 +-- tools/marvin/marvin/config/test_data.py | 2 +- tools/marvin/setup.py | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 0b979a4dae3d..5b2eef4a67a1 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -485,8 +485,7 @@ def test_06_delete_kubernetes_cluster(self): """Test to delete an existing Kubernetes cluster # Validate the following: - # 1. scaleKubernetesCluster should return valid info for the cluster when it is autoscaled - # 2. cluster-autoscaler pod should be running + # 1. 
deleteKubernetesCluster should delete an existing Kubernetes cluster """ if self.setup_failed == True: self.fail("Setup incomplete") diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index 26e79a375b01..48169aaf853d 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -2052,7 +2052,7 @@ }, "1.16.3": { "semanticversion": "1.16.3", - "url": "http://sbjenkins-stagingrepo.jenkins.lon/cks/binaries-iso/as-1.16.3.iso", + "url": "http://download.cloudstack.org/cks/as-1.16.3.iso", "mincpunumber": 2, "minmemory": 2048 } diff --git a/tools/marvin/setup.py b/tools/marvin/setup.py index e216724f83c5..a35aec23d07e 100644 --- a/tools/marvin/setup.py +++ b/tools/marvin/setup.py @@ -58,7 +58,7 @@ "pytz", "retries", "PyCrypt", - "kubernetes" + "kubernetes", "urllib3" ], py_modules=['marvin.marvinPlugin'], From 4d11ea75b8c172c80729a998055887c2140170ac Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 8 Jul 2021 20:22:24 +0530 Subject: [PATCH 053/117] test changes to include inclusivity changes --- .../smoke/test_kubernetes_clusters.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 5b2eef4a67a1..71419f8e4d2d 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -634,7 +634,7 @@ def waitForAutoscalerPodInRunningState(self, cluster_id, retries=5, interval=60) retries = retries - 1 return False - def getValidKubernetesCluster(self, size=1, master_nodes=1, version={}): + def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}): cluster = k8s_cluster # Does a cluster already exist ? @@ -642,10 +642,10 @@ def getValidKubernetesCluster(self, size=1, master_nodes=1, version={}): if not version: version = self.kubernetes_version_1_16_0 self.debug("No existing cluster available, k8s_cluster: %s" % cluster) - return self.createNewKubernetesCluster(version, size, master_nodes) + return self.createNewKubernetesCluster(version, size, control_nodes) # Is the existing cluster what is needed ? - valid = cluster.size == size and cluster.masternodes == master_nodes + valid = cluster.size == size and cluster.controlnodes == control_nodes if version: # Check the version only if specified valid = valid and cluster.kubernetesversionid == version.id @@ -658,7 +658,7 @@ def getValidKubernetesCluster(self, size=1, master_nodes=1, version={}): if cluster == None: # Looks like the cluster disappeared ! 
self.debug("Existing cluster, k8s_cluster ID: %s not returned by list API" % cluster_id) - return self.createNewKubernetesCluster(version, size, master_nodes) + return self.createNewKubernetesCluster(version, size, control_nodes) if valid: try: @@ -674,14 +674,14 @@ def getValidKubernetesCluster(self, size=1, master_nodes=1, version={}): self.deleteKubernetesClusterAndVerify(cluster.id, False, True) self.debug("No valid cluster, need to deploy a new one") - return self.createNewKubernetesCluster(version, size, master_nodes) + return self.createNewKubernetesCluster(version, size, control_nodes) - def createNewKubernetesCluster(self, version, size, master_nodes) : + def createNewKubernetesCluster(self, version, size, control_nodes) : name = 'testcluster-' + random_gen() self.debug("Creating for Kubernetes cluster with name %s" % name) try: - cluster = self.createKubernetesCluster(name, version.id, size, master_nodes) - self.verifyKubernetesCluster(cluster, name, version.id, size, master_nodes) + cluster = self.createKubernetesCluster(name, version.id, size, control_nodes) + self.verifyKubernetesCluster(cluster, name, version.id, size, control_nodes) except Exception as ex: self.fail("Kubernetes cluster deployment failed: %s" % ex) except AssertionError as err: From 062fa0c4764645eae790383ed6aa593801103653 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 20 Jul 2021 11:28:20 +0530 Subject: [PATCH 054/117] SystemVM upgrade improvements --- .../upgrade/SystemVmTemplateRegistration.java | 20 ++++ packaging/centos7/cloud.spec | 4 + packaging/centos8/cloud.spec | 3 + packaging/package.sh | 20 ++++ packaging/templateConfig.py | 99 +++++++++++++++++++ .../resource/LibvirtComputingResource.java | 1 + scripts/vm/hypervisor/kvm/patch.sh | 10 +- .../consoleproxy/ConsoleProxyManagerImpl.java | 11 +++ .../VirtualNetworkApplianceManagerImpl.java | 10 ++ .../cloud/server/ConfigurationServerImpl.java | 11 ++- .../SecondaryStorageManagerImpl.java | 10 ++ systemvm/debian/opt/cloud/bin/setup/common.sh | 13 +++ systemvm/pom.xml | 59 +++++++++++ systemvm/templateConfig.py | 99 +++++++++++++++++++ tools/apidoc/build-apidoc.sh | 11 +++ 15 files changed, 371 insertions(+), 10 deletions(-) create mode 100644 engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java create mode 100644 packaging/templateConfig.py create mode 100644 systemvm/templateConfig.py diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java new file mode 100644 index 000000000000..be1fccf84b81 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -0,0 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade; + +public class SystemVmTemplateRegistration { +} diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec index b1b69b7549de..d887c384e77b 100644 --- a/packaging/centos7/cloud.spec +++ b/packaging/centos7/cloud.spec @@ -298,6 +298,9 @@ install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir} touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina +# SystemVM template +cp -r packaging/templates/* ${RPM_BUILD_ROOT}%{_datadir}/templates/ + # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/ @@ -551,6 +554,7 @@ pip3 install --upgrade urllib3 %{_datadir}/%{name}-management/conf %{_datadir}/%{name}-management/lib/*.jar %{_datadir}/%{name}-management/logs +%{_datadir}/%{name}-management/templates %attr(0755,root,root) %{_bindir}/%{name}-setup-databases %attr(0755,root,root) %{_bindir}/%{name}-migrate-databases %attr(0755,root,root) %{_bindir}/%{name}-set-guest-password diff --git a/packaging/centos8/cloud.spec b/packaging/centos8/cloud.spec index 9e62691782f9..eba1308b946e 100644 --- a/packaging/centos8/cloud.spec +++ b/packaging/centos8/cloud.spec @@ -291,6 +291,9 @@ install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir} touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina +# SystemVM template +cp -r packaging/templates/* ${RPM_BUILD_ROOT}%{_datadir}/templates/ + # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/ diff --git a/packaging/package.sh b/packaging/package.sh index 380908be7ba7..f653b9d52993 100755 --- a/packaging/package.sh +++ b/packaging/package.sh @@ -79,6 +79,21 @@ function packaging() { DISTRO=$3 + if [ "$1" == "noredist" ] ; then + PYTHON=$(which python) + if [ -z "$PYTHON" ] ; then + PYTHON=$(which python2) + if [ -z "$PYTHON" ] ; then + PYTHON=$(which python3) + if [ -z "$PYTHON" ] ; then + echo -e "python not found\n RPM Build Failed" + exit 2 + fi + fi + fi + $PYTHON ./templateConfig.py + fi + MVN=$(which mvn) if [ -z "$MVN" ] ; then MVN=$(locate bin/mvn | grep -e mvn$ | tail -1) @@ -91,6 +106,9 @@ function packaging() { VERSION=$(cd $PWD/../; $MVN org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version | grep --color=none '^[0-9]\.') REALVER=$(echo "$VERSION" | cut -d '-' -f 1) + echo $VERSION + echo $REALVER + if [ -n "$5" ]; then BRAND="${5}." else @@ -104,6 +122,8 @@ function packaging() { fi fi + echo $BASEVER + if echo "$VERSION" | grep -q SNAPSHOT ; then if [ -n "$4" ] ; then DEFREL="-D_rel ${BRAND}${INDICATOR}.$4" diff --git a/packaging/templateConfig.py b/packaging/templateConfig.py new file mode 100644 index 000000000000..6179133cc9c3 --- /dev/null +++ b/packaging/templateConfig.py @@ -0,0 +1,99 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import requests +import hashlib +import backports.configparser as configparser +from multiprocessing.pool import ThreadPool + +CS_MAJOR_VERSION=4.16 +CS_MINOR_VERSION=0 +templates = { + "kvm": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "vmware": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-vmware.ova" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "xenserver": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-xen.vhd.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "hyperv": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-hyperv.vhd.zip" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "lxc": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "ovm3": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-ovm.raw.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), +} + +checksums = { + "kvm": "07268f267dc4316dc5f86150346bb8d7", + "vmware": "b356cbbdef67c4eefa8c336328e2b202", + "xenserver": "71d8adb40baa609997acdc3eae15fbde", + "hyperv": "0982aa1461800ce1538e0cae07e00770", + "lxc": "07268f267dc4316dc5f86150346bb8d7", + "ovm3": "8c643d146c82f92843b8a48c7661f800" +} +destination = os.path.dirname(os.path.abspath(__file__)) + '/templates/' +if not os.path.exists(destination): + os.makedirs(destination) + +metadataFile = destination + 'metadata.ini' + + +def downloadSystemvmTemplate(url): + fileName = url.rsplit('/', 1)[1] + fileName = destination + fileName + if (os.path.exists(fileName)): + checksum = hashlib.md5(open(fileName, 'rb').read()).hexdigest() + fileChecksum = checksums[list(templates.keys())[list(templates.values()).index(url)]] + if checksum == fileChecksum: + print('Template ' + url + ' already downloaded') + return + try: + r = requests.get(url, stream=True) + if r.status_code == 200: + with open(fileName, 'wb') as f: + for chunk in r: + f.write(chunk) + except Exception as e: + print(e) + + return fileName + +def downloadTemplates(): + results = ThreadPool(4).imap_unordered(downloadSystemvmTemplate, list(templates.values())) + for path in results: + print(path) + +def createMetadataFile(): + templateFiles = [f for f in os.listdir(destination) if os.path.isfile(os.path.join(destination, f))] + # print(templates) + write_config = configparser.ConfigParser() + for template in templateFiles: + value = list(filter(lambda temp : template in temp, templates.values())) + if len(value) == 0: + continue + hypervisor = list(templates.keys())[list(templates.values()).index(value[0])] + write_config.add_section(hypervisor) + write_config.set(hypervisor, "templateName", "systemvm-{0}-{1}.{2}".format(hypervisor, CS_MAJOR_VERSION, CS_MINOR_VERSION)) + write_config.set(hypervisor, "checksum", checksums[hypervisor]) + + cfgfile = open(metadataFile, 
'w') + write_config.write(cfgfile) + cfgfile.close() + +downloadTemplates() +createMetadataFile() diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 338a41b6c256..be195aefeba9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -1596,6 +1596,7 @@ public boolean passCmdLine(final String vmName, final String cmdLine) throws Int command.add("-n", vmName); command.add("-c", cmdLine); result = command.execute(); + if (result != null) { s_logger.error("Passing cmdline failed:" + result); return false; diff --git a/scripts/vm/hypervisor/kvm/patch.sh b/scripts/vm/hypervisor/kvm/patch.sh index e7c79fd9a739..e95b046d4038 100755 --- a/scripts/vm/hypervisor/kvm/patch.sh +++ b/scripts/vm/hypervisor/kvm/patch.sh @@ -70,11 +70,11 @@ do sleep 0.1 done -# Write ssh public key -send_file $name "/root/.ssh/authorized_keys" $sshkey - -# Fix ssh public key permission -virsh qemu-agent-command $name '{"execute":"guest-exec","arguments":{"path":"chmod","arg":["go-rwx","/root/.ssh/authorized_keys"]}}' > /dev/null +## Write ssh public key +#send_file $name "/root/.ssh/authorized_keys" $sshkey +# +## Fix ssh public key permission +#virsh qemu-agent-command $name '{"execute":"guest-exec","arguments":{"path":"chmod","arg":["go-rwx","/root/.ssh/authorized_keys"]}}' > /dev/null # Write cmdline payload send_file $name "/var/cache/cloud/cmdline" $cmdline diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 3a89d9641c57..31bf7dcdce65 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -17,8 +17,10 @@ package com.cloud.consoleproxy; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Date; import java.util.HashMap; import java.util.Iterator; @@ -1387,6 +1389,15 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl buf.append(" disable_rp_filter=true"); } + String MsPublicKey = _configDao.getValue("ssh.publickey"); + String base64EncodedPublicKey = null; + if (MsPublicKey != null) { + // base64EncodedPublicKey = Base64.getEncoder().encodeToString(DBEncryptionUtil.decrypt(MsPrivateKey).getBytes(StandardCharsets.UTF_8)); + base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); + } + + buf.append(" authorized_key=").append(base64EncodedPublicKey); + boolean externalDhcp = false; String externalDhcpStr = _configDao.getValue("direct.attach.network.externalIpAllocator.enabled"); if (externalDhcpStr != null && externalDhcpStr.equalsIgnoreCase("true")) { diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 7ff911393f1a..9d617d491ae5 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -20,12 
+20,14 @@ import java.lang.reflect.Type; import java.math.BigInteger; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Calendar; import java.util.Collections; import java.util.Date; @@ -1961,6 +1963,14 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile if (Boolean.valueOf(_configDao.getValue("system.vm.random.password"))) { buf.append(" vmpassword=").append(_configDao.getValue("system.vm.password")); } + String MsPublicKey = _configDao.getValue("ssh.publickey"); + String base64EncodedPublicKey = null; + if (MsPublicKey != null) { + // base64EncodedPublicKey = Base64.getEncoder().encodeToString(DBEncryptionUtil.decrypt(MsPublicKey).getBytes(StandardCharsets.UTF_8)); + base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); + } + + buf.append(" authorized_key=").append(base64EncodedPublicKey); NicProfile controlNic = null; String defaultDns1 = null; diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index 1d5b5821b467..5557a2bfb95d 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -669,7 +669,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } s_logger.info("Going to update systemvm iso with generated keypairs if needed"); try { - injectSshKeysIntoSystemVmIsoPatch(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath()); + // injectSshKeysIntoSystemVmIsoPatch(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath()); } catch (CloudRuntimeException e) { if (!devel) { throw new CloudRuntimeException(e.getMessage()); @@ -761,10 +761,11 @@ protected void injectSshKeysIntoSystemVmIsoPatch(String publicKeyPath, String pr publicKeyPath = publicKeyPath.replaceAll("\\\\" ,"/" ); privKeyPath = privKeyPath.replaceAll("\\\\" ,"/" ); } - command.add(scriptPath); - command.add(publicKeyPath); - command.add(privKeyPath); - command.add(systemVmIsoPath); + +// command.add(scriptPath); +// command.add(publicKeyPath); +// command.add(privKeyPath); +// command.add(systemVmIsoPath); final String result = command.execute(); s_logger.info("The script injectkeys.sh was run with result : " + result); diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index f7c22c209852..b6a14f97ead2 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -18,8 +18,10 @@ import java.net.URI; import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Collections; import java.util.Date; import java.util.HashMap; @@ -1084,6 +1086,14 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl 
buf.append(" guid=").append(profile.getVirtualMachine().getHostName()); buf.append(" workers=").append(_configDao.getValue("workers")); + String MsPublicKey = _configDao.getValue("ssh.publickey"); + String base64EncodedPublicKey = null; + if (MsPublicKey != null) { + // base64EncodedPublicKey = Base64.getEncoder().encodeToString(DBEncryptionUtil.decrypt(MsPrivateKey).getBytes(StandardCharsets.UTF_8)); + base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); + } + + buf.append(" authorized_key=").append(base64EncodedPublicKey); if (_configDao.isPremium()) { s_logger.debug("VMWare hypervisor was configured, informing secondary storage VM to load the PremiumSecondaryStorageResource."); diff --git a/systemvm/debian/opt/cloud/bin/setup/common.sh b/systemvm/debian/opt/cloud/bin/setup/common.sh index 987f07d7659d..7a999684c834 100755 --- a/systemvm/debian/opt/cloud/bin/setup/common.sh +++ b/systemvm/debian/opt/cloud/bin/setup/common.sh @@ -759,6 +759,8 @@ parse_cmd_line() { ntpserverlist) export NTP_SERVER_LIST=$VALUE ;; + authorized_key) + export AUTHORIZED_KEYS=$VALUE esac done echo -e "\n\t}\n}" >> ${CHEF_TMP_FILE} @@ -767,6 +769,17 @@ parse_cmd_line() { mv ${CHEF_TMP_FILE} /var/cache/cloud/cmd_line.json fi + TMP_KEY_PATH=/tmp/.auth_key + AUTHORIZED_KEYS_PATH=/root/.ssh/authorized_keys + if [ ! -z "$AUTHORIZED_KEYS" ] + then + echo "$AUTHORIZED_KEYS" > $TMP_KEY_PATH + base64Val=`base64 -d $TMP_KEY_PATH` + echo "$base64Val" > $AUTHORIZED_KEYS_PATH + chmod go-rwx $AUTHORIZED_KEYS_PATH + rm -rf $TMP_KEY_PATH + fi + [ $ETH0_IP ] && export LOCAL_ADDRS=$ETH0_IP [ $ETH0_IP6 ] && export LOCAL_ADDRS=$ETH0_IP6 [ $ETH0_IP ] && [ $ETH0_IP6 ] && export LOCAL_ADDRS="$ETH0_IP,$ETH0_IP6" diff --git a/systemvm/pom.xml b/systemvm/pom.xml index 0c5406796ce4..f3d3a86ecdc8 100644 --- a/systemvm/pom.xml +++ b/systemvm/pom.xml @@ -138,6 +138,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + org.codehaus.mojo exec-maven-plugin @@ -178,6 +204,39 @@ genisoimage + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + vmware diff --git a/systemvm/templateConfig.py b/systemvm/templateConfig.py new file mode 100644 index 000000000000..d0fb590e13d4 --- /dev/null +++ b/systemvm/templateConfig.py @@ -0,0 +1,99 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import os +import requests +import hashlib +import backports.configparser as configparser +from multiprocessing.pool import ThreadPool + +CS_MAJOR_VERSION=4.16 +CS_MINOR_VERSION=0 +templates = { + "kvm": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "vmware": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-vmware.ova" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "xenserver": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-xen.vhd.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "hyperv": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-hyperv.vhd.zip" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "lxc": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "ovm3": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-ovm.raw.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), +} + +checksums = { + "kvm": "07268f267dc4316dc5f86150346bb8d7", + "vmware": "b356cbbdef67c4eefa8c336328e2b202", + "xenserver": "71d8adb40baa609997acdc3eae15fbde", + "hyperv": "0982aa1461800ce1538e0cae07e00770", + "lxc": "07268f267dc4316dc5f86150346bb8d7", + "ovm3": "8c643d146c82f92843b8a48c7661f800" +} +destination = os.path.dirname(os.path.abspath(__file__)) + '/templates/' +if not os.path.exists(destination): + os.makedirs(destination) + +metadataFile = destination + 'metadata.ini' + + +def downloadSystemvmTemplate(url): + fileName = url.rsplit('/', 1)[1] + fileName = destination + fileName + if (os.path.exists(fileName)): + checksum = hashlib.md5(open(fileName, 'rb').read()).hexdigest() + fileChecksum = checksums[list(templates.keys())[list(templates.values()).index(url)]] + if checksum == fileChecksum: + print('Template ' + url + ' already downloaded') + return + try: + r = requests.get(url, stream=True) + if r.status_code == 200: + with open(fileName, 'wb') as f: + for chunk in r: + f.write(chunk) + except Exception as e: + print(e) + + return fileName + +def downloadTemplates(): + results = ThreadPool(4).imap_unordered(downloadSystemvmTemplate, list(templates.values())) + for path in results: + print(path) + +def createMetadataFile(): + templateFiles = [f for f in os.listdir(destination) if os.path.isfile(os.path.join(destination, f))] + # print(templates) + write_config = configparser.ConfigParser() + for template in templateFiles: + value = filter(lambda temp : template in temp, templates.values()) + if len(value) == 0: + continue + hypervisor = list(templates.keys())[list(templates.values()).index(value[0])] + write_config.add_section(hypervisor) + write_config.set(hypervisor, "templateName", "systemvm-{0}-{1}.{2}".format(hypervisor, CS_MAJOR_VERSION, CS_MINOR_VERSION)) + write_config.set(hypervisor, "checksum", checksums[hypervisor]) + + cfgfile = open(metadataFile, 'w') + write_config.write(cfgfile) + cfgfile.close() + +downloadTemplates() +createMetadataFile() \ No newline at end of file diff --git a/tools/apidoc/build-apidoc.sh b/tools/apidoc/build-apidoc.sh index 33123b31f2b2..4555056a95ce 100755 --- a/tools/apidoc/build-apidoc.sh +++ b/tools/apidoc/build-apidoc.sh @@ -63,6 +63,17 @@ case "$(uname)" in Darwin*) sedi='-i ""' esac +PYTHON=$(which python) +if [ -z "$PYTHON" ] ; then + PYTHON=$(which python2) + if [ -z "$PYTHON" ] ; then + PYTHON=$(which python3) + if [ -z "$PYTHON" ] ; then + echo -e "python not found\n API Docs build 
failed" + exit 2 + fi + fi +fi set -e (cd "$DISTDIR/xmldoc" cp "$thisdir"/*.java . From fc7d93e273ee86e4d30096e7b1813d73ff091968 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 20 Jul 2021 17:20:15 +0530 Subject: [PATCH 055/117] install pkg - requests --- debian/rules | 3 +++ packaging/build-deb.sh | 6 ++++++ packaging/centos7/cloud.spec | 3 ++- packaging/centos8/cloud.spec | 4 +++- packaging/package.sh | 16 ++-------------- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/debian/rules b/debian/rules index a19089a18165..ea59db0a20c0 100755 --- a/debian/rules +++ b/debian/rules @@ -66,17 +66,20 @@ override_dh_auto_install: mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/lib mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/setup + mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/templates mkdir $(DESTDIR)/var/log/$(PACKAGE)/management mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management mkdir $(DESTDIR)/var/log/$(PACKAGE)/ipallocator mkdir $(DESTDIR)/var/lib/$(PACKAGE)/management mkdir $(DESTDIR)/var/lib/$(PACKAGE)/mnt + cp -r client/target/utilities/scripts/db/* $(DESTDIR)/usr/share/$(PACKAGE)-management/setup/ cp -r client/target/classes/META-INF/webapp $(DESTDIR)/usr/share/$(PACKAGE)-management/webapp cp server/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/ cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/ cp client/target/cloud-client-ui-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/cloudstack-$(VERSION).jar cp client/target/lib/*jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/ + cp -r packaging/templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/ # nast hack for a couple of configuration files mv $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/cloudstack-limits.conf $(DESTDIR)/$(SYSCONFDIR)/security/limits.d/ diff --git a/packaging/build-deb.sh b/packaging/build-deb.sh index 3ec3ee12ff64..5857ffdd8a4c 100755 --- a/packaging/build-deb.sh +++ b/packaging/build-deb.sh @@ -155,6 +155,12 @@ fi dch -b -v "${VERSION}~${DISTCODE}" -u low -m "Apache CloudStack Release ${VERSION}" sed -i '0,/ UNRELEASED;/s// unstable;/g' debian/changelog +apt-get install -y wget +wget https://bootstrap.pypa.io/pip/2.7/get-pip.py +python get-pip.py +pip2 install requests configparser +python templateConfig.py + dpkg-checkbuilddeps dpkg-buildpackage -uc -us -b diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec index d887c384e77b..7ad8062d7053 100644 --- a/packaging/centos7/cloud.spec +++ b/packaging/centos7/cloud.spec @@ -299,7 +299,8 @@ touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina # SystemVM template -cp -r packaging/templates/* ${RPM_BUILD_ROOT}%{_datadir}/templates/ +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +cp -r packaging/templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui diff --git a/packaging/centos8/cloud.spec b/packaging/centos8/cloud.spec index eba1308b946e..2c2bc71a42ce 100644 --- a/packaging/centos8/cloud.spec +++ b/packaging/centos8/cloud.spec @@ -292,7 +292,8 @@ touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina # SystemVM template -cp -r packaging/templates/* 
${RPM_BUILD_ROOT}%{_datadir}/templates/ +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +cp -r packaging/templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui @@ -542,6 +543,7 @@ pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz %{_datadir}/%{name}-management/conf %{_datadir}/%{name}-management/lib/*.jar %{_datadir}/%{name}-management/logs +%{_datadir}/%{name}-management/templates %attr(0755,root,root) %{_bindir}/%{name}-setup-databases %attr(0755,root,root) %{_bindir}/%{name}-migrate-databases %attr(0755,root,root) %{_bindir}/%{name}-set-guest-password diff --git a/packaging/package.sh b/packaging/package.sh index f653b9d52993..bf4b3d33c1e1 100755 --- a/packaging/package.sh +++ b/packaging/package.sh @@ -79,20 +79,8 @@ function packaging() { DISTRO=$3 - if [ "$1" == "noredist" ] ; then - PYTHON=$(which python) - if [ -z "$PYTHON" ] ; then - PYTHON=$(which python2) - if [ -z "$PYTHON" ] ; then - PYTHON=$(which python3) - if [ -z "$PYTHON" ] ; then - echo -e "python not found\n RPM Build Failed" - exit 2 - fi - fi - fi - $PYTHON ./templateConfig.py - fi + pip3 install requests configparser + python3 ./templateConfig.py MVN=$(which mvn) if [ -z "$MVN" ] ; then From 33eec1e02490771ed7194ed0e5d5ad716578c093 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 21 Jul 2021 18:10:16 +0530 Subject: [PATCH 056/117] Add logic for template registration if not preset --- .../upgrade/SystemVmTemplateRegistration.java | 269 ++++++++++++++++++ .../upgrade/dao/Upgrade41510to41600.java | 64 +---- 2 files changed, 281 insertions(+), 52 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index be1fccf84b81..5b4608122b75 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -16,5 +16,274 @@ // under the License. 
package com.cloud.upgrade; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import org.apache.log4j.Logger; + +import javax.naming.ConfigurationException; +import java.io.File; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.UUID; +import java.util.stream.Collectors; + public class SystemVmTemplateRegistration { + private static final Logger LOGGER = Logger.getLogger(SystemVmTemplateRegistration.class); + private static final String mountCommand = "mount -t nfs %s %s"; + private static final String templatesPath = "/usr/share/cloudstack-management/templates"; + private static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; + private static final String FETCH_CLOUDSTACK_VERSION = "SELECT version FROM version ORDER BY id DESC LIMIT 1"; + private static final String FETCH_DISTINCT_ELIGILBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM image_store WHERE protocol = \"nfs\" AND removed is null"; + private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url FROM image_store WHERE data_center_id=? AND removed IS NULL LIMIT 1"; + private static final String CS_MAJOR_VERSION = "4.16"; + private static final String CS_MINOR_VERSION = "0"; + + + public static final Map NewTemplateNameList = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); + put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); + put(Hypervisor.HypervisorType.XenServer, "systemvm-xenserver-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); + put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); + put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); + put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-" + CS_MAJOR_VERSION + "." 
+ CS_MINOR_VERSION); + } + }; + + public static final Map routerTemplateConfigurationNames = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, "router.template.kvm"); + put(Hypervisor.HypervisorType.VMware, "router.template.vmware"); + put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver"); + put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv"); + put(Hypervisor.HypervisorType.LXC, "router.template.lxc"); + put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3"); + } + }; + + public static final Map fileNames = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, "systemvmtemplate-4.16.0-kvm.qcow2.bz2"); + put(Hypervisor.HypervisorType.VMware, "systemvmtemplate-4.16.0-vmware.ova"); + put(Hypervisor.HypervisorType.XenServer, "systemvmtemplate-4.16.0-xen.vhd.bz2"); + put(Hypervisor.HypervisorType.Hyperv, "systemvmtemplate-4.16.0-hyperv.vhd.zip"); + put(Hypervisor.HypervisorType.LXC, "systemvmtemplate-4.16.0-kvm.qcow2.bz2"); + put(Hypervisor.HypervisorType.Ovm3, "systemvmtemplate-4.16.0-ovm.raw.bz2"); + } + }; + + public static final Map newTemplateUrl = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.KVM)); + put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.VMware)); + put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.XenServer)); + put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.Hyperv)); + put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.LXC)); + put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.Ovm3)); + } + }; + + public static final Map newTemplateChecksum = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, "81b3e48bb934784a13555a43c5ef5ffb"); + put(Hypervisor.HypervisorType.XenServer, "1b178a5dbdbe090555515340144c6017"); + put(Hypervisor.HypervisorType.VMware, "e6a88e518c57d6f36c096c4204c3417f"); + put(Hypervisor.HypervisorType.Hyperv, "5c94da45337cf3e1910dcbe084d4b9ad"); + put(Hypervisor.HypervisorType.LXC, "81b3e48bb934784a13555a43c5ef5ffb"); + put(Hypervisor.HypervisorType.Ovm3, "875c5c65455fc06c4a012394410db375"); + } + }; + + public static final Map templateFiles = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.KVM)); + put(Hypervisor.HypervisorType.XenServer, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.XenServer)); + put(Hypervisor.HypervisorType.VMware, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.VMware)); + put(Hypervisor.HypervisorType.Hyperv, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.Hyperv)); + put(Hypervisor.HypervisorType.LXC, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.LXC)); + put(Hypervisor.HypervisorType.Ovm3, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.Ovm3)); + } + }; + + + static long isTemplateAlreadyRegistered(Map.Entry hypervisorAndTemplateName) { + final TransactionLegacy txn = TransactionLegacy.open("TemplateValidation"); + long templateId = -1; + Connection conn; + try { + conn = txn.getConnection(); + PreparedStatement pstmt = conn.prepareStatement("select id from 
`cloud`.`vm_template` where name = ? and removed is null order by id desc limit 1"); + // Get systemvm template id for corresponding hypervisor + pstmt.setString(1, hypervisorAndTemplateName.getValue()); + try (ResultSet rs = pstmt.executeQuery()) { + if (rs.next()) { + templateId = rs.getLong(1); + } + } catch (final SQLException e) { + LOGGER.error("updateSystemVmTemplates: Exception caught while getting ids of SystemVM templates: " + e.getMessage()); + throw new CloudRuntimeException("updateSystemVmTemplates: Exception caught while getting ids of SystemVM templates", e); + } + } catch (SQLException e) { + String errorMessage = "Unable to upgrade the database"; + LOGGER.error(errorMessage, e); + throw new CloudRuntimeException(errorMessage, e); + } finally { + txn.close(); + } + return templateId; + } + + + private static String getHypervisorName(String name) { + if (name.equals("xenserver")) { + return "xen"; + } + if (name.equals("ovm3")) { + return "ovm"; + } + return name; + + } + + private static List getEligibleZoneIds() { + final TransactionLegacy txn = TransactionLegacy.open("FetchZones"); + List zones = new ArrayList(); + Connection conn; + try { + conn = txn.getConnection(); + PreparedStatement pstmt = conn.prepareStatement(FETCH_DISTINCT_ELIGILBLE_ZONES); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + zones.add(rs.getLong(1)); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration"); + } finally { + txn.close(); + } + return zones; + } + + private static String getNfsStoreInZone(Long zoneId) { + final TransactionLegacy txn = TransactionLegacy.open("FetchStore"); + String url = null; + Connection conn; + try { + conn = txn.getConnection(); + PreparedStatement pstmt = conn.prepareStatement(FETCH_IMAGE_STORE_PER_ZONE); + if(pstmt != null) { + pstmt.setLong(1, zoneId); + ResultSet resultSet = pstmt.executeQuery(); + while (resultSet.next()) { + url = resultSet.getString(1); + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration"); + } finally { + txn.close(); + } + return url; + } + + public static void mountStore(String storeUrl) { + try { + if (storeUrl != null) { + String path = storeUrl.split("://")[1]; + int index = path.indexOf('/'); + String host = path.substring(0, index); + String mountPath = path.substring(index); + String mount = String.format(mountCommand, host + ":" + mountPath, TEMPORARY_SECONDARY_STORE); + Script.runSimpleBashScript(mount); + } + } catch (Exception e) { + String msg = "NFS Store URL is not in the correct format"; + LOGGER.error(msg); + throw new CloudRuntimeException(msg); + + } + } + + private static String getTemplateFolder() { + String path = TEMPORARY_SECONDARY_STORE + "/template/tmpl/1/"; + File templatesDirectory = new File(path); + List templates = Arrays.asList(Objects.requireNonNull(templatesDirectory.list())); + if (templates != null && templates.size() > 0) { + Collections.sort(templates); + return path + String.valueOf(Long.parseLong(templates.get(templates.size() -1)) + 1L); + } else { + return path + "9"; + } + } + + + public static void registerTemplate(Map.Entry hypervisorAndTemplateName) { + try { + List zoneIds = getEligibleZoneIds(); + for (Long zoneId : zoneIds) { + String storeUrl = getNfsStoreInZone(zoneId); + mountStore(storeUrl); + String destTempFolder = getTemplateFolder(); + Script.runSimpleBashScript("cp " + 
templateFiles.get(hypervisorAndTemplateName.getKey()) + " " + destTempFolder); + String storageScriptsDir = "scripts/storage/secondary"; + String createTmplPath = Script.findScript(storageScriptsDir, "createtmplt.sh"); + if (createTmplPath == null) { + throw new ConfigurationException("Unable to find the createtmplt.sh"); + } + String templateName = UUID.randomUUID().toString(); + // TODO: need to add extension + String templateFilename = templateName; + Script scr = new Script(createTmplPath, 120, LOGGER); + scr.add("-n", templateFilename); + + scr.add("-t", destTempFolder); + scr.add("-f", destTempFolder); // this is the temporary + // template file downloaded + String result = scr.execute(); + } + } catch (Exception e) { + String errMsg = "Failed to register template for hypervisor: " + hypervisorAndTemplateName.getKey(); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + + public static void registerTemplates(Set hypervisorsInUse) { + // Check if templates path exists + Set hypervisors = hypervisorsInUse.stream().map(Enum::name). + map(name -> name.toLowerCase(Locale.ROOT)).map(SystemVmTemplateRegistration::getHypervisorName).collect(Collectors.toSet()); + File templatesDirectory = new File(templatesPath); + List templatePaths = new ArrayList<>(); + String[] templates = Objects.requireNonNull(templatesDirectory.list()); + for (String template : templates) { + if (hypervisors.stream().anyMatch(template::contains)) { + templatePaths.add(templatesDirectory.getPath() + "/" + template); + } else { + throw new CloudRuntimeException("SystemVm template " + template + " not found. Cannot upgrade system Vms"); + } + } + + // Perform Registration if templates not already registered + for (final Map.Entry hypervisorAndTemplateName : SystemVmTemplateRegistration.NewTemplateNameList.entrySet()) { + long templateId = isTemplateAlreadyRegistered(hypervisorAndTemplateName); + if (templateId != -1) { + continue; + } + // TODO: Concurrency?? 
+ registerTemplate(hypervisorAndTemplateName); + } + } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index 358fa574faa5..76f5fb3c073e 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -22,11 +22,11 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; -import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; +import com.cloud.upgrade.SystemVmTemplateRegistration; import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor; @@ -101,51 +101,7 @@ public void updateSystemVmTemplates(final Connection conn) { throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e); } - final Map NewTemplateNameList = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-4.16.0"); - put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-4.16.0"); - put(Hypervisor.HypervisorType.XenServer, "systemvm-xenserver-4.16.0"); - put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-4.16.0"); - put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-4.16.0"); - put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-4.16.0"); - } - }; - - final Map routerTemplateConfigurationNames = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "router.template.kvm"); - put(Hypervisor.HypervisorType.VMware, "router.template.vmware"); - put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver"); - put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv"); - put(Hypervisor.HypervisorType.LXC, "router.template.lxc"); - put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3"); - } - }; - - final Map newTemplateUrl = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova"); - put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2"); - put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-hyperv.vhd.zip"); - put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-ovm.raw.bz2"); - } - }; - - final Map newTemplateChecksum = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "81b3e48bb934784a13555a43c5ef5ffb"); - put(Hypervisor.HypervisorType.XenServer, "1b178a5dbdbe090555515340144c6017"); - put(Hypervisor.HypervisorType.VMware, "e6a88e518c57d6f36c096c4204c3417f"); - put(Hypervisor.HypervisorType.Hyperv, "5c94da45337cf3e1910dcbe084d4b9ad"); - put(Hypervisor.HypervisorType.LXC, "81b3e48bb934784a13555a43c5ef5ffb"); - put(Hypervisor.HypervisorType.Ovm3, "875c5c65455fc06c4a012394410db375"); - } - }; - - for (final Map.Entry hypervisorAndTemplateName : NewTemplateNameList.entrySet()) { + for (final Map.Entry hypervisorAndTemplateName : SystemVmTemplateRegistration.NewTemplateNameList.entrySet()) { LOG.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms"); try (PreparedStatement pstmt = 
conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? and removed is null order by id desc limit 1")) { // Get systemvm template id for corresponding hypervisor @@ -186,13 +142,13 @@ public void updateSystemVmTemplates(final Connection conn) { // router.template.* for the corresponding hypervisor try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) { update_pstmt.setString(1, hypervisorAndTemplateName.getValue()); - update_pstmt.setString(2, routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey())); + update_pstmt.setString(2, SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey())); update_pstmt.executeUpdate(); } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while setting " + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + LOG.error("updateSystemVmTemplates:Exception while setting " + SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue() + ": " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting " - + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue(), e); + + SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue(), e); } // Change value of global configuration parameter @@ -207,7 +163,11 @@ public void updateSystemVmTemplates(final Connection conn) { } } else { if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) { - throw new CloudRuntimeException(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms"); + try { + SystemVmTemplateRegistration.registerTemplates(hypervisorsListInUse); + } catch (final Exception e) { + throw new CloudRuntimeException(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms"); + } } else { LOG.warn(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. " + hypervisorAndTemplateName.getKey() + " hypervisor is not used, so not failing upgrade"); @@ -215,8 +175,8 @@ public void updateSystemVmTemplates(final Connection conn) { // hypervisor try (PreparedStatement update_templ_url_pstmt = conn .prepareStatement("UPDATE `cloud`.`vm_template` SET url = ? , checksum = ? WHERE hypervisor_type = ? 
AND type = 'SYSTEM' AND removed is null order by id desc limit 1");) { - update_templ_url_pstmt.setString(1, newTemplateUrl.get(hypervisorAndTemplateName.getKey())); - update_templ_url_pstmt.setString(2, newTemplateChecksum.get(hypervisorAndTemplateName.getKey())); + update_templ_url_pstmt.setString(1, SystemVmTemplateRegistration.newTemplateUrl.get(hypervisorAndTemplateName.getKey())); + update_templ_url_pstmt.setString(2, SystemVmTemplateRegistration.newTemplateChecksum.get(hypervisorAndTemplateName.getKey())); update_templ_url_pstmt.setString(3, hypervisorAndTemplateName.getKey().toString()); update_templ_url_pstmt.executeUpdate(); } catch (final SQLException e) { From be80bafee50e37ab3c1b76138f9daccc74da5558 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 26 Jul 2021 13:59:08 +0530 Subject: [PATCH 057/117] handle automatic upgrade if templates present in package(cloudstack-management) --- .../upgrade/SystemVmTemplateRegistration.java | 556 +++++++++++++++--- .../upgrade/dao/Upgrade41510to41600.java | 46 +- scripts/storage/secondary/createtmplt.sh | 26 +- scripts/storage/secondary/setup-sysvm-tmplt | 169 ++++++ .../consoleproxy/ConsoleProxyManagerImpl.java | 1 - .../VirtualNetworkApplianceManagerImpl.java | 1 - .../SecondaryStorageManagerImpl.java | 1 - systemvm/pom.xml | 59 -- tools/apidoc/build-apidoc.sh | 11 - 9 files changed, 662 insertions(+), 208 deletions(-) create mode 100644 scripts/storage/secondary/setup-sysvm-tmplt diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 5b4608122b75..736865b186f4 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -17,41 +17,163 @@ package com.cloud.upgrade; import com.cloud.hypervisor.Hypervisor; -import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.DateUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import org.apache.log4j.Logger; import javax.naming.ConfigurationException; +import java.io.BufferedReader; import java.io.File; +import java.io.FileInputStream; +import java.io.FileReader; +import java.io.IOException; +import java.security.MessageDigest; import java.sql.Connection; +import java.sql.Date; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; public class SystemVmTemplateRegistration { private static final Logger LOGGER = Logger.getLogger(SystemVmTemplateRegistration.class); - private static final String mountCommand = "mount -t nfs %s %s"; - private static final String templatesPath = "/usr/share/cloudstack-management/templates"; + private static final String mountCommand = "sudo mount -t nfs %s %s"; + private static final String umountCommand = "sudo umount %s"; + private static final String hashAlgorithm = "MD5"; + private static final String templatesPath = "/usr/share/cloudstack-management/templates/"; private static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; - private static final String 
FETCH_CLOUDSTACK_VERSION = "SELECT version FROM version ORDER BY id DESC LIMIT 1"; - private static final String FETCH_DISTINCT_ELIGILBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM image_store WHERE protocol = \"nfs\" AND removed is null"; - private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url FROM image_store WHERE data_center_id=? AND removed IS NULL LIMIT 1"; - private static final String CS_MAJOR_VERSION = "4.16"; - private static final String CS_MINOR_VERSION = "0"; + private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; + private static final String PARTIAL_TEMPLATE_FOLDER = "/template/tmpl/1/"; + private static final String FETCH_FOLDER_NAME = "SELECT id FROM vm_template ORDER BY id DESC LIMIT 1;"; + private static final String FETCH_DISTINCT_ELIGIBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM `cloud`.`image_store` WHERE protocol = \"nfs\" AND removed is null"; + private static final String FETCH_DISTINCT_HYPERVISORS_IN_ZONE = "SELECT DISTINCT(hypervisor_type) FROM `cloud`.`cluster` where removed is null and data_center_id=?"; + private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url,id FROM `cloud`.`image_store` WHERE data_center_id=? AND removed IS NULL LIMIT 1"; + private static final String UPDATE_VM_TEMPLATE_TABLE = "INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, featured, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, cross_zones, hypervisor_type, state)" + + "VALUES (?, ?, ?, ?, 0, 0, ?, 'SYSTEM', 0, 64, 1, ?, ?, 0, ?, ?, ?, 1, ?, 'Active')"; + private static final String UPDATE_TEMPLATE_STORE_REF_TABLE = "INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, state, destroyed, is_copy," + + " update_count, ref_cnt, store_role) VALUES (?, ?, ?, ?, NULL, 100, ?, ?, 'DOWNLOADED', NULL, NULL, ?, ?, 'READY', 0, 0, 0, 0, 'Image')"; + private static final String UPDATE_CONFIGURATION_TABLE = "UPDATE `cloud`.`configuration` SET value = ? 
WHERE name = ?"; + public static final String CS_MAJOR_VERSION = "4.16"; + public static final String CS_MINOR_VERSION = "0"; + private static class SystemVMTemplateDetails { + Long id; + String uuid; + String name; + String uniqueName; + Date created; + String url; + String checksum; + TemplateFormat format; + Integer guestOsId; + Hypervisor.HypervisorType hypervisorType; + Long storeId; + Long size; + Long physicalSize; + String installPath; + + SystemVMTemplateDetails() { + } + + SystemVMTemplateDetails(Long id, String uuid, String name, Date created, String url, String checksum, + TemplateFormat format, Integer guestOsId, Hypervisor.HypervisorType hypervisorType, + Long storeId) { + this.id = id; + this.uuid = uuid; + this.name = name; + this.created = created; + this.url = url; + this.checksum = checksum; + this.format = format; + this.guestOsId = guestOsId; + this.hypervisorType = hypervisorType; + this.storeId = storeId; + } + + public Long getId() { + return id; + } + + public String getUuid() { + return uuid; + } + + public String getName() { + return name; + } + + public Date getCreated() { + return created; + } + + public String getUrl() { + return url; + } + + public String getChecksum() { + return checksum; + } + + public TemplateFormat getFormat() { + return format; + } + + public Integer getGuestOsId() { + return guestOsId; + } + + public Hypervisor.HypervisorType getHypervisorType() { + return hypervisorType; + } + + public Long getStoreId() { + return storeId; + } + + public Long getSize() { + return size; + } + + public void setSize(Long size) { + this.size = size; + } + + public Long getPhysicalSize() { + return physicalSize; + } + + public void setPhysicalSize(Long physicalSize) { + this.physicalSize = physicalSize; + } + + public String getInstallPath() { + return installPath; + } + + public void setInstallPath(String installPath) { + this.installPath = installPath; + } + + public String getUniqueName() { + return uniqueName; + } + + public void setUniqueName(String uniqueName) { + this.uniqueName = uniqueName; + } + } + public static final Map NewTemplateNameList = new HashMap() { { put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-" + CS_MAJOR_VERSION + "." 
+ CS_MINOR_VERSION); @@ -98,50 +220,96 @@ public class SystemVmTemplateRegistration { public static final Map newTemplateChecksum = new HashMap() { { - put(Hypervisor.HypervisorType.KVM, "81b3e48bb934784a13555a43c5ef5ffb"); - put(Hypervisor.HypervisorType.XenServer, "1b178a5dbdbe090555515340144c6017"); - put(Hypervisor.HypervisorType.VMware, "e6a88e518c57d6f36c096c4204c3417f"); - put(Hypervisor.HypervisorType.Hyperv, "5c94da45337cf3e1910dcbe084d4b9ad"); - put(Hypervisor.HypervisorType.LXC, "81b3e48bb934784a13555a43c5ef5ffb"); - put(Hypervisor.HypervisorType.Ovm3, "875c5c65455fc06c4a012394410db375"); + put(Hypervisor.HypervisorType.KVM, "07268f267dc4316dc5f86150346bb8d7"); + put(Hypervisor.HypervisorType.XenServer, "71d8adb40baa609997acdc3eae15fbde"); + put(Hypervisor.HypervisorType.VMware, "b356cbbdef67c4eefa8c336328e2b202"); + put(Hypervisor.HypervisorType.Hyperv, "0982aa1461800ce1538e0cae07e00770"); + put(Hypervisor.HypervisorType.LXC, "07268f267dc4316dc5f86150346bb8d7"); + put(Hypervisor.HypervisorType.Ovm3, "8c643d146c82f92843b8a48c7661f800"); } }; - public static final Map templateFiles = new HashMap() { + public static final Map hypervisorGuestOsMap = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, 15); + put(Hypervisor.HypervisorType.XenServer, 99); + put(Hypervisor.HypervisorType.VMware, 99); + put(Hypervisor.HypervisorType.Hyperv, 15); + put(Hypervisor.HypervisorType.LXC, 15); + put(Hypervisor.HypervisorType.Ovm3, 183); + } + }; + + public static enum TemplateFormat{ + QCOW2("qcow2"), + RAW("raw"), + VHD("vhd"), + OVA("ova"); + + private final String fileExtension; + + TemplateFormat(String fileExtension) { + this.fileExtension = fileExtension; + } + } + + public static final Map hypervisorImageFormat = new HashMap() { { - put(Hypervisor.HypervisorType.KVM, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.KVM)); - put(Hypervisor.HypervisorType.XenServer, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.XenServer)); - put(Hypervisor.HypervisorType.VMware, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.VMware)); - put(Hypervisor.HypervisorType.Hyperv, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.Hyperv)); - put(Hypervisor.HypervisorType.LXC, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.LXC)); - put(Hypervisor.HypervisorType.Ovm3, templatesPath + "/" + fileNames.get(Hypervisor.HypervisorType.Ovm3)); + put(Hypervisor.HypervisorType.KVM, TemplateFormat.QCOW2); + put(Hypervisor.HypervisorType.XenServer, TemplateFormat.VHD); + put(Hypervisor.HypervisorType.VMware, TemplateFormat.OVA); + put(Hypervisor.HypervisorType.Hyperv, TemplateFormat.VHD); + put(Hypervisor.HypervisorType.LXC, TemplateFormat.QCOW2); + put(Hypervisor.HypervisorType.Ovm3, TemplateFormat.RAW); } }; + private static String calculateChecksum(MessageDigest digest, File file) { + try { + FileInputStream fis = new FileInputStream(file); + byte[] byteArray = new byte[1024]; + int bytesCount = 0; + + while ((bytesCount = fis.read(byteArray)) != -1) { + digest.update(byteArray, 0, bytesCount); + } - static long isTemplateAlreadyRegistered(Map.Entry hypervisorAndTemplateName) { - final TransactionLegacy txn = TransactionLegacy.open("TemplateValidation"); + fis.close(); + byte[] bytes = digest.digest(); + + StringBuilder sb = new StringBuilder(); + for (byte aByte : bytes) { + sb.append(Integer + .toString((aByte & 0xff) + 0x100, 16) + .substring(1)); + } + return sb.toString(); + } catch (IOException e) { + String errMsg = String.format("Failed 
to calculate Checksum of template file: %s ", file.getName()); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + + static long isTemplateAlreadyRegistered(Connection conn, Pair hypervisorAndTemplateName) { long templateId = -1; - Connection conn; try { - conn = txn.getConnection(); PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? and removed is null order by id desc limit 1"); // Get systemvm template id for corresponding hypervisor - pstmt.setString(1, hypervisorAndTemplateName.getValue()); + pstmt.setString(1, hypervisorAndTemplateName.second()); try (ResultSet rs = pstmt.executeQuery()) { if (rs.next()) { templateId = rs.getLong(1); } } catch (final SQLException e) { - LOGGER.error("updateSystemVmTemplates: Exception caught while getting ids of SystemVM templates: " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates: Exception caught while getting ids of SystemVM templates", e); + String errMsg = String.format("updateSystemVmTemplates: Exception caught while getting ids of SystemVM templates: %s ", e.getMessage()); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); } } catch (SQLException e) { String errorMessage = "Unable to upgrade the database"; LOGGER.error(errorMessage, e); throw new CloudRuntimeException(errorMessage, e); - } finally { - txn.close(); } return templateId; } @@ -158,45 +326,46 @@ private static String getHypervisorName(String name) { } - private static List getEligibleZoneIds() { - final TransactionLegacy txn = TransactionLegacy.open("FetchZones"); + private static Hypervisor.HypervisorType getHypervisorType(String hypervisor) { + if (hypervisor.equalsIgnoreCase("xen")) { + hypervisor = "xenserver"; + } else if (hypervisor.equalsIgnoreCase("ovm")) { + hypervisor = "ovm3"; + } + return Hypervisor.HypervisorType.getType(hypervisor); + } + + private static List getEligibleZoneIds(Connection conn) { List zones = new ArrayList(); - Connection conn; try { - conn = txn.getConnection(); - PreparedStatement pstmt = conn.prepareStatement(FETCH_DISTINCT_ELIGILBLE_ZONES); + PreparedStatement pstmt = conn.prepareStatement(FETCH_DISTINCT_ELIGIBLE_ZONES); ResultSet rs = pstmt.executeQuery(); while (rs.next()) { zones.add(rs.getLong(1)); } } catch (SQLException e) { throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration"); - } finally { - txn.close(); } return zones; } - private static String getNfsStoreInZone(Long zoneId) { - final TransactionLegacy txn = TransactionLegacy.open("FetchStore"); + private static Pair getNfsStoreInZone(Connection conn, Long zoneId) { String url = null; - Connection conn; + Long storeId = null; try { - conn = txn.getConnection(); PreparedStatement pstmt = conn.prepareStatement(FETCH_IMAGE_STORE_PER_ZONE); if(pstmt != null) { pstmt.setLong(1, zoneId); ResultSet resultSet = pstmt.executeQuery(); while (resultSet.next()) { url = resultSet.getString(1); + storeId = resultSet.getLong(2); } } } catch (SQLException e) { throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration"); - } finally { - txn.close(); } - return url; + return new Pair<>(url, storeId); } public static void mountStore(String storeUrl) { @@ -206,6 +375,7 @@ public static void mountStore(String storeUrl) { int index = path.indexOf('/'); String host = path.substring(0, index); String mountPath = path.substring(index); + Script.runSimpleBashScript("mkdir -p " + TEMPORARY_SECONDARY_STORE); 
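            // Example of the values handled at this point (hypothetical store URL, for illustration only):
            // an NFS image_store url such as nfs://10.1.1.2/export/secondary is reduced to
            // "10.1.1.2/export/secondary", giving host = "10.1.1.2" and mountPath = "/export/secondary",
            // which are combined below into "10.1.1.2:/export/secondary" and mounted on the
            // freshly created /tmp/tmpSecStorage staging directory. The exact format of mountCommand
            // is not visible in this hunk; only its counterpart umountCommand ("sudo umount %s") is
            // shown in this class, so the mount is assumed to be a plain "sudo mount" of the NFS export.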
String mount = String.format(mountCommand, host + ":" + mountPath, TEMPORARY_SECONDARY_STORE); Script.runSimpleBashScript(mount); } @@ -216,74 +386,264 @@ public static void mountStore(String storeUrl) { } } + private static String getTemplateFolderName(Connection conn) { + Long templateId = null; + try { + PreparedStatement pstmt = conn.prepareStatement(FETCH_FOLDER_NAME); + if(pstmt != null) { + ResultSet resultSet = pstmt.executeQuery(); + while (resultSet.next()) { + templateId = resultSet.getLong(1); + } + } + templateId += 1L; + } catch (SQLException e) { + String errMsg = "Failed to get folder name"; + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + return String.valueOf(templateId); + } - private static String getTemplateFolder() { - String path = TEMPORARY_SECONDARY_STORE + "/template/tmpl/1/"; - File templatesDirectory = new File(path); - List templates = Arrays.asList(Objects.requireNonNull(templatesDirectory.list())); - if (templates != null && templates.size() > 0) { - Collections.sort(templates); - return path + String.valueOf(Long.parseLong(templates.get(templates.size() -1)) + 1L); + private static String getTemplateFolder(Connection conn) { + String folderName = getTemplateFolderName(conn); + if (folderName != null || !folderName.equals(0)) { + return folderName; } else { - return path + "9"; + return "202"; } } - - public static void registerTemplate(Map.Entry hypervisorAndTemplateName) { + private static List fetchAllHypervisors(Connection conn, Long zoneId) { + List hypervisorList = new ArrayList<>(); try { - List zoneIds = getEligibleZoneIds(); - for (Long zoneId : zoneIds) { - String storeUrl = getNfsStoreInZone(zoneId); - mountStore(storeUrl); - String destTempFolder = getTemplateFolder(); - Script.runSimpleBashScript("cp " + templateFiles.get(hypervisorAndTemplateName.getKey()) + " " + destTempFolder); - String storageScriptsDir = "scripts/storage/secondary"; - String createTmplPath = Script.findScript(storageScriptsDir, "createtmplt.sh"); - if (createTmplPath == null) { - throw new ConfigurationException("Unable to find the createtmplt.sh"); + PreparedStatement pstmt = conn.prepareStatement(FETCH_DISTINCT_HYPERVISORS_IN_ZONE); + if(pstmt != null) { + pstmt.setLong(1, zoneId); + ResultSet resultSet = pstmt.executeQuery(); + while (resultSet.next()) { + hypervisorList.add(resultSet.getString(1)); } - String templateName = UUID.randomUUID().toString(); - // TODO: need to add extension - String templateFilename = templateName; - Script scr = new Script(createTmplPath, 120, LOGGER); - scr.add("-n", templateFilename); - - scr.add("-t", destTempFolder); - scr.add("-f", destTempFolder); // this is the temporary - // template file downloaded - String result = scr.execute(); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration"); + } + return hypervisorList; + } + + public static void updateDb(Connection conn, SystemVMTemplateDetails details) { + try { + PreparedStatement pstmt = conn.prepareStatement(UPDATE_VM_TEMPLATE_TABLE); + if (pstmt != null) { + pstmt.setLong(1, details.getId()); + pstmt.setString(2, details.getUuid()); + pstmt.setString(3, details.getUniqueName()); + pstmt.setString(4, details.getName()); + pstmt.setDate(5, details.getCreated()); + pstmt.setString(6, details.getUrl()); + pstmt.setString(7, details.getChecksum()); + pstmt.setString(8, details.getName()); + pstmt.setString(9, details.getFormat().toString()); + pstmt.setLong(10, 
details.getGuestOsId()); + pstmt.setString(11, details.getHypervisorType().toString()); + pstmt.executeUpdate(); + } + + PreparedStatement pstmt1 = conn.prepareStatement(UPDATE_TEMPLATE_STORE_REF_TABLE); + if (pstmt1 != null) { + pstmt1.setLong(1, details.getStoreId()); + pstmt1.setLong(2, details.getId()); + pstmt1.setDate(3, details.getCreated()); + pstmt1.setDate(4, details.getCreated()); + pstmt1.setLong(5, details.getSize()); + pstmt1.setLong(6, details.getPhysicalSize()); + pstmt1.setString(7, details.getInstallPath()); + pstmt1.setString(8, details.getUrl()); + pstmt1.executeUpdate(); } } catch (Exception e) { - String errMsg = "Failed to register template for hypervisor: " + hypervisorAndTemplateName.getKey(); + throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration: " + e.getMessage()); + } + } + + public static void updateSystemVMEntries(Connection conn, Long templateId, Pair hypervisorAndTemplateName) { + // update template ID of system Vms + try { + PreparedStatement update_templ_id_pstmt = conn + .prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? and removed is NULL"); + update_templ_id_pstmt.setLong(1, templateId); + update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.first().toString()); + update_templ_id_pstmt.executeUpdate(); + } catch (SQLException e) { + String errMsg = String.format("updateSystemVmTemplates:Exception while setting template for %s to %s : %s",hypervisorAndTemplateName.first().toString(), templateId, + e.getMessage()); LOGGER.error(errMsg); throw new CloudRuntimeException(errMsg); } } - public static void registerTemplates(Set hypervisorsInUse) { - // Check if templates path exists - Set hypervisors = hypervisorsInUse.stream().map(Enum::name). - map(name -> name.toLowerCase(Locale.ROOT)).map(SystemVmTemplateRegistration::getHypervisorName).collect(Collectors.toSet()); - File templatesDirectory = new File(templatesPath); - List templatePaths = new ArrayList<>(); - String[] templates = Objects.requireNonNull(templatesDirectory.list()); - for (String template : templates) { - if (hypervisors.stream().anyMatch(template::contains)) { - templatePaths.add(templatesDirectory.getPath() + "/" + template); - } else { - throw new CloudRuntimeException("SystemVm template " + template + " not found. Cannot upgrade system Vms"); + public static void updateConfigurationParams(Connection conn, Map configParams) { + String key = null; + String value = null; + try { + PreparedStatement pstmt = conn.prepareStatement(UPDATE_CONFIGURATION_TABLE); + for (Map.Entry config : configParams.entrySet()) { + key = config.getKey(); + value = config.getValue(); + pstmt.setString(1, value); + pstmt.setString(2, key); + pstmt.executeUpdate(); } + + } catch (final SQLException e) { + String errMsg = String.format("updateSystemVmTemplates: Exception while setting %s to %s: %s ", key, value, e.getMessage()); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); } + } - // Perform Registration if templates not already registered - for (final Map.Entry hypervisorAndTemplateName : SystemVmTemplateRegistration.NewTemplateNameList.entrySet()) { - long templateId = isTemplateAlreadyRegistered(hypervisorAndTemplateName); - if (templateId != -1) { - continue; - } - // TODO: Concurrency?? 
- registerTemplate(hypervisorAndTemplateName); + private static void readTemplateProperties(String path, SystemVMTemplateDetails details) { + File tmpFile = new File(path); + String uniqName = null; + Long size = null; + Long physicalSize = 0L; + try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr);) { + String line = null; + while ((line = brf.readLine()) != null) { + if (line.startsWith("uniquename=")) { + uniqName = line.split("=")[1]; + } else if (line.startsWith("size=")) { + physicalSize = Long.parseLong(line.split("=")[1]); + } else if (line.startsWith("virtualsize=")) { + size = Long.parseLong(line.split("=")[1]); + } + if (size == null) { + size = physicalSize; + } + } + } catch (IOException ex) { + LOGGER.debug(String.format("Failed to read from template.properties due to: %s ", ex.getMessage())); + } + details.setSize(size); + details.setPhysicalSize(physicalSize); + details.setUniqueName(uniqName); + } + + private static void unmountStore() { + try { + LOGGER.info("Unmounting store"); + String umountCmd = String.format(umountCommand, TEMPORARY_SECONDARY_STORE); + Script.runSimpleBashScript(umountCmd); + } catch (Exception e) { + String msg = String.format("Failed to unmount store mounted at %s", TEMPORARY_SECONDARY_STORE); + LOGGER.error(msg); + throw new CloudRuntimeException(msg); + } + } + public static void registerTemplate(Connection conn, Pair hypervisorAndTemplateName, Long zoneId) { + try { + Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); + Pair storeUrlAndId = getNfsStoreInZone(conn, zoneId); + mountStore(storeUrlAndId.first()); + String destTempFolderName = getTemplateFolder(conn); + String destTempFolder = PARENT_TEMPLATE_FOLDER + PARTIAL_TEMPLATE_FOLDER + destTempFolderName; + Script.runSimpleBashScript("mkdir -p " + destTempFolder); + String storageScriptsDir = "scripts/storage/secondary"; + String setupTmpltScript = Script.findScript(storageScriptsDir, "setup-sysvm-tmplt"); + if (setupTmpltScript == null) { + throw new ConfigurationException("Unable to find the createtmplt.sh"); + } + + Script scr = new Script(setupTmpltScript, 120000, LOGGER); + final String templateName = UUID.randomUUID().toString(); + scr.add("-u", templateName); + scr.add("-f", templatesPath + fileNames.get(hypervisorAndTemplateName.first())); + scr.add("-h", hypervisorAndTemplateName.first().name().toLowerCase(Locale.ROOT)); + scr.add("-d", destTempFolder); + String result = scr.execute(); + if (result != null) { + String errMsg = String.format("failed to create template: %s ", result); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + Date created = new Date(DateUtil.currentGMTTime().getTime()); + SystemVMTemplateDetails details = new SystemVMTemplateDetails(Long.parseLong(destTempFolderName), templateName, NewTemplateNameList.get(hypervisor), created, + newTemplateUrl.get(hypervisor), newTemplateChecksum.get(hypervisor), hypervisorImageFormat.get(hypervisor), hypervisorGuestOsMap.get(hypervisor), hypervisor, storeUrlAndId.second()); + details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + "/" + templateName + "." 
+ hypervisorImageFormat.get(hypervisor).fileExtension); + readTemplateProperties(destTempFolder + "/template.properties", details); + updateDb(conn, details); + Map configParams = new HashMap<>(); + configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); + configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); + updateConfigurationParams(conn, configParams); + updateSystemVMEntries(conn, Long.valueOf(destTempFolderName), hypervisorAndTemplateName); + } catch (Exception e) { + String errMsg = String.format("Failed to register template for hypervisor: %s ", hypervisorAndTemplateName.first()); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + + public static void registerTemplates(Connection conn, Set hypervisorsInUse) { + GlobalLock lock = GlobalLock.getInternLock("UpgradeDatabase-Lock"); + try { + LOGGER.info("Grabbing lock to register templates."); + if (!lock.lock(20 * 60)) { + throw new CloudRuntimeException("Unable to acquire lock to register systemvm template."); + } + // Check if templates path exists + try { + Set hypervisors = hypervisorsInUse.stream().map(Enum::name). + map(name -> name.toLowerCase(Locale.ROOT)).map(SystemVmTemplateRegistration::getHypervisorName).collect(Collectors.toSet()); + List templates = new ArrayList<>(); + for (Hypervisor.HypervisorType hypervisorType : hypervisorsInUse) { + templates.add(fileNames.get(hypervisorType)); + } + + boolean templatesFound = true; + for (String hypervisor : hypervisors) { + String matchedTemplate = templates.stream().filter(x -> x.contains(hypervisor)).findAny().orElse(null); + if (matchedTemplate == null) { + templatesFound = false; + break; + } + MessageDigest mdigest = MessageDigest.getInstance(hashAlgorithm); + File tempFile = new File(templatesPath + matchedTemplate); + String templateChecksum = calculateChecksum(mdigest, tempFile); + if (!templateChecksum.equals(newTemplateChecksum.get(getHypervisorType(hypervisor)))) { + LOGGER.error(String.format("Checksum mismatch: %s != %s ", templateChecksum, newTemplateChecksum.get(getHypervisorType(hypervisor)))); + templatesFound = false; + break; + } + } + + if (!templatesFound) { + LOGGER.info("SystemVm template not found. Cannot upgrade system Vms"); + throw new CloudRuntimeException("SystemVm template not found. Cannot upgrade system Vms"); + } + + // Perform Registration if templates not already registered + List zoneIds = getEligibleZoneIds(conn); + for (Long zoneId : zoneIds) { + List hypervisorList = fetchAllHypervisors(conn, zoneId); + for (String hypervisor : hypervisorList) { + Hypervisor.HypervisorType name = Hypervisor.HypervisorType.getType(hypervisor); + String templateName = NewTemplateNameList.get(name); + Pair hypervisorAndTemplateName = new Pair(name, templateName); + long templateId = isTemplateAlreadyRegistered(conn, hypervisorAndTemplateName); + if (templateId != -1) { + continue; + } + registerTemplate(conn, hypervisorAndTemplateName, zoneId); + } + unmountStore(); + } + } catch (Exception e) { + throw new CloudRuntimeException("Failed to register systemVM template. 
Upgrade Failed"); + } + } finally { + lock.unlock(); + lock.releaseRef(); } } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index 76f5fb3c073e..5d7e5bcec087 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -22,11 +22,13 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import com.cloud.upgrade.SystemVmTemplateRegistration; +import com.cloud.utils.Pair; import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor; @@ -126,45 +128,21 @@ public void updateSystemVmTemplates(final Connection conn) { throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e); } // update template ID of system Vms - try (PreparedStatement update_templ_id_pstmt = conn - .prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? and removed is NULL");) { - update_templ_id_pstmt.setLong(1, templateId); - update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.getKey().toString()); - update_templ_id_pstmt.executeUpdate(); - } catch (final Exception e) { - LOG.error("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " + templateId - + ": " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " - + templateId, e); - } + SystemVmTemplateRegistration.updateSystemVMEntries(conn, templateId, + new Pair(hypervisorAndTemplateName.getKey(), hypervisorAndTemplateName.getValue())); - // Change value of global configuration parameter - // router.template.* for the corresponding hypervisor - try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) { - update_pstmt.setString(1, hypervisorAndTemplateName.getValue()); - update_pstmt.setString(2, SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey())); - update_pstmt.executeUpdate(); - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while setting " + SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " - + hypervisorAndTemplateName.getValue() + ": " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting " - + SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue(), e); - } + // Change value of global configuration parameter router.template.* for the corresponding hypervisor + // Change value of global configuration parameter - minreq.sysvmtemplate.version for the ACS version + Map configParams = new HashMap<>(); + configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()), hypervisorAndTemplateName.getValue()); + configParams.put("minreq.sysvmtemplate.version", SystemVmTemplateRegistration.CS_MAJOR_VERSION + "." 
+ SystemVmTemplateRegistration.CS_MINOR_VERSION); - // Change value of global configuration parameter - // minreq.sysvmtemplate.version for the ACS version - try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) { - update_pstmt.setString(1, "4.16.0"); - update_pstmt.setString(2, "minreq.sysvmtemplate.version"); - update_pstmt.executeUpdate(); - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.16.0: " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.16.0", e); - } + SystemVmTemplateRegistration.updateConfigurationParams(conn, configParams); } else { if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) { try { - SystemVmTemplateRegistration.registerTemplates(hypervisorsListInUse); + SystemVmTemplateRegistration.registerTemplates(conn, hypervisorsListInUse); + break; } catch (final Exception e) { throw new CloudRuntimeException(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms"); } diff --git a/scripts/storage/secondary/createtmplt.sh b/scripts/storage/secondary/createtmplt.sh index 391b291d3bbd..9d6d746dba7e 100755 --- a/scripts/storage/secondary/createtmplt.sh +++ b/scripts/storage/secondary/createtmplt.sh @@ -110,6 +110,16 @@ create_from_file() { } +create_from_file_user() { + local tmpltfs=$1 + local tmpltimg=$2 + local tmpltname=$3 + + [ -n "$verbose" ] && echo "Copying to $tmpltfs/$tmpltname...could take a while" >&2 + sudo cp $tmpltimg /$tmpltfs/$tmpltname + +} + tflag= nflag= fflag= @@ -118,8 +128,9 @@ hflag= hvm=false cleanup=false dflag= +cloud=false -while getopts 'vuht:n:f:s:d:S:' OPTION +while getopts 'vcuht:n:f:s:d:S:' OPTION do case $OPTION in t) tflag=1 @@ -144,6 +155,8 @@ do h) hflag=1 hvm="true" ;; + c) cloud="true" + ;; u) cleanup="true" ;; v) verbose="true" @@ -199,7 +212,14 @@ fi imgsize=$(ls -l $tmpltimg2| awk -F" " '{print $5}') -create_from_file $tmpltfs $tmpltimg2 $tmpltname +if [ $cloud ] +then + create_from_file_user $tmpltfs $tmpltimg2 $tmpltname + tmpltfs=/tmp/tmpSecStorage/ +else + create_from_file $tmpltfs $tmpltimg2 $tmpltname +fi + touch /$tmpltfs/template.properties rollback_if_needed $tmpltfs $? "Failed to create template.properties file" @@ -213,7 +233,7 @@ echo "description=$descr" >> /$tmpltfs/template.properties echo "hvm=$hvm" >> /$tmpltfs/template.properties echo "size=$imgsize" >> /$tmpltfs/template.properties -if [ "$cleanup" == "true" ] +if [[ "$cleanup" == "true" && $cloud != "true"]] then rm -f $tmpltimg fi diff --git a/scripts/storage/secondary/setup-sysvm-tmplt b/scripts/storage/secondary/setup-sysvm-tmplt new file mode 100644 index 000000000000..e0c90f99942e --- /dev/null +++ b/scripts/storage/secondary/setup-sysvm-tmplt @@ -0,0 +1,169 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Usage: e.g. failed $? "this is an error" +set -x + +failed() { + local returnval=$1 + local returnmsg=$2 + + # check for an message, if there is no one dont print anything + if [[ -z $returnmsg ]]; then + : + else + echo -e $returnmsg + fi + if [[ $returnval -eq 0 ]]; then + return 0 + else + echo "Installation failed" + exit $returnval + fi +} + +# check if first parameter is not a dash (-) then print the usage block +if [[ ! $@ =~ ^\-.+ ]]; then + usage + exit 0 +fi + +OPTERR=0 +while getopts 'h:f:d:u::'# OPTION +do + case $OPTION in + u) uflag=1 + uuid="$OPTARG" + ;; + f) fflag=1 + tmpltimg="$OPTARG" + ;; + h) hyper="$OPTARG" + ;; + d) destdir="$OPTARG" + ;; + ?) usage + exit 0 + ;; + *) usage + exit 0 + ;; + esac +done + +if [[ "$hyper" == "kvm" ]]; then + ext="qcow2" + qemuimgcmd=$(which qemu-img) + elif [[ "$hyper" == "xenserver" ]]; then + ext="vhd" + elif [[ "$hyper" == "vmware" ]]; then + ext="ova" + elif [[ "$hyper" == "lxc" ]]; then + ext="qcow2" + elif [[ "$hyper" == "hyperv" ]]; then + ext="vhd" + elif [[ "$hyper" == "ovm3" ]]; then + ext="raw" + else + failed 2 "Please add a correct hypervisor name like: kvm|vmware|xenserver|hyperv|ovm3" +fi + + +localfile=$uuid.$ext + + +sudo mkdir -p $destdir +if [[ $? -ne 0 ]]; then + failed 2 "Failed to write to destdir $destdir -- is it mounted?\n" +fi + +if [[ -f $destdir/template.properties ]]; then + failed 2 "Data already exists at destination $destdir" +fi + +destfiles=$(find $destdir -name \*.$ext) +if [[ "$destfiles" != "" ]]; then + failed 2 "Data already exists at destination $destdir" +fi + +tmpfolder=/tmp/tmpSecStorage/ +tmplfile=$tmpfolder/$localfile + +sudo touch $tmplfile +if [[ $? -ne 0 ]]; then + failed 2 "Failed to create temporary file in directory $tmpfolder -- is it read-only or full?\n" +fi + +destcap=$(df -P $destdir | awk '{print $4}' | tail -1 ) +[ $destcap -lt $DISKSPACE ] && echo "Insufficient free disk space for target folder $destdir: avail=${destcap}k req=${DISKSPACE}k" && failed 4 + +localcap=$(df -P $tmpfolder | awk '{print $4}' | tail -1 ) +[ $localcap -lt $DISKSPACE ] && echo "Insufficient free disk space for local temporary folder $tmpfolder: avail=${localcap}k req=${DISKSPACE}k" && failed 4 + + +if [[ "$fflag" == "1" ]]; then + sudo cp $tmpltimg $tmplfile + if [[ $? -ne 0 ]]; then + failed 2 "Failed to create temporary file in directory $tmpfolder -- is it read-only or full?\n" + fi +fi + +installrslt=$($(dirname $0)/createtmplt.sh -s 2 -d "SystemVM Template ( $hyper )" -n $localfile -t $destdir/ -f $tmplfile -u -v -c) + +if [[ $? 
-ne 0 ]]; then + failed 2 "Failed to install system vm template $tmpltimg to $destdir: $installrslt" +fi + +tmpdestdir=$tmpfolder + +if [ "$ext" == "ova" ] +then + tar xvf $tmpdestdir/$localfile -C $tmpdestdir &> /dev/null + sudo cp $tmpdestdir/*.vmdk $tmpdestdir/*.mf $tmpdestdir/*.ovf $destdir/ + rm -rf $tmpdestdir/*.vmdk $tmpdestdir/*.mf $tmpdestdir/*.ovf $tmpdestdir/*.ova +fi + + +tmpltfile=$destdir/$localfile +tmpltsize=$(ls -l $tmpltfile | awk -F" " '{print $5}') +if [[ "$ext" == "qcow2" ]]; then + vrtmpltsize=$($qemuimgcmd info $tmpltfile | grep -i 'virtual size' | sed -ne 's/.*(\([0-9]*\).*/\1/p' | xargs) +else + vrtmpltsize=$tmpltsize +fi + +templateId=${destdir##*/} +sudo touch $destdir/template.properties +echo "$ext=true" >> $tmpdestdir/template.properties +echo "id=$templateId" >> $tmpdestdir/template.properties +echo "public=true" >> $tmpdestdir/template.properties +echo "$ext.filename=$localfile" >> $tmpdestdir/template.properties +echo "uniquename=$uuid" >> $tmpdestdir/template.properties +echo "$ext.virtualsize=$vrtmpltsize" >> $tmpdestdir/template.properties +echo "virtualsize=$vrtmpltsize" >> $tmpdestdir/template.properties +echo "$ext.size=$tmpltsize" >> $tmpdestdir/template.properties + +sudo cp $tmpdestdir/template.properties $destdir/template.properties +if [ -f "$tmpdestdir/template.properties" ] +then + rm -rf $tmpdestdir/template.properties +fi + +echo "Successfully installed system VM template $tmpltimg and template.properties to $destdir" +exit 0 \ No newline at end of file diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 31bf7dcdce65..1703a116b6a7 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -1392,7 +1392,6 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl String MsPublicKey = _configDao.getValue("ssh.publickey"); String base64EncodedPublicKey = null; if (MsPublicKey != null) { - // base64EncodedPublicKey = Base64.getEncoder().encodeToString(DBEncryptionUtil.decrypt(MsPrivateKey).getBytes(StandardCharsets.UTF_8)); base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); } diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 9d617d491ae5..d6f7cfe3c16c 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -1966,7 +1966,6 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile String MsPublicKey = _configDao.getValue("ssh.publickey"); String base64EncodedPublicKey = null; if (MsPublicKey != null) { - // base64EncodedPublicKey = Base64.getEncoder().encodeToString(DBEncryptionUtil.decrypt(MsPublicKey).getBytes(StandardCharsets.UTF_8)); base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index b6a14f97ead2..77529f235d72 100644 --- 
a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -1089,7 +1089,6 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl String MsPublicKey = _configDao.getValue("ssh.publickey"); String base64EncodedPublicKey = null; if (MsPublicKey != null) { - // base64EncodedPublicKey = Base64.getEncoder().encodeToString(DBEncryptionUtil.decrypt(MsPrivateKey).getBytes(StandardCharsets.UTF_8)); base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); } diff --git a/systemvm/pom.xml b/systemvm/pom.xml index f3d3a86ecdc8..0c5406796ce4 100644 --- a/systemvm/pom.xml +++ b/systemvm/pom.xml @@ -138,32 +138,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - org.codehaus.mojo exec-maven-plugin @@ -204,39 +178,6 @@ genisoimage - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - vmware diff --git a/tools/apidoc/build-apidoc.sh b/tools/apidoc/build-apidoc.sh index 4555056a95ce..33123b31f2b2 100755 --- a/tools/apidoc/build-apidoc.sh +++ b/tools/apidoc/build-apidoc.sh @@ -63,17 +63,6 @@ case "$(uname)" in Darwin*) sedi='-i ""' esac -PYTHON=$(which python) -if [ -z "$PYTHON" ] ; then - PYTHON=$(which python2) - if [ -z "$PYTHON" ] ; then - PYTHON=$(which python3) - if [ -z "$PYTHON" ] ; then - echo -e "python not found\n API Docs build failed" - exit 2 - fi - fi -fi set -e (cd "$DISTDIR/xmldoc" cp "$thisdir"/*.java . From c622904c7ecc8655ef25e934d20c4aefc5ad794c Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 28 Jul 2021 12:16:37 +0530 Subject: [PATCH 058/117] packaging fixes --- packaging/package.sh | 5 - .../vmware/resource/VmwareResource.java | 19 +--- scripts/storage/secondary/createtmplt.sh | 4 +- scripts/storage/secondary/setup-sysvm-tmplt | 3 +- server/conf/cloudstack-sudoers.in | 2 +- systemvm/templateConfig.py | 99 ------------------- 6 files changed, 9 insertions(+), 123 deletions(-) delete mode 100644 systemvm/templateConfig.py diff --git a/packaging/package.sh b/packaging/package.sh index bf4b3d33c1e1..7369165e80b5 100755 --- a/packaging/package.sh +++ b/packaging/package.sh @@ -94,9 +94,6 @@ function packaging() { VERSION=$(cd $PWD/../; $MVN org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version | grep --color=none '^[0-9]\.') REALVER=$(echo "$VERSION" | cut -d '-' -f 1) - echo $VERSION - echo $REALVER - if [ -n "$5" ]; then BRAND="${5}." 
else @@ -110,8 +107,6 @@ function packaging() { fi fi - echo $BASEVER - if echo "$VERSION" | grep -q SNAPSHOT ; then if [ -n "$4" ] ; then DEFREL="-D_rel ${BRAND}${INDICATOR}.$4" diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 3e234f2b28d5..7d4e167a73a2 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -25,7 +25,6 @@ import java.net.ConnectException; import java.net.InetSocketAddress; import java.net.URI; -import java.net.URL; import java.nio.channels.SocketChannel; import java.rmi.RemoteException; import java.util.ArrayList; @@ -273,7 +272,6 @@ import com.cloud.utils.mgmt.PropertyMapDynamicBean; import com.cloud.utils.net.NetUtils; import com.cloud.utils.nicira.nvp.plugin.NiciraNvpApiVersion; -import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.PowerState; @@ -424,8 +422,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected static File s_systemVmKeyFile = null; private static final Object s_syncLockObjectFetchKeyFile = new Object(); - protected static final String s_relativePathSystemVmKeyFileInstallDir = "scripts/vm/systemvm/id_rsa.cloud"; - protected static final String s_defaultPathSystemVmKeyFile = "/usr/share/cloudstack-common/scripts/vm/systemvm/id_rsa.cloud"; + protected static final String s_defaultPathSystemVmKeyFile = "/var/cloudstack/management/.ssh/id_rsa"; public Gson getGson() { return _gson; @@ -7040,18 +7037,10 @@ private static void syncFetchSystemVmKeyFile() { } private static File fetchSystemVmKeyFile() { - String filePath = s_relativePathSystemVmKeyFileInstallDir; + String filePath = s_defaultPathSystemVmKeyFile; + File keyFile = new File(filePath); s_logger.debug("Looking for file [" + filePath + "] in the classpath."); - URL url = Script.class.getClassLoader().getResource(filePath); - File keyFile = null; - if (url != null) { - keyFile = new File(url.getPath()); - } - if (keyFile == null || !keyFile.exists()) { - filePath = s_defaultPathSystemVmKeyFile; - keyFile = new File(filePath); - s_logger.debug("Looking for file [" + filePath + "] in the classpath."); - } + if (!keyFile.exists()) { s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); } diff --git a/scripts/storage/secondary/createtmplt.sh b/scripts/storage/secondary/createtmplt.sh index 9d6d746dba7e..0de441ca0c16 100755 --- a/scripts/storage/secondary/createtmplt.sh +++ b/scripts/storage/secondary/createtmplt.sh @@ -215,7 +215,7 @@ imgsize=$(ls -l $tmpltimg2| awk -F" " '{print $5}') if [ $cloud ] then create_from_file_user $tmpltfs $tmpltimg2 $tmpltname - tmpltfs=/tmp/tmpSecStorage/ + tmpltfs=/tmp/cloud/templates/ else create_from_file $tmpltfs $tmpltimg2 $tmpltname fi @@ -233,7 +233,7 @@ echo "description=$descr" >> /$tmpltfs/template.properties echo "hvm=$hvm" >> /$tmpltfs/template.properties echo "size=$imgsize" >> /$tmpltfs/template.properties -if [[ "$cleanup" == "true" && $cloud != "true"]] +if [[ "$cleanup" == "true" ]] && [[ $cloud != "true" ]] then rm -f $tmpltimg fi diff --git a/scripts/storage/secondary/setup-sysvm-tmplt b/scripts/storage/secondary/setup-sysvm-tmplt index e0c90f99942e..e8579ba31b39 
100644 --- a/scripts/storage/secondary/setup-sysvm-tmplt +++ b/scripts/storage/secondary/setup-sysvm-tmplt @@ -102,7 +102,8 @@ if [[ "$destfiles" != "" ]]; then failed 2 "Data already exists at destination $destdir" fi -tmpfolder=/tmp/tmpSecStorage/ +tmpfolder=/tmp/cloud/templates/ +mkdir -p $tmpfolder tmplfile=$tmpfolder/$localfile sudo touch $tmplfile diff --git a/server/conf/cloudstack-sudoers.in b/server/conf/cloudstack-sudoers.in index 908d2f4733f4..5c879f3303f9 100644 --- a/server/conf/cloudstack-sudoers.in +++ b/server/conf/cloudstack-sudoers.in @@ -18,7 +18,7 @@ # The CloudStack management server needs sudo permissions # without a password. -Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool +Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool, /bin/touch Defaults:@MSUSER@ !requiretty diff --git a/systemvm/templateConfig.py b/systemvm/templateConfig.py deleted file mode 100644 index d0fb590e13d4..000000000000 --- a/systemvm/templateConfig.py +++ /dev/null @@ -1,99 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -import os -import requests -import hashlib -import backports.configparser as configparser -from multiprocessing.pool import ThreadPool - -CS_MAJOR_VERSION=4.16 -CS_MINOR_VERSION=0 -templates = { - "kvm": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "vmware": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-vmware.ova" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "xenserver": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-xen.vhd.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "hyperv": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-hyperv.vhd.zip" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "lxc": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "ovm3": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-ovm.raw.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), -} - -checksums = { - "kvm": "07268f267dc4316dc5f86150346bb8d7", - "vmware": "b356cbbdef67c4eefa8c336328e2b202", - "xenserver": "71d8adb40baa609997acdc3eae15fbde", - "hyperv": "0982aa1461800ce1538e0cae07e00770", - "lxc": "07268f267dc4316dc5f86150346bb8d7", - "ovm3": "8c643d146c82f92843b8a48c7661f800" -} -destination = os.path.dirname(os.path.abspath(__file__)) + '/templates/' -if not os.path.exists(destination): - os.makedirs(destination) - -metadataFile = destination + 'metadata.ini' - - -def downloadSystemvmTemplate(url): - fileName = url.rsplit('/', 1)[1] - fileName = destination + fileName - if (os.path.exists(fileName)): - checksum = hashlib.md5(open(fileName, 'rb').read()).hexdigest() - fileChecksum = checksums[list(templates.keys())[list(templates.values()).index(url)]] - if checksum == fileChecksum: - print('Template ' + url + ' already downloaded') - return - try: - r = requests.get(url, stream=True) - if r.status_code == 200: - with open(fileName, 'wb') as f: - for chunk in r: - f.write(chunk) - except Exception as e: - print(e) - - return fileName - -def downloadTemplates(): - results = ThreadPool(4).imap_unordered(downloadSystemvmTemplate, list(templates.values())) - for path in results: - print(path) - -def createMetadataFile(): - templateFiles = [f for f in os.listdir(destination) if os.path.isfile(os.path.join(destination, f))] - # print(templates) - write_config = configparser.ConfigParser() - for template in templateFiles: - value = filter(lambda temp : template in temp, templates.values()) - if len(value) == 0: - continue - hypervisor = list(templates.keys())[list(templates.values()).index(value[0])] - write_config.add_section(hypervisor) - write_config.set(hypervisor, "templateName", "systemvm-{0}-{1}.{2}".format(hypervisor, CS_MAJOR_VERSION, CS_MINOR_VERSION)) - write_config.set(hypervisor, "checksum", checksums[hypervisor]) - - cfgfile = open(metadataFile, 'w') - write_config.write(cfgfile) - cfgfile.close() - -downloadTemplates() -createMetadataFile() \ No newline at end of file From fd14a87d2ff79c15fc5f049ad0b051bc178debf9 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 29 Jul 2021 12:55:54 +0530 Subject: [PATCH 059/117] 1. Move download of templates and creation of meta file logic to mvn (systemvm/pom) 2. Remove hard-coded references in the upgrade path - use meta file as input 3. 
changed path of templates from packaging to systemvm/dist/systemvm-templates --- debian/rules | 3 +- engine/schema/pom.xml | 5 + .../upgrade/SystemVmTemplateRegistration.java | 125 ++++++++++++------ .../upgrade/dao/Upgrade41510to41600.java | 1 + packaging/build-deb.sh | 5 +- packaging/centos7/cloud.spec | 4 +- packaging/centos8/cloud.spec | 3 +- packaging/package.sh | 6 +- packaging/templateConfig.py | 99 -------------- pom.xml | 11 +- systemvm/pom.xml | 90 +++++++++++++ systemvm/templateConfig.py | 102 ++++++++++++++ 12 files changed, 298 insertions(+), 156 deletions(-) delete mode 100644 packaging/templateConfig.py create mode 100644 systemvm/templateConfig.py diff --git a/debian/rules b/debian/rules index ea59db0a20c0..32a9083295a4 100755 --- a/debian/rules +++ b/debian/rules @@ -79,7 +79,8 @@ override_dh_auto_install: cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/ cp client/target/cloud-client-ui-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/cloudstack-$(VERSION).jar cp client/target/lib/*jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/ - cp -r packaging/templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/ + cp -r systemvm/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/ + rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/md5sum.txt # nast hack for a couple of configuration files mv $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/cloudstack-limits.conf $(DESTDIR)/$(SYSCONFDIR)/security/limits.d/ diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index f5daf7a6e22f..dcb485281358 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -52,5 +52,10 @@ mysql mysql-connector-java + + org.ini4j + ini4j + ${cs.ini.version} + diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 736865b186f4..608383645382 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -23,6 +23,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import org.apache.log4j.Logger; +import org.ini4j.Ini; import javax.naming.ConfigurationException; import java.io.BufferedReader; @@ -37,6 +38,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -51,6 +53,7 @@ public class SystemVmTemplateRegistration { private static final String umountCommand = "sudo umount %s"; private static final String hashAlgorithm = "MD5"; private static final String templatesPath = "/usr/share/cloudstack-management/templates/"; + private static final String metadataFile = templatesPath + "metadata.ini"; private static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; private static final String PARTIAL_TEMPLATE_FOLDER = "/template/tmpl/1/"; @@ -174,16 +177,26 @@ public void setUniqueName(String uniqueName) { } } - public static final Map NewTemplateNameList = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); - put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-" + CS_MAJOR_VERSION + "." 
+ CS_MINOR_VERSION); - put(Hypervisor.HypervisorType.XenServer, "systemvm-xenserver-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); - put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); - put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); - put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); - } - }; + public static final List hypervisorList = Arrays.asList(Hypervisor.HypervisorType.KVM, + Hypervisor.HypervisorType.VMware, + Hypervisor.HypervisorType.XenServer, + Hypervisor.HypervisorType.Hyperv, + Hypervisor.HypervisorType.LXC, + Hypervisor.HypervisorType.Ovm3 + ); + + public static final Map NewTemplateNameList = new HashMap(); + +// public static final Map NewTemplateNameList = new HashMap() { +// { +// put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); +// put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); +// put(Hypervisor.HypervisorType.XenServer, "systemvm-xenserver-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); +// put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); +// put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); +// put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); +// } +// }; public static final Map routerTemplateConfigurationNames = new HashMap() { { @@ -196,38 +209,42 @@ public void setUniqueName(String uniqueName) { } }; - public static final Map fileNames = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "systemvmtemplate-4.16.0-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.VMware, "systemvmtemplate-4.16.0-vmware.ova"); - put(Hypervisor.HypervisorType.XenServer, "systemvmtemplate-4.16.0-xen.vhd.bz2"); - put(Hypervisor.HypervisorType.Hyperv, "systemvmtemplate-4.16.0-hyperv.vhd.zip"); - put(Hypervisor.HypervisorType.LXC, "systemvmtemplate-4.16.0-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.Ovm3, "systemvmtemplate-4.16.0-ovm.raw.bz2"); - } - }; - - public static final Map newTemplateUrl = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.KVM)); - put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.VMware)); - put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.XenServer)); - put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.Hyperv)); - put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.LXC)); - put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.Ovm3)); - } - }; - - public static final Map newTemplateChecksum = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "07268f267dc4316dc5f86150346bb8d7"); - put(Hypervisor.HypervisorType.XenServer, "71d8adb40baa609997acdc3eae15fbde"); - put(Hypervisor.HypervisorType.VMware, "b356cbbdef67c4eefa8c336328e2b202"); - put(Hypervisor.HypervisorType.Hyperv, "0982aa1461800ce1538e0cae07e00770"); - put(Hypervisor.HypervisorType.LXC, 
"07268f267dc4316dc5f86150346bb8d7"); - put(Hypervisor.HypervisorType.Ovm3, "8c643d146c82f92843b8a48c7661f800"); - } - }; + public static final Map fileNames = new HashMap(); +// public static final Map fileNames = new HashMap() { +// { +// put(Hypervisor.HypervisorType.KVM, "systemvmtemplate-4.16.0-kvm.qcow2.bz2"); +// put(Hypervisor.HypervisorType.VMware, "systemvmtemplate-4.16.0-vmware.ova"); +// put(Hypervisor.HypervisorType.XenServer, "systemvmtemplate-4.16.0-xen.vhd.bz2"); +// put(Hypervisor.HypervisorType.Hyperv, "systemvmtemplate-4.16.0-hyperv.vhd.zip"); +// put(Hypervisor.HypervisorType.LXC, "systemvmtemplate-4.16.0-kvm.qcow2.bz2"); +// put(Hypervisor.HypervisorType.Ovm3, "systemvmtemplate-4.16.0-ovm.raw.bz2"); +// } +// }; + + public static final Map newTemplateUrl = new HashMap(); +// public static final Map newTemplateUrl = new HashMap() { +// { +// put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.KVM)); +// put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.VMware)); +// put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.XenServer)); +// put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.Hyperv)); +// put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.LXC)); +// put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.Ovm3)); +// } +// }; + + // Read from ini - don't pkg ovs/hyperv - + public static final Map newTemplateChecksum = new HashMap(); +// public static final Map newTemplateChecksum = new HashMap() { +// { +// put(Hypervisor.HypervisorType.KVM, "07268f267dc4316dc5f86150346bb8d7"); +// put(Hypervisor.HypervisorType.XenServer, "71d8adb40baa609997acdc3eae15fbde"); +// put(Hypervisor.HypervisorType.VMware, "b356cbbdef67c4eefa8c336328e2b202"); +// put(Hypervisor.HypervisorType.Hyperv, "0982aa1461800ce1538e0cae07e00770"); +// put(Hypervisor.HypervisorType.LXC, "07268f267dc4316dc5f86150346bb8d7"); +// put(Hypervisor.HypervisorType.Ovm3, "8c643d146c82f92843b8a48c7661f800"); +// } +// }; public static final Map hypervisorGuestOsMap = new HashMap() { { @@ -415,7 +432,7 @@ private static String getTemplateFolder(Connection conn) { } private static List fetchAllHypervisors(Connection conn, Long zoneId) { - List hypervisorList = new ArrayList<>(); + List hypervisorList = new ArrayList<>(); try { PreparedStatement pstmt = conn.prepareStatement(FETCH_DISTINCT_HYPERVISORS_IN_ZONE); if(pstmt != null) { @@ -470,7 +487,7 @@ public static void updateSystemVMEntries(Connection conn, Long templateId, Pair< // update template ID of system Vms try { PreparedStatement update_templ_id_pstmt = conn - .prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? and removed is NULL"); + .prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? 
and removed is NULL"); update_templ_id_pstmt.setLong(1, templateId); update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.first().toString()); update_templ_id_pstmt.executeUpdate(); @@ -584,6 +601,25 @@ public static void registerTemplate(Connection conn, Pair hypervisorsInUse) { GlobalLock lock = GlobalLock.getInternLock("UpgradeDatabase-Lock"); try { @@ -639,6 +675,7 @@ public static void registerTemplates(Connection conn, Set hypervisorsListInUse = new HashSet(); try (PreparedStatement pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { diff --git a/packaging/build-deb.sh b/packaging/build-deb.sh index 5857ffdd8a4c..11bdf576b2f2 100755 --- a/packaging/build-deb.sh +++ b/packaging/build-deb.sh @@ -155,11 +155,10 @@ fi dch -b -v "${VERSION}~${DISTCODE}" -u low -m "Apache CloudStack Release ${VERSION}" sed -i '0,/ UNRELEASED;/s// unstable;/g' debian/changelog -apt-get install -y wget +apt-get install -y wget python-lxml wget https://bootstrap.pypa.io/pip/2.7/get-pip.py python get-pip.py -pip2 install requests configparser -python templateConfig.py +pip2 install configparser dpkg-checkbuilddeps dpkg-buildpackage -uc -us -b diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec index 7ad8062d7053..698e274a4e3a 100644 --- a/packaging/centos7/cloud.spec +++ b/packaging/centos7/cloud.spec @@ -300,7 +300,9 @@ touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid # SystemVM template mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -cp -r packaging/templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +cp -r systemvm/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/md5sum.txt + # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui diff --git a/packaging/centos8/cloud.spec b/packaging/centos8/cloud.spec index 2c2bc71a42ce..abedbc45cffa 100644 --- a/packaging/centos8/cloud.spec +++ b/packaging/centos8/cloud.spec @@ -293,7 +293,8 @@ touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid # SystemVM template mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -cp -r packaging/templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +cp -r systemvm/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/md5sum.txt # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui diff --git a/packaging/package.sh b/packaging/package.sh index 7369165e80b5..02cde646f605 100755 --- a/packaging/package.sh +++ b/packaging/package.sh @@ -79,8 +79,10 @@ function packaging() { DISTRO=$3 - pip3 install requests configparser - python3 ./templateConfig.py + yum install -y python3-lxml + pip3 install configparser + pip3 list installed | grep configparser + ls -lrt /usr/local/lib/python3.6/site-packages/backports MVN=$(which mvn) if [ -z "$MVN" ] ; then diff --git a/packaging/templateConfig.py b/packaging/templateConfig.py deleted file mode 100644 index 6179133cc9c3..000000000000 --- a/packaging/templateConfig.py +++ /dev/null @@ -1,99 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import os -import requests -import hashlib -import backports.configparser as configparser -from multiprocessing.pool import ThreadPool - -CS_MAJOR_VERSION=4.16 -CS_MINOR_VERSION=0 -templates = { - "kvm": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "vmware": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-vmware.ova" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "xenserver": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-xen.vhd.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "hyperv": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-hyperv.vhd.zip" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "lxc": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "ovm3": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-ovm.raw.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), -} - -checksums = { - "kvm": "07268f267dc4316dc5f86150346bb8d7", - "vmware": "b356cbbdef67c4eefa8c336328e2b202", - "xenserver": "71d8adb40baa609997acdc3eae15fbde", - "hyperv": "0982aa1461800ce1538e0cae07e00770", - "lxc": "07268f267dc4316dc5f86150346bb8d7", - "ovm3": "8c643d146c82f92843b8a48c7661f800" -} -destination = os.path.dirname(os.path.abspath(__file__)) + '/templates/' -if not os.path.exists(destination): - os.makedirs(destination) - -metadataFile = destination + 'metadata.ini' - - -def downloadSystemvmTemplate(url): - fileName = url.rsplit('/', 1)[1] - fileName = destination + fileName - if (os.path.exists(fileName)): - checksum = hashlib.md5(open(fileName, 'rb').read()).hexdigest() - fileChecksum = checksums[list(templates.keys())[list(templates.values()).index(url)]] - if checksum == fileChecksum: - print('Template ' + url + ' already downloaded') - return - try: - r = requests.get(url, stream=True) - if r.status_code == 200: - with open(fileName, 'wb') as f: - for chunk in r: - f.write(chunk) - except Exception as e: - print(e) - - return fileName - -def downloadTemplates(): - results = ThreadPool(4).imap_unordered(downloadSystemvmTemplate, list(templates.values())) - for path in results: - print(path) - -def createMetadataFile(): - templateFiles = [f for f in os.listdir(destination) if os.path.isfile(os.path.join(destination, f))] - # print(templates) - write_config = configparser.ConfigParser() - for template in templateFiles: - value = list(filter(lambda temp : template in temp, templates.values())) - if len(value) == 0: - continue - hypervisor = list(templates.keys())[list(templates.values()).index(value[0])] - write_config.add_section(hypervisor) - write_config.set(hypervisor, "templateName", "systemvm-{0}-{1}.{2}".format(hypervisor, CS_MAJOR_VERSION, CS_MINOR_VERSION)) - write_config.set(hypervisor, "checksum", checksums[hypervisor]) - - cfgfile = open(metadataFile, 
'w') - write_config.write(cfgfile) - cfgfile.close() - -downloadTemplates() -createMetadataFile() diff --git a/pom.xml b/pom.xml index 291005217798..8c2dbe703461 100644 --- a/pom.xml +++ b/pom.xml @@ -17,7 +17,7 @@ under the License. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> 4.0.0 @@ -172,6 +172,7 @@ 3.1.3 1.4.15 5.3.3 + 0.5.4 @@ -1040,10 +1041,10 @@ 128m 512m - -XDignore.symbol.file=true - --add-opens=java.base/java.lang=ALL-UNNAMED - --add-exports=java.base/sun.security.x509=ALL-UNNAMED - --add-exports=java.base/sun.security.provider=ALL-UNNAMED + -XDignore.symbol.file=true + --add-opens=java.base/java.lang=ALL-UNNAMED + --add-exports=java.base/sun.security.x509=ALL-UNNAMED + --add-exports=java.base/sun.security.provider=ALL-UNNAMED diff --git a/systemvm/pom.xml b/systemvm/pom.xml index 0c5406796ce4..c4c19212ae24 100644 --- a/systemvm/pom.xml +++ b/systemvm/pom.xml @@ -198,6 +198,96 @@ + + template-create + + + noredist + + + + + + com.googlecode.maven-download-plugin + download-maven-plugin + 1.6.3 + + + download-kvm-template + + wget + + + https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2 + ${basedir}/dist/systemvm-templates/ + + + + download-vmware-template + + wget + + + https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova + ${basedir}/dist/systemvm-templates/ + + + + download-xenserver-template + + wget + + + https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2 + ${basedir}/dist/systemvm-templates/ + + + + download-checksums + + wget + + + https://download.cloudstack.org/systemvm/4.16/md5sum.txt + ${basedir}/dist/systemvm-templates/ + + + + download-tgz-sha512sum + + wget + + + https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova + ${basedir}/dist/systemvm-templates/ + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + systemvm-templpate-metadata + package + + exec + + + ${basedir}/ + python3 + + templateConfig.py + + + + + + + + quickcloud diff --git a/systemvm/templateConfig.py b/systemvm/templateConfig.py new file mode 100644 index 000000000000..7e3cdf86c3a8 --- /dev/null +++ b/systemvm/templateConfig.py @@ -0,0 +1,102 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import os +from lxml import etree +import backports.configparser as configparser + +ns = {"ns" : "http://maven.apache.org/POM/4.0.0"} +doc=etree.parse("./pom.xml") + +def getCloudstackVersion(): + table = doc.xpath('.//ns:parent', namespaces=ns) + version="" + try: + for df in table: + versionTag = df.find('.//ns:version', ns) + if versionTag is not None: + version = versionTag.text + break + splitVersion=version.split("-SNAPSHOT")[0].split('.') + major='.'.join(splitVersion[0:2]) + minor=splitVersion[2] + return major,minor + + except Exception as e: + raise Exception("Failed to fetch cloudstack version") + +def getGenericName(hypervisor): + if hypervisor.lower() == "ovm3": + return "ovm" + if hypervisor.lower() == "lxc": + return "kvm" + if hypervisor.lower() == "xenserver": + return "xen" + else: + return hypervisor + +def fetchChecksum(checksumData, hypervisor): + for line in checksumData: + hypervisor = getGenericName(hypervisor) + if hypervisor in line: + print(type(line.split(" ")[0])) + return str(line.split(" ")[0]) + +def createMetadataFile(): + write_config = configparser.ConfigParser() + with open(sourceFile, "r") as md5sumFile: + checksumData = md5sumFile.readlines() + + for hypervisor in templates.keys(): + write_config.add_section(hypervisor) + write_config.set(hypervisor, "templatename", "systemvm-{0}-{1}.{2}".format(hypervisor, CS_MAJOR_VERSION, CS_MINOR_VERSION)) + checksum=fetchChecksum(checksumData, hypervisor) + write_config.set(hypervisor, "checksum", str(checksum)) + downloadUrl=templates.get(hypervisor).format(CS_MAJOR_VERSION, CS_MINOR_VERSION) + write_config.set(hypervisor, "downloadurl", str(downloadUrl)) + write_config.set(hypervisor, "filename", str(downloadUrl.split('/')[-1])) + + cfgfile = open(metadataFile, 'w') + write_config.write(cfgfile) + cfgfile.close() + + +CS_MAJOR_VERSION,CS_MINOR_VERSION=getCloudstackVersion() +templates = { + "kvm": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "vmware": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-vmware.ova" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "xenserver": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-xen.vhd.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "hyperv": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-hyperv.vhd.zip" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "lxc": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), + "ovm3": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-ovm.raw.bz2" + .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), +} + +parentPath = os.path.dirname(os.path.abspath(__file__)) + '/dist/systemvm-templates/' +if not os.path.exists(parentPath): + os.makedirs(parentPath) +metadataFile = parentPath + 'metadata.ini' +sourceFile = parentPath + 'md5sum.txt' + +createMetadataFile() + + From 9787f140241f6519d2f2e2623bcad39a16bd96cc Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 30 Jul 2021 15:13:53 +0530 Subject: [PATCH 060/117] fix rpm build --- packaging/package.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packaging/package.sh b/packaging/package.sh index 02cde646f605..ffc452fc9e9f 100755 --- a/packaging/package.sh +++ b/packaging/package.sh @@ -80,9 +80,7 @@ function packaging() { DISTRO=$3 yum install -y python3-lxml - pip3 install configparser - pip3 list installed 
| grep configparser - ls -lrt /usr/local/lib/python3.6/site-packages/backports + pip3 install --user configparser MVN=$(which mvn) if [ -z "$MVN" ] ; then From 05aa40ca2e17bb699084fb5c04e6130272cd6f91 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 30 Jul 2021 15:23:33 +0530 Subject: [PATCH 061/117] fix deb rules --- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index 32a9083295a4..f7dae7a60390 100755 --- a/debian/rules +++ b/debian/rules @@ -80,7 +80,7 @@ override_dh_auto_install: cp client/target/cloud-client-ui-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/cloudstack-$(VERSION).jar cp client/target/lib/*jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/ cp -r systemvm/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/ - rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/md5sum.txt + rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/md5sum.txt # nast hack for a couple of configuration files mv $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/cloudstack-limits.conf $(DESTDIR)/$(SYSCONFDIR)/security/limits.d/ From 6ba24bf502d488001b1c603f4e34b32cacc900ed Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 30 Jul 2021 15:23:33 +0530 Subject: [PATCH 062/117] fix deb rules --- debian/rules | 2 +- packaging/build-deb.sh | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/debian/rules b/debian/rules index 32a9083295a4..f7dae7a60390 100755 --- a/debian/rules +++ b/debian/rules @@ -80,7 +80,7 @@ override_dh_auto_install: cp client/target/cloud-client-ui-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/cloudstack-$(VERSION).jar cp client/target/lib/*jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/ cp -r systemvm/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/ - rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/md5sum.txt + rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/md5sum.txt # nast hack for a couple of configuration files mv $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/cloudstack-limits.conf $(DESTDIR)/$(SYSCONFDIR)/security/limits.d/ diff --git a/packaging/build-deb.sh b/packaging/build-deb.sh index 11bdf576b2f2..9ff2f2cca24d 100755 --- a/packaging/build-deb.sh +++ b/packaging/build-deb.sh @@ -155,10 +155,8 @@ fi dch -b -v "${VERSION}~${DISTCODE}" -u low -m "Apache CloudStack Release ${VERSION}" sed -i '0,/ UNRELEASED;/s// unstable;/g' debian/changelog -apt-get install -y wget python-lxml -wget https://bootstrap.pypa.io/pip/2.7/get-pip.py -python get-pip.py -pip2 install configparser +apt-get install -y wget python3-lxml python3-pip +pip3 install configparser dpkg-checkbuilddeps dpkg-buildpackage -uc -us -b From 8677d4658306508f0948b0aa656e7bbde8935d2f Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 30 Jul 2021 15:23:33 +0530 Subject: [PATCH 063/117] fix deb rules --- packaging/build-deb.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/packaging/build-deb.sh b/packaging/build-deb.sh index 11bdf576b2f2..9ff2f2cca24d 100755 --- a/packaging/build-deb.sh +++ b/packaging/build-deb.sh @@ -155,10 +155,8 @@ fi dch -b -v "${VERSION}~${DISTCODE}" -u low -m "Apache CloudStack Release ${VERSION}" sed -i '0,/ UNRELEASED;/s// unstable;/g' debian/changelog -apt-get install -y wget python-lxml -wget https://bootstrap.pypa.io/pip/2.7/get-pip.py -python get-pip.py -pip2 install configparser +apt-get install -y wget python3-lxml python3-pip +pip3 install 
configparser dpkg-checkbuilddeps dpkg-buildpackage -uc -us -b From 8718b9c607edfcc4998508502c80e4bdf40a6a4a Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 30 Jul 2021 17:07:25 +0530 Subject: [PATCH 064/117] test --- scripts/storage/secondary/cloud-install-sys-tmplt | 2 +- scripts/storage/secondary/createtmplt.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/storage/secondary/cloud-install-sys-tmplt b/scripts/storage/secondary/cloud-install-sys-tmplt index cc4435475d15..b11030d92560 100755 --- a/scripts/storage/secondary/cloud-install-sys-tmplt +++ b/scripts/storage/secondary/cloud-install-sys-tmplt @@ -16,7 +16,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. - +set -x usage() { printf "\nUsage: %s:\n\t-m secondary storage mount point\n\t-f system vm template file\n\t-h hypervisor name: kvm|vmware|xenserver|hyperv|ovm3\n\t-s mgmt server secret key, if you specified any when running cloudstack-setup-database, default is password\n\t-u Url to system vm template\n\t-F clean up system templates of specified hypervisor\n\t-e Template suffix, e.g vhd, ova, qcow2\n\t-o Database server hostname or ip, e.g localhost\n\t-r Database user name, e.g root\n\t-p mysql database port\n\t-d Database password. Followed by nothing if the password is empty\n\n" $(basename $0) >&2 diff --git a/scripts/storage/secondary/createtmplt.sh b/scripts/storage/secondary/createtmplt.sh index 0de441ca0c16..a143580b000e 100755 --- a/scripts/storage/secondary/createtmplt.sh +++ b/scripts/storage/secondary/createtmplt.sh @@ -15,7 +15,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. - +set -x # $Id: createtmplt.sh 9132 2010-06-04 20:17:43Z manuel $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/storage/secondary/createtmplt.sh $ @@ -212,7 +212,7 @@ fi imgsize=$(ls -l $tmpltimg2| awk -F" " '{print $5}') -if [ $cloud ] +if [ "$cloud" == "true" ] then create_from_file_user $tmpltfs $tmpltimg2 $tmpltname tmpltfs=/tmp/cloud/templates/ From a6b23715fefe0acfeec42e81de1ceec1194031c4 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 30 Jul 2021 18:37:38 +0530 Subject: [PATCH 065/117] cleanup --- .../upgrade/SystemVmTemplateRegistration.java | 53 ++----------------- 1 file changed, 4 insertions(+), 49 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 608383645382..639bc89c0095 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -186,18 +186,10 @@ public void setUniqueName(String uniqueName) { ); public static final Map NewTemplateNameList = new HashMap(); - -// public static final Map NewTemplateNameList = new HashMap() { -// { -// put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); -// put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); -// put(Hypervisor.HypervisorType.XenServer, "systemvm-xenserver-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); -// put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); -// put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-" + CS_MAJOR_VERSION + "." 
+ CS_MINOR_VERSION); -// put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-" + CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); -// } -// }; - + public static final Map fileNames = new HashMap(); + public static final Map newTemplateUrl = new HashMap(); + public static final Map newTemplateChecksum = new HashMap(); + public static final Map routerTemplateConfigurationNames = new HashMap() { { put(Hypervisor.HypervisorType.KVM, "router.template.kvm"); @@ -209,43 +201,6 @@ public void setUniqueName(String uniqueName) { } }; - public static final Map fileNames = new HashMap(); -// public static final Map fileNames = new HashMap() { -// { -// put(Hypervisor.HypervisorType.KVM, "systemvmtemplate-4.16.0-kvm.qcow2.bz2"); -// put(Hypervisor.HypervisorType.VMware, "systemvmtemplate-4.16.0-vmware.ova"); -// put(Hypervisor.HypervisorType.XenServer, "systemvmtemplate-4.16.0-xen.vhd.bz2"); -// put(Hypervisor.HypervisorType.Hyperv, "systemvmtemplate-4.16.0-hyperv.vhd.zip"); -// put(Hypervisor.HypervisorType.LXC, "systemvmtemplate-4.16.0-kvm.qcow2.bz2"); -// put(Hypervisor.HypervisorType.Ovm3, "systemvmtemplate-4.16.0-ovm.raw.bz2"); -// } -// }; - - public static final Map newTemplateUrl = new HashMap(); -// public static final Map newTemplateUrl = new HashMap() { -// { -// put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.KVM)); -// put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.VMware)); -// put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.XenServer)); -// put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.Hyperv)); -// put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.LXC)); -// put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.16/" + fileNames.get(Hypervisor.HypervisorType.Ovm3)); -// } -// }; - - // Read from ini - don't pkg ovs/hyperv - - public static final Map newTemplateChecksum = new HashMap(); -// public static final Map newTemplateChecksum = new HashMap() { -// { -// put(Hypervisor.HypervisorType.KVM, "07268f267dc4316dc5f86150346bb8d7"); -// put(Hypervisor.HypervisorType.XenServer, "71d8adb40baa609997acdc3eae15fbde"); -// put(Hypervisor.HypervisorType.VMware, "b356cbbdef67c4eefa8c336328e2b202"); -// put(Hypervisor.HypervisorType.Hyperv, "0982aa1461800ce1538e0cae07e00770"); -// put(Hypervisor.HypervisorType.LXC, "07268f267dc4316dc5f86150346bb8d7"); -// put(Hypervisor.HypervisorType.Ovm3, "8c643d146c82f92843b8a48c7661f800"); -// } -// }; - public static final Map hypervisorGuestOsMap = new HashMap() { { put(Hypervisor.HypervisorType.KVM, 15); From 390c14b8fafd9f05c6626c211ff64c4434943de8 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 2 Aug 2021 11:55:39 +0530 Subject: [PATCH 066/117] update injectkeys.sh script to only copy private key to hosts and update pom xml to adapt for noredis noredist vs other builds --- scripts/vm/systemvm/injectkeys.sh | 48 ++--------- .../cloud/server/ConfigurationServerImpl.java | 17 ++-- systemvm/pom.xml | 79 +++++++++---------- 3 files changed, 50 insertions(+), 94 deletions(-) diff --git a/scripts/vm/systemvm/injectkeys.sh b/scripts/vm/systemvm/injectkeys.sh index 6f006ea130ef..c05d232c0e7c 100755 --- a/scripts/vm/systemvm/injectkeys.sh +++ 
b/scripts/vm/systemvm/injectkeys.sh @@ -18,8 +18,7 @@ # Copies keys that enable SSH communication with system vms -# $1 = new public key -# $2 = new private key +# $1 = new private key #set -x set -e @@ -33,34 +32,6 @@ clean_up() { $SUDO umount $MOUNTPATH } -inject_into_iso() { - local isofile=${systemvmpath} - local newpubkey=$2 - local backup=${isofile}.bak - local tmpiso=${TMP}/$1 - mkdir -p $MOUNTPATH - [ ! -f $isofile ] && echo "$(basename $0): Could not find systemvm iso patch file $isofile" && return 1 - $SUDO mount -o loop $isofile $MOUNTPATH - [ $? -ne 0 ] && echo "$(basename $0): Failed to mount original iso $isofile" && clean_up && return 1 - diff -q $MOUNTPATH/authorized_keys $newpubkey &> /dev/null && echo "New public key is the same as the one in the systemvm.iso, not injecting it, not modifying systemvm.iso" && clean_up && return 0 - $SUDO cp -b $isofile $backup - [ $? -ne 0 ] && echo "$(basename $0): Failed to backup original iso $isofile" && clean_up && return 1 - rm -rf $TMPDIR - mkdir -p $TMPDIR - [ ! -d $TMPDIR ] && echo "$(basename $0): Could not find/create temporary dir $TMPDIR" && clean_up && return 1 - $SUDO cp -fr $MOUNTPATH/* $TMPDIR/ - [ $? -ne 0 ] && echo "$(basename $0): Failed to copy from original iso $isofile" && clean_up && return 1 - $SUDO cp $newpubkey $TMPDIR/authorized_keys - [ $? -ne 0 ] && echo "$(basename $0): Failed to copy key $newpubkey from original iso to new iso " && clean_up && return 1 - mkisofs -quiet -r -o $tmpiso $TMPDIR - [ $? -ne 0 ] && echo "$(basename $0): Failed to create new iso $tmpiso from $TMPDIR" && clean_up && return 1 - $SUDO umount $MOUNTPATH - [ $? -ne 0 ] && echo "$(basename $0): Failed to unmount old iso from $MOUNTPATH" && return 1 - $SUDO cp -f $tmpiso $isofile - [ $? -ne 0 ] && echo "$(basename $0): Failed to overwrite old iso $isofile with $tmpiso" && return 1 - rm -rf $TMPDIR -} - copy_priv_key() { local newprivkey=$1 diff -q $newprivkey $(dirname $0)/id_rsa.cloud && return 0 @@ -76,28 +47,19 @@ fi $SUDO mkdir -p $MOUNTPATH -[ $# -ne 3 ] && echo "Usage: $(basename $0) " && exit 3 -newpubkey=$1 -newprivkey=$2 -systemvmpath=$3 -[ ! -f $newpubkey ] && echo "$(basename $0): Could not open $newpubkey" && exit 3 +[ $# -ne 1 ] && echo "Usage: $(basename $0) " && exit 3 +newprivkey=$1 [ ! -f $newprivkey ] && echo "$(basename $0): Could not open $newprivkey" && exit 3 -command -v mkisofs > /dev/null || (echo "$(basename $0): mkisofs not found, please install or ensure PATH is accurate" ; exit 4) - # if running into Docker as unprivileges, skip ssh verification as iso cannot be mounted due to missing loop device. if [ -f /.dockerenv ]; then if [ -e /dev/loop0 ]; then # it's a docker instance with privileges. - inject_into_iso systemvm.iso $newpubkey - [ $? -ne 0 ] && exit 5 copy_priv_key $newprivkey else - # this mean it's a docker instance, ssh key cannot be verify. - echo "We run inside Docker, skipping ssh key insertion in systemvm.iso" + # this mean it's a docker instance, ssh key cannot be verified. + echo "We run inside Docker, skipping copying private key" fi else - inject_into_iso systemvm.iso $newpubkey - [ $? 
-ne 0 ] && exit 5 copy_priv_key $newprivkey fi diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index 5557a2bfb95d..bc5595dbd42d 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -669,7 +669,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } s_logger.info("Going to update systemvm iso with generated keypairs if needed"); try { - // injectSshKeysIntoSystemVmIsoPatch(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath()); + copyPrivateKeyToHosts(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath()); } catch (CloudRuntimeException e) { if (!devel) { throw new CloudRuntimeException(e.getMessage()); @@ -738,8 +738,8 @@ private void updateKeyPairsOnDisk(String homeDir) { } } - protected void injectSshKeysIntoSystemVmIsoPatch(String publicKeyPath, String privKeyPath) { - s_logger.info("Trying to inject public and private keys into systemvm iso"); + protected void copyPrivateKeyToHosts(String publicKeyPath, String privKeyPath) { + s_logger.info("Trying to copy private keys to hosts"); String injectScript = getInjectScript(); String scriptPath = Script.findScript("", injectScript); String systemVmIsoPath = Script.findScript("", "vms/systemvm.iso"); @@ -757,16 +757,13 @@ protected void injectSshKeysIntoSystemVmIsoPatch(String publicKeyPath, String pr } if (isOnWindows()) { scriptPath = scriptPath.replaceAll("\\\\" ,"/" ); - systemVmIsoPath = systemVmIsoPath.replaceAll("\\\\" ,"/" ); - publicKeyPath = publicKeyPath.replaceAll("\\\\" ,"/" ); + //systemVmIsoPath = systemVmIsoPath.replaceAll("\\\\" ,"/" ); + //publicKeyPath = publicKeyPath.replaceAll("\\\\" ,"/" ); privKeyPath = privKeyPath.replaceAll("\\\\" ,"/" ); } -// command.add(scriptPath); -// command.add(publicKeyPath); -// command.add(privKeyPath); -// command.add(systemVmIsoPath); - + command.add(scriptPath); + command.add(privKeyPath); final String result = command.execute(); s_logger.info("The script injectkeys.sh was run with result : " + result); if (result != null) { diff --git a/systemvm/pom.xml b/systemvm/pom.xml index c4c19212ae24..4bd317c1af37 100644 --- a/systemvm/pom.xml +++ b/systemvm/pom.xml @@ -163,6 +163,44 @@ + + com.googlecode.maven-download-plugin + download-maven-plugin + 1.6.3 + + + download-checksums + + wget + + + https://download.cloudstack.org/systemvm/4.16/md5sum.txt + ${basedir}/dist/systemvm-templates/ + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + systemvm-template-metadata + package + + exec + + + ${basedir}/ + python3 + + templateConfig.py + + + + + @@ -242,47 +280,6 @@ ${basedir}/dist/systemvm-templates/ - - download-checksums - - wget - - - https://download.cloudstack.org/systemvm/4.16/md5sum.txt - ${basedir}/dist/systemvm-templates/ - - - - download-tgz-sha512sum - - wget - - - https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova - ${basedir}/dist/systemvm-templates/ - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - systemvm-templpate-metadata - package - - exec - - - ${basedir}/ - python3 - - templateConfig.py - - - From 6038c60911cc181b1264d7beec94459fb6daca21 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 2 Aug 2021 15:21:55 +0530 Subject: [PATCH 067/117] Fix travis failure --- .../upgrade/SystemVmTemplateRegistration.java | 30 +++++++++++++++++-- tools/travis/before_install.sh | 4 +-- 2 files changed, 
29 insertions(+), 5 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index a0ddd9d5362a..70a6e2475670 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -52,8 +52,11 @@ public class SystemVmTemplateRegistration { private static final String mountCommand = "sudo mount -t nfs %s %s"; private static final String umountCommand = "sudo umount %s"; private static final String hashAlgorithm = "MD5"; - private static final String templatesPath = "/usr/share/cloudstack-management/templates/"; - private static final String metadataFile = templatesPath + "metadata.ini"; + private static final String relativeTemplatePath = "./systemvm/dist/systemvm-templates/"; + private static final String AbsolutetemplatesPath = "/usr/share/cloudstack-management/templates/"; + private static final String templatesPath = fetchTemplatesPath(); + private static final String metadataFileName = "metadata.ini"; + private static final String metadataFile = templatesPath + metadataFileName; private static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; private static final String PARTIAL_TEMPLATE_FOLDER = "/template/tmpl/1/"; @@ -286,6 +289,27 @@ static long isTemplateAlreadyRegistered(Connection conn, Pair /tmp/piplog - python2 -m pip install --user --upgrade urllib3 lxml paramiko nose texttable ipmisim pyopenssl pycrypto mock flask netaddr pylint pycodestyle six astroid >> /tmp/piplog + python3 -m pip install --user --upgrade urllib3 lxml paramiko nose texttable ipmisim pyopenssl pycrypto mock flask netaddr pylint pycodestyle six astroid configparser > /tmp/piplog + python2 -m pip install --user --upgrade urllib3 lxml paramiko nose texttable ipmisim pyopenssl pycrypto mock flask netaddr pylint pycodestyle six astroid configparser >> /tmp/piplog if [[ $? -eq 0 ]]; then echo -e "\npython packages installed successfully" break; From 82620a7d271187f927b68d4991da7ddd28c2e5ca Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 2 Aug 2021 18:33:59 +0530 Subject: [PATCH 068/117] Update registration workflow during upgrade --- .../upgrade/SystemVmTemplateRegistration.java | 205 ++++++++++++++---- 1 file changed, 157 insertions(+), 48 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 70a6e2475670..d1e5cd514a6c 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -64,11 +64,16 @@ public class SystemVmTemplateRegistration { private static final String FETCH_DISTINCT_ELIGIBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM `cloud`.`image_store` WHERE protocol = \"nfs\" AND removed is null"; private static final String FETCH_DISTINCT_HYPERVISORS_IN_ZONE = "SELECT DISTINCT(hypervisor_type) FROM `cloud`.`cluster` where removed is null and data_center_id=?"; private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url,id FROM `cloud`.`image_store` WHERE data_center_id=? 
AND removed IS NULL LIMIT 1"; - private static final String UPDATE_VM_TEMPLATE_TABLE = "INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, featured, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, cross_zones, hypervisor_type, state)" + - "VALUES (?, ?, ?, ?, 0, 0, ?, 'SYSTEM', 0, 64, 1, ?, ?, 0, ?, ?, ?, 1, ?, 'Active')"; - private static final String UPDATE_TEMPLATE_STORE_REF_TABLE = "INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, state, destroyed, is_copy," + - " update_count, ref_cnt, store_role) VALUES (?, ?, ?, ?, NULL, 100, ?, ?, 'DOWNLOADED', NULL, NULL, ?, ?, 'READY', 0, 0, 0, 0, 'Image')"; + private static final String INSERT_VM_TEMPLATE_TABLE = "INSERT INTO `cloud`.`vm_template` (uuid, unique_name, name, public, featured, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, cross_zones, hypervisor_type, state, deploy_as_is)" + + "VALUES (?, ?, ?, 0, 0, ?, 'SYSTEM', 0, 64, 1, ?, ?, 0, ?, ?, ?, 1, ?, 'Inactive', ?)"; + private static final String INSERT_TEMPLATE_STORE_REF_TABLE = "INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, download_state, error_str, local_path, install_path, url, state, destroyed, is_copy," + + " update_count, ref_cnt, store_role) VALUES (?, ?, ?, ?, NULL, 0, 'NOT_DOWNLOADED', NULL, NULL, ?, ?, 'Allocated', 0, 0, 0, 0, 'Image')"; + private static final String UPDATE_TEMPLATE_STORE_REF_TABLE = "UPDATE template_store_ref SET download_pct=100, download_state='DOWNLOADED', " + + "state='Ready', size=?, physical_size=?, last_updated=?, updated=? where template_id=?"; + private static final String UPDATE_VM_TEMPLATE_ENTRY = "UPDATE vm_template set size = ?, state = 'Active' where id = ?"; private static final String UPDATE_CONFIGURATION_TABLE = "UPDATE `cloud`.`configuration` SET value = ? 
WHERE name = ?"; + private static final String UPDATE_TEMPLATE_TABLE_ON_FAILURE = "UPDATE vm_template set removed = ?, state = 'Inactive' where id = ?"; + private static final String DELETE_TEMPLATE_REF_RECORD_ON_FAILURE = "DELETE from template_store_ref where template_id = ?"; public static final String CS_MAJOR_VERSION = "4.16"; public static final String CS_MINOR_VERSION = "0"; @@ -88,14 +93,15 @@ private static class SystemVMTemplateDetails { Long size; Long physicalSize; String installPath; + boolean deployAsIs; + Date updated; SystemVMTemplateDetails() { } - SystemVMTemplateDetails(Long id, String uuid, String name, Date created, String url, String checksum, + SystemVMTemplateDetails(String uuid, String name, Date created, String url, String checksum, TemplateFormat format, Integer guestOsId, Hypervisor.HypervisorType hypervisorType, Long storeId) { - this.id = id; this.uuid = uuid; this.name = name; this.created = created; @@ -107,6 +113,10 @@ private static class SystemVMTemplateDetails { this.storeId = storeId; } + public void setId(Long id) { + this.id = id; + } + public Long getId() { return id; } @@ -178,6 +188,22 @@ public String getUniqueName() { public void setUniqueName(String uniqueName) { this.uniqueName = uniqueName; } + + public boolean isDeployAsIs() { + return deployAsIs; + } + + public void setDeployAsIs(boolean deployAsIs) { + this.deployAsIs = deployAsIs; + } + + public Date getUpdated() { + return updated; + } + + public void setUpdated(Date updated) { + this.updated = updated; + } } public static final List hypervisorList = Arrays.asList(Hypervisor.HypervisorType.KVM, @@ -340,6 +366,8 @@ private static List getEligibleZoneIds(Connection conn) { zones.add(rs.getLong(1)); } } catch (SQLException e) { + String errMsg = "Failed to fetch eligible zones for SystemVM template registration due to: %s"; + LOGGER.error(String.format(errMsg, e.getMessage())); throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration"); } return zones; @@ -359,7 +387,9 @@ private static Pair getNfsStoreInZone(Connection conn, Long zoneId } } } catch (SQLException e) { - throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration"); + String errMsg = String.format("Failed to fetch NFS store in zone = %s for SystemVM template registration", zoneId); + LOGGER.error(errMsg + String.format("due to: %s", e.getMessage())); + throw new CloudRuntimeException(errMsg); } return new Pair<>(url, storeId); } @@ -422,43 +452,88 @@ private static List fetchAllHypervisors(Connection conn, Long zoneId) { } } } catch (SQLException e) { - throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration"); + String errMsg = String.format("Failed to fetch distinct hypervisors in zone: %s for SystemVM template registration", zoneId); + LOGGER.error(errMsg + String.format("due to: %s", e.getMessage())); + throw new CloudRuntimeException(errMsg); } return hypervisorList; } - public static void updateDb(Connection conn, SystemVMTemplateDetails details) { + private static Long createTemplateObjectInDB(Connection conn, SystemVMTemplateDetails details) { + Long id = null; try { - PreparedStatement pstmt = conn.prepareStatement(UPDATE_VM_TEMPLATE_TABLE); + PreparedStatement pstmt = conn.prepareStatement(INSERT_VM_TEMPLATE_TABLE); if (pstmt != null) { - pstmt.setLong(1, details.getId()); - pstmt.setString(2, details.getUuid()); - pstmt.setString(3, details.getUniqueName()); - pstmt.setString(4, 
details.getName()); - pstmt.setDate(5, details.getCreated()); - pstmt.setString(6, details.getUrl()); - pstmt.setString(7, details.getChecksum()); - pstmt.setString(8, details.getName()); - pstmt.setString(9, details.getFormat().toString()); - pstmt.setLong(10, details.getGuestOsId()); - pstmt.setString(11, details.getHypervisorType().toString()); + int i = 1; + pstmt.setString(i++, details.getUuid()); + pstmt.setString(i++, details.getUuid()); + pstmt.setString(i++, details.getName()); + pstmt.setDate(i++, details.getCreated()); + pstmt.setString(i++, details.getUrl()); + pstmt.setString(i++, details.getChecksum()); + pstmt.setString(i++, details.getName()); + pstmt.setString(i++, details.getFormat().toString()); + pstmt.setLong(i++, details.getGuestOsId()); + pstmt.setString(i++, details.getHypervisorType().toString()); + pstmt.setBoolean(i++, details.getHypervisorType() == Hypervisor.HypervisorType.VMware); pstmt.executeUpdate(); + + pstmt = conn.prepareStatement("SELECT id FROM vm_template ORDER BY id DESC LIMIT 1"); + try (ResultSet rs = pstmt.executeQuery()) { + if (rs.next()) { + id = rs.getLong(1); + } + } catch (final SQLException e) { + String errMsg = String.format("Failed to fetch template id %s", e.getMessage()); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } } + } catch (Exception e) { + throw new CloudRuntimeException(String.format("Failed to create vm_template record for the systemVM template for hypervisor: %s, due to: %s", details.getHypervisorType().name(), e.getMessage())); + } + return id; + } - PreparedStatement pstmt1 = conn.prepareStatement(UPDATE_TEMPLATE_STORE_REF_TABLE); - if (pstmt1 != null) { - pstmt1.setLong(1, details.getStoreId()); - pstmt1.setLong(2, details.getId()); - pstmt1.setDate(3, details.getCreated()); - pstmt1.setDate(4, details.getCreated()); - pstmt1.setLong(5, details.getSize()); - pstmt1.setLong(6, details.getPhysicalSize()); - pstmt1.setString(7, details.getInstallPath()); - pstmt1.setString(8, details.getUrl()); - pstmt1.executeUpdate(); + private static void createTemplateStoreRefEntry(Connection conn, SystemVMTemplateDetails details) { + try { + PreparedStatement pstmt = conn.prepareStatement(INSERT_TEMPLATE_STORE_REF_TABLE); + if (pstmt != null) { + int i = 1; + pstmt.setLong(i++, details.getStoreId()); + pstmt.setLong(i++, details.getId()); + pstmt.setDate(i++, details.getCreated()); + pstmt.setDate(i++, details.getCreated()); + pstmt.setString(i++, details.getInstallPath()); + pstmt.setString(i++, details.getUrl()); + pstmt.executeUpdate(); } } catch (Exception e) { - throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration: " + e.getMessage()); + throw new CloudRuntimeException(String.format("Failed to create template_store_ref record for the systemVM template for hypervisor: %s, due to: %s", details.getHypervisorType().name(), e.getMessage())); + } + } + + public static void updateDb(Connection conn, SystemVMTemplateDetails details) { + try { + int i = 1; + PreparedStatement pstmt = conn.prepareStatement(UPDATE_VM_TEMPLATE_ENTRY); + if (pstmt != null) { + pstmt.setLong(i++, details.getSize()); + pstmt.setLong(i++, details.getId()); + pstmt.executeUpdate(); + } + i = 1; + pstmt = conn.prepareStatement(UPDATE_TEMPLATE_STORE_REF_TABLE); + if (pstmt != null) { + pstmt.setLong(i++, details.getSize()); + pstmt.setLong(i++, details.getPhysicalSize()); + pstmt.setDate(i++, details.getUpdated()); + pstmt.setDate(i++, details.getUpdated()); + pstmt.setLong(i++, details.getId()); + 
pstmt.executeUpdate(); + } + } catch (Exception e) { + throw new CloudRuntimeException(String.format("Failed to update template_store_ref record for the systemVM template registered for hypervisor: %s, due to: %s", details.getHypervisorType().name(), e.getMessage())); } } @@ -500,15 +575,12 @@ public static void updateConfigurationParams(Connection conn, Map hypervisorAndTemplateName, Long zoneId) { + Long templateId = null; try { Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); Pair storeUrlAndId = getNfsStoreInZone(conn, zoneId); mountStore(storeUrlAndId.first()); - String destTempFolderName = getTemplateFolder(conn); + final String templateName = UUID.randomUUID().toString(); + Date created = new Date(DateUtil.currentGMTTime().getTime()); + SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, NewTemplateNameList.get(hypervisor), created, + newTemplateUrl.get(hypervisor), newTemplateChecksum.get(hypervisor), hypervisorImageFormat.get(hypervisor), hypervisorGuestOsMap.get(hypervisor), hypervisor, storeUrlAndId.second()); + templateId = createTemplateObjectInDB(conn, details); + if (templateId == null) { + throw new CloudRuntimeException(String.format("Failed to register template for hypervisor: %s", hypervisor.name())); + } + details.setId(templateId); + String destTempFolderName = String.valueOf(templateId); String destTempFolder = PARENT_TEMPLATE_FOLDER + PARTIAL_TEMPLATE_FOLDER + destTempFolderName; - Script.runSimpleBashScript("mkdir -p " + destTempFolder); + details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + "/" + templateName + "." + hypervisorImageFormat.get(hypervisor).fileExtension); + createTemplateStoreRefEntry(conn, details); String storageScriptsDir = "scripts/storage/secondary"; String setupTmpltScript = Script.findScript(storageScriptsDir, "setup-sysvm-tmplt"); if (setupTmpltScript == null) { throw new ConfigurationException("Unable to find the createtmplt.sh"); } - Script scr = new Script(setupTmpltScript, 120000, LOGGER); - final String templateName = UUID.randomUUID().toString(); scr.add("-u", templateName); scr.add("-f", templatesPath + fileNames.get(hypervisorAndTemplateName.first())); scr.add("-h", hypervisorAndTemplateName.first().name().toLowerCase(Locale.ROOT)); @@ -562,20 +664,22 @@ public static void registerTemplate(Connection conn, Pair configParams = new HashMap<>(); configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." 
+ CS_MINOR_VERSION); updateConfigurationParams(conn, configParams); - updateSystemVMEntries(conn, Long.valueOf(destTempFolderName), hypervisorAndTemplateName); + updateSystemVMEntries(conn, templateId, hypervisorAndTemplateName); } catch (Exception e) { - String errMsg = String.format("Failed to register template for hypervisor: %s ", hypervisorAndTemplateName.first()); + String errMsg = String.format("Failed to register template for hypervisor: %s, due to: %s", hypervisorAndTemplateName.first(), e.getMessage()); LOGGER.error(errMsg); + if (templateId != null) { + updateTemplateTablesOnFailure(conn, templateId); + cleanupStore(templateId); + } throw new CloudRuntimeException(errMsg); } } @@ -592,13 +696,18 @@ public static void parseMetadataFile() { newTemplateChecksum.put(hypervisorType, section.get("checksum")); newTemplateUrl.put(hypervisorType, section.get("downloadurl")); } - } catch (Exception e) { String errMsg = String.format("Failed to parse systemVM template metadata file: %s", metadataFile); LOGGER.error(errMsg); throw new CloudRuntimeException(errMsg); } } + + private static void cleanupStore(Long templateId) { + String destTempFolder = PARENT_TEMPLATE_FOLDER + PARTIAL_TEMPLATE_FOLDER + String.valueOf(templateId); + Script.runSimpleBashScript("rm -rf " + destTempFolder); + } + public static void registerTemplates(Connection conn, Set hypervisorsInUse) { GlobalLock lock = GlobalLock.getInternLock("UpgradeDatabase-Lock"); try { From 027d8f9f03b87a1bc97119c417c867eb96b9ae35 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 2 Aug 2021 18:38:18 +0530 Subject: [PATCH 069/117] change master reference in k8s context to control Node --- .../META-INF/db/schema-41510to41600.sql | 2 +- .../cluster/KubernetesClusterManagerImpl.java | 10 +++++----- .../kubernetes/cluster/KubernetesClusterVO.java | 4 ++-- .../cluster/KubernetesClusterVmMap.java | 2 +- .../cluster/KubernetesClusterVmMapVO.java | 16 ++++++++-------- .../KubernetesClusterActionWorker.java | 4 ++-- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index 3f30f7f39b34..f61ed1bfa12e 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -30,7 +30,7 @@ ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyin ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint; ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `maxsize` bigint; -ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `master` tinyint(1) unsigned NOT NULL DEFAULT 0; +ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `control_node` tinyint(1) unsigned NOT NULL DEFAULT 0; -- Adding dynamic scalable flag for service offering table ALTER TABLE `cloud`.`service_offering` ADD COLUMN `dynamic_scaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 1 COMMENT 'true(1) if VM needs to be dynamically scalable of cpu or memory'; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 07f3d58af33a..e54517588c4e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -880,13 +880,13 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd if (nodes == null || nodes.size() != nodeIds.size()) { throw new InvalidParameterValueException("Invalid node ids"); } - // Ensure there's always a master - long mastersToRemove = nodes.stream().filter(x -> x.isMaster()).count(); - if (mastersToRemove >= kubernetesCluster.getControlNodeCount()) { - throw new InvalidParameterValueException("Can not remove all masters from a cluster"); + // Ensure there's always a control node + long controleNodesToRemove = nodes.stream().filter(x -> x.isControlNode()).count(); + if (controleNodesToRemove >= kubernetesCluster.getControlNodeCount()) { + throw new InvalidParameterValueException("Can not remove all control nodes from a cluster"); } // Ensure there's always a node - long nodesToRemove = nodes.stream().filter(x -> !x.isMaster()).count(); + long nodesToRemove = nodes.stream().filter(x -> !x.isControlNode()).count(); if (nodesToRemove >= kubernetesCluster.getNodeCount()) { throw new InvalidParameterValueException("Can not remove all nodes from a cluster"); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index 35081f4e193c..90b368de1192 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -370,9 +370,9 @@ public KubernetesClusterVO(String name, String description, long zoneId, long ku } public KubernetesClusterVO(String name, String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId, - long networkId, long domainId, long accountId, long masterNodeCount, long nodeCount, State state, String keyPair, long cores, + long networkId, long domainId, long accountId, long controlNodeCount, long nodeCount, State state, String keyPair, long cores, long memory, Long nodeRootDiskSize, String endpoint, boolean autoscalingEnabled, Long minSize, Long maxSize) { - this(name, description, zoneId, kubernetesVersionId, serviceOfferingId, templateId, networkId, domainId, accountId, masterNodeCount, + this(name, description, zoneId, kubernetesVersionId, serviceOfferingId, templateId, networkId, domainId, accountId, controlNodeCount, nodeCount, state, keyPair, cores, memory, nodeRootDiskSize, endpoint); this.autoscalingEnabled = autoscalingEnabled; this.minSize = minSize; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java index b20cf0451a6d..9a35fccdf8d9 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java @@ -27,5 +27,5 @@ public interface KubernetesClusterVmMap { long getId(); long getClusterId(); long getVmId(); - boolean isMaster(); + boolean isControlNode(); } diff --git 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java index d9ed7ae119ac..f6126f01be5b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java @@ -39,16 +39,16 @@ public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap { @Column(name = "vm_id") long vmId; - @Column(name = "master") - boolean master; + @Column(name = "control_node") + boolean controlNode; public KubernetesClusterVmMapVO() { } - public KubernetesClusterVmMapVO(long clusterId, long vmId, boolean master) { + public KubernetesClusterVmMapVO(long clusterId, long vmId, boolean controlNode) { this.vmId = vmId; this.clusterId = clusterId; - this.master = master; + this.controlNode = controlNode; } @@ -76,11 +76,11 @@ public void setVmId(long vmId) { } @Override - public boolean isMaster() { - return master; + public boolean isControlNode() { + return controlNode; } - public void setMaster(boolean master) { - this.master = master; + public void setControlNode(boolean controlNode) { + this.controlNode = controlNode; } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 1067be22d863..660eb03a2d52 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -254,11 +254,11 @@ protected File getManagementServerSshPublicKeyFile() { return new File(keyFile); } - protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId, boolean isMaster) { + protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId, boolean isControlNode) { return Transaction.execute(new TransactionCallback() { @Override public KubernetesClusterVmMapVO doInTransaction(TransactionStatus status) { - KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId, isMaster); + KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId, isControlNode); kubernetesClusterVmMapDao.persist(newClusterVmMap); return newClusterVmMap; } From 96d148279ca82c02844e6109295d286cf0a8180f Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 2 Aug 2021 20:27:39 +0530 Subject: [PATCH 070/117] upgrade setuptools - required by k8s --- tools/marvin/setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/marvin/setup.py b/tools/marvin/setup.py index a35aec23d07e..3da62333113a 100644 --- a/tools/marvin/setup.py +++ b/tools/marvin/setup.py @@ -59,7 +59,8 @@ "retries", "PyCrypt", "kubernetes", - "urllib3" + "urllib3", + "setuptools >= 40.3.0" ], py_modules=['marvin.marvinPlugin'], zip_safe=False, From e49ad71f3f1a38fe19553388aa35e95ac61c3061 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 3 Aug 2021 15:34:48 +0530 Subject: [PATCH 071/117] Change location of template files 
and script to work with simulator build --- debian/rules | 2 +- engine/schema/pom.xml | 93 +++++++++++++++++++ .../upgrade/SystemVmTemplateRegistration.java | 3 +- {systemvm => engine/schema}/templateConfig.py | 1 - packaging/centos7/cloud.spec | 2 +- packaging/centos8/cloud.spec | 2 +- systemvm/pom.xml | 87 ----------------- 7 files changed, 98 insertions(+), 92 deletions(-) rename {systemvm => engine/schema}/templateConfig.py (98%) diff --git a/debian/rules b/debian/rules index f7dae7a60390..7139ee85ada2 100755 --- a/debian/rules +++ b/debian/rules @@ -79,7 +79,7 @@ override_dh_auto_install: cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/ cp client/target/cloud-client-ui-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/cloudstack-$(VERSION).jar cp client/target/lib/*jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/ - cp -r systemvm/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/ + cp -r engine/schema/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/ rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/md5sum.txt # nast hack for a couple of configuration files diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index dcb485281358..6f99d9e5b846 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -58,4 +58,97 @@ ${cs.ini.version} + + + + com.googlecode.maven-download-plugin + download-maven-plugin + 1.6.3 + + + download-checksums + + wget + + + https://download.cloudstack.org/systemvm/4.16/md5sum.txt + ${basedir}/dist/systemvm-templates/ + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + systemvm-template-metadata + package + + exec + + + ${basedir}/ + python3 + + templateConfig.py + + + + + + + + + + template-create + + + noredist + + + + + + com.googlecode.maven-download-plugin + download-maven-plugin + 1.6.3 + + + download-kvm-template + + wget + + + https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2 + ${basedir}/dist/systemvm-templates/ + + + + download-vmware-template + + wget + + + https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova + ${basedir}/dist/systemvm-templates/ + + + + download-xenserver-template + + wget + + + https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2 + ${basedir}/dist/systemvm-templates/ + + + + + + + + diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index d1e5cd514a6c..0c71a8044c53 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -52,7 +52,7 @@ public class SystemVmTemplateRegistration { private static final String mountCommand = "sudo mount -t nfs %s %s"; private static final String umountCommand = "sudo umount %s"; private static final String hashAlgorithm = "MD5"; - private static final String relativeTemplatePath = "./systemvm/dist/systemvm-templates/"; + private static final String relativeTemplatePath = "./engine/schema/dist/systemvm-templates/"; private static final String AbsolutetemplatesPath = "/usr/share/cloudstack-management/templates/"; private static final String templatesPath = fetchTemplatesPath(); private static final String metadataFileName = "metadata.ini"; @@ -61,6 +61,7 @@ public class SystemVmTemplateRegistration { private static final String 
PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; private static final String PARTIAL_TEMPLATE_FOLDER = "/template/tmpl/1/"; private static final String FETCH_FOLDER_NAME = "SELECT id FROM vm_template ORDER BY id DESC LIMIT 1;"; + // TODO: filter out only zones with NFS based 'Image' stores - to rule out image cache scenario private static final String FETCH_DISTINCT_ELIGIBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM `cloud`.`image_store` WHERE protocol = \"nfs\" AND removed is null"; private static final String FETCH_DISTINCT_HYPERVISORS_IN_ZONE = "SELECT DISTINCT(hypervisor_type) FROM `cloud`.`cluster` where removed is null and data_center_id=?"; private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url,id FROM `cloud`.`image_store` WHERE data_center_id=? AND removed IS NULL LIMIT 1"; diff --git a/systemvm/templateConfig.py b/engine/schema/templateConfig.py similarity index 98% rename from systemvm/templateConfig.py rename to engine/schema/templateConfig.py index 7e3cdf86c3a8..5adbf2106edf 100644 --- a/systemvm/templateConfig.py +++ b/engine/schema/templateConfig.py @@ -53,7 +53,6 @@ def fetchChecksum(checksumData, hypervisor): for line in checksumData: hypervisor = getGenericName(hypervisor) if hypervisor in line: - print(type(line.split(" ")[0])) return str(line.split(" ")[0]) def createMetadataFile(): diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec index 698e274a4e3a..1ab538b22ada 100644 --- a/packaging/centos7/cloud.spec +++ b/packaging/centos7/cloud.spec @@ -300,7 +300,7 @@ touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid # SystemVM template mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -cp -r systemvm/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/md5sum.txt diff --git a/packaging/centos8/cloud.spec b/packaging/centos8/cloud.spec index abedbc45cffa..46c98b9048fb 100644 --- a/packaging/centos8/cloud.spec +++ b/packaging/centos8/cloud.spec @@ -293,7 +293,7 @@ touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid # SystemVM template mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -cp -r systemvm/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/md5sum.txt # UI diff --git a/systemvm/pom.xml b/systemvm/pom.xml index 4bd317c1af37..0c5406796ce4 100644 --- a/systemvm/pom.xml +++ b/systemvm/pom.xml @@ -163,44 +163,6 @@ - - com.googlecode.maven-download-plugin - download-maven-plugin - 1.6.3 - - - download-checksums - - wget - - - https://download.cloudstack.org/systemvm/4.16/md5sum.txt - ${basedir}/dist/systemvm-templates/ - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - systemvm-template-metadata - package - - exec - - - ${basedir}/ - python3 - - templateConfig.py - - - - - @@ -236,55 +198,6 @@ - - template-create - - - noredist - - - - - - com.googlecode.maven-download-plugin - download-maven-plugin - 1.6.3 - - - download-kvm-template - - wget - - - https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2 - ${basedir}/dist/systemvm-templates/ - - - - download-vmware-template - - wget - - - 
https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova - ${basedir}/dist/systemvm-templates/ - - - - download-xenserver-template - - wget - - - https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2 - ${basedir}/dist/systemvm-templates/ - - - - - - - quickcloud From 820192f39ebf41d722d7bac92118ad39b5d586d9 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 4 Aug 2021 11:06:31 +0530 Subject: [PATCH 072/117] Port python script to bash - to avoid package installations --- engine/schema/pom.xml | 5 +- .../upgrade/SystemVmTemplateRegistration.java | 2 +- engine/schema/templateConfig.py | 101 ------------------ engine/schema/templateConfig.sh | 84 +++++++++++++++ packaging/build-deb.sh | 3 - packaging/package.sh | 3 - 6 files changed, 88 insertions(+), 110 deletions(-) delete mode 100644 engine/schema/templateConfig.py create mode 100644 engine/schema/templateConfig.sh diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index 6f99d9e5b846..5f0729b2481b 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -90,9 +90,10 @@ ${basedir}/ - python3 + bash - templateConfig.py + templateConfig.sh + ${project.version} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 0c71a8044c53..3a0e90c15731 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -287,7 +287,7 @@ private static String calculateChecksum(MessageDigest digest, File file) { } return sb.toString(); } catch (IOException e) { - String errMsg = String.format("Failed to calculate Checksum of template file: %s ", file.getName()); + String errMsg = String.format("Failed to calculate Checksum of template file: %s due to: %s ", file.getName(), e); LOGGER.error(errMsg); throw new CloudRuntimeException(errMsg); } diff --git a/engine/schema/templateConfig.py b/engine/schema/templateConfig.py deleted file mode 100644 index 5adbf2106edf..000000000000 --- a/engine/schema/templateConfig.py +++ /dev/null @@ -1,101 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -import os -from lxml import etree -import backports.configparser as configparser - -ns = {"ns" : "http://maven.apache.org/POM/4.0.0"} -doc=etree.parse("./pom.xml") - -def getCloudstackVersion(): - table = doc.xpath('.//ns:parent', namespaces=ns) - version="" - try: - for df in table: - versionTag = df.find('.//ns:version', ns) - if versionTag is not None: - version = versionTag.text - break - splitVersion=version.split("-SNAPSHOT")[0].split('.') - major='.'.join(splitVersion[0:2]) - minor=splitVersion[2] - return major,minor - - except Exception as e: - raise Exception("Failed to fetch cloudstack version") - -def getGenericName(hypervisor): - if hypervisor.lower() == "ovm3": - return "ovm" - if hypervisor.lower() == "lxc": - return "kvm" - if hypervisor.lower() == "xenserver": - return "xen" - else: - return hypervisor - -def fetchChecksum(checksumData, hypervisor): - for line in checksumData: - hypervisor = getGenericName(hypervisor) - if hypervisor in line: - return str(line.split(" ")[0]) - -def createMetadataFile(): - write_config = configparser.ConfigParser() - with open(sourceFile, "r") as md5sumFile: - checksumData = md5sumFile.readlines() - - for hypervisor in templates.keys(): - write_config.add_section(hypervisor) - write_config.set(hypervisor, "templatename", "systemvm-{0}-{1}.{2}".format(hypervisor, CS_MAJOR_VERSION, CS_MINOR_VERSION)) - checksum=fetchChecksum(checksumData, hypervisor) - write_config.set(hypervisor, "checksum", str(checksum)) - downloadUrl=templates.get(hypervisor).format(CS_MAJOR_VERSION, CS_MINOR_VERSION) - write_config.set(hypervisor, "downloadurl", str(downloadUrl)) - write_config.set(hypervisor, "filename", str(downloadUrl.split('/')[-1])) - - cfgfile = open(metadataFile, 'w') - write_config.write(cfgfile) - cfgfile.close() - - -CS_MAJOR_VERSION,CS_MINOR_VERSION=getCloudstackVersion() -templates = { - "kvm": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "vmware": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-vmware.ova" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "xenserver": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-xen.vhd.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "hyperv": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-hyperv.vhd.zip" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "lxc": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-kvm.qcow2.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), - "ovm3": "https://download.cloudstack.org/systemvm/{0}/systemvmtemplate-{0}.{1}-ovm.raw.bz2" - .format(CS_MAJOR_VERSION, CS_MINOR_VERSION), -} - -parentPath = os.path.dirname(os.path.abspath(__file__)) + '/dist/systemvm-templates/' -if not os.path.exists(parentPath): - os.makedirs(parentPath) -metadataFile = parentPath + 'metadata.ini' -sourceFile = parentPath + 'md5sum.txt' - -createMetadataFile() - - diff --git a/engine/schema/templateConfig.sh b/engine/schema/templateConfig.sh new file mode 100644 index 000000000000..686804435515 --- /dev/null +++ b/engine/schema/templateConfig.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +function getTemplateVersion() { + projVersion=$1 + version="$(cut -d'-' -f1 <<<"$projVersion")" + subversion1="$(cut -d'.' -f1 <<<"$version")" + subversion2="$(cut -d'.' -f2 <<<"$version")" + minorversion="$(cut -d'.' -f3 <<<"$version")" + export CS_VERSION="${subversion1}"."${subversion2}" + export CS_MINOR_VERSION="${minorversion}" +} + +function getGenericName() { + hypervisor=$(echo "$1" | tr "[:upper:]" "[:lower:]") + if [[ "$hypervisor" == "ovm3" ]]; then + echo "ovm" + elif [[ "$hypervisor" == "lxc" ]]; then + echo "kvm" + elif [[ "$hypervisor" == "xenserver" ]]; then + echo "xen" + else + echo "$hypervisor" + fi +} + +function getChecksum() { + local fileData="$1" + local hvName=$2 + while IFS= read -r line; do + if [[ $line == *"$hvName"* ]]; then + echo "$(cut -d' ' -f1 <<<"$line")" + fi + done <<< "$fileData" +} + +function createMetadataFile() { + local fileData=$(cat $SOURCEFILE) + for i in "${!templates[@]}" + do + section="$i" + hvName=$(getGenericName $i) + templatename="systemvm-${i}-${CS_VERSION}" + checksum=$(getChecksum "$fileData" $hvName) + downloadurl="${templates[$i]}" + filename=$(echo ${downloadurl##*'/'}) + echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\n" >> $METADATAFILE + done + + +} + +declare -A templates +getTemplateVersion $1 +templates=( ["kvm"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2" + ["vmware"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova" + ["xenserver"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2" + ["hyperv"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip" + ["lxc"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2" + ["ovm3"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" ) + + +PARENTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/dist/systemvm-templates/" +mkdir -p $PARENTPATH +METADATAFILE=${PARENTPATH}"metadata.ini" +echo > $METADATAFILE +SOURCEFILE=${PARENTPATH}'md5sum.txt' +createMetadataFile diff --git a/packaging/build-deb.sh b/packaging/build-deb.sh index 9ff2f2cca24d..3ec3ee12ff64 100755 --- a/packaging/build-deb.sh +++ b/packaging/build-deb.sh @@ -155,9 +155,6 @@ fi dch -b -v "${VERSION}~${DISTCODE}" -u low -m "Apache CloudStack Release ${VERSION}" sed -i '0,/ UNRELEASED;/s// unstable;/g' debian/changelog -apt-get install -y wget python3-lxml python3-pip -pip3 install configparser - dpkg-checkbuilddeps dpkg-buildpackage -uc -us -b diff --git a/packaging/package.sh b/packaging/package.sh index ffc452fc9e9f..380908be7ba7 100755 --- a/packaging/package.sh +++ 
b/packaging/package.sh @@ -79,9 +79,6 @@ function packaging() { DISTRO=$3 - yum install -y python3-lxml - pip3 install --user configparser - MVN=$(which mvn) if [ -z "$MVN" ] ; then MVN=$(locate bin/mvn | grep -e mvn$ | tail -1) From 324bc73983bdf6b57240ce840a9eef0afb070a21 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 5 Aug 2021 19:44:01 +0530 Subject: [PATCH 073/117] add logic to get code version --- .../main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java | 2 ++ .../java/com/cloud/upgrade/SystemVmTemplateRegistration.java | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index 5a6055df6d3a..97f75e70c8f1 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -365,6 +365,8 @@ public void check() { } final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue); + SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(currentVersion.getMajorRelease()) + "." + String.valueOf(currentVersion.getMinorRelease()); + SystemVmTemplateRegistration.CS_MINOR_VERSION = String.valueOf(currentVersion.getPatchRelease()); s_logger.info("DB version = " + dbVersion + " Code Version = " + currentVersion); if (dbVersion.compareTo(currentVersion) > 0) { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 3a0e90c15731..6a105ba5341f 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -75,8 +75,8 @@ public class SystemVmTemplateRegistration { private static final String UPDATE_CONFIGURATION_TABLE = "UPDATE `cloud`.`configuration` SET value = ? 
WHERE name = ?"; private static final String UPDATE_TEMPLATE_TABLE_ON_FAILURE = "UPDATE vm_template set removed = ?, state = 'Inactive' where id = ?"; private static final String DELETE_TEMPLATE_REF_RECORD_ON_FAILURE = "DELETE from template_store_ref where template_id = ?"; - public static final String CS_MAJOR_VERSION = "4.16"; - public static final String CS_MINOR_VERSION = "0"; + public static String CS_MAJOR_VERSION = "4.16"; + public static String CS_MINOR_VERSION = "0"; private static class SystemVMTemplateDetails { From 433db66a7722d5785f7b76a9a968b2348c0ea5f9 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 6 Aug 2021 12:37:14 +0530 Subject: [PATCH 074/117] update naming convention + identify store to be used on a per zone basis --- .../java/com/cloud/upgrade/DatabaseUpgradeChecker.java | 2 +- .../cloud/upgrade/SystemVmTemplateRegistration.java | 10 +++++----- .../com/cloud/upgrade/dao/Upgrade41510to41600.java | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index 97f75e70c8f1..684342678e07 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -366,7 +366,7 @@ public void check() { final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue); SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(currentVersion.getMajorRelease()) + "." + String.valueOf(currentVersion.getMinorRelease()); - SystemVmTemplateRegistration.CS_MINOR_VERSION = String.valueOf(currentVersion.getPatchRelease()); + SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(currentVersion.getPatchRelease()); s_logger.info("DB version = " + dbVersion + " Code Version = " + currentVersion); if (dbVersion.compareTo(currentVersion) > 0) { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 6a105ba5341f..8930416c2550 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -76,7 +76,7 @@ public class SystemVmTemplateRegistration { private static final String UPDATE_TEMPLATE_TABLE_ON_FAILURE = "UPDATE vm_template set removed = ?, state = 'Inactive' where id = ?"; private static final String DELETE_TEMPLATE_REF_RECORD_ON_FAILURE = "DELETE from template_store_ref where template_id = ?"; public static String CS_MAJOR_VERSION = "4.16"; - public static String CS_MINOR_VERSION = "0"; + public static String CS_TINY_VERSION = "0"; private static class SystemVMTemplateDetails { @@ -630,11 +630,10 @@ private static void unmountStore() { throw new CloudRuntimeException(msg); } } - public static void registerTemplate(Connection conn, Pair hypervisorAndTemplateName, Long zoneId) { + public static void registerTemplate(Connection conn, Pair hypervisorAndTemplateName, Pair storeUrlAndId) { Long templateId = null; try { Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); - Pair storeUrlAndId = getNfsStoreInZone(conn, zoneId); mountStore(storeUrlAndId.first()); final String templateName = UUID.randomUUID().toString(); Date created = new Date(DateUtil.currentGMTTime().getTime()); @@ -671,7 +670,7 @@ public static void 
registerTemplate(Connection conn, Pair configParams = new HashMap<>(); configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); - configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." + CS_MINOR_VERSION); + configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." + CS_TINY_VERSION); updateConfigurationParams(conn, configParams); updateSystemVMEntries(conn, templateId, hypervisorAndTemplateName); } catch (Exception e) { @@ -750,6 +749,7 @@ public static void registerTemplates(Connection conn, Set zoneIds = getEligibleZoneIds(conn); for (Long zoneId : zoneIds) { + Pair storeUrlAndId = getNfsStoreInZone(conn, zoneId); List hypervisorList = fetchAllHypervisors(conn, zoneId); for (String hypervisor : hypervisorList) { Hypervisor.HypervisorType name = Hypervisor.HypervisorType.getType(hypervisor); @@ -759,7 +759,7 @@ public static void registerTemplates(Connection conn, Set configParams = new HashMap<>(); configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()), hypervisorAndTemplateName.getValue()); - configParams.put("minreq.sysvmtemplate.version", SystemVmTemplateRegistration.CS_MAJOR_VERSION + "." + SystemVmTemplateRegistration.CS_MINOR_VERSION); + configParams.put("minreq.sysvmtemplate.version", SystemVmTemplateRegistration.CS_MAJOR_VERSION + "." + SystemVmTemplateRegistration.CS_TINY_VERSION); SystemVmTemplateRegistration.updateConfigurationParams(conn, configParams); } else { From 33b4293a6e8571d30a83c73b8dccba4a64f055cf Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 6 Aug 2021 13:26:51 +0530 Subject: [PATCH 075/117] add timeout constant --- .../java/com/cloud/upgrade/SystemVmTemplateRegistration.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 4dc0ca876902..4566bebbc153 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -75,6 +75,7 @@ public class SystemVmTemplateRegistration { private static final String UPDATE_CONFIGURATION_TABLE = "UPDATE `cloud`.`configuration` SET value = ? 
WHERE name = ?"; private static final String UPDATE_TEMPLATE_TABLE_ON_FAILURE = "UPDATE vm_template set removed = ?, state = 'Inactive' where id = ?"; private static final String DELETE_TEMPLATE_REF_RECORD_ON_FAILURE = "DELETE from template_store_ref where template_id = ?"; + private static final Integer SCRIPT_TIMEOUT = 1800000; public static String CS_MAJOR_VERSION = "4.16"; public static String CS_TINY_VERSION = "0"; @@ -652,7 +653,7 @@ public static void registerTemplate(Connection conn, Pair Date: Fri, 6 Aug 2021 15:26:34 +0530 Subject: [PATCH 076/117] modify script permission --- scripts/storage/secondary/setup-sysvm-tmplt | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 scripts/storage/secondary/setup-sysvm-tmplt diff --git a/scripts/storage/secondary/setup-sysvm-tmplt b/scripts/storage/secondary/setup-sysvm-tmplt old mode 100644 new mode 100755 From 0378e1d752ff7ee794e6d30629d32a28a7e6e1e1 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 6 Aug 2021 17:38:41 +0530 Subject: [PATCH 077/117] cleanup + prevent hardcoded ssh location - as ubuntu places it at /var/lib/cloudstack/mgmt/.ssh vs /var/cloud. --- .../com/cloud/hypervisor/vmware/resource/VmwareResource.java | 3 ++- .../main/java/com/cloud/server/ConfigurationServerImpl.java | 2 -- .../apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java | 3 ++- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 7d4e167a73a2..f13fb4d77cc2 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -422,7 +422,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected static File s_systemVmKeyFile = null; private static final Object s_syncLockObjectFetchKeyFile = new Object(); - protected static final String s_defaultPathSystemVmKeyFile = "/var/cloudstack/management/.ssh/id_rsa"; + private static final String homeDir = System.getProperty("user.home"); + protected static final String s_defaultPathSystemVmKeyFile = homeDir + "/.ssh/id_rsa"; public Gson getGson() { return _gson; diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index bc5595dbd42d..c00ed1d9a6e9 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -757,8 +757,6 @@ protected void copyPrivateKeyToHosts(String publicKeyPath, String privKeyPath) { } if (isOnWindows()) { scriptPath = scriptPath.replaceAll("\\\\" ,"/" ); - //systemVmIsoPath = systemVmIsoPath.replaceAll("\\\\" ,"/" ); - //publicKeyPath = publicKeyPath.replaceAll("\\\\" ,"/" ); privKeyPath = privKeyPath.replaceAll("\\\\" ,"/" ); } diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java index 0184a44ff390..483f9aef7ef7 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java @@ -341,7 +341,8 @@ private Pair 
copyToSecondaryStorageVMware(final DataStore store boolean existsInSecondaryStore = dataDirectory.exists() || dataDirectory.mkdir(); if (existsInSecondaryStore) { // scp from system VM to mounted sec storage directory - File permKey = new File("/var/cloudstack/management/.ssh/id_rsa"); + String homeDir = System.getProperty("user.home"); + File permKey = new File(homeDir + "/.ssh/id_rsa"); SshHelper.scpFrom(vmSshIp, 3922, "root", permKey, dataDirectoryInSecondaryStore, diagnosticsFile); } From f1b9baf81a0227ca474449558a16f13b6af7b8c6 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 6 Aug 2021 22:48:18 +0530 Subject: [PATCH 078/117] fix failing test - delete k8s cluster --- .../smoke/test_kubernetes_clusters.py | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 71419f8e4d2d..5ddba1873986 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -279,45 +279,46 @@ def stopKubernetesCluster(cls, cluster_id): return response - @classmethod - def deleteKubernetesClusterAndVerify(cls, cluster_id, verify = True, forced = False): + + def deleteKubernetesClusterAndVerify(self, cluster_id, verify = True, forced = False): """Delete Kubernetes cluster and check if it is really deleted""" + delete_response = {} forceDeleted = False try: - delete_response = cls.deleteKubernetesCluster(cluster_id) + delete_response = self.deleteKubernetesCluster(cluster_id) except Exception as e: if forced: - cluster = cls.listKubernetesCluster(cluster_id) + cluster = self.listKubernetesCluster(cluster_id) if cluster != None: if cluster.state in ['Starting', 'Running', 'Upgrading', 'Scaling']: - cls.stopKubernetesCluster(cluster_id) - cls.deleteKubernetesCluster(cluster_id) + self.stopKubernetesCluster(cluster_id) + self.deleteKubernetesCluster(cluster_id) else: forceDeleted = True for cluster_vm in cluster.virtualmachines: cmd = destroyVirtualMachine.destroyVirtualMachineCmd() cmd.id = cluster_vm.id cmd.expunge = True - cls.apiclient.destroyVirtualMachine(cmd) + self.apiclient.destroyVirtualMachine(cmd) cmd = deleteNetwork.deleteNetworkCmd() cmd.id = cluster.networkid cmd.forced = True - cls.apiclient.deleteNetwork(cmd) - cls.dbclient.execute("update kubernetes_cluster set state='Destroyed', removed=now() where uuid = '%s';" % cluster.id) + self.apiclient.deleteNetwork(cmd) + self.dbclient.execute("update kubernetes_cluster set state='Destroyed', removed=now() where uuid = '%s';" % cluster.id) else: raise Exception("Error: Exception during delete cluster : %s" % e) if verify == True and forceDeleted == False: - cls.assertEqual( + self.assertEqual( delete_response.success, True, "Check KubernetesCluster delete response {}, {}".format(delete_response.success, True) ) - db_cluster_removed = cls.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0] + db_cluster_removed = self.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0] - cls.assertNotEqual( + self.assertNotEqual( db_cluster_removed, None, "KubernetesCluster not removed in DB, {}".format(db_cluster_removed) From 6872512fb443d4b252753454b06bce2d3c2ce1ad Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 9 Aug 2021 11:13:50 +0530 Subject: [PATCH 079/117] fix test --- test/integration/smoke/test_kubernetes_clusters.py | 13 +++++++------ 1 file changed, 7 
insertions(+), 6 deletions(-) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 5ddba1873986..aedebc5f0786 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -61,11 +61,11 @@ class TestKubernetesCluster(cloudstackTestCase): @classmethod def setUpClass(cls): - cls.testClient = super(TestKubernetesCluster, cls).getClsTestClient() - cls.apiclient = cls.testClient.getApiClient() - cls.services = cls.testClient.getParsedTestDataConfig() - cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) - cls.hypervisor = cls.testClient.getHypervisorInfo() + testClient = super(TestKubernetesCluster, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + cls.hypervisor = testClient.getHypervisorInfo() cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ cls.hypervisorNotSupported = False @@ -129,7 +129,8 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): if k8s_cluster != None and k8s_cluster.id != None: - cls.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) + clsObj = TestKubernetesCluster() + clsObj.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) version_delete_failed = False # Delete added Kubernetes supported version From 226879fe4ffa8ca5f5d09f9fcf59dc400a83849a Mon Sep 17 00:00:00 2001 From: davidjumani Date: Mon, 9 Aug 2021 15:59:02 +0530 Subject: [PATCH 080/117] Fix checkstyle --- .../cluster/KubernetesClusterManagerImpl.java | 26 ------------------- ...esClusterResourceModifierActionWorker.java | 2 +- .../KubernetesClusterStartWorker.java | 12 --------- .../management/MockAccountManager.java | 6 ----- 4 files changed, 1 insertion(+), 45 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 67fe62c0ac75..f169d58b25c6 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -33,7 +33,6 @@ import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.UUID; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -201,8 +200,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne @Inject protected TemplateJoinDao templateJoinDao; @Inject - protected UserDao userDao; - @Inject protected AccountService accountService; @Inject protected AccountManager accountManager; @@ -1275,29 +1272,6 @@ public KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesC return response; } - private String[] getServiceUserKeys(Account owner) { - if (owner == null) { - owner = CallContext.current().getCallingAccount(); - } - String username = owner.getAccountName() + "-" + KUBEADMIN_ACCOUNT_NAME; - UserAccount kubeadmin = accountService.getActiveUserAccount(username, owner.getDomainId()); - String[] keys = null; - if (kubeadmin == null) { - User kube = userDao.persist(new UserVO(owner.getAccountId(), username, UUID.randomUUID().toString(), owner.getAccountName(), 
- KUBEADMIN_ACCOUNT_NAME, "kubeadmin", null, UUID.randomUUID().toString(), User.Source.UNKNOWN)); - keys = accountService.createApiKeyAndSecretKey(kube.getId()); - } else { - String apiKey = kubeadmin.getApiKey(); - String secretKey = kubeadmin.getSecretKey(); - if (Strings.isNullOrEmpty(apiKey) || Strings.isNullOrEmpty(secretKey)) { - keys = accountService.createApiKeyAndSecretKey(kubeadmin.getId()); - } else { - keys = new String[]{apiKey, secretKey}; - } - } - return keys; - } - @Override public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws CloudRuntimeException { if (!KubernetesServiceEnabled.value()) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index e7e47812991a..595aab939994 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -623,7 +623,7 @@ protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { if (!result.first()) { logMessage(Level.INFO, "Autoscaling files missing. Adding them now", null); retrieveScriptFiles(); - copyAutoscalerScripts(publicIpAddress, sshPort); + copyScripts(publicIpAddress, sshPort); if (!createCloudStackSecret(keys)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index e246a774833c..1fc41ee0dcd5 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -587,18 +587,6 @@ public boolean startKubernetesClusterOnCreate() { return true; } - private void copyAutoscalerScriptsToNodes(String publicIpAddress, int sshPort, List clusterVMs) { - for (int i = 0; i < clusterVMs.size(); ++i) { - try { - // Check for shared networks - int port = (sshPort == CLUSTER_NODES_DEFAULT_START_SSH_PORT) ? 
sshPort + i : sshPort; - copyAutoscalerScripts(publicIpAddress, port); - } catch (Exception e) { - throw new CloudRuntimeException(e); - } - } - } - public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { init(); if (LOGGER.isInfoEnabled()) { diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java index 55709041d6c2..5cd90c930089 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java @@ -457,12 +457,6 @@ public Map getKeys(GetUserKeysCmd cmd){ return null; } - - @Override - public Map getKeys(Long userId) { - return null; - } - @Override public Map getKeys(Long userId) { return null; From 26c2fa7bd305bd5f2dbf5b42c5cd7855d6b31054 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 9 Aug 2021 09:20:43 +0530 Subject: [PATCH 081/117] register sysvm template on addition of 1st store in a zone --- .../cloud/upgrade/DatabaseUpgradeChecker.java | 1 + .../upgrade/SystemVmTemplateRegistration.java | 31 +-------------- .../upgrade/dao/Upgrade41510to41600.java | 1 - .../storage/datastore/db/ImageStoreDao.java | 3 ++ .../datastore/db/ImageStoreDaoImpl.java | 9 +++++ .../com/cloud/storage/StorageManagerImpl.java | 38 +++++++++++++++++++ 6 files changed, 53 insertions(+), 30 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index 2f855bc97c25..d96200983e0a 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -364,6 +364,7 @@ public void check() { return; } + SystemVmTemplateRegistration.parseMetadataFile(); final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue); SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(currentVersion.getMajorRelease()) + "." + String.valueOf(currentVersion.getMinorRelease()); SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(currentVersion.getPatchRelease()); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 4566bebbc153..0b56bbc635ff 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -293,7 +293,7 @@ private static String calculateChecksum(MessageDigest digest, File file) { } } - static long isTemplateAlreadyRegistered(Connection conn, Pair hypervisorAndTemplateName) { + public static long isTemplateAlreadyRegistered(Connection conn, Pair hypervisorAndTemplateName) { long templateId = -1; try { PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? 
and removed is null order by id desc limit 1"); @@ -413,33 +413,6 @@ public static void mountStore(String storeUrl) { } } - private static String getTemplateFolderName(Connection conn) { - Long templateId = null; - try { - PreparedStatement pstmt = conn.prepareStatement(FETCH_FOLDER_NAME); - if(pstmt != null) { - ResultSet resultSet = pstmt.executeQuery(); - while (resultSet.next()) { - templateId = resultSet.getLong(1); - } - } - templateId += 1L; - } catch (SQLException e) { - String errMsg = "Failed to get folder name"; - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg); - } - return String.valueOf(templateId); - } - - private static String getTemplateFolder(Connection conn) { - String folderName = getTemplateFolderName(conn); - if (folderName != null || !folderName.equals(0)) { - return folderName; - } else { - return "202"; - } - } private static List fetchAllHypervisors(Connection conn, Long zoneId) { List hypervisorList = new ArrayList<>(); @@ -619,7 +592,7 @@ private static void updateTemplateTablesOnFailure(Connection conn, long templat } } - private static void unmountStore() { + public static void unmountStore() { try { LOGGER.info("Unmounting store"); String umountCmd = String.format(umountCommand, TEMPORARY_SECONDARY_STORE); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index a2e18d21e9f8..1dfc6a3ed4f3 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -73,7 +73,6 @@ public void performDataMigration(Connection conn) { @SuppressWarnings("serial") public void updateSystemVmTemplates(final Connection conn) { LOG.debug("Updating System Vm template IDs"); - SystemVmTemplateRegistration.parseMetadataFile(); final Set hypervisorsListInUse = new HashSet(); try (PreparedStatement pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java index 71609a982ca8..7182aef0ad7f 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java @@ -20,6 +20,7 @@ import java.util.List; +import com.cloud.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import com.cloud.utils.db.GenericDao; @@ -42,4 +43,6 @@ public interface ImageStoreDao extends GenericDao { List listImageCacheStores(); List listStoresByZoneId(long zoneId); + + List listAllStoresInZone(long zoneId, String provider, DataStoreRole role); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java index 44ae96180774..789667073d99 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java @@ -140,4 +140,13 @@ public List listStoresByZoneId(long zoneId) { sc.addAnd("dcId", SearchCriteria.Op.EQ, zoneId); return listBy(sc); } 
+ + @Override + public List listAllStoresInZone(long zoneId, String provider, DataStoreRole role) { + SearchCriteria sc = createSearchCriteria(); + sc.setParameters("data_center_id", zoneId); + sc.setParameters("role", role); + sc.setParameters("image_provider_name", provider); + return listBy(sc); + } } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index d75f83357875..fcae969a21fe 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -22,8 +22,10 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; +import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -40,11 +42,13 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import javax.inject.Inject; import com.cloud.agent.api.GetStoragePoolCapabilitiesAnswer; import com.cloud.agent.api.GetStoragePoolCapabilitiesCommand; +import com.cloud.upgrade.SystemVmTemplateRegistration; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; @@ -2722,6 +2726,40 @@ public ImageStore discoverImageStore(String name, String url, String providerNam // populate template_store_ref table _imageSrv.addSystemVMTemplatesToSecondary(store); _imageSrv.handleTemplateSync(store); + if (providerName.equals(DataStoreProvider.NFS_IMAGE) && zoneId != null) { + List stores = _imageStoreDao.listAllStoresInZone(zoneId, providerName, DataStoreRole.Image); + stores = stores.stream().filter(str -> str.getId() != store.getId()).collect(Collectors.toList()); + // Check if it's the only/first store in the zone + if (stores.size() == 0) { + List hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId); + Set hypSet = new HashSet(hypervisorTypes); + TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); + Connection conn; + try { + conn = txn.getConnection(); + Pair storeUrlAndId = new Pair<>(url, store.getId()); + for (HypervisorType hypervisorType : hypSet) { + try { + Pair hypervisorAndTemplateName = + new Pair<>(hypervisorType, SystemVmTemplateRegistration.NewTemplateNameList.get(hypervisorType)); + long templateId = SystemVmTemplateRegistration.isTemplateAlreadyRegistered(conn, hypervisorAndTemplateName); + if (templateId != -1) { + continue; + } + SystemVmTemplateRegistration.registerTemplate(conn, hypervisorAndTemplateName, storeUrlAndId); + } catch (CloudRuntimeException e) { + s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name())); + } + } + } catch (SQLException e) { + s_logger.error("Failed to register systemVM template(s)"); + } finally { + SystemVmTemplateRegistration.unmountStore(); + txn.commit(); + txn.close(); + } + } + } } // associate builtin template with zones associated with this image store From b9825010a9f1ab0dee9827442250809e2a79200d Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 9 Aug 2021 16:26:34 +0530 Subject: [PATCH 082/117] validate if template is registered/seeded in a fresh env and bypass 
registration on storage addition --- .../upgrade/SystemVmTemplateRegistration.java | 20 +++++++++++ .../com/cloud/storage/StorageManagerImpl.java | 34 +++++++++++++++++-- 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 0b56bbc635ff..d479d16f24b9 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -266,6 +266,26 @@ public static enum TemplateFormat{ } }; + public static boolean validateIfSeeded(String url, String path) { + try { + mountStore(url); + int lastIdx = path.lastIndexOf('/'); + String partialDirPath = path.substring(0, lastIdx); + String templatePath = TEMPORARY_SECONDARY_STORE + "/" + partialDirPath; + File templateProps = new File(templatePath + "/template.properties"); + if (templateProps.exists()) { + LOGGER.info("SystemVM template already seeded, skipping registration"); + return true; + } + LOGGER.info("SystemVM template not seeded"); + return false; + } catch (Exception e) { + throw new CloudRuntimeException("Failed to verify if the template is seeded"); + } finally { + unmountStore(); + } + } + private static String calculateChecksum(MessageDigest digest, File file) { try { FileInputStream fis = new FileInputStream(file); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index fcae969a21fe..b6f084170ace 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -48,6 +48,7 @@ import com.cloud.agent.api.GetStoragePoolCapabilitiesAnswer; import com.cloud.agent.api.GetStoragePoolCapabilitiesCommand; +import com.cloud.network.router.VirtualNetworkApplianceManager; import com.cloud.upgrade.SystemVmTemplateRegistration; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; @@ -2642,6 +2643,29 @@ public String getName() { return null; } + private String getValidTemplateName(Long zoneId, HypervisorType hType) { + String templateName = null; + switch (hType) { + case XenServer: + templateName = VirtualNetworkApplianceManager.RouterTemplateXen.valueIn(zoneId); + break; + case KVM: + templateName = VirtualNetworkApplianceManager.RouterTemplateKvm.valueIn(zoneId); + break; + case VMware: + templateName = VirtualNetworkApplianceManager.RouterTemplateVmware.valueIn(zoneId); + break; + case Hyperv: + templateName = VirtualNetworkApplianceManager.RouterTemplateHyperV.valueIn(zoneId); + break; + case LXC: + templateName = VirtualNetworkApplianceManager.RouterTemplateLxc.valueIn(zoneId); + break; + default: + break; + } + return templateName; + } @Override public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName); @@ -2740,11 +2764,17 @@ public ImageStore discoverImageStore(String name, String url, String providerNam Pair storeUrlAndId = new Pair<>(url, store.getId()); for (HypervisorType hypervisorType : hypSet) { try { + String templateName = getValidTemplateName(zoneId, hypervisorType); Pair 
hypervisorAndTemplateName = - new Pair<>(hypervisorType, SystemVmTemplateRegistration.NewTemplateNameList.get(hypervisorType)); + new Pair<>(hypervisorType, templateName); long templateId = SystemVmTemplateRegistration.isTemplateAlreadyRegistered(conn, hypervisorAndTemplateName); if (templateId != -1) { - continue; + TemplateDataStoreVO templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); + if (templateVO != null) { + if (SystemVmTemplateRegistration.validateIfSeeded(url, templateVO.getInstallPath())) { + continue; + } + } } SystemVmTemplateRegistration.registerTemplate(conn, hypervisorAndTemplateName, storeUrlAndId); } catch (CloudRuntimeException e) { From fde018508ec380791b0893971bdd274ee5742a88 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 10 Aug 2021 09:35:08 +0530 Subject: [PATCH 083/117] remove unnecessary commit --- server/src/main/java/com/cloud/storage/StorageManagerImpl.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index b6f084170ace..475e098e4491 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -2785,7 +2785,6 @@ public ImageStore discoverImageStore(String name, String url, String providerNam s_logger.error("Failed to register systemVM template(s)"); } finally { SystemVmTemplateRegistration.unmountStore(); - txn.commit(); txn.close(); } } From c249c92e8b06c8629ec7d091d309d96629e9d3bb Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 10 Aug 2021 12:38:23 +0530 Subject: [PATCH 084/117] Fix setup-kube-system --- .../src/main/resources/conf/k8s-control-node-add.yml | 1 + .../src/main/resources/conf/k8s-control-node.yml | 1 + .../kubernetes-service/src/main/resources/conf/k8s-node.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml index 9f8b88a7177f..bea272b691fa 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml @@ -132,6 +132,7 @@ write_files: if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then mkdir -p /opt/autoscaler cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + fi if [ -e "${BINARIES_DIR}/provider.yaml" ]; then mkdir -p /opt/provider cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml index 64c23c98e37e..df742231a436 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml @@ -154,6 +154,7 @@ write_files: if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then mkdir -p /opt/autoscaler cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + fi if [ -e "${BINARIES_DIR}/provider.yaml" ]; then mkdir -p /opt/provider cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml 
b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml index fcfea3fbf1a7..97e4160d414f 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml @@ -132,6 +132,7 @@ write_files: if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then mkdir -p /opt/autoscaler cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + fi if [ -e "${BINARIES_DIR}/provider.yaml" ]; then mkdir -p /opt/provider cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml From 48b8552a864eac69f9d8b1160bc3db06a3e82d8e Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 10 Aug 2021 13:59:53 +0530 Subject: [PATCH 085/117] Add kubectl path in deployprovider --- .../src/main/resources/script/deploy-provider | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-provider b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-provider index e707b5991e0e..ce71e21072b6 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-provider +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-provider @@ -16,7 +16,7 @@ # specific language governing permissions and limitations # under the License. -(kubectl get pods -A | grep cloud-controller-manager) && exit 0 +(/opt/bin/kubectl get pods -A | grep cloud-controller-manager) && exit 0 if [ -e /opt/provider/provider.yaml ]; then /opt/bin/kubectl apply -f /opt/provider/provider.yaml From 6d37af84e8e0f03303504f92492a2bc3d433a9fc Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 10 Aug 2021 14:49:49 +0530 Subject: [PATCH 086/117] upgraded sysvm template + temporary changes wrt sysvm url path --- engine/schema/pom.xml | 12 +++++++---- engine/schema/templateConfig.sh | 20 +++++++++++++------ .../systemvmtemplate/http/preseed.cfg | 2 +- .../scripts/install_systemvm_packages.sh | 4 +++- .../appliance/systemvmtemplate/template.json | 4 ++-- 5 files changed, 28 insertions(+), 14 deletions(-) diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index 5f0729b2481b..ff371ae08a8e 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -71,7 +71,8 @@ wget - https://download.cloudstack.org/systemvm/4.16/md5sum.txt + + http://10.0.3.122/systemvmtemplate/custom/cks-debian/4.16/md5sum.txt ${basedir}/dist/systemvm-templates/ @@ -122,7 +123,8 @@ wget - https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2 + + http://10.0.3.122/systemvmtemplate/custom/cks-debian/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2 ${basedir}/dist/systemvm-templates/ @@ -132,7 +134,8 @@ wget - https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova + + http://10.0.3.122/systemvmtemplate/custom/cks-debian/4.16/systemvmtemplate-4.16.0-vmware.ova ${basedir}/dist/systemvm-templates/ @@ -142,7 +145,8 @@ wget - https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2 + + http://10.0.3.122/systemvmtemplate/custom/cks-debian/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2 ${basedir}/dist/systemvm-templates/ diff --git a/engine/schema/templateConfig.sh b/engine/schema/templateConfig.sh index 686804435515..668a92e6f6eb 100644 --- a/engine/schema/templateConfig.sh +++ b/engine/schema/templateConfig.sh @@ -68,12 +68,20 @@ function createMetadataFile() { declare -A templates getTemplateVersion $1 -templates=( 
["kvm"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2" - ["vmware"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova" - ["xenserver"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2" - ["hyperv"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip" - ["lxc"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2" - ["ovm3"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" ) +# TODO: Update correct url post testing +#templates=( ["kvm"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2" +# ["vmware"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova" +# ["xenserver"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2" +# ["hyperv"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip" +# ["lxc"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2" +# ["ovm3"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" ) + +templates=( ["kvm"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2" + ["vmware"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova" + ["xenserver"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2" + ["hyperv"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip" + ["lxc"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2" + ["ovm3"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" ) PARENTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/dist/systemvm-templates/" diff --git a/tools/appliance/systemvmtemplate/http/preseed.cfg b/tools/appliance/systemvmtemplate/http/preseed.cfg index 340bab3a3e14..ae71ed5c0638 100644 --- a/tools/appliance/systemvmtemplate/http/preseed.cfg +++ b/tools/appliance/systemvmtemplate/http/preseed.cfg @@ -69,7 +69,7 @@ d-i partman-auto/expert_recipe string \ 256 1000 256 linux-swap \ method{ swap } format{ } \ . 
\ - 2500 40 4000 ext4 \ + 2240 40 4000 ext4 \ method{ format } format{ } \ use_filesystem{ } filesystem{ ext4 } \ mountpoint{ / } \ diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index 0b9c107f1d58..5ce829cf2698 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -77,7 +77,9 @@ function install_packages() { sharutils genisoimage \ strongswan libcharon-extra-plugins libstrongswan-extra-plugins strongswan-charon strongswan-starter \ virt-what open-vm-tools qemu-guest-agent hyperv-daemons cloud-guest-utils \ - apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common cloud-init + apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common + + apt-get install -y python3-json-pointer python3-jsonschema cloud-init apt-get -y autoremove --purge apt-get clean diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index 1e33241cd893..f44a38776720 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -43,8 +43,8 @@ "disk_size": 3000, "disk_interface": "virtio", "net_device": "virtio-net", - "iso_url": "https://cdimage.debian.org/debian-cd/10.9.0/amd64/iso-cd/debian-10.9.0-amd64-netinst.iso", - "iso_checksum": "47d35187b4903e803209959434fb8b65ead3ad2a8f007eef1c3d3284f356ab9955aa7e15e24cb7af6a3859aa66837f5fa2e7441f936496ea447904f7dddfdc20", + "iso_url": "https://cdimage.debian.org/debian-cd/10.10.0/amd64/iso-cd/debian-10.10.0-amd64-netinst.iso", + "iso_checksum": "87b4c9dd135718304a1b3e68423fe1b03ed52eb67f60563ad14a846aeaaecf53c064dae0f128b2633041992bbc8124b68b6767b529d80487754959b38558e637", "iso_checksum_type": "sha512", "output_directory": "../dist", "http_directory": "http", From 8fea654d33ed3c69f468fb602a493d1d9d2ef97d Mon Sep 17 00:00:00 2001 From: davidjumani Date: Wed, 11 Aug 2021 12:01:17 +0530 Subject: [PATCH 087/117] Remove redundant validtion --- .../cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index f169d58b25c6..c59d8f6703e9 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -947,8 +947,6 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd } private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesClusterCmd cmd) { - validateEndpointUrl(); - // Validate parameters validateEndpointUrl(); From 7812024962d92f6f64f340b8daeef1229c99bddb Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 11 Aug 2021 17:43:52 +0530 Subject: [PATCH 088/117] add dependency req for k8s versions > 1.18 + move conf / service file to nonoss repo --- scripts/util/create-kubernetes-binaries-iso.sh | 13 +++++++++++-- .../scripts/install_systemvm_packages.sh | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/scripts/util/create-kubernetes-binaries-iso.sh 
b/scripts/util/create-kubernetes-binaries-iso.sh index 696840dabef0..ee434cdca85c 100755 --- a/scripts/util/create-kubernetes-binaries-iso.sh +++ b/scripts/util/create-kubernetes-binaries-iso.sh @@ -25,6 +25,7 @@ if [ $# -lt 6 ]; then fi RELEASE="v${2}" +VAL="1.18.0" output_dir="${1}" start_dir="$PWD" iso_dir="/tmp/iso" @@ -60,12 +61,20 @@ echo "Downloading kubelet.service ${RELEASE}..." cd "${start_dir}" kubelet_service_file="${working_dir}/kubelet.service" touch "${kubelet_service_file}" -curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} +if [[ `echo "${2} $VAL" | awk '{print ($1 < $2)}'` == 1 ]]; then + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} +else + curl -sSL "https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/cks-deps/cks/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} +fi echo "Downloading 10-kubeadm.conf ${RELEASE}..." kubeadm_conf_file="${working_dir}/10-kubeadm.conf" touch "${kubeadm_conf_file}" -curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} +if [[ `echo "${2} $val" | awk '{print ($1 < $2)}'` == 1 ]]; then + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} +else + curl -sSL "https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/cks-deps/cks/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} +fi NETWORK_CONFIG_URL="${5}" echo "Downloading network config ${NETWORK_CONFIG_URL}" diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index 5ce829cf2698..a5771d301844 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -77,7 +77,7 @@ function install_packages() { sharutils genisoimage \ strongswan libcharon-extra-plugins libstrongswan-extra-plugins strongswan-charon strongswan-starter \ virt-what open-vm-tools qemu-guest-agent hyperv-daemons cloud-guest-utils \ - apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common + conntrack apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common apt-get install -y python3-json-pointer python3-jsonschema cloud-init From f3ac89bbcc193b9920cedb5c7edc486190529d7e Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 11 Aug 2021 18:24:06 +0530 Subject: [PATCH 089/117] update branch --- scripts/util/create-kubernetes-binaries-iso.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/util/create-kubernetes-binaries-iso.sh b/scripts/util/create-kubernetes-binaries-iso.sh index ee434cdca85c..fc19caa470ce 100755 --- a/scripts/util/create-kubernetes-binaries-iso.sh +++ b/scripts/util/create-kubernetes-binaries-iso.sh @@ -64,7 +64,7 @@ touch "${kubelet_service_file}" if [[ `echo "${2} $VAL" | awk '{print ($1 < $2)}'` == 1 ]]; then curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} else - curl -sSL 
"https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/cks-deps/cks/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} + curl -sSL "https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/main/cks/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} fi echo "Downloading 10-kubeadm.conf ${RELEASE}..." @@ -73,7 +73,7 @@ touch "${kubeadm_conf_file}" if [[ `echo "${2} $val" | awk '{print ($1 < $2)}'` == 1 ]]; then curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} else - curl -sSL "https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/cks-deps/cks/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} + curl -sSL "https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/main/cks/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} fi NETWORK_CONFIG_URL="${5}" From c5be93abb2e5a8bc1b42db24451065d05e360cc3 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 13 Aug 2021 12:04:17 +0530 Subject: [PATCH 090/117] ssh key missing - iLB and elasticLB --- .../network/lb/ElasticLoadBalancerManagerImpl.java | 8 ++++++++ .../network/lb/InternalLoadBalancerVMManagerImpl.java | 10 ++++++++++ 2 files changed, 18 insertions(+) diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index bc35b34ea468..7968d55f4470 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -16,7 +16,9 @@ // under the License. package com.cloud.network.lb; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Base64; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -476,7 +478,13 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl if (defaultDns2 != null) { buf.append(" dns2=").append(defaultDns2); } + String MsPublicKey = _configDao.getValue("ssh.publickey"); + String base64EncodedPublicKey = null; + if (MsPublicKey != null) { + base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); + } + buf.append(" authorized_key=").append(base64EncodedPublicKey); if (s_logger.isDebugEnabled()) { s_logger.debug("Boot Args for " + profile + ": " + buf.toString()); } diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java index 7232c1a032a3..d65aa01ab05a 100644 --- a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java +++ b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java @@ -16,8 +16,10 @@ // under the License. 
package org.apache.cloudstack.network.lb; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; @@ -218,6 +220,14 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile buf.append(" localgw=").append(dest.getPod().getGateway()); } } + + String MsPublicKey = _configDao.getValue("ssh.publickey"); + String base64EncodedPublicKey = null; + if (MsPublicKey != null) { + base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); + } + + buf.append(" authorized_key=").append(base64EncodedPublicKey); } if (controlNic == null) { From 62831c936851bb69766533b5e11b9ced798d86ae Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Mon, 16 Aug 2021 13:54:30 +0530 Subject: [PATCH 091/117] systemvmtemplate: bump to Debian 11.0.0 systemvmtemplate Signed-off-by: Rohit Yadav --- tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh | 6 ++---- .../systemvmtemplate/scripts/install_systemvm_packages.sh | 2 +- tools/appliance/systemvmtemplate/template.json | 4 ++-- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh b/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh index 5699323f8941..0b0381aa244e 100644 --- a/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh +++ b/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh @@ -36,8 +36,8 @@ function add_backports() { sed -i '/deb-src/d' /etc/apt/sources.list sed -i '/backports/d' /etc/apt/sources.list sed -i '/security/d' /etc/apt/sources.list - echo 'deb http://http.debian.net/debian buster-backports main' >> /etc/apt/sources.list - echo 'deb http://security.debian.org/debian-security buster/updates main' >> /etc/apt/sources.list + echo 'deb http://http.debian.net/debian bullseye-backports main' >> /etc/apt/sources.list + echo 'deb http://security.debian.org/debian-security bullseye/updates main' >> /etc/apt/sources.list } function apt_upgrade() { @@ -56,8 +56,6 @@ function apt_upgrade() { apt-get -q -y upgrade apt-get -q -y dist-upgrade - apt-get -q -y upgrade -t buster-backports - apt-get -q -y dist-upgrade -t buster-backports apt-get -y autoremove --purge apt-get autoclean diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index d19fd06a8a7c..fcaeab6ec45f 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -42,7 +42,7 @@ function install_packages() { debconf_packages - local apt_get="apt-get --no-install-recommends -q -y -t buster-backports" + local apt_get="apt-get --no-install-recommends -q -y" ${apt_get} install grub-legacy \ rsyslog logrotate cron net-tools ifupdown tmux vim-tiny htop netbase iptables nftables \ diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index a0affce25c9a..4e1c78b117e4 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -27,8 +27,8 @@ "format": "qcow2", "headless": true, "http_directory": "http", - "iso_checksum": "sha512:87b4c9dd135718304a1b3e68423fe1b03ed52eb67f60563ad14a846aeaaecf53c064dae0f128b2633041992bbc8124b68b6767b529d80487754959b38558e637", - "iso_url": 
"https://cdimage.debian.org/debian-cd/10.10.0/amd64/iso-cd/debian-10.10.0-amd64-netinst.iso", + "iso_checksum": "sha512:5f6aed67b159d7ccc1a90df33cc8a314aa278728a6f50707ebf10c02e46664e383ca5fa19163b0a1c6a4cb77a39587881584b00b45f512b4a470f1138eaa1801", + "iso_url": "https://cdimage.debian.org/debian-cd/11.0.0/amd64/iso-cd/debian-11.0.0-amd64-netinst.iso", "net_device": "virtio-net", "output_directory": "../dist", "qemuargs": [ From 78f6423b7ff9362c63ca49821057490b47f4d3fe Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Mon, 16 Aug 2021 14:16:32 +0530 Subject: [PATCH 092/117] fixes Signed-off-by: Rohit Yadav --- tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh | 2 +- tools/appliance/systemvmtemplate/template.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh b/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh index 0b0381aa244e..f20aa977e7a0 100644 --- a/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh +++ b/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh @@ -37,7 +37,7 @@ function add_backports() { sed -i '/backports/d' /etc/apt/sources.list sed -i '/security/d' /etc/apt/sources.list echo 'deb http://http.debian.net/debian bullseye-backports main' >> /etc/apt/sources.list - echo 'deb http://security.debian.org/debian-security bullseye/updates main' >> /etc/apt/sources.list + echo 'deb http://security.debian.org/debian-security bullseye-security main' >> /etc/apt/sources.list } function apt_upgrade() { diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index 4e1c78b117e4..4a1ac0a1c098 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -23,7 +23,7 @@ ], "boot_wait": "5s", "disk_interface": "virtio", - "disk_size": "2500M", + "disk_size": "4000M", "format": "qcow2", "headless": true, "http_directory": "http", From b187e51aa6ad352aab510857c5a115d3f9e729a3 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Mon, 16 Aug 2021 14:52:12 +0530 Subject: [PATCH 093/117] fix missing py3 packages (netaddr) and migrate one file to py3/flask Signed-off-by: Rohit Yadav --- .../debian/etc/systemd/system/baremetal-vr.service | 2 +- systemvm/debian/opt/cloud/bin/baremetal-vr.py | 8 ++++---- .../systemvmtemplate/scripts/apt_upgrade.sh | 1 - .../scripts/install_systemvm_packages.sh | 13 ++++++++----- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/systemvm/debian/etc/systemd/system/baremetal-vr.service b/systemvm/debian/etc/systemd/system/baremetal-vr.service index f880f0ca18ac..a9f30f05e62e 100644 --- a/systemvm/debian/etc/systemd/system/baremetal-vr.service +++ b/systemvm/debian/etc/systemd/system/baremetal-vr.service @@ -8,5 +8,5 @@ WantedBy=multi-user.target [Service] Type=simple WorkingDirectory=/opt/cloud/bin -ExecStart=/usr/bin/python /opt/cloud/bin/baremetal-vr.py +ExecStart=/usr/bin/python3 /opt/cloud/bin/baremetal-vr.py Restart=on-failure diff --git a/systemvm/debian/opt/cloud/bin/baremetal-vr.py b/systemvm/debian/opt/cloud/bin/baremetal-vr.py index 1547d82c6602..862775a3b605 100755 --- a/systemvm/debian/opt/cloud/bin/baremetal-vr.py +++ b/systemvm/debian/opt/cloud/bin/baremetal-vr.py @@ -16,13 +16,13 @@ #under the License. 
import subprocess -import urllib import hmac import hashlib import base64 import traceback import logging import re +import urllib.request, urllib.parse, urllib.error from flask import Flask @@ -131,10 +131,10 @@ def _make_sign(self, mac): "mac": mac } - request = zip(reqs.keys(), reqs.values()) + request = list(zip(list(reqs.keys()), list(reqs.values()))) request.sort(key=lambda x: str.lower(x[0])) - hashStr = "&".join(["=".join([str.lower(r[0]), str.lower(urllib.quote_plus(str(r[1]))).replace("+", "%20").replace('=', '%3d')]) for r in request]) - sig = urllib.quote_plus(base64.encodestring(hmac.new(secretkey, hashStr, hashlib.sha1).digest()).strip()) + hashStr = "&".join(["=".join([str.lower(r[0]), str.lower(urllib.parse.quote_plus(str(r[1]))).replace("+", "%20").replace('=', '%3d')]) for r in request]) + sig = urllib.parse.quote_plus(base64.encodestring(hmac.new(secretkey, hashStr, hashlib.sha1).digest()).strip()) return sig def notify_provisioning_done(self, mac): diff --git a/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh b/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh index f20aa977e7a0..bca5077dafeb 100644 --- a/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh +++ b/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh @@ -60,7 +60,6 @@ function apt_upgrade() { apt-get -y autoremove --purge apt-get autoclean apt-get clean - reboot } return 2>/dev/null || apt_upgrade diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index fcaeab6ec45f..8f8a704aa04f 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -47,10 +47,11 @@ function install_packages() { ${apt_get} install grub-legacy \ rsyslog logrotate cron net-tools ifupdown tmux vim-tiny htop netbase iptables nftables \ openssh-server e2fsprogs tcpdump iftop socat wget coreutils systemd \ - python python3 bzip2 sed gawk diffutils grep gzip less tar telnet ftp rsync traceroute psmisc lsof procps \ + python python3 python3-flask ieee-data \ + bzip2 sed gawk diffutils grep gzip less tar telnet ftp rsync traceroute psmisc lsof procps \ inetutils-ping iputils-arping httping curl \ dnsutils zip unzip ethtool uuid file iproute2 acpid sudo \ - sysstat python-netaddr \ + sysstat \ apache2 ssl-cert \ dnsmasq dnsmasq-utils \ nfs-common \ @@ -59,12 +60,10 @@ function install_packages() { xenstore-utils libxenstore3.0 \ ipvsadm conntrackd libnetfilter-conntrack3 \ keepalived irqbalance \ - ipcalc \ openjdk-11-jre-headless \ - ipset \ + ipcalc ipset \ iptables-persistent \ libtcnative-1 libssl-dev libapr1-dev \ - python-flask \ haproxy \ haveged \ radvd \ @@ -72,6 +71,10 @@ function install_packages() { strongswan libcharon-extra-plugins libstrongswan-extra-plugins strongswan-charon strongswan-starter \ virt-what open-vm-tools qemu-guest-agent hyperv-daemons + # python2-netaddr workaround + wget https://github.com/shapeblue/cloudstack-nonoss/raw/main/python-netaddr_0.7.19-1_all.deb + dpkg -i python-netaddr_0.7.19-1_all.deb + apt-get -y autoremove --purge apt-get clean apt-get autoclean From bc6ef8dd9e3777eccad76200ac75ce9d3960a98c Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Mon, 16 Aug 2021 15:48:04 +0530 Subject: [PATCH 094/117] fix strongswan starter systemd Signed-off-by: Rohit Yadav --- .../systemvmtemplate/scripts/configure_systemvm_services.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
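The _make_sign() port above keeps CloudStack's API signing scheme while moving from urllib to urllib.parse: sort the query parameters, lower-case the url-encoded string, HMAC-SHA1 it with the secret key, then base64- and URL-encode the digest. A hedged shell equivalent for sanity-checking a signature by hand; the keys and command are placeholders, not values from the patch:

    API_KEY="api-key-placeholder"
    SECRET_KEY="secret-key-placeholder"
    QUERY="apikey=${API_KEY}&command=listCapabilities&response=json"   # keys already sorted

    # lower-case the sorted query string, then HMAC-SHA1 + base64, then URL-encode
    lowered=$(printf '%s' "$QUERY" | tr '[:upper:]' '[:lower:]')
    sig=$(printf '%s' "$lowered" | openssl dgst -sha1 -hmac "$SECRET_KEY" -binary | base64)
    urlsig=$(python3 -c 'import sys, urllib.parse; print(urllib.parse.quote_plus(sys.argv[1]))' "$sig")
    echo "signature=${urlsig}"
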
diff --git a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh index 516085705afa..fa9e38a0e8ce 100644 --- a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh +++ b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh @@ -65,7 +65,7 @@ function install_cloud_scripts() { /etc/profile.d/cloud.sh /etc/cron.daily/* /etc/cron.hourly/* chmod +x /root/health_checks/* - chmod -x /etc/systemd/system/* + chmod -x /etc/systemd/system/* || true systemctl daemon-reload systemctl enable cloud-early-config @@ -108,7 +108,7 @@ function configure_services() { systemctl disable haproxy systemctl disable keepalived systemctl disable radvd - systemctl disable strongswan + systemctl disable strongswan-starter systemctl disable x11-common systemctl disable xl2tpd systemctl disable vgauth From 06381a36d4edadd61a5c8c5fdb5e6ae972f4f572 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 17 Aug 2021 23:31:23 +0530 Subject: [PATCH 095/117] address comments - part1 --- engine/schema/pom.xml | 47 +++++- .../upgrade/SystemVmTemplateRegistration.java | 146 ++++++++---------- pom.xml | 1 + systemvm/debian/opt/cloud/bin/setup/common.sh | 5 +- tools/travis/before_install.sh | 4 +- 5 files changed, 112 insertions(+), 91 deletions(-) diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index ff371ae08a8e..dde49e583cfb 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -60,6 +60,37 @@ + + org.codehaus.gmavenplus + gmavenplus-plugin + ${cs.gmaven.version} + + + generate-sources + + execute + + + + + true + + log.info "$project.version" + def projectVersion = project.version + String[] versionParts = projectVersion.split("\\.") + project.properties['cs.version'] = versionParts[0] + "." 
+ versionParts[1] + project.properties['patch.version'] = versionParts[2] + + + + + org.codehaus.groovy + groovy-all + ${cs.groovy.version} + runtime + + + com.googlecode.maven-download-plugin download-maven-plugin @@ -71,8 +102,8 @@ wget - - http://10.0.3.122/systemvmtemplate/custom/cks-debian/4.16/md5sum.txt + http://10.0.3.122/systemvmtemplate/custom/cks-debian/${cs.version}/md5sum.txt + ${basedir}/dist/systemvm-templates/ @@ -123,8 +154,8 @@ wget - - http://10.0.3.122/systemvmtemplate/custom/cks-debian/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2 + http://10.0.3.122/systemvmtemplate/custom/cks-debian/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-kvm.qcow2.bz2 + ${basedir}/dist/systemvm-templates/ @@ -134,8 +165,8 @@ wget - - http://10.0.3.122/systemvmtemplate/custom/cks-debian/4.16/systemvmtemplate-4.16.0-vmware.ova + http://10.0.3.122/systemvmtemplate/custom/cks-debian/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-vmware.ova + ${basedir}/dist/systemvm-templates/ @@ -145,8 +176,8 @@ wget - - http://10.0.3.122/systemvmtemplate/custom/cks-debian/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2 + http://10.0.3.122/systemvmtemplate/custom/cks-debian/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-xen.vhd.bz2 + ${basedir}/dist/systemvm-templates/ diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index d479d16f24b9..1e4454deea84 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -17,6 +17,7 @@ package com.cloud.upgrade; import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.Storage.ImageFormat; import com.cloud.utils.DateUtil; import com.cloud.utils.Pair; import com.cloud.utils.db.GlobalLock; @@ -60,22 +61,22 @@ public class SystemVmTemplateRegistration { private static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; private static final String PARTIAL_TEMPLATE_FOLDER = "/template/tmpl/1/"; - private static final String FETCH_FOLDER_NAME = "SELECT id FROM vm_template ORDER BY id DESC LIMIT 1;"; // TODO: filter out only zones with NFS based 'Image' stores - to rule out image cache scenario - private static final String FETCH_DISTINCT_ELIGIBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM `cloud`.`image_store` WHERE protocol = \"nfs\" AND removed is null"; - private static final String FETCH_DISTINCT_HYPERVISORS_IN_ZONE = "SELECT DISTINCT(hypervisor_type) FROM `cloud`.`cluster` where removed is null and data_center_id=?"; + private static final String FETCH_DISTINCT_ELIGIBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM `cloud`.`image_store` WHERE protocol = \"nfs\" AND role = \"Image\" AND removed is null"; + private static final String FETCH_DISTINCT_HYPERVISORS_IN_ZONE = "SELECT DISTINCT(hypervisor_type) FROM `cloud`.`cluster` where data_center_id=? AND role = \"Image\" AND image_provider_name = \"NFS\" AND removed is null"; private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url,id FROM `cloud`.`image_store` WHERE data_center_id=? 
AND removed IS NULL LIMIT 1"; private static final String INSERT_VM_TEMPLATE_TABLE = "INSERT INTO `cloud`.`vm_template` (uuid, unique_name, name, public, featured, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, cross_zones, hypervisor_type, state, deploy_as_is)" + "VALUES (?, ?, ?, 0, 0, ?, 'SYSTEM', 0, 64, 1, ?, ?, 0, ?, ?, ?, 1, ?, 'Inactive', ?)"; private static final String INSERT_TEMPLATE_STORE_REF_TABLE = "INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, download_state, error_str, local_path, install_path, url, state, destroyed, is_copy," + " update_count, ref_cnt, store_role) VALUES (?, ?, ?, ?, NULL, 0, 'NOT_DOWNLOADED', NULL, NULL, ?, ?, 'Allocated', 0, 0, 0, 0, 'Image')"; - private static final String UPDATE_TEMPLATE_STORE_REF_TABLE = "UPDATE template_store_ref SET download_pct=100, download_state='DOWNLOADED', " + + private static final String UPDATE_TEMPLATE_STORE_REF_TABLE = "UPDATE `cloud`.`template_store_ref` SET download_pct=100, download_state='DOWNLOADED', " + "state='Ready', size=?, physical_size=?, last_updated=?, updated=? where template_id=?"; - private static final String UPDATE_VM_TEMPLATE_ENTRY = "UPDATE vm_template set size = ?, state = 'Active' where id = ?"; + private static final String UPDATE_VM_TEMPLATE_ENTRY = "UPDATE `cloud`.`vm_template` set size = ?, state = 'Active' where id = ?"; private static final String UPDATE_CONFIGURATION_TABLE = "UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?"; - private static final String UPDATE_TEMPLATE_TABLE_ON_FAILURE = "UPDATE vm_template set removed = ?, state = 'Inactive' where id = ?"; - private static final String DELETE_TEMPLATE_REF_RECORD_ON_FAILURE = "DELETE from template_store_ref where template_id = ?"; + private static final String UPDATE_TEMPLATE_TABLE_ON_FAILURE = "UPDATE `cloud`.`vm_template` set removed = ?, state = 'Inactive' where id = ?"; + private static final String DELETE_TEMPLATE_REF_RECORD_ON_FAILURE = "DELETE from `cloud`.`template_store_ref` where template_id = ?"; private static final Integer SCRIPT_TIMEOUT = 1800000; + private static final Integer LOCK_WAIT_TIMEOUT = 1200; public static String CS_MAJOR_VERSION = "4.16"; public static String CS_TINY_VERSION = "0"; @@ -87,7 +88,7 @@ private static class SystemVMTemplateDetails { Date created; String url; String checksum; - TemplateFormat format; + ImageFormat format; Integer guestOsId; Hypervisor.HypervisorType hypervisorType; Long storeId; @@ -101,7 +102,7 @@ private static class SystemVMTemplateDetails { } SystemVMTemplateDetails(String uuid, String name, Date created, String url, String checksum, - TemplateFormat format, Integer guestOsId, Hypervisor.HypervisorType hypervisorType, + ImageFormat format, Integer guestOsId, Hypervisor.HypervisorType hypervisorType, Long storeId) { this.uuid = uuid; this.name = name; @@ -142,7 +143,7 @@ public String getChecksum() { return checksum; } - public TemplateFormat getFormat() { + public ImageFormat getFormat() { return format; } @@ -242,36 +243,23 @@ public void setUpdated(Date updated) { } }; - public static enum TemplateFormat{ - QCOW2("qcow2"), - RAW("raw"), - VHD("vhd"), - OVA("ova"); - - private final String fileExtension; - - TemplateFormat(String fileExtension) { - this.fileExtension = fileExtension; - } - } - - public static final Map hypervisorImageFormat = new HashMap() { + public static final Map hypervisorImageFormat = new HashMap() { { - 
put(Hypervisor.HypervisorType.KVM, TemplateFormat.QCOW2); - put(Hypervisor.HypervisorType.XenServer, TemplateFormat.VHD); - put(Hypervisor.HypervisorType.VMware, TemplateFormat.OVA); - put(Hypervisor.HypervisorType.Hyperv, TemplateFormat.VHD); - put(Hypervisor.HypervisorType.LXC, TemplateFormat.QCOW2); - put(Hypervisor.HypervisorType.Ovm3, TemplateFormat.RAW); + put(Hypervisor.HypervisorType.KVM, ImageFormat.QCOW2); + put(Hypervisor.HypervisorType.XenServer, ImageFormat.VHD); + put(Hypervisor.HypervisorType.VMware, ImageFormat.OVA); + put(Hypervisor.HypervisorType.Hyperv, ImageFormat.VHD); + put(Hypervisor.HypervisorType.LXC, ImageFormat.QCOW2); + put(Hypervisor.HypervisorType.Ovm3, ImageFormat.RAW); } }; public static boolean validateIfSeeded(String url, String path) { try { mountStore(url); - int lastIdx = path.lastIndexOf('/'); + int lastIdx = path.lastIndexOf(File.separator); String partialDirPath = path.substring(0, lastIdx); - String templatePath = TEMPORARY_SECONDARY_STORE + "/" + partialDirPath; + String templatePath = TEMPORARY_SECONDARY_STORE + File.separator + partialDirPath; File templateProps = new File(templatePath + "/template.properties"); if (templateProps.exists()) { LOGGER.info("SystemVM template already seeded, skipping registration"); @@ -307,9 +295,9 @@ private static String calculateChecksum(MessageDigest digest, File file) { } return sb.toString(); } catch (IOException e) { - String errMsg = String.format("Failed to calculate Checksum of template file: %s due to: %s ", file.getName(), e); - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg); + String errMsg = String.format("Failed to calculate Checksum of template file: %s ", file.getName()); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); } } @@ -324,9 +312,9 @@ public static long isTemplateAlreadyRegistered(Connection conn, Pair getEligibleZoneIds(Connection conn) { zones.add(rs.getLong(1)); } } catch (SQLException e) { - String errMsg = "Failed to fetch eligible zones for SystemVM template registration due to: %s"; - LOGGER.error(String.format(errMsg, e.getMessage())); - throw new CloudRuntimeException("Failed to fetch eligible zones for SystemVM template registration"); + String errMsg = "Failed to fetch eligible zones for SystemVM template registration"; + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); } return zones; } @@ -409,8 +397,8 @@ private static Pair getNfsStoreInZone(Connection conn, Long zoneId } } catch (SQLException e) { String errMsg = String.format("Failed to fetch NFS store in zone = %s for SystemVM template registration", zoneId); - LOGGER.error(errMsg + String.format("due to: %s", e.getMessage())); - throw new CloudRuntimeException(errMsg); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); } return new Pair<>(url, storeId); } @@ -428,8 +416,8 @@ public static void mountStore(String storeUrl) { } } catch (Exception e) { String msg = "NFS Store URL is not in the correct format"; - LOGGER.error(msg); - throw new CloudRuntimeException(msg); + LOGGER.error(msg, e); + throw new CloudRuntimeException(msg, e); } } @@ -447,8 +435,8 @@ private static List fetchAllHypervisors(Connection conn, Long zoneId) { } } catch (SQLException e) { String errMsg = String.format("Failed to fetch distinct hypervisors in zone: %s for SystemVM template registration", zoneId); - LOGGER.error(errMsg + String.format("due to: %s", e.getMessage())); - throw new CloudRuntimeException(errMsg); + LOGGER.error(errMsg, e); + throw new 
CloudRuntimeException(errMsg, e); } return hypervisorList; } @@ -478,13 +466,13 @@ private static Long createTemplateObjectInDB(Connection conn, SystemVMTemplateDe id = rs.getLong(1); } } catch (final SQLException e) { - String errMsg = String.format("Failed to fetch template id %s", e.getMessage()); - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg); + String errMsg = "Failed to fetch template id "; + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); } } } catch (Exception e) { - throw new CloudRuntimeException(String.format("Failed to create vm_template record for the systemVM template for hypervisor: %s, due to: %s", details.getHypervisorType().name(), e.getMessage())); + throw new CloudRuntimeException(String.format("Failed to create vm_template record for the systemVM template for hypervisor: %s", details.getHypervisorType().name()), e); } return id; } @@ -503,7 +491,7 @@ private static void createTemplateStoreRefEntry(Connection conn, SystemVMTemplat pstmt.executeUpdate(); } } catch (Exception e) { - throw new CloudRuntimeException(String.format("Failed to create template_store_ref record for the systemVM template for hypervisor: %s, due to: %s", details.getHypervisorType().name(), e.getMessage())); + throw new CloudRuntimeException(String.format("Failed to create template_store_ref record for the systemVM template for hypervisor: %s", details.getHypervisorType().name()), e); } } @@ -527,7 +515,7 @@ public static void updateDb(Connection conn, SystemVMTemplateDetails details) { pstmt.executeUpdate(); } } catch (Exception e) { - throw new CloudRuntimeException(String.format("Failed to update template_store_ref record for the systemVM template registered for hypervisor: %s, due to: %s", details.getHypervisorType().name(), e.getMessage())); + throw new CloudRuntimeException(String.format("Failed to update template_store_ref record for the systemVM template registered for hypervisor: %s", details.getHypervisorType().name()), e); } } @@ -540,10 +528,9 @@ public static void updateSystemVMEntries(Connection conn, Long templateId, Pair< update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.first().toString()); update_templ_id_pstmt.executeUpdate(); } catch (SQLException e) { - String errMsg = String.format("updateSystemVmTemplates:Exception while setting template for %s to %s : %s",hypervisorAndTemplateName.first().toString(), templateId, - e.getMessage()); - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg); + String errMsg = String.format("updateSystemVmTemplates:Exception while setting template for %s to %s",hypervisorAndTemplateName.first().toString(), templateId); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); } } @@ -561,9 +548,9 @@ public static void updateConfigurationParams(Connection conn, Map hypervisorAndTemplateName, Pair storeUrlAndId) { @@ -639,7 +626,7 @@ public static void registerTemplate(Connection conn, Pair1.4.15 5.3.3 0.5.4 + 1.12.0 diff --git a/systemvm/debian/opt/cloud/bin/setup/common.sh b/systemvm/debian/opt/cloud/bin/setup/common.sh index 8a4de1a8f8f8..2a82b2b97489 100755 --- a/systemvm/debian/opt/cloud/bin/setup/common.sh +++ b/systemvm/debian/opt/cloud/bin/setup/common.sh @@ -759,8 +759,9 @@ parse_cmd_line() { ntpserverlist) export NTP_SERVER_LIST=$VALUE ;; - authorized_key) - export AUTHORIZED_KEYS=$VALUE + authorized_key) + export AUTHORIZED_KEYS=$VALUE + ;; esac done echo -e "\n\t}\n}" >> ${CHEF_TMP_FILE} diff --git a/tools/travis/before_install.sh 
b/tools/travis/before_install.sh index c8aecbb857ec..0bb5c112bd6f 100755 --- a/tools/travis/before_install.sh +++ b/tools/travis/before_install.sh @@ -125,8 +125,8 @@ echo -e "\nInstalling some python packages: " for ((i=0;i<$RETRY_COUNT;i++)) do - python3 -m pip install --user --upgrade urllib3 lxml paramiko nose texttable ipmisim pyopenssl pycrypto mock flask netaddr pylint pycodestyle six astroid configparser > /tmp/piplog - python2 -m pip install --user --upgrade urllib3 lxml paramiko nose texttable ipmisim pyopenssl pycrypto mock flask netaddr pylint pycodestyle six astroid configparser >> /tmp/piplog + python3 -m pip install --user --upgrade urllib3 lxml paramiko nose texttable ipmisim pyopenssl pycrypto mock flask netaddr pylint pycodestyle six astroid > /tmp/piplog + python2 -m pip install --user --upgrade urllib3 lxml paramiko nose texttable ipmisim pyopenssl pycrypto mock flask netaddr pylint pycodestyle six astroid >> /tmp/piplog if [[ $? -eq 0 ]]; then echo -e "\npython packages installed successfully" break; From c91c39652d301c9bfc9dfd227531e3b262d87218 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 18 Aug 2021 00:30:47 +0530 Subject: [PATCH 096/117] test updates + install latest packages --- .../upgrade/SystemVmTemplateRegistration.java | 12 +++---- .../smoke/test_kubernetes_clusters.py | 36 +++++++++---------- .../scripts/install_systemvm_packages.sh | 2 +- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 1e4454deea84..e37ca07b0851 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -20,6 +20,7 @@ import com.cloud.storage.Storage.ImageFormat; import com.cloud.utils.DateUtil; import com.cloud.utils.Pair; +import com.cloud.utils.UriUtils; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; @@ -32,6 +33,7 @@ import java.io.FileInputStream; import java.io.FileReader; import java.io.IOException; +import java.net.URI; import java.security.MessageDigest; import java.sql.Connection; import java.sql.Date; @@ -61,7 +63,6 @@ public class SystemVmTemplateRegistration { private static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; private static final String PARTIAL_TEMPLATE_FOLDER = "/template/tmpl/1/"; - // TODO: filter out only zones with NFS based 'Image' stores - to rule out image cache scenario private static final String FETCH_DISTINCT_ELIGIBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM `cloud`.`image_store` WHERE protocol = \"nfs\" AND role = \"Image\" AND removed is null"; private static final String FETCH_DISTINCT_HYPERVISORS_IN_ZONE = "SELECT DISTINCT(hypervisor_type) FROM `cloud`.`cluster` where data_center_id=? AND role = \"Image\" AND image_provider_name = \"NFS\" AND removed is null"; private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url,id FROM `cloud`.`image_store` WHERE data_center_id=? 
AND removed IS NULL LIMIT 1"; @@ -268,7 +269,7 @@ public static boolean validateIfSeeded(String url, String path) { LOGGER.info("SystemVM template not seeded"); return false; } catch (Exception e) { - throw new CloudRuntimeException("Failed to verify if the template is seeded"); + throw new CloudRuntimeException("Failed to verify if the template is seeded", e); } finally { unmountStore(); } @@ -406,10 +407,9 @@ private static Pair getNfsStoreInZone(Connection conn, Long zoneId public static void mountStore(String storeUrl) { try { if (storeUrl != null) { - String path = storeUrl.split("://")[1]; - int index = path.indexOf('/'); - String host = path.substring(0, index); - String mountPath = path.substring(index); + URI uri = new URI(UriUtils.encodeURIComponent(storeUrl)); + String host = uri.getHost(); + String mountPath = uri.getPath(); Script.runSimpleBashScript("mkdir -p " + TEMPORARY_SECONDARY_STORE); String mount = String.format(mountCommand, host + ":" + mountPath, TEMPORARY_SECONDARY_STORE); Script.runSimpleBashScript(mount); diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 75012ade8ca4..0e0ee675a9ed 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -93,20 +93,20 @@ def setUpClass(cls): if cls.setup_failed == False: try: - cls.kubernetes_version_1_16_0 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.0"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_1_16_0.id) + cls.kubernetes_version_1_20_9 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.20.9"]) + cls.kubernetes_version_ids.append(cls.kubernetes_version_1_20_9.id) except Exception as e: cls.setup_failed = True cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % - (cls.services["cks_kubernetes_versions"]["1.16.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.0"]["url"], e)) + (cls.services["cks_kubernetes_versions"]["1.20.9"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.20.9"]["url"], e)) if cls.setup_failed == False: try: - cls.kubernetes_version_1_16_3 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.3"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_1_16_3.id) + cls.kubernetes_version_1_21_3 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.21.3"]) + cls.kubernetes_version_ids.append(cls.kubernetes_version_1_21_3.id) except Exception as e: cls.setup_failed = True cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % - (cls.services["cks_kubernetes_versions"]["1.16.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.3"]["url"], e)) + (cls.services["cks_kubernetes_versions"]["1.21.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.21.3"]["url"], e)) if cls.setup_failed == False: cks_offering_data = cls.services["cks_service_offering"] @@ -349,20 +349,20 @@ def test_01_invalid_upgrade_kubernetes_cluster(self): if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_3) + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_3) self.debug("Downgrading Kubernetes cluster with ID: %s to a lower version. This should fail!" 
% k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_16_0.id) - self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kubernetes_version_1_16_0.id) + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_20_9.id) + self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kubernetes_version_1_20_9.id) self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Kubernetes cluster downgrade to a lower Kubernetes supported version. Must be an error.") except Exception as e: self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e) self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) - self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_16_3.id) + self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id) return @attr(tags=["advanced", "smoke"], required_hardware="true") @@ -376,17 +376,17 @@ def test_02_upgrade_kubernetes_cluster(self): if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_0) + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_20_9) time.sleep(self.services["sleep"]) self.debug("Upgrading Kubernetes cluster with ID: %s" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_16_3.id) + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_3.id) except Exception as e: self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e) - self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_16_3.id) + self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id) return @attr(tags=["advanced", "smoke"], required_hardware="true") @@ -434,7 +434,7 @@ def test_04_autoscale_kubernetes_cluster(self): if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_16_3) + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_3) self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) try: @@ -535,12 +535,12 @@ def test_08_upgrade_kubernetes_ha_cluster(self): self.debug("Upgrading HA Kubernetes cluster with ID: %s" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_16_3.id) + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_3.id) except Exception as e: self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e) - self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_16_3.id) + self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id) self.debug("Kubernetes cluster with ID: %s successfully upgraded" % k8s_cluster.id) return @@ -641,7 +641,7 @@ def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}): # Does a cluster already exist ? 
if cluster == None or cluster.id == None: if not version: - version = self.kubernetes_version_1_16_0 + version = self.kubernetes_version_1_20_9 self.debug("No existing cluster available, k8s_cluster: %s" % cluster) return self.createNewKubernetesCluster(version, size, control_nodes) @@ -651,7 +651,7 @@ def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}): # Check the version only if specified valid = valid and cluster.kubernetesversionid == version.id else: - version = self.kubernetes_version_1_16_0 + version = self.kubernetes_version_1_20_9 if valid: cluster_id = cluster.id diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index c8d3e27d58fe..cc8be48363e3 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -105,7 +105,7 @@ function install_packages() { $(lsb_release -cs) \ stable" apt-get update - ${apt_get} install docker-ce=5:19.03.10~3-0~debian-buster docker-ce-cli=5:19.03.10~3-0~debian-buster containerd.io=1.3.7-1 + ${apt_get} install docker-ce docker-ce-cli containerd.io fi apt_clean From c3210fe7fb00dd339888c457f3e4adf14f5a4146 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 18 Aug 2021 15:31:08 +0530 Subject: [PATCH 097/117] intensify checks and queries + update authorised_keys decode logic --- .../com/cloud/upgrade/SystemVmTemplateRegistration.java | 9 ++++++--- systemvm/debian/opt/cloud/bin/setup/common.sh | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index e37ca07b0851..5d1437a37ce6 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -64,8 +64,8 @@ public class SystemVmTemplateRegistration { private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; private static final String PARTIAL_TEMPLATE_FOLDER = "/template/tmpl/1/"; private static final String FETCH_DISTINCT_ELIGIBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM `cloud`.`image_store` WHERE protocol = \"nfs\" AND role = \"Image\" AND removed is null"; - private static final String FETCH_DISTINCT_HYPERVISORS_IN_ZONE = "SELECT DISTINCT(hypervisor_type) FROM `cloud`.`cluster` where data_center_id=? AND role = \"Image\" AND image_provider_name = \"NFS\" AND removed is null"; - private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url,id FROM `cloud`.`image_store` WHERE data_center_id=? AND removed IS NULL LIMIT 1"; + private static final String FETCH_DISTINCT_HYPERVISORS_IN_ZONE = "SELECT DISTINCT(hypervisor_type) FROM `cloud`.`cluster` where removed is null AND data_center_id=?"; + private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url,id FROM `cloud`.`image_store` WHERE data_center_id=? 
AND role = \"Image\" AND image_provider_name = \"NFS\" AND removed IS NULL LIMIT 1"; private static final String INSERT_VM_TEMPLATE_TABLE = "INSERT INTO `cloud`.`vm_template` (uuid, unique_name, name, public, featured, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, cross_zones, hypervisor_type, state, deploy_as_is)" + "VALUES (?, ?, ?, 0, 0, ?, 'SYSTEM', 0, 64, 1, ?, ?, 0, ?, ?, ?, 1, ?, 'Inactive', ?)"; private static final String INSERT_TEMPLATE_STORE_REF_TABLE = "INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, download_state, error_str, local_path, install_path, url, state, destroyed, is_copy," + @@ -401,6 +401,9 @@ private static Pair getNfsStoreInZone(Connection conn, Long zoneId LOGGER.error(errMsg, e); throw new CloudRuntimeException(errMsg, e); } + if (url == null || storeId == null) { + throw new CloudRuntimeException(String.format("Failed to get an NFS store in zone: %s", zoneId)); + } return new Pair<>(url, storeId); } @@ -614,7 +617,6 @@ public static void registerTemplate(Connection conn, Pair zoneIds = getEligibleZoneIds(conn); for (Long zoneId : zoneIds) { Pair storeUrlAndId = getNfsStoreInZone(conn, zoneId); + mountStore(storeUrlAndId.first()); List hypervisorList = fetchAllHypervisors(conn, zoneId); for (String hypervisor : hypervisorList) { Hypervisor.HypervisorType name = Hypervisor.HypervisorType.getType(hypervisor); diff --git a/systemvm/debian/opt/cloud/bin/setup/common.sh b/systemvm/debian/opt/cloud/bin/setup/common.sh index 2a82b2b97489..60b88754bee3 100755 --- a/systemvm/debian/opt/cloud/bin/setup/common.sh +++ b/systemvm/debian/opt/cloud/bin/setup/common.sh @@ -775,7 +775,7 @@ parse_cmd_line() { if [ ! -z "$AUTHORIZED_KEYS" ] then echo "$AUTHORIZED_KEYS" > $TMP_KEY_PATH - base64Val=`base64 -d $TMP_KEY_PATH` + base64Val=$(base64 -d $TMP_KEY_PATH) echo "$base64Val" > $AUTHORIZED_KEYS_PATH chmod go-rwx $AUTHORIZED_KEYS_PATH rm -rf $TMP_KEY_PATH From 1bff8ea1f4addd93e2f657b40a75fcf55625fc0f Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Wed, 18 Aug 2021 17:29:12 +0530 Subject: [PATCH 098/117] fix issues with merge conflict Signed-off-by: Rohit Yadav --- .../scripts/configure_systemvm_services.sh | 2 +- .../scripts/install_systemvm_packages.sh | 29 ++++++------------- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh index cf8e4f96e6ed..db3eec5cb40c 100644 --- a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh +++ b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh @@ -41,7 +41,7 @@ function configure_issue() { __?.o/ Apache CloudStack SystemVM $CLOUDSTACK_RELEASE ( )# https://cloudstack.apache.org - (___(_) Debian GNU/Linux 10 \n \l + (___(_) Debian GNU/Linux 11 \n \l EOF } diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index cb4519e1282d..555a00c28edb 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -84,33 +84,22 @@ function install_packages() { wget https://github.com/shapeblue/cloudstack-nonoss/raw/main/python-netaddr_0.7.19-1_all.deb dpkg -i python-netaddr_0.7.19-1_all.deb - # python2-netaddr workaround - wget 
https://github.com/shapeblue/cloudstack-nonoss/raw/main/python-netaddr_0.7.19-1_all.deb - dpkg -i python-netaddr_0.7.19-1_all.deb - - apt-get -y autoremove --purge - apt-get clean - apt-get autoclean - apt_clean - ${apt_get} install links - - curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - - apt-key fingerprint 0EBFCD88 - #32 bit architecture support for vhd-util: not required for 32 bit template + # 32 bit architecture support for vhd-util if [ "${arch}" != "i386" ]; then dpkg --add-architecture i386 apt-get update ${apt_get} install libuuid1:i386 libc6:i386 - - add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - $(lsb_release -cs) \ - stable" - apt-get update - ${apt_get} install docker-ce docker-ce-cli containerd.io fi + + # Install docker and containerd for CKS + curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - + apt-key fingerprint 0EBFCD88 + add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable" + apt-get update + ${apt_get} install docker-ce docker-ce-cli containerd.io + apt_clean install_vhd_util From bcef568af68b4ade02a9cf562bb01d08977c47b1 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 18 Aug 2021 18:19:34 +0530 Subject: [PATCH 099/117] code refactor --- .../java/com/cloud/upgrade/SystemVmTemplateRegistration.java | 2 +- server/src/main/java/com/cloud/storage/StorageManagerImpl.java | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 5d1437a37ce6..9c6f8ace6234 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -619,7 +619,7 @@ public static void registerTemplate(Connection conn, Pair Date: Wed, 18 Aug 2021 19:09:49 +0530 Subject: [PATCH 100/117] use the correct urls for downloading / referencing systemvm templates --- engine/schema/pom.xml | 12 ++++-------- engine/schema/templateConfig.sh | 20 ++++++-------------- 2 files changed, 10 insertions(+), 22 deletions(-) diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index dde49e583cfb..ebd8f64b760d 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -102,8 +102,7 @@ wget - http://10.0.3.122/systemvmtemplate/custom/cks-debian/${cs.version}/md5sum.txt - + https://download.cloudstack.org/systemvm/${cs.version}/md5sum.txt ${basedir}/dist/systemvm-templates/ @@ -154,8 +153,7 @@ wget - http://10.0.3.122/systemvmtemplate/custom/cks-debian/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-kvm.qcow2.bz2 - + https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-kvm.qcow2.bz2 ${basedir}/dist/systemvm-templates/ @@ -165,8 +163,7 @@ wget - http://10.0.3.122/systemvmtemplate/custom/cks-debian/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-vmware.ova - + https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-vmware.ova ${basedir}/dist/systemvm-templates/ @@ -176,8 +173,7 @@ wget - http://10.0.3.122/systemvmtemplate/custom/cks-debian/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-xen.vhd.bz2 - + https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-xen.vhd.bz2 
${basedir}/dist/systemvm-templates/ diff --git a/engine/schema/templateConfig.sh b/engine/schema/templateConfig.sh index 668a92e6f6eb..686804435515 100644 --- a/engine/schema/templateConfig.sh +++ b/engine/schema/templateConfig.sh @@ -68,20 +68,12 @@ function createMetadataFile() { declare -A templates getTemplateVersion $1 -# TODO: Update correct url post testing -#templates=( ["kvm"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2" -# ["vmware"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova" -# ["xenserver"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2" -# ["hyperv"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip" -# ["lxc"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2" -# ["ovm3"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" ) - -templates=( ["kvm"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2" - ["vmware"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova" - ["xenserver"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2" - ["hyperv"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip" - ["lxc"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2" - ["ovm3"]="http://10.0.3.122/systemvmtemplate/custom/cks-debian/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" ) +templates=( ["kvm"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2" + ["vmware"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova" + ["xenserver"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2" + ["hyperv"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip" + ["lxc"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2" + ["ovm3"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" ) PARENTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/dist/systemvm-templates/" From b663500a98838651e2b3d907537e276f26ebea54 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 19 Aug 2021 16:02:02 +0530 Subject: [PATCH 101/117] update suse packaging to include sysvm templates --- packaging/suse15/cloud.spec | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packaging/suse15/cloud.spec b/packaging/suse15/cloud.spec index 6ba8f9f50c5b..77125ec607ce 100644 --- a/packaging/suse15/cloud.spec +++ b/packaging/suse15/cloud.spec @@ -293,6 +293,11 @@ install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir} touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D 
server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina +# SystemVM template +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/md5sum.txt + # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/ @@ -533,6 +538,7 @@ pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz %{_datadir}/%{name}-management/conf %{_datadir}/%{name}-management/lib/*.jar %{_datadir}/%{name}-management/logs +%{_datadir}/%{name}-management/templates %attr(0755,root,root) %{_bindir}/%{name}-setup-databases %attr(0755,root,root) %{_bindir}/%{name}-migrate-databases %attr(0755,root,root) %{_bindir}/%{name}-set-guest-password From b5e03eb788d72947f699f2a6bd13cc83b4f08f22 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 23 Aug 2021 09:47:00 +0530 Subject: [PATCH 102/117] Fix query + cleanup --- .../storage/datastore/db/ImageStoreDao.java | 2 +- .../datastore/db/ImageStoreDaoImpl.java | 25 ++++++++++++------- .../storage/datastore/db/ImageStoreVO.java | 4 +++ scripts/storage/secondary/setup-sysvm-tmplt | 2 ++ 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java index 7182aef0ad7f..199f071a0562 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java @@ -44,5 +44,5 @@ public interface ImageStoreDao extends GenericDao { List listStoresByZoneId(long zoneId); - List listAllStoresInZone(long zoneId, String provider, DataStoreRole role); + List listAllStoresInZone(Long zoneId, String provider, DataStoreRole role); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java index 789667073d99..0a2918187b78 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java @@ -38,6 +38,7 @@ public class ImageStoreDaoImpl extends GenericDaoBase implem private SearchBuilder nameSearch; private SearchBuilder providerSearch; private SearchBuilder regionSearch; + private SearchBuilder storeSearch; @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -58,6 +59,12 @@ public boolean configure(String name, Map params) throws Configu regionSearch.and("role", regionSearch.entity().getRole(), SearchCriteria.Op.EQ); regionSearch.done(); + storeSearch = createSearchBuilder(); + storeSearch.and("providerName", storeSearch.entity().getProviderName(), SearchCriteria.Op.EQ); + storeSearch.and("role", storeSearch.entity().getRole(), SearchCriteria.Op.EQ); + storeSearch.and("dataCenterId", storeSearch.entity().getDcId(), SearchCriteria.Op.EQ); + storeSearch.done(); + return true; } @@ -76,6 +83,15 @@ public List findByProvider(String provider) { return listBy(sc); } + @Override + public List listAllStoresInZone(Long zoneId, String provider, DataStoreRole role) { + SearchCriteria sc = 
storeSearch.create(); + sc.setParameters("providerName", provider); + sc.setParameters("role", role); + sc.setParameters("dataCenterId", zoneId); + return listBy(sc); + } + @Override public List findByZone(ZoneScope scope, Boolean readonly) { SearchCriteria sc = createSearchCriteria(); @@ -140,13 +156,4 @@ public List listStoresByZoneId(long zoneId) { sc.addAnd("dcId", SearchCriteria.Op.EQ, zoneId); return listBy(sc); } - - @Override - public List listAllStoresInZone(long zoneId, String provider, DataStoreRole role) { - SearchCriteria sc = createSearchCriteria(); - sc.setParameters("data_center_id", zoneId); - sc.setParameters("role", role); - sc.setParameters("image_provider_name", provider); - return listBy(sc); - } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java index d24582714868..3ca9259c0997 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java @@ -135,6 +135,10 @@ public Long getDataCenterId() { return this.dcId; } + public Long getDcId() { + return this.dcId; + } + public ScopeType getScope() { return this.scope; } diff --git a/scripts/storage/secondary/setup-sysvm-tmplt b/scripts/storage/secondary/setup-sysvm-tmplt index e8579ba31b39..9c33e5fb6ff4 100755 --- a/scripts/storage/secondary/setup-sysvm-tmplt +++ b/scripts/storage/secondary/setup-sysvm-tmplt @@ -138,6 +138,8 @@ then tar xvf $tmpdestdir/$localfile -C $tmpdestdir &> /dev/null sudo cp $tmpdestdir/*.vmdk $tmpdestdir/*.mf $tmpdestdir/*.ovf $destdir/ rm -rf $tmpdestdir/*.vmdk $tmpdestdir/*.mf $tmpdestdir/*.ovf $tmpdestdir/*.ova +else + rm -rf $tmpdestdir/*.tmp fi From 191bc5dad38100e2f33c9ff206de1732687ecb2e Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 23 Aug 2021 19:07:45 +0530 Subject: [PATCH 103/117] clear maven cache if template checksum file is different --- engine/schema/pom.xml | 23 ++++++++++++++++++++++ engine/schema/validate-checksum | 35 +++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 engine/schema/validate-checksum diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index ebd8f64b760d..0e12159fe812 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -104,6 +104,8 @@ https://download.cloudstack.org/systemvm/${cs.version}/md5sum.txt ${basedir}/dist/systemvm-templates/ + true + true @@ -142,6 +144,27 @@ + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + validate-checksums + generate-resources + + exec + + + bash + + validate-checksum + + + + + + com.googlecode.maven-download-plugin download-maven-plugin diff --git a/engine/schema/validate-checksum b/engine/schema/validate-checksum new file mode 100644 index 000000000000..091d7bce90cb --- /dev/null +++ b/engine/schema/validate-checksum @@ -0,0 +1,35 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +set -x +old_checksum_path="$HOME/.m2/repository/.cache/download-maven-plugin/" +new_checksum_path="./dist/systemvm-templates/" +file_name="md5sum.txt" + +count=$(ls -d ${old_checksum_path}${file_name}* | wc -l) +if [ "$count" -gt 1 ]; then + rm -rf $old_checksum_path* +else + echo $PWD + old_checksum=$(md5sum ${old_checksum_path}${file_name}* | awk '{print $1}') + new_checksum=$(md5sum ${new_checksum_path}${file_name} | awk '{print $1}') + if [ "$old_checksum" != "$new_checksum" ]; then + rm -rf $old_checksum_path* + else + echo "equal" + fi +fi From 97d20aac78e9c8b143073fca8d64ec8574020ba5 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 23 Aug 2021 20:01:34 +0530 Subject: [PATCH 104/117] address review comments + cleanup --- .../cloud/vm/VirtualMachineManagerImpl.java | 2 +- engine/schema/pom.xml | 42 +++++++++---------- .../upgrade/SystemVmTemplateRegistration.java | 38 ++++++++--------- engine/schema/validate-checksum | 2 - .../resource/LibvirtComputingResource.java | 1 - .../cluster/KubernetesClusterManagerImpl.java | 7 ++-- .../KubernetesClusterActionWorker.java | 1 - 7 files changed, 44 insertions(+), 49 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index ff4e08b57945..1a55286888da 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -407,7 +407,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac static final ConfigKey SystemVmRootDiskSize = new ConfigKey("Advanced", Long.class, "systemvm.root.disk.size", "-1", - "root size (in GB) of systemvm and virtual routers", true); + "Size of root volume (in GB) of system VMs and virtual routers", true); ScheduledExecutorService _executor = null; diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index 0e12159fe812..234c7ed03a28 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -98,6 +98,7 @@ download-checksums + generate-resources wget @@ -110,6 +111,26 @@ + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + validate-checksums + generate-resources + + exec + + + bash + + validate-checksum + + + + + org.codehaus.mojo exec-maven-plugin @@ -144,27 +165,6 @@ - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - validate-checksums - generate-resources - - exec - - - bash - - validate-checksum - - - - - - com.googlecode.maven-download-plugin download-maven-plugin diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 9c6f8ace6234..74b651679d51 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -52,14 +52,14 @@ public class SystemVmTemplateRegistration { private static final Logger LOGGER = Logger.getLogger(SystemVmTemplateRegistration.class); - private 
static final String mountCommand = "sudo mount -t nfs %s %s"; - private static final String umountCommand = "sudo umount %s"; - private static final String hashAlgorithm = "MD5"; - private static final String relativeTemplatePath = "./engine/schema/dist/systemvm-templates/"; - private static final String AbsolutetemplatesPath = "/usr/share/cloudstack-management/templates/"; - private static final String templatesPath = fetchTemplatesPath(); - private static final String metadataFileName = "metadata.ini"; - private static final String metadataFile = templatesPath + metadataFileName; + private static final String MOUNT_COMMAND = "sudo mount -t nfs %s %s"; + private static final String UMOUNT_COMMAND = "sudo umount %s"; + private static final String HASH_ALGORITHM = "MD5"; + private static final String RELATIVE_TEMPLATE_PATH = "./engine/schema/dist/systemvm-templates/"; + private static final String ABSOLUTE_TEMPLATE_PATH = "/usr/share/cloudstack-management/templates/"; + private static final String TEMPLATES_PATH = fetchTemplatesPath(); + private static final String METADATA_FILE_NAME = "metadata.ini"; + private static final String METADATA_FILE = TEMPLATES_PATH + METADATA_FILE_NAME; private static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; private static final String PARTIAL_TEMPLATE_FOLDER = "/template/tmpl/1/"; @@ -326,17 +326,17 @@ public static long isTemplateAlreadyRegistered(Connection conn, Pair validClusterStates = Arrays.asList(KubernetesCluster.State.Created, KubernetesCluster.State.Running, KubernetesCluster.State.Stopped); + if (!(validClusterStates.contains(kubernetesCluster.getState()))) { throw new PermissionDeniedException(String.format("Kubernetes cluster %s is in %s state and can not be scaled", kubernetesCluster.getName(), kubernetesCluster.getState().toString())); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 0635f53fc877..503b49670e3f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -333,7 +333,6 @@ protected Pair getKubernetesClusterServerIpSshPort(UserVm contr } protected void attachIsoKubernetesVMs(List clusterVMs, final KubernetesSupportedVersion kubernetesSupportedVersion) throws CloudRuntimeException { - //final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; KubernetesSupportedVersion version = kubernetesSupportedVersion; if (kubernetesSupportedVersion == null) { version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); From 9ee0b7b350376df2808c2aeb2288ad8b0866c35c Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 24 Aug 2021 12:11:23 +0530 Subject: [PATCH 105/117] If a template (sysvm for a hypervisor) is present in one zone, prevent re-registration of the template - r rather copy template to the store at the same path i.e., template_id and update template_store_ref only --- .../upgrade/SystemVmTemplateRegistration.java | 90 ++++++++++++++----- 
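A minimal, hedged sketch of the decision this patch's subject describes: when a system VM template for a hypervisor is already registered (it is cross-zone), no second vm_template row is created; the template is only seeded on the new zone's image store under the same template_id path and a template_store_ref entry is recorded. Every name below is a hypothetical stand-in for illustration, not the actual CloudStack API.

// Hedged sketch only: reuse-vs-register decision for system VM templates.
// All method names are hypothetical stand-ins, not CloudStack methods.
public class TemplateSeedingSketch {

    // Pretend lookup: returns an existing cross-zone template id, or -1 if none.
    static long getRegisteredTemplateId(String hypervisor) {
        return "kvm".equals(hypervisor) ? 201L : -1L;
    }

    // Hypothetical stand-in for "copy the template to the store at the same template_id path".
    static void seedTemplateOnStore(long templateId, long storeId) {
        System.out.printf("seed template %d on store %d (same install path)%n", templateId, storeId);
    }

    // Hypothetical stand-in for "update template_store_ref only".
    static void addTemplateStoreRef(long templateId, long storeId) {
        System.out.printf("add/update template_store_ref for template %d, store %d%n", templateId, storeId);
    }

    // Hypothetical stand-in for the full first-time registration path.
    static void registerNewTemplate(String hypervisor, long storeId) {
        System.out.printf("register new system VM template for %s on store %d%n", hypervisor, storeId);
    }

    public static void main(String[] args) {
        long storeId = 42L;
        for (String hypervisor : new String[] {"kvm", "vmware"}) {
            long templateId = getRegisteredTemplateId(hypervisor);
            if (templateId != -1L) {
                // Already registered elsewhere: do not create a second vm_template row.
                seedTemplateOnStore(templateId, storeId);
                addTemplateStoreRef(templateId, storeId);
            } else {
                registerNewTemplate(hypervisor, storeId);
            }
        }
    }
}

The actual changes to SystemVmTemplateRegistration.java and StorageManagerImpl.java in this patch implement this flow with the existing store-mount and database helpers.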
.../com/cloud/storage/StorageManagerImpl.java | 12 ++- 2 files changed, 75 insertions(+), 27 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 74b651679d51..c7488d1833e6 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -18,6 +18,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.VMTemplateVO; import com.cloud.utils.DateUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; @@ -27,7 +28,6 @@ import org.apache.log4j.Logger; import org.ini4j.Ini; -import javax.naming.ConfigurationException; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; @@ -498,14 +498,17 @@ private static void createTemplateStoreRefEntry(Connection conn, SystemVMTemplat } } - public static void updateDb(Connection conn, SystemVMTemplateDetails details) { + public static void updateDb(Connection conn, SystemVMTemplateDetails details, boolean updateTemplateDetails) { try { int i = 1; - PreparedStatement pstmt = conn.prepareStatement(UPDATE_VM_TEMPLATE_ENTRY); - if (pstmt != null) { - pstmt.setLong(i++, details.getSize()); - pstmt.setLong(i++, details.getId()); - pstmt.executeUpdate(); + PreparedStatement pstmt = null; + if (updateTemplateDetails) { + pstmt = conn.prepareStatement(UPDATE_VM_TEMPLATE_ENTRY); + if (pstmt != null) { + pstmt.setLong(i++, details.getSize()); + pstmt.setLong(i++, details.getId()); + pstmt.executeUpdate(); + } } i = 1; pstmt = conn.prepareStatement(UPDATE_TEMPLATE_STORE_REF_TABLE); @@ -613,6 +616,60 @@ public static void unmountStore() { throw new CloudRuntimeException(msg, e); } } + + private static void setupTemplate(String templateName, Pair hypervisorAndTemplateName, + String destTempFolder) throws CloudRuntimeException{ + String storageScriptsDir = "scripts/storage/secondary"; + String setupTmpltScript = Script.findScript(storageScriptsDir, "setup-sysvm-tmplt"); + if (setupTmpltScript == null) { + throw new CloudRuntimeException("Unable to find the createtmplt.sh"); + } + Script scr = new Script(setupTmpltScript, SCRIPT_TIMEOUT, LOGGER); + scr.add("-u", templateName); + scr.add("-f", TEMPLATES_PATH + fileNames.get(hypervisorAndTemplateName.first())); + scr.add("-h", hypervisorAndTemplateName.first().name().toLowerCase(Locale.ROOT)); + scr.add("-d", destTempFolder); + String result = scr.execute(); + if (result != null) { + String errMsg = String.format("failed to create template: %s ", result); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + + } + + public static void registerTemplate(Connection conn, Pair hypervisorAndTemplateName, + Pair storeUrlAndId, VMTemplateVO templateVO) { + Long templateId = null; + try { + Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); + final String templateName = UUID.randomUUID().toString(); + Date created = new Date(DateUtil.currentGMTTime().getTime()); + SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, hypervisorAndTemplateName.second(), created, + templateVO.getUrl(), templateVO.getChecksum(), templateVO.getFormat(), (int) templateVO.getGuestOSId(), templateVO.getHypervisorType(), + storeUrlAndId.second()); + templateId = templateVO.getId(); + details.setId(templateId); + String destTempFolderName = 
String.valueOf(templateId); + String destTempFolder = PARENT_TEMPLATE_FOLDER + PARTIAL_TEMPLATE_FOLDER + destTempFolderName; + details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." + hypervisorImageFormat.get(hypervisor).getFileExtension()); + createTemplateStoreRefEntry(conn, details); + setupTemplate(templateName, hypervisorAndTemplateName, destTempFolder); + details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." + hypervisorImageFormat.get(hypervisor).getFileExtension()); + readTemplateProperties(destTempFolder + "/template.properties", details); + details.setUpdated(new Date(DateUtil.currentGMTTime().getTime())); + updateDb(conn, details, false); + } catch (Exception e) { + String errMsg = String.format("Failed to register template for hypervisor: %s", hypervisorAndTemplateName.first()); + LOGGER.error(errMsg, e); + if (templateId != null) { + updateTemplateTablesOnFailure(conn, templateId); + cleanupStore(templateId); + } + throw new CloudRuntimeException(errMsg, e); + } + + } public static void registerTemplate(Connection conn, Pair hypervisorAndTemplateName, Pair storeUrlAndId) { Long templateId = null; try { @@ -630,26 +687,11 @@ public static void registerTemplate(Connection conn, Pair configParams = new HashMap<>(); configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." + CS_TINY_VERSION); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 3dec909ccac2..4903b2933403 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -2768,8 +2768,10 @@ public ImageStore discoverImageStore(String name, String url, String providerNam Pair hypervisorAndTemplateName = new Pair<>(hypervisorType, templateName); long templateId = SystemVmTemplateRegistration.isTemplateAlreadyRegistered(conn, hypervisorAndTemplateName); + VMTemplateVO vmTemplateVO = _templateDao.findById(templateId); + TemplateDataStoreVO templateVO = null; if (templateId != -1) { - TemplateDataStoreVO templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); + templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); if (templateVO != null) { if (SystemVmTemplateRegistration.validateIfSeeded(url, templateVO.getInstallPath())) { continue; @@ -2777,9 +2779,13 @@ public ImageStore discoverImageStore(String name, String url, String providerNam } } SystemVmTemplateRegistration.mountStore(storeUrlAndId.first()); - SystemVmTemplateRegistration.registerTemplate(conn, hypervisorAndTemplateName, storeUrlAndId); + if (templateVO != null && vmTemplateVO != null) { + SystemVmTemplateRegistration.registerTemplate(conn, hypervisorAndTemplateName, storeUrlAndId, vmTemplateVO); + } else { + SystemVmTemplateRegistration.registerTemplate(conn, hypervisorAndTemplateName, storeUrlAndId); + } } catch (CloudRuntimeException e) { - s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name())); + s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name()), e); } } } catch (SQLException e) { From 5a9274028b72bc732cbeac7e4efc3ed75e053190 Mon Sep 17 00:00:00 
2001 From: Pearl Dsilva Date: Wed, 25 Aug 2021 19:15:42 +0530 Subject: [PATCH 106/117] Addressed comments: 1. random temporary mount directory for secondary stores 2. use a java utility to do checksum validation 3. Use Dao for queries 4. mvn build - checksum validation to overcome caching issue 5. modularized code, and other changes --- engine/schema/pom.xml | 83 ++- .../com/cloud/storage/dao/VMTemplateDao.java | 2 + .../cloud/storage/dao/VMTemplateDaoImpl.java | 14 + .../cloud/upgrade/DatabaseUpgradeChecker.java | 3 +- .../upgrade/SystemVmTemplateRegistration.java | 663 +++++++++--------- .../dao/BasicTemplateDataStoreDaoImpl.java | 236 +++++++ .../upgrade/dao/Upgrade41510to41600.java | 26 +- .../java/com/cloud/vm/dao/VMInstanceDao.java | 3 + .../com/cloud/vm/dao/VMInstanceDaoImpl.java | 22 + .../storage/datastore/db/ImageStoreDao.java | 4 + .../datastore/db/ImageStoreDaoImpl.java | 37 +- engine/schema/templateConfig.sh | 1 - engine/schema/validate-checksum | 33 - packaging/centos7/cloud.spec | 1 - .../com/cloud/storage/StorageManagerImpl.java | 89 +-- 15 files changed, 753 insertions(+), 464 deletions(-) create mode 100644 engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java delete mode 100644 engine/schema/validate-checksum diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index 234c7ed03a28..acd49a900a03 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -61,35 +61,26 @@ - org.codehaus.gmavenplus - gmavenplus-plugin - ${cs.gmaven.version} + org.codehaus.gmaven + gmaven-plugin + 1.5 - generate-sources + setproperty + validate execute + + + def projectVersion = project.version + String[] versionParts = projectVersion.tokenize('.') + pom.properties['cs.version'] = versionParts[0] + "." + versionParts[1] + pom.properties['patch.version'] = versionParts[2] + + - - true - - log.info "$project.version" - def projectVersion = project.version - String[] versionParts = projectVersion.split("\\.") - project.properties['cs.version'] = versionParts[0] + "." 
+ versionParts[1] - project.properties['patch.version'] = versionParts[2] - - - - - org.codehaus.groovy - groovy-all - ${cs.groovy.version} - runtime - - com.googlecode.maven-download-plugin @@ -98,7 +89,7 @@ download-checksums - generate-resources + validate wget @@ -112,21 +103,36 @@ - org.codehaus.mojo - exec-maven-plugin - 1.2.1 + org.codehaus.gmaven + gmaven-plugin + 1.5 - validate-checksums - generate-resources + set-properties + generate-sources - exec + execute - bash - - validate-checksum - + + def csVersion = pom.properties['cs.version'] + def patch = pom.properties['patch.version'] + def templateList = [] + templateList.add("systemvmtemplate-${csVersion}.${patch}-kvm") + templateList.add("systemvmtemplate-${csVersion}.${patch}-vmware") + templateList.add("systemvmtemplate-${csVersion}.${patch}-xen") + templateList.add("systemvmtemplate-${csVersion}.${patch}-ovm") + templateList.add("systemvmtemplate-${csVersion}.${patch}-hyperv") + File file = new File("./engine/schema/dist/systemvm-templates/md5sum.txt") + def lines = file.readLines() + for (template in templateList) { + def data = lines.findAll { it.contains(template) } + if (data != null) { + def hypervisor = template.tokenize('-')[-1] + pom.properties["$hypervisor" + ".checksum"] = data[0].tokenize(' ')[0] + } + } + @@ -165,6 +171,11 @@ + + org.apache.maven.plugins + maven-resources-plugin + ${cs.resources-plugin.version} + com.googlecode.maven-download-plugin download-maven-plugin @@ -176,8 +187,10 @@ wget + true https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-kvm.qcow2.bz2 ${basedir}/dist/systemvm-templates/ + ${kvm.checksum} @@ -186,8 +199,10 @@ wget + true https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-vmware.ova ${basedir}/dist/systemvm-templates/ + ${vmware.checksum} @@ -196,8 +211,10 @@ wget + true https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-xen.vhd.bz2 ${basedir}/dist/systemvm-templates/ + ${xen.checksum} diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index 699336bc2c7b..e8e9208b4ce9 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -81,4 +81,6 @@ public interface VMTemplateDao extends GenericDao, StateDao< void saveDetails(VMTemplateVO tmpl); List listByParentTemplatetId(long parentTemplatetId); + + VMTemplateVO findLatestTemplateByName(String name); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index b7e55b69b450..927193f35377 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -229,6 +229,20 @@ public List listReadyTemplates() { return listIncludingRemovedBy(sc); } + + @Override + public VMTemplateVO findLatestTemplateByName(String name) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("name", SearchCriteria.Op.EQ, name); + sc.addAnd("removed", SearchCriteria.Op.NULL); + Filter filter = new Filter(VMTemplateVO.class, "id", false, null, 1L); + List templates = listBy(sc, filter); + if ((templates != null) && !templates.isEmpty()) { + return templates.get(0); + } + return null; + } + @Override 
public List findIsosByIdAndPath(Long domainId, Long accountId, String path) { SearchCriteria sc = createSearchCriteria(); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index d96200983e0a..79e6036d58c0 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -272,8 +272,6 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer final DbUpgrade[] upgrades = calculateUpgradePath(dbVersion, currentVersion); - updateSystemVmTemplates(upgrades); - for (DbUpgrade upgrade : upgrades) { VersionVO version; s_logger.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade @@ -344,6 +342,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer txn.close(); } } + updateSystemVmTemplates(upgrades); } @Override diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index c7488d1833e6..6bddbb7f7533 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -16,25 +16,53 @@ // under the License. package com.cloud.upgrade; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.ClusterDaoImpl; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DataCenterDaoImpl; import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateDaoImpl; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.upgrade.dao.BasicTemplateDataStoreDaoImpl; +import com.cloud.user.Account; import com.cloud.utils.DateUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; +import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.dao.VMInstanceDaoImpl; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.dao.ConfigurationDaoImpl; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDaoImpl; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.log4j.Logger; import org.ini4j.Ini; +import javax.inject.Inject; import java.io.BufferedReader; import java.io.File; -import java.io.FileInputStream; import java.io.FileReader; import java.io.IOException; +import java.io.InputStream; import java.net.URI; 
-import java.security.MessageDigest; +import java.nio.file.Files; +import java.nio.file.Paths; import java.sql.Connection; import java.sql.Date; import java.sql.PreparedStatement; @@ -46,6 +74,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; @@ -60,27 +89,43 @@ public class SystemVmTemplateRegistration { private static final String TEMPLATES_PATH = fetchTemplatesPath(); private static final String METADATA_FILE_NAME = "metadata.ini"; private static final String METADATA_FILE = TEMPLATES_PATH + METADATA_FILE_NAME; - private static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; - private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; - private static final String PARTIAL_TEMPLATE_FOLDER = "/template/tmpl/1/"; - private static final String FETCH_DISTINCT_ELIGIBLE_ZONES = "SELECT DISTINCT(data_center_id) FROM `cloud`.`image_store` WHERE protocol = \"nfs\" AND role = \"Image\" AND removed is null"; - private static final String FETCH_DISTINCT_HYPERVISORS_IN_ZONE = "SELECT DISTINCT(hypervisor_type) FROM `cloud`.`cluster` where removed is null AND data_center_id=?"; - private static final String FETCH_IMAGE_STORE_PER_ZONE = "SELECT url,id FROM `cloud`.`image_store` WHERE data_center_id=? AND role = \"Image\" AND image_provider_name = \"NFS\" AND removed IS NULL LIMIT 1"; - private static final String INSERT_VM_TEMPLATE_TABLE = "INSERT INTO `cloud`.`vm_template` (uuid, unique_name, name, public, featured, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, cross_zones, hypervisor_type, state, deploy_as_is)" + - "VALUES (?, ?, ?, 0, 0, ?, 'SYSTEM', 0, 64, 1, ?, ?, 0, ?, ?, ?, 1, ?, 'Inactive', ?)"; - private static final String INSERT_TEMPLATE_STORE_REF_TABLE = "INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, download_state, error_str, local_path, install_path, url, state, destroyed, is_copy," + - " update_count, ref_cnt, store_role) VALUES (?, ?, ?, ?, NULL, 0, 'NOT_DOWNLOADED', NULL, NULL, ?, ?, 'Allocated', 0, 0, 0, 0, 'Image')"; - private static final String UPDATE_TEMPLATE_STORE_REF_TABLE = "UPDATE `cloud`.`template_store_ref` SET download_pct=100, download_state='DOWNLOADED', " + - "state='Ready', size=?, physical_size=?, last_updated=?, updated=? where template_id=?"; - private static final String UPDATE_VM_TEMPLATE_ENTRY = "UPDATE `cloud`.`vm_template` set size = ?, state = 'Active' where id = ?"; - private static final String UPDATE_CONFIGURATION_TABLE = "UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?"; - private static final String UPDATE_TEMPLATE_TABLE_ON_FAILURE = "UPDATE `cloud`.`vm_template` set removed = ?, state = 'Inactive' where id = ?"; - private static final String DELETE_TEMPLATE_REF_RECORD_ON_FAILURE = "DELETE from `cloud`.`template_store_ref` where template_id = ?"; + public static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; + // private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; + private static final String PARTIAL_TEMPLATE_FOLDER = String.format("/template/tmpl/%d/", Account.ACCOUNT_ID_SYSTEM); + private static final String FETCH_REGISTERED_TEMPLATE_INSTALL_PATH = "SELECT install_path FROM `cloud`.`template_store_ref` where template_id = ? 
LIMIT 1"; + private static final String storageScriptsDir = "scripts/storage/secondary"; private static final Integer SCRIPT_TIMEOUT = 1800000; private static final Integer LOCK_WAIT_TIMEOUT = 1200; + private static final Integer TOKEN_LENGTH = 10; + public static String CS_MAJOR_VERSION = "4.16"; public static String CS_TINY_VERSION = "0"; + @Inject + DataCenterDao dataCenterDao; + @Inject + VMTemplateDao vmTemplateDao; + @Inject + TemplateDataStoreDao templateDataStoreDao; + @Inject + VMInstanceDao vmInstanceDao; + @Inject + ImageStoreDao imageStoreDao; + @Inject + ClusterDao clusterDao; + @Inject + ConfigurationDao configurationDao; + + public SystemVmTemplateRegistration() { + dataCenterDao = new DataCenterDaoImpl(); + vmTemplateDao = new VMTemplateDaoImpl(); + templateDataStoreDao = new BasicTemplateDataStoreDaoImpl(); + vmInstanceDao = new VMInstanceDaoImpl(); + imageStoreDao = new ImageStoreDaoImpl(); + clusterDao = new ClusterDaoImpl(); + configurationDao = new ConfigurationDaoImpl(); + } + private static class SystemVMTemplateDetails { Long id; String uuid; @@ -99,9 +144,6 @@ private static class SystemVMTemplateDetails { boolean deployAsIs; Date updated; - SystemVMTemplateDetails() { - } - SystemVMTemplateDetails(String uuid, String name, Date created, String url, String checksum, ImageFormat format, Integer guestOsId, Hypervisor.HypervisorType hypervisorType, Long storeId) { @@ -218,9 +260,9 @@ public void setUpdated(Date updated) { ); public static final Map NewTemplateNameList = new HashMap(); - public static final Map fileNames = new HashMap(); - public static final Map newTemplateUrl = new HashMap(); - public static final Map newTemplateChecksum = new HashMap(); + public static final Map FileNames = new HashMap(); + public static final Map NewTemplateUrl = new HashMap(); + public static final Map NewTemplateChecksum = new HashMap(); public static final Map routerTemplateConfigurationNames = new HashMap() { { @@ -255,12 +297,24 @@ public void setUpdated(Date updated) { } }; + public static String generateToken(int length) { + String charset = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + Random rand = new Random(System.currentTimeMillis()); + StringBuffer sb = new StringBuffer(); + for (int i = 0; i < length; i++) { + int pos = rand.nextInt(charset.length()); + sb.append(charset.charAt(pos)); + } + return sb.toString(); + } + public static boolean validateIfSeeded(String url, String path) { + String filePath = TEMPORARY_SECONDARY_STORE + generateToken(TOKEN_LENGTH); try { - mountStore(url); + mountStore(url, filePath); int lastIdx = path.lastIndexOf(File.separator); String partialDirPath = path.substring(0, lastIdx); - String templatePath = TEMPORARY_SECONDARY_STORE + File.separator + partialDirPath; + String templatePath = filePath + File.separator + partialDirPath; File templateProps = new File(templatePath + "/template.properties"); if (templateProps.exists()) { LOGGER.info("SystemVM template already seeded, skipping registration"); @@ -269,58 +323,28 @@ public static boolean validateIfSeeded(String url, String path) { LOGGER.info("SystemVM template not seeded"); return false; } catch (Exception e) { + LOGGER.error("Failed to verify if the template is seeded", e); throw new CloudRuntimeException("Failed to verify if the template is seeded", e); } finally { - unmountStore(); + unmountStore(filePath); } } - private static String calculateChecksum(MessageDigest digest, File file) { - try { - FileInputStream fis = new FileInputStream(file); - byte[] byteArray = new byte[1024]; - 
int bytesCount = 0; - - while ((bytesCount = fis.read(byteArray)) != -1) { - digest.update(byteArray, 0, bytesCount); - } - - fis.close(); - byte[] bytes = digest.digest(); - - StringBuilder sb = new StringBuilder(); - for (byte aByte : bytes) { - sb.append(Integer - .toString((aByte & 0xff) + 0x100, 16) - .substring(1)); - } - return sb.toString(); + private String calculateChecksum(File file) { + try (InputStream is = Files.newInputStream(Paths.get(file.getPath()))) { + return DigestUtils.md5Hex(is); } catch (IOException e) { - String errMsg = String.format("Failed to calculate Checksum of template file: %s ", file.getName()); + String errMsg = "Failed to calculate template checksum"; LOGGER.error(errMsg, e); throw new CloudRuntimeException(errMsg, e); } } - public static long isTemplateAlreadyRegistered(Connection conn, Pair hypervisorAndTemplateName) { + public long getRegisteredTemplateId(Pair hypervisorAndTemplateName) { + VMTemplateVO vmTemplate = vmTemplateDao.findLatestTemplateByName(hypervisorAndTemplateName.second()); long templateId = -1; - try { - PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? and removed is null order by id desc limit 1"); - // Get systemvm template id for corresponding hypervisor - pstmt.setString(1, hypervisorAndTemplateName.second()); - try (ResultSet rs = pstmt.executeQuery()) { - if (rs.next()) { - templateId = rs.getLong(1); - } - } catch (final SQLException e) { - String errMsg = "updateSystemVmTemplates: Exception caught while getting ids of SystemVM templates"; - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); - } - } catch (SQLException e) { - String errorMessage = "Unable to upgrade the database"; - LOGGER.error(errorMessage, e); - throw new CloudRuntimeException(errorMessage, e); + if (vmTemplate != null) { + templateId = vmTemplate.getId(); } return templateId; } @@ -347,7 +371,7 @@ private static String fetchTemplatesPath() { return templatePath; } - private static String getHypervisorName(String name) { + private String getHypervisorName(String name) { if (name.equals("xenserver")) { return "xen"; } @@ -358,7 +382,7 @@ private static String getHypervisorName(String name) { } - private static Hypervisor.HypervisorType getHypervisorType(String hypervisor) { + private Hypervisor.HypervisorType getHypervisorType(String hypervisor) { if (hypervisor.equalsIgnoreCase("xen")) { hypervisor = "xenserver"; } else if (hypervisor.equalsIgnoreCase("ovm")) { @@ -367,196 +391,120 @@ private static Hypervisor.HypervisorType getHypervisorType(String hypervisor) { return Hypervisor.HypervisorType.getType(hypervisor); } - private static List getEligibleZoneIds(Connection conn) { - List zones = new ArrayList(); - try { - PreparedStatement pstmt = conn.prepareStatement(FETCH_DISTINCT_ELIGIBLE_ZONES); - ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - zones.add(rs.getLong(1)); + private List getEligibleZoneIds() { + List zoneIds = new ArrayList<>(); + List stores = imageStoreDao.findByProtocol("nfs"); + for (ImageStoreVO store : stores) { + if (!zoneIds.contains(store.getDataCenterId())) { + zoneIds.add(store.getDataCenterId()); } - } catch (SQLException e) { - String errMsg = "Failed to fetch eligible zones for SystemVM template registration"; - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); } - return zones; + return zoneIds; } - private static Pair getNfsStoreInZone(Connection conn, Long zoneId) { + private Pair getNfsStoreInZone(Long zoneId) { String 
url = null; Long storeId = null; - try { - PreparedStatement pstmt = conn.prepareStatement(FETCH_IMAGE_STORE_PER_ZONE); - if(pstmt != null) { - pstmt.setLong(1, zoneId); - ResultSet resultSet = pstmt.executeQuery(); - while (resultSet.next()) { - url = resultSet.getString(1); - storeId = resultSet.getLong(2); - } - } - } catch (SQLException e) { + ImageStoreVO storeVO = imageStoreDao.findOneByZoneAndProtocol(zoneId, "nfs"); + if (storeVO == null) { String errMsg = String.format("Failed to fetch NFS store in zone = %s for SystemVM template registration", zoneId); - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); - } - if (url == null || storeId == null) { - throw new CloudRuntimeException(String.format("Failed to get an NFS store in zone: %s", zoneId)); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); } + url = storeVO.getUrl(); + storeId = storeVO.getId(); return new Pair<>(url, storeId); } - public static void mountStore(String storeUrl) { + public static void mountStore(String storeUrl, String path) { try { if (storeUrl != null) { URI uri = new URI(UriUtils.encodeURIComponent(storeUrl)); String host = uri.getHost(); String mountPath = uri.getPath(); - Script.runSimpleBashScript("mkdir -p " + TEMPORARY_SECONDARY_STORE); - String mount = String.format(MOUNT_COMMAND, host + ":" + mountPath, TEMPORARY_SECONDARY_STORE); + Script.runSimpleBashScript("mkdir -p " + path); + String mount = String.format(MOUNT_COMMAND, host + ":" + mountPath, path); Script.runSimpleBashScript(mount); } } catch (Exception e) { String msg = "NFS Store URL is not in the correct format"; LOGGER.error(msg, e); throw new CloudRuntimeException(msg, e); - } } - private static List fetchAllHypervisors(Connection conn, Long zoneId) { + private List fetchAllHypervisors(Long zoneId) { List hypervisorList = new ArrayList<>(); - try { - PreparedStatement pstmt = conn.prepareStatement(FETCH_DISTINCT_HYPERVISORS_IN_ZONE); - if(pstmt != null) { - pstmt.setLong(1, zoneId); - ResultSet resultSet = pstmt.executeQuery(); - while (resultSet.next()) { - hypervisorList.add(resultSet.getString(1)); - } - } - } catch (SQLException e) { - String errMsg = String.format("Failed to fetch distinct hypervisors in zone: %s for SystemVM template registration", zoneId); - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); - } + List hypervisorTypes = clusterDao.getAvailableHypervisorInZone(zoneId); + hypervisorList = hypervisorTypes.stream().distinct().map(Enum::name).collect(Collectors.toList()); return hypervisorList; } - private static Long createTemplateObjectInDB(Connection conn, SystemVMTemplateDetails details) { - Long id = null; - try { - PreparedStatement pstmt = conn.prepareStatement(INSERT_VM_TEMPLATE_TABLE); - if (pstmt != null) { - int i = 1; - pstmt.setString(i++, details.getUuid()); - pstmt.setString(i++, details.getUuid()); - pstmt.setString(i++, details.getName()); - pstmt.setDate(i++, details.getCreated()); - pstmt.setString(i++, details.getUrl()); - pstmt.setString(i++, details.getChecksum()); - pstmt.setString(i++, details.getName()); - pstmt.setString(i++, details.getFormat().toString()); - pstmt.setLong(i++, details.getGuestOsId()); - pstmt.setString(i++, details.getHypervisorType().toString()); - pstmt.setBoolean(i++, details.getHypervisorType() == Hypervisor.HypervisorType.VMware); - pstmt.executeUpdate(); - - pstmt = conn.prepareStatement("SELECT id FROM vm_template ORDER BY id DESC LIMIT 1"); - try (ResultSet rs = pstmt.executeQuery()) { - if (rs.next()) { - id = 
rs.getLong(1); - } - } catch (final SQLException e) { - String errMsg = "Failed to fetch template id "; - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); - } - } - } catch (Exception e) { - throw new CloudRuntimeException(String.format("Failed to create vm_template record for the systemVM template for hypervisor: %s", details.getHypervisorType().name()), e); - } - return id; + private Long createTemplateObjectInDB(SystemVMTemplateDetails details) { + VMTemplateVO template = new VMTemplateVO(); + template.setUuid(details.getUuid()); + template.setUniqueName(details.getUuid()); + template.setName(details.getName()); + template.setPublicTemplate(false); + template.setFeatured(false); + template.setTemplateType(Storage.TemplateType.SYSTEM); + template.setRequiresHvm(true); + template.setBits(64); + template.setAccountId(Account.ACCOUNT_ID_SYSTEM); + template.setUrl(details.getUrl()); + template.setChecksum(details.getChecksum()); + template.setEnablePassword(false); + template.setDisplayText(details.getName()); + template.setFormat(details.getFormat()); + template.setGuestOSId(details.getGuestOsId()); + template.setCrossZones(true); + template.setHypervisorType(details.getHypervisorType()); + template.setState(VirtualMachineTemplate.State.Inactive); + template.setDeployAsIs(Hypervisor.HypervisorType.VMware.equals(details.getHypervisorType())); + template = vmTemplateDao.persist(template); + if (template == null) { + return null; + } + return template.getId(); } - private static void createTemplateStoreRefEntry(Connection conn, SystemVMTemplateDetails details) { - try { - PreparedStatement pstmt = conn.prepareStatement(INSERT_TEMPLATE_STORE_REF_TABLE); - if (pstmt != null) { - int i = 1; - pstmt.setLong(i++, details.getStoreId()); - pstmt.setLong(i++, details.getId()); - pstmt.setDate(i++, details.getCreated()); - pstmt.setDate(i++, details.getCreated()); - pstmt.setString(i++, details.getInstallPath()); - pstmt.setString(i++, details.getUrl()); - pstmt.executeUpdate(); - } - } catch (Exception e) { - throw new CloudRuntimeException(String.format("Failed to create template_store_ref record for the systemVM template for hypervisor: %s", details.getHypervisorType().name()), e); + private void createTemplateStoreRefEntry(SystemVMTemplateDetails details) { + TemplateDataStoreVO templateDataStoreVO = new TemplateDataStoreVO(details.storeId, details.getId(), details.getCreated(), 0, + VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED, null, null, null, details.getInstallPath(), details.getUrl()); + templateDataStoreVO.setDataStoreRole(DataStoreRole.Image); + templateDataStoreVO = templateDataStoreDao.persist(templateDataStoreVO); + if (templateDataStoreVO == null) { + throw new CloudRuntimeException(String.format("Failed to create template_store_ref record for the systemVM template for hypervisor: %s", details.getHypervisorType().name())); } } - public static void updateDb(Connection conn, SystemVMTemplateDetails details, boolean updateTemplateDetails) { - try { - int i = 1; - PreparedStatement pstmt = null; - if (updateTemplateDetails) { - pstmt = conn.prepareStatement(UPDATE_VM_TEMPLATE_ENTRY); - if (pstmt != null) { - pstmt.setLong(i++, details.getSize()); - pstmt.setLong(i++, details.getId()); - pstmt.executeUpdate(); - } - } - i = 1; - pstmt = conn.prepareStatement(UPDATE_TEMPLATE_STORE_REF_TABLE); - if (pstmt != null) { - pstmt.setLong(i++, details.getSize()); - pstmt.setLong(i++, details.getPhysicalSize()); - pstmt.setDate(i++, details.getUpdated()); - 
pstmt.setDate(i++, details.getUpdated()); - pstmt.setLong(i++, details.getId()); - pstmt.executeUpdate(); - } - } catch (Exception e) { - throw new CloudRuntimeException(String.format("Failed to update template_store_ref record for the systemVM template registered for hypervisor: %s", details.getHypervisorType().name()), e); + public void updateTemplateDetails(SystemVMTemplateDetails details, boolean updateTemplateDetails) { + VMTemplateVO template = vmTemplateDao.findById(details.getId()); + if (updateTemplateDetails) { + template.setSize(details.getSize()); + template.setState(VirtualMachineTemplate.State.Active); + vmTemplateDao.update(template.getId(), template); + } + TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(details.getStoreId(), template.getId()); + templateDataStoreVO.setSize(details.getSize()); + templateDataStoreVO.setPhysicalSize(details.getPhysicalSize()); + templateDataStoreVO.setDownloadPercent(100); + templateDataStoreVO.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + templateDataStoreVO.setLastUpdated(details.getUpdated()); + templateDataStoreVO.setState(ObjectInDataStoreStateMachine.State.Ready); + boolean updated = templateDataStoreDao.update(templateDataStoreVO.getId(), templateDataStoreVO); + if (!updated) { + throw new CloudRuntimeException("Failed to update template_store_ref entry for registered systemVM template"); } } - public static void updateSystemVMEntries(Connection conn, Long templateId, Pair hypervisorAndTemplateName) { - // update template ID of system Vms - try { - PreparedStatement update_templ_id_pstmt = conn - .prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? and removed is NULL"); - update_templ_id_pstmt.setLong(1, templateId); - update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.first().toString()); - update_templ_id_pstmt.executeUpdate(); - } catch (SQLException e) { - String errMsg = String.format("updateSystemVmTemplates:Exception while setting template for %s to %s",hypervisorAndTemplateName.first().toString(), templateId); - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); - } + public void updateSystemVMEntries(Long templateId, Hypervisor.HypervisorType hypervisorType) { + vmInstanceDao.updateSystemVmTemplateId(templateId, hypervisorType); } - public static void updateConfigurationParams(Connection conn, Map configParams) { - String key = null; - String value = null; - try { - PreparedStatement pstmt = conn.prepareStatement(UPDATE_CONFIGURATION_TABLE); - for (Map.Entry config : configParams.entrySet()) { - key = config.getKey(); - value = config.getValue(); - pstmt.setString(1, value); - pstmt.setString(2, key); - pstmt.executeUpdate(); - } - - } catch (final SQLException e) { - String errMsg = String.format("updateSystemVmTemplates: Exception while setting %s to %s ", key, value); - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); + public void updateConfigurationParams(Map configParams) { + for (Map.Entry config : configParams.entrySet()) { + configurationDao.update(config.getKey(), config.getValue()); } } @@ -583,50 +531,41 @@ private static void readTemplateProperties(String path, SystemVMTemplateDetails details.setPhysicalSize(physicalSize); } - private static void updateTemplateTablesOnFailure(Connection conn, long templateId) { - try { - PreparedStatement pstmt = conn.prepareStatement(UPDATE_TEMPLATE_TABLE_ON_FAILURE); - if (pstmt != null) { - Date 
removedTime = new Date(DateUtil.currentGMTTime().getTime()); - pstmt.setDate(1, removedTime); - pstmt.setLong(2, templateId); - pstmt.executeUpdate(); - } - - PreparedStatement pstmt1 = conn.prepareStatement(DELETE_TEMPLATE_REF_RECORD_ON_FAILURE); - if (pstmt1 != null) { - pstmt1.setLong(1, templateId); - pstmt1.executeUpdate(); - } - } catch (Exception e) { - String errMsg = "updateSystemVmTemplates: Exception while updating vm_template and template_store_ref tables on failure"; - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); - } + private void updateTemplateTablesOnFailure(long templateId) { + VMTemplateVO template = vmTemplateDao.createForUpdate(templateId); + template.setState(VirtualMachineTemplate.State.Inactive); + vmTemplateDao.update(template.getId(), template); + vmTemplateDao.remove(templateId); + TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(template.getId(), DataStoreRole.Image); + templateDataStoreDao.remove(templateDataStoreVO.getId()); } - public static void unmountStore() { + public static void unmountStore(String filePath) { try { LOGGER.info("Unmounting store"); - String umountCmd = String.format(UMOUNT_COMMAND, TEMPORARY_SECONDARY_STORE); + String umountCmd = String.format(UMOUNT_COMMAND, filePath); Script.runSimpleBashScript(umountCmd); + try { + Files.deleteIfExists(Paths.get(filePath)); + } catch (IOException e) { + LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e); + } } catch (Exception e) { - String msg = String.format("Failed to unmount store mounted at %s", TEMPORARY_SECONDARY_STORE); + String msg = String.format("Failed to unmount store mounted at %s", filePath); LOGGER.error(msg, e); throw new CloudRuntimeException(msg, e); } } - private static void setupTemplate(String templateName, Pair hypervisorAndTemplateName, - String destTempFolder) throws CloudRuntimeException{ - String storageScriptsDir = "scripts/storage/secondary"; + private void setupTemplate(String templateName, Pair hypervisorAndTemplateName, + String destTempFolder) throws CloudRuntimeException { String setupTmpltScript = Script.findScript(storageScriptsDir, "setup-sysvm-tmplt"); if (setupTmpltScript == null) { throw new CloudRuntimeException("Unable to find the createtmplt.sh"); } Script scr = new Script(setupTmpltScript, SCRIPT_TIMEOUT, LOGGER); scr.add("-u", templateName); - scr.add("-f", TEMPLATES_PATH + fileNames.get(hypervisorAndTemplateName.first())); + scr.add("-f", TEMPLATES_PATH + FileNames.get(hypervisorAndTemplateName.first())); scr.add("-h", hypervisorAndTemplateName.first().name().toLowerCase(Locale.ROOT)); scr.add("-d", destTempFolder); String result = scr.execute(); @@ -638,71 +577,67 @@ private static void setupTemplate(String templateName, Pair hypervisorAndTemplateName, - Pair storeUrlAndId, VMTemplateVO templateVO) { + private Long performTemplateRegistrationOperations(Pair hypervisorAndTemplateName, + String url, String checksum, ImageFormat format, long guestOsId, + Long storeId, Long templateId, String filePath, boolean updateTmpltDetails) { + final String templateName = UUID.randomUUID().toString(); + Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); + Date created = new Date(DateUtil.currentGMTTime().getTime()); + SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, hypervisorAndTemplateName.second(), created, + url, checksum, format, (int) guestOsId, hypervisor, storeId); + if (templateId == null) { + templateId = 
createTemplateObjectInDB(details); + } + if (templateId == null) { + throw new CloudRuntimeException(String.format("Failed to register template for hypervisor: %s", hypervisor.name())); + } + details.setId(templateId); + String destTempFolderName = String.valueOf(templateId); + String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + destTempFolderName; + details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." + hypervisorImageFormat.get(hypervisor).getFileExtension()); + createTemplateStoreRefEntry(details); + setupTemplate(templateName, hypervisorAndTemplateName, destTempFolder); + readTemplateProperties(destTempFolder + "/template.properties", details); + details.setUpdated(new Date(DateUtil.currentGMTTime().getTime())); + updateTemplateDetails(details, updateTmpltDetails); + return templateId; + } + + public void registerTemplate(Pair hypervisorAndTemplateName, + Pair storeUrlAndId, VMTemplateVO templateVO, String filePath) { Long templateId = null; try { - Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); - final String templateName = UUID.randomUUID().toString(); - Date created = new Date(DateUtil.currentGMTTime().getTime()); - SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, hypervisorAndTemplateName.second(), created, - templateVO.getUrl(), templateVO.getChecksum(), templateVO.getFormat(), (int) templateVO.getGuestOSId(), templateVO.getHypervisorType(), - storeUrlAndId.second()); templateId = templateVO.getId(); - details.setId(templateId); - String destTempFolderName = String.valueOf(templateId); - String destTempFolder = PARENT_TEMPLATE_FOLDER + PARTIAL_TEMPLATE_FOLDER + destTempFolderName; - details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." + hypervisorImageFormat.get(hypervisor).getFileExtension()); - createTemplateStoreRefEntry(conn, details); - setupTemplate(templateName, hypervisorAndTemplateName, destTempFolder); - details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." 
+ hypervisorImageFormat.get(hypervisor).getFileExtension()); - readTemplateProperties(destTempFolder + "/template.properties", details); - details.setUpdated(new Date(DateUtil.currentGMTTime().getTime())); - updateDb(conn, details, false); + performTemplateRegistrationOperations(hypervisorAndTemplateName, templateVO.getUrl(), templateVO.getChecksum(), + templateVO.getFormat(), templateVO.getGuestOSId(), storeUrlAndId.second(), templateId, filePath, false); } catch (Exception e) { String errMsg = String.format("Failed to register template for hypervisor: %s", hypervisorAndTemplateName.first()); LOGGER.error(errMsg, e); if (templateId != null) { - updateTemplateTablesOnFailure(conn, templateId); - cleanupStore(templateId); + updateTemplateTablesOnFailure(templateId); + cleanupStore(templateId, filePath); } throw new CloudRuntimeException(errMsg, e); } - } - public static void registerTemplate(Connection conn, Pair hypervisorAndTemplateName, Pair storeUrlAndId) { + + public void registerTemplate(Pair hypervisorAndTemplateName, Pair storeUrlAndId, String filePath) { Long templateId = null; try { Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); - final String templateName = UUID.randomUUID().toString(); - Date created = new Date(DateUtil.currentGMTTime().getTime()); - SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, hypervisorAndTemplateName.second(), created, - newTemplateUrl.get(hypervisor), newTemplateChecksum.get(hypervisor), hypervisorImageFormat.get(hypervisor), hypervisorGuestOsMap.get(hypervisor), hypervisor, storeUrlAndId.second()); - templateId = createTemplateObjectInDB(conn, details); - if (templateId == null) { - throw new CloudRuntimeException(String.format("Failed to register template for hypervisor: %s", hypervisor.name())); - } - details.setId(templateId); - String destTempFolderName = String.valueOf(templateId); - String destTempFolder = PARENT_TEMPLATE_FOLDER + PARTIAL_TEMPLATE_FOLDER + destTempFolderName; - details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." + hypervisorImageFormat.get(hypervisor).getFileExtension()); - createTemplateStoreRefEntry(conn, details); - setupTemplate(templateName, hypervisorAndTemplateName, destTempFolder); - details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." + hypervisorImageFormat.get(hypervisor).getFileExtension()); - readTemplateProperties(destTempFolder + "/template.properties", details); - details.setUpdated(new Date(DateUtil.currentGMTTime().getTime())); - updateDb(conn, details, true); + templateId = performTemplateRegistrationOperations(hypervisorAndTemplateName, NewTemplateUrl.get(hypervisor), NewTemplateChecksum.get(hypervisor), + hypervisorImageFormat.get(hypervisor), hypervisorGuestOsMap.get(hypervisor), storeUrlAndId.second(), null, filePath, true); Map configParams = new HashMap<>(); configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." 
+ CS_TINY_VERSION); - updateConfigurationParams(conn, configParams); - updateSystemVMEntries(conn, templateId, hypervisorAndTemplateName); + updateConfigurationParams(configParams); + updateSystemVMEntries(templateId, hypervisorAndTemplateName.first()); } catch (Exception e) { String errMsg = String.format("Failed to register template for hypervisor: %s", hypervisorAndTemplateName.first()); LOGGER.error(errMsg, e); if (templateId != null) { - updateTemplateTablesOnFailure(conn, templateId); - cleanupStore(templateId); + updateTemplateTablesOnFailure(templateId); + cleanupStore(templateId, filePath); } throw new CloudRuntimeException(errMsg, e); } @@ -716,9 +651,9 @@ public static void parseMetadataFile() { String hypervisor = hypervisorType.name().toLowerCase(Locale.ROOT); Ini.Section section = ini.get(hypervisor); NewTemplateNameList.put(hypervisorType, section.get("templatename")); - fileNames.put(hypervisorType, section.get("filename")); - newTemplateChecksum.put(hypervisorType, section.get("checksum")); - newTemplateUrl.put(hypervisorType, section.get("downloadurl")); + FileNames.put(hypervisorType, section.get("filename")); + NewTemplateChecksum.put(hypervisorType, section.get("checksum")); + NewTemplateUrl.put(hypervisorType, section.get("downloadurl")); } } catch (Exception e) { String errMsg = String.format("Failed to parse systemVM template metadata file: %s", METADATA_FILE); @@ -727,70 +662,110 @@ public static void parseMetadataFile() { } } - private static void cleanupStore(Long templateId) { - String destTempFolder = PARENT_TEMPLATE_FOLDER + PARTIAL_TEMPLATE_FOLDER + String.valueOf(templateId); - Script.runSimpleBashScript("rm -rf " + destTempFolder); + private static String getTemplateStoreDetails(Connection conn, Long templateId) { + String installPath = null; + try { + PreparedStatement pstmt = conn.prepareStatement(FETCH_REGISTERED_TEMPLATE_INSTALL_PATH); + pstmt.setLong(1, templateId); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + installPath = rs.getString(1); + } + } catch (SQLException e) { + String errMsg = String.format("Failed to fetch template store record for template with id: %s", templateId); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); + } + return installPath; + } + + private static void cleanupStore(Long templateId, String filePath) { + String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + String.valueOf(templateId); + try { + Files.deleteIfExists(Paths.get(destTempFolder)); + } catch (IOException e) { + LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e); + } } - public static void registerTemplates(Connection conn, Set hypervisorsInUse) { + private void validateTemplates(Set hypervisorsInUse) { + Set hypervisors = hypervisorsInUse.stream().map(Enum::name). 
+ map(name -> name.toLowerCase(Locale.ROOT)).map(this::getHypervisorName).collect(Collectors.toSet()); + List templates = new ArrayList<>(); + for (Hypervisor.HypervisorType hypervisorType : hypervisorsInUse) { + templates.add(FileNames.get(hypervisorType)); + } + + boolean templatesFound = true; + for (String hypervisor : hypervisors) { + String matchedTemplate = templates.stream().filter(x -> x.contains(hypervisor)).findAny().orElse(null); + if (matchedTemplate == null) { + templatesFound = false; + break; + } + + File tempFile = new File(TEMPLATES_PATH + matchedTemplate); + String templateChecksum = calculateChecksum(tempFile); + if (!templateChecksum.equals(NewTemplateChecksum.get(getHypervisorType(hypervisor)))) { + LOGGER.error(String.format("Checksum mismatch: %s != %s ", templateChecksum, NewTemplateChecksum.get(getHypervisorType(hypervisor)))); + templatesFound = false; + break; + } + } + + if (!templatesFound) { + String errMsg = "SystemVm template not found. Cannot upgrade system Vms"; + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + + public void registerTemplates(Connection conn, Set hypervisorsInUse) { GlobalLock lock = GlobalLock.getInternLock("UpgradeDatabase-Lock"); try { LOGGER.info("Grabbing lock to register templates."); if (!lock.lock(LOCK_WAIT_TIMEOUT)) { throw new CloudRuntimeException("Unable to acquire lock to register SystemVM template."); } - // Check if templates path exists try { - Set hypervisors = hypervisorsInUse.stream().map(Enum::name). - map(name -> name.toLowerCase(Locale.ROOT)).map(SystemVmTemplateRegistration::getHypervisorName).collect(Collectors.toSet()); - List templates = new ArrayList<>(); - for (Hypervisor.HypervisorType hypervisorType : hypervisorsInUse) { - templates.add(fileNames.get(hypervisorType)); - } - - boolean templatesFound = true; - for (String hypervisor : hypervisors) { - String matchedTemplate = templates.stream().filter(x -> x.contains(hypervisor)).findAny().orElse(null); - if (matchedTemplate == null) { - templatesFound = false; - break; - } - MessageDigest mdigest = MessageDigest.getInstance(HASH_ALGORITHM); - File tempFile = new File(TEMPLATES_PATH + matchedTemplate); - String templateChecksum = calculateChecksum(mdigest, tempFile); - if (!templateChecksum.equals(newTemplateChecksum.get(getHypervisorType(hypervisor)))) { - LOGGER.error(String.format("Checksum mismatch: %s != %s ", templateChecksum, newTemplateChecksum.get(getHypervisorType(hypervisor)))); - templatesFound = false; - break; - } - } - - if (!templatesFound) { - String errMsg = "SystemVm template not found. 
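validateTemplates now compares the checksum of each bundled template file against the value parsed from metadata.ini, using a one-argument calculateChecksum(File) rather than passing a MessageDigest in from the call site. That helper is outside this hunk, so the following is only a sketch of an MD5 file checksum of that shape, assuming the metadata checksums are lowercase 32-character hex strings:

    import java.io.BufferedInputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.math.BigInteger;
    import java.security.MessageDigest;

    static String calculateChecksum(File templateFile) {
        try (InputStream in = new BufferedInputStream(new FileInputStream(templateFile))) {
            MessageDigest digest = MessageDigest.getInstance("MD5");
            byte[] buffer = new byte[8192];
            int read;
            while ((read = in.read(buffer)) > 0) {
                digest.update(buffer, 0, read);
            }
            // Left-pad to 32 hex characters so the string comparison with metadata.ini is stable.
            return String.format("%032x", new BigInteger(1, digest.digest()));
        } catch (Exception e) {
            throw new RuntimeException("Unable to compute checksum for " + templateFile, e);
        }
    }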
Cannot upgrade system Vms"; - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg); - } - + validateTemplates(hypervisorsInUse); // Perform Registration if templates not already registered - List zoneIds = getEligibleZoneIds(conn); - for (Long zoneId : zoneIds) { - Pair storeUrlAndId = getNfsStoreInZone(conn, zoneId); - mountStore(storeUrlAndId.first()); - List hypervisorList = fetchAllHypervisors(conn, zoneId); - for (String hypervisor : hypervisorList) { - Hypervisor.HypervisorType name = Hypervisor.HypervisorType.getType(hypervisor); - String templateName = NewTemplateNameList.get(name); - Pair hypervisorAndTemplateName = new Pair(name, templateName); - long templateId = isTemplateAlreadyRegistered(conn, hypervisorAndTemplateName); - if (templateId != -1) { - continue; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(final TransactionStatus status) { + List zoneIds = getEligibleZoneIds(); + for (Long zoneId : zoneIds) { + String filePath = TEMPORARY_SECONDARY_STORE + generateToken(TOKEN_LENGTH); + try { + Pair storeUrlAndId = getNfsStoreInZone(zoneId); + mountStore(storeUrlAndId.first(), filePath); + List hypervisorList = fetchAllHypervisors(zoneId); + for (String hypervisor : hypervisorList) { + Hypervisor.HypervisorType name = Hypervisor.HypervisorType.getType(hypervisor); + String templateName = NewTemplateNameList.get(name); + Pair hypervisorAndTemplateName = new Pair(name, templateName); + long templateId = getRegisteredTemplateId(hypervisorAndTemplateName); + if (templateId != -1) { + VMTemplateVO templateVO = vmTemplateDao.findById(templateId); + String installPath = getTemplateStoreDetails(conn, templateId); + if (SystemVmTemplateRegistration.validateIfSeeded(storeUrlAndId.first(), installPath)) { + continue; + } else if (templateVO != null) { + registerTemplate(hypervisorAndTemplateName, storeUrlAndId, templateVO, filePath); + continue; + } + } + registerTemplate(hypervisorAndTemplateName, storeUrlAndId, filePath); + } + unmountStore(filePath); + } catch (Exception e) { + unmountStore(filePath); + throw new CloudRuntimeException("Failed to register systemVM template. Upgrade Failed"); + } } - registerTemplate(conn, hypervisorAndTemplateName, storeUrlAndId); } - unmountStore(); - } + }); } catch (Exception e) { - unmountStore(); throw new CloudRuntimeException("Failed to register systemVM template. Upgrade Failed"); } } finally { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java new file mode 100644 index 000000000000..1f9dc77fdc4e --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java @@ -0,0 +1,236 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
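In registerTemplates above, the NFS store is mounted under a random temporary path per zone and must be unmounted whether registration succeeds or fails; the hunk does this by calling unmountStore on both the success path and in the catch block. A minimal sketch of the same per-zone flow with the unmount in a finally block (registerForZone is a hypothetical stand-in for the inner hypervisor loop; the other names mirror the patch):

    // Sketch: one zone's registration with a guaranteed unmount.
    for (Long zoneId : getEligibleZoneIds()) {
        String filePath = TEMPORARY_SECONDARY_STORE + generateToken(TOKEN_LENGTH);
        Pair<String, Long> storeUrlAndId = getNfsStoreInZone(zoneId);
        mountStore(storeUrlAndId.first(), filePath);
        try {
            registerForZone(zoneId, storeUrlAndId, filePath); // hypothetical: loop over the zone's hypervisors
        } finally {
            unmountStore(filePath); // runs on success and on failure
        }
    }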
See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.util.List; +import java.util.Map; + +import javax.naming.ConfigurationException; + +import com.cloud.utils.db.Filter; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; + +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +public class BasicTemplateDataStoreDaoImpl extends GenericDaoBase implements TemplateDataStoreDao { + private SearchBuilder templateRoleSearch; + private SearchBuilder storeTemplateSearch; + + public BasicTemplateDataStoreDaoImpl() { + super(); + templateRoleSearch = createSearchBuilder(); + templateRoleSearch.and("template_id", templateRoleSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); + templateRoleSearch.and("store_role", templateRoleSearch.entity().getDataStoreRole(), SearchCriteria.Op.EQ); + templateRoleSearch.and("destroyed", templateRoleSearch.entity().getDestroyed(), SearchCriteria.Op.EQ); + templateRoleSearch.and("state", templateRoleSearch.entity().getState(), SearchCriteria.Op.EQ); + templateRoleSearch.done(); + + storeTemplateSearch = createSearchBuilder(); + storeTemplateSearch.and("template_id", storeTemplateSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); + storeTemplateSearch.and("store_id", storeTemplateSearch.entity().getDataStoreId(), SearchCriteria.Op.EQ); + storeTemplateSearch.and("destroyed", storeTemplateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ); + storeTemplateSearch.done(); + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + return true; + } + + @Override + public List listByStoreId(long id) { + return null; + } + + @Override + public List listDestroyed(long storeId) { + return null; + } + + @Override + public List listActiveOnCache(long id) { + return null; + } + + @Override + public void deletePrimaryRecordsForStore(long id) { + + } + + @Override + public void deletePrimaryRecordsForTemplate(long templateId) { + + } + + @Override + public List listByTemplateStore(long templateId, long storeId) { + return null; + } + + @Override + public List listByTemplateStoreStatus(long templateId, long storeId, ObjectInDataStoreStateMachine.State... states) { + return null; + } + + @Override + public List listByTemplateStoreDownloadStatus(long templateId, long storeId, VMTemplateStorageResourceAssoc.Status... status) { + return null; + } + + @Override + public List listByTemplateZoneDownloadStatus(long templateId, Long zoneId, VMTemplateStorageResourceAssoc.Status... status) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplateZoneDownloadStatus(long templateId, Long zoneId, VMTemplateStorageResourceAssoc.Status... status) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplateZoneStagingDownloadStatus(long templateId, Long zoneId, VMTemplateStorageResourceAssoc.Status... 
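BasicTemplateDataStoreDaoImpl is intentionally thin: only the two SearchBuilder-backed lookups needed during the upgrade are implemented, and every other TemplateDataStoreDao method is a null or no-op stub. A short sketch of how a SearchBuilder declared in the constructor is consumed at query time, mirroring findByTemplate further down (the direct instantiation, the template id and the example install path are for illustration only):

    // Sketch: look up the image-store copy of a template via the role-based search.
    TemplateDataStoreDao templateStoreDao = new BasicTemplateDataStoreDaoImpl();
    long templateId = 201L; // hypothetical template id
    TemplateDataStoreVO ref = templateStoreDao.findByTemplate(templateId, DataStoreRole.Image);
    if (ref != null) {
        String installPath = ref.getInstallPath(); // e.g. template/tmpl/1/201/<uuid>.qcow2
    }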
status) { + return null; + } + + @Override + public TemplateDataStoreVO findByStoreTemplate(long storeId, long templateId) { + SearchCriteria sc = storeTemplateSearch.create(); + sc.setParameters("store_id", storeId); + sc.setParameters("template_id", templateId); + sc.setParameters("destroyed", false); + Filter filter = new Filter(TemplateDataStoreVO.class, "id", false, 0L, 1L); + List templates = listBy(sc, filter); + if ((templates != null) && !templates.isEmpty()) { + return templates.get(0); + } + return null; + } + + @Override + public TemplateDataStoreVO findByStoreTemplate(long storeId, long templateId, boolean lock) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplate(long templateId, DataStoreRole role) { + SearchCriteria sc = templateRoleSearch.create(); + sc.setParameters("template_id", templateId); + sc.setParameters("store_role", role); + sc.setParameters("destroyed", false); + return findOneIncludingRemovedBy(sc); + } + + @Override + public TemplateDataStoreVO findReadyByTemplate(long templateId, DataStoreRole role) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, DataStoreRole role) { + return null; + } + + @Override + public List listByTemplate(long templateId) { + return null; + } + + @Override + public List listByTemplateNotBypassed(long templateId) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplateZoneReady(long templateId, Long zoneId) { + return null; + } + + @Override + public void duplicateCacheRecordsOnRegionStore(long storeId) { + + } + + @Override + public TemplateDataStoreVO findReadyOnCache(long templateId) { + return null; + } + + @Override + public List listOnCache(long templateId) { + return null; + } + + @Override + public void updateStoreRoleToCachce(long storeId) { + + } + + @Override + public List listTemplateDownloadUrls() { + return null; + } + + @Override + public void removeByTemplateStore(long templateId, long imageStoreId) { + + } + + @Override + public void expireDnldUrlsForZone(Long dcId) { + + } + + @Override + public List listByTemplateState(VirtualMachineTemplate.State... 
states) { + return null; + } + + @Override + public TemplateDataStoreVO createTemplateDirectDownloadEntry(long templateId, Long size) { + return null; + } + + @Override + public TemplateDataStoreVO getReadyBypassedTemplate(long templateId) { + return null; + } + + @Override + public boolean isTemplateMarkedForDirectDownload(long templateId) { + return false; + } + + @Override + public List listTemplateDownloadUrlsByStoreId(long storeId) { + return null; + } + + @Override + public boolean updateState(ObjectInDataStoreStateMachine.State currentState, ObjectInDataStoreStateMachine.Event event, ObjectInDataStoreStateMachine.State nextState, DataObjectInStore vo, Object data) { + return false; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index 1dfc6a3ed4f3..d8ab2ee3ac43 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -28,7 +28,6 @@ import java.util.Set; import com.cloud.upgrade.SystemVmTemplateRegistration; -import com.cloud.utils.Pair; import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor; @@ -37,6 +36,10 @@ public class Upgrade41510to41600 implements DbUpgrade, DbUpgradeSystemVmTemplate { final static Logger LOG = Logger.getLogger(Upgrade41510to41600.class); + private SystemVmTemplateRegistration systemVmTemplateRegistration; + + public Upgrade41510to41600() { + } @Override public String[] getUpgradableVersionRange() { @@ -66,13 +69,17 @@ public InputStream[] getPrepareScripts() { @Override public void performDataMigration(Connection conn) { - updateVMwareSystemvVMTemplateField(conn, SystemVmTemplateRegistration.NewTemplateNameList.get(Hypervisor.HypervisorType.VMware)); + } + + private void initSystemVmTemplateRegistration() { + systemVmTemplateRegistration = new SystemVmTemplateRegistration(); } @Override @SuppressWarnings("serial") public void updateSystemVmTemplates(final Connection conn) { LOG.debug("Updating System Vm template IDs"); + initSystemVmTemplateRegistration(); final Set hypervisorsListInUse = new HashSet(); try (PreparedStatement pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { @@ -104,7 +111,7 @@ public void updateSystemVmTemplates(final Connection conn) { throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e); } - for (final Map.Entry hypervisorAndTemplateName : SystemVmTemplateRegistration.NewTemplateNameList.entrySet()) { + for (final Map.Entry hypervisorAndTemplateName : systemVmTemplateRegistration.NewTemplateNameList.entrySet()) { LOG.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms"); try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? 
and removed is null order by id desc limit 1")) { // Get systemvm template id for corresponding hypervisor @@ -129,9 +136,9 @@ public void updateSystemVmTemplates(final Connection conn) { throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e); } + updateVMwareSystemvVMTemplateField(conn, SystemVmTemplateRegistration.NewTemplateNameList.get(Hypervisor.HypervisorType.VMware)); // update template ID of system Vms - SystemVmTemplateRegistration.updateSystemVMEntries(conn, templateId, - new Pair(hypervisorAndTemplateName.getKey(), hypervisorAndTemplateName.getValue())); + systemVmTemplateRegistration.updateSystemVMEntries(templateId, hypervisorAndTemplateName.getKey()); // Change value of global configuration parameter router.template.* for the corresponding hypervisor // Change value of global configuration parameter - minreq.sysvmtemplate.version for the ACS version @@ -139,11 +146,11 @@ public void updateSystemVmTemplates(final Connection conn) { configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()), hypervisorAndTemplateName.getValue()); configParams.put("minreq.sysvmtemplate.version", SystemVmTemplateRegistration.CS_MAJOR_VERSION + "." + SystemVmTemplateRegistration.CS_TINY_VERSION); - SystemVmTemplateRegistration.updateConfigurationParams(conn, configParams); + systemVmTemplateRegistration.updateConfigurationParams(configParams); } else { if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) { try { - SystemVmTemplateRegistration.registerTemplates(conn, hypervisorsListInUse); + systemVmTemplateRegistration.registerTemplates(conn, hypervisorsListInUse); break; } catch (final Exception e) { throw new CloudRuntimeException(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms"); @@ -155,8 +162,8 @@ public void updateSystemVmTemplates(final Connection conn) { // hypervisor try (PreparedStatement update_templ_url_pstmt = conn .prepareStatement("UPDATE `cloud`.`vm_template` SET url = ? , checksum = ? WHERE hypervisor_type = ? 
AND type = 'SYSTEM' AND removed is null order by id desc limit 1");) { - update_templ_url_pstmt.setString(1, SystemVmTemplateRegistration.newTemplateUrl.get(hypervisorAndTemplateName.getKey())); - update_templ_url_pstmt.setString(2, SystemVmTemplateRegistration.newTemplateChecksum.get(hypervisorAndTemplateName.getKey())); + update_templ_url_pstmt.setString(1, SystemVmTemplateRegistration.NewTemplateUrl.get(hypervisorAndTemplateName.getKey())); + update_templ_url_pstmt.setString(2, SystemVmTemplateRegistration.NewTemplateChecksum.get(hypervisorAndTemplateName.getKey())); update_templ_url_pstmt.setString(3, hypervisorAndTemplateName.getKey().toString()); update_templ_url_pstmt.executeUpdate(); } catch (final SQLException e) { @@ -165,6 +172,7 @@ public void updateSystemVmTemplates(final Connection conn) { throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " + hypervisorAndTemplateName.getKey().toString(), e); } + updateVMwareSystemvVMTemplateField(conn, SystemVmTemplateRegistration.NewTemplateNameList.get(Hypervisor.HypervisorType.VMware)); } } } catch (final SQLException e) { diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java index 3ac023e2dfd8..0bbcb37ea0a2 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Map; +import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; @@ -159,4 +160,6 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listNonMigratingVmsByHostEqualsLastHost(long hostId); + void updateSystemVmTemplateId(long templateId, Hypervisor.HypervisorType hypervisorType); + } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 22a197a13d32..7ceff5eb5953 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -28,6 +28,7 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; +import com.cloud.hypervisor.Hypervisor; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -126,6 +127,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem private static final String COUNT_VMS_BASED_ON_VGPU_TYPES2 = "GROUP BY offering.service_offering_id) results GROUP BY pci, type"; + private static final String UPDATE_SYSTEM_VM_TEMPLATE_ID_FOR_HYPERVISOR = "UPDATE `cloud`.`vm_instance` SET vm_template_id = ? WHERE type <> 'User' AND hypervisor_type = ? 
AND removed is NULL"; + @Inject protected HostDao _hostDao; @@ -941,4 +944,23 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } + + + @Override + public void updateSystemVmTemplateId(long templateId, Hypervisor.HypervisorType hypervisorType) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + + StringBuilder sql = new StringBuilder(UPDATE_SYSTEM_VM_TEMPLATE_ID_FOR_HYPERVISOR); + try { + PreparedStatement updateStatement = txn.prepareAutoCloseStatement(sql.toString()); + updateStatement.setLong(1, templateId); + updateStatement.setString(2, hypervisorType.toString()); + updateStatement.executeUpdate(); + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java index 199f071a0562..ba9825c3c868 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java @@ -45,4 +45,8 @@ public interface ImageStoreDao extends GenericDao { List listStoresByZoneId(long zoneId); List listAllStoresInZone(Long zoneId, String provider, DataStoreRole role); + + List findByProtocol(String protocol); + + ImageStoreVO findOneByZoneAndProtocol(long zoneId, String protocol); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java index 0a2918187b78..3468b6008d99 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java @@ -23,6 +23,7 @@ import javax.naming.ConfigurationException; +import com.cloud.utils.db.Filter; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; @@ -39,7 +40,22 @@ public class ImageStoreDaoImpl extends GenericDaoBase implem private SearchBuilder providerSearch; private SearchBuilder regionSearch; private SearchBuilder storeSearch; - + private SearchBuilder protocolSearch; + private SearchBuilder zoneProtocolSearch; + + public ImageStoreDaoImpl() { + super(); + protocolSearch = createSearchBuilder(); + protocolSearch.and("protocol", protocolSearch.entity().getProtocol(), SearchCriteria.Op.EQ); + protocolSearch.and("role", protocolSearch.entity().getRole(), SearchCriteria.Op.EQ); + protocolSearch.done(); + + zoneProtocolSearch = createSearchBuilder(); + zoneProtocolSearch.and("dataCenterId", zoneProtocolSearch.entity().getDcId(), SearchCriteria.Op.EQ); + zoneProtocolSearch.and("protocol", zoneProtocolSearch.entity().getProtocol(), SearchCriteria.Op.EQ); + zoneProtocolSearch.and("role", zoneProtocolSearch.entity().getRole(), SearchCriteria.Op.EQ); + zoneProtocolSearch.done(); + } @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); @@ -156,4 +172,23 @@ public List listStoresByZoneId(long zoneId) { sc.addAnd("dcId", SearchCriteria.Op.EQ, zoneId); return listBy(sc); } + + @Override + public List findByProtocol(String protocol) { + SearchCriteria sc = protocolSearch.create(); + 
sc.setParameters("protocol", protocol); + sc.setParameters("role", DataStoreRole.Image); + return listBy(sc); + } + + @Override + public ImageStoreVO findOneByZoneAndProtocol(long dataCenterId, String protocol) { + SearchCriteria sc = zoneProtocolSearch.create(); + sc.setParameters("dataCenterId", dataCenterId); + sc.setParameters("protocol", protocol); + sc.setParameters("role", DataStoreRole.Image); + Filter filter = new Filter(1); + List results = listBy(sc, filter); + return results.size() == 0 ? null : results.get(0); + } } diff --git a/engine/schema/templateConfig.sh b/engine/schema/templateConfig.sh index 686804435515..d981ccd49b52 100644 --- a/engine/schema/templateConfig.sh +++ b/engine/schema/templateConfig.sh @@ -63,7 +63,6 @@ function createMetadataFile() { echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\n" >> $METADATAFILE done - } declare -A templates diff --git a/engine/schema/validate-checksum b/engine/schema/validate-checksum deleted file mode 100644 index a5a0deddb44b..000000000000 --- a/engine/schema/validate-checksum +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-set -x -old_checksum_path="$HOME/.m2/repository/.cache/download-maven-plugin/" -new_checksum_path="./dist/systemvm-templates/" -file_name="md5sum.txt" - -count=$(ls -d ${old_checksum_path}${file_name}* | wc -l) -if [ "$count" -gt 1 ]; then - rm -rf $old_checksum_path* -else - echo $PWD - old_checksum=$(md5sum ${old_checksum_path}${file_name}* | awk '{print $1}') - new_checksum=$(md5sum ${new_checksum_path}${file_name} | awk '{print $1}') - if [ "$old_checksum" != "$new_checksum" ]; then - rm -rf $old_checksum_path* - fi -fi diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec index 1ab538b22ada..56091e8af268 100644 --- a/packaging/centos7/cloud.spec +++ b/packaging/centos7/cloud.spec @@ -303,7 +303,6 @@ mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/md5sum.txt - # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/ diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 4903b2933403..03b581916480 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -22,10 +22,8 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; -import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; -import java.sql.SQLException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -2750,51 +2748,62 @@ public ImageStore discoverImageStore(String name, String url, String providerNam // populate template_store_ref table _imageSrv.addSystemVMTemplatesToSecondary(store); _imageSrv.handleTemplateSync(store); - if (providerName.equals(DataStoreProvider.NFS_IMAGE) && zoneId != null) { - List stores = _imageStoreDao.listAllStoresInZone(zoneId, providerName, DataStoreRole.Image); - stores = stores.stream().filter(str -> str.getId() != store.getId()).collect(Collectors.toList()); - // Check if it's the only/first store in the zone - if (stores.size() == 0) { - List hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId); - Set hypSet = new HashSet(hypervisorTypes); - TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); - Connection conn; - try { - conn = txn.getConnection(); - Pair storeUrlAndId = new Pair<>(url, store.getId()); - for (HypervisorType hypervisorType : hypSet) { + if (DataStoreProvider.NFS_IMAGE.equals(providerName) && zoneId != null) { + String finalProviderName = providerName; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(final TransactionStatus status) { + List stores = _imageStoreDao.listAllStoresInZone(zoneId, finalProviderName, DataStoreRole.Image); + stores = stores.stream().filter(str -> str.getId() != store.getId()).collect(Collectors.toList()); + // Check if it's the only/first store in the zone + if (stores.size() == 0) { + List hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId); + Set hypSet = new HashSet(hypervisorTypes); + TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); + SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); + String filePath = 
SystemVmTemplateRegistration.TEMPORARY_SECONDARY_STORE + SystemVmTemplateRegistration.generateToken(10); try { - String templateName = getValidTemplateName(zoneId, hypervisorType); - Pair hypervisorAndTemplateName = - new Pair<>(hypervisorType, templateName); - long templateId = SystemVmTemplateRegistration.isTemplateAlreadyRegistered(conn, hypervisorAndTemplateName); - VMTemplateVO vmTemplateVO = _templateDao.findById(templateId); - TemplateDataStoreVO templateVO = null; - if (templateId != -1) { - templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); - if (templateVO != null) { - if (SystemVmTemplateRegistration.validateIfSeeded(url, templateVO.getInstallPath())) { - continue; + Pair storeUrlAndId = new Pair<>(url, store.getId()); + for (HypervisorType hypervisorType : hypSet) { + try { + String templateName = getValidTemplateName(zoneId, hypervisorType); + Pair hypervisorAndTemplateName = + new Pair<>(hypervisorType, templateName); + long templateId = systemVmTemplateRegistration.getRegisteredTemplateId(hypervisorAndTemplateName); + VMTemplateVO vmTemplateVO = _templateDao.findById(templateId); + TemplateDataStoreVO templateVO = null; + if (templateId != -1) { + templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); + if (templateVO != null) { + try { + if (SystemVmTemplateRegistration.validateIfSeeded(url, templateVO.getInstallPath())) { + continue; + } + } catch (Exception e) { + s_logger.error("Failed to validated if template is seeded", e); + } + } + } + SystemVmTemplateRegistration.mountStore(storeUrlAndId.first(), filePath); + if (templateVO != null && vmTemplateVO != null) { + systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, vmTemplateVO, filePath); + } else { + systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, filePath); } + } catch (CloudRuntimeException e) { + SystemVmTemplateRegistration.unmountStore(filePath); + s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name()), e); } } - SystemVmTemplateRegistration.mountStore(storeUrlAndId.first()); - if (templateVO != null && vmTemplateVO != null) { - SystemVmTemplateRegistration.registerTemplate(conn, hypervisorAndTemplateName, storeUrlAndId, vmTemplateVO); - } else { - SystemVmTemplateRegistration.registerTemplate(conn, hypervisorAndTemplateName, storeUrlAndId); - } - } catch (CloudRuntimeException e) { - s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name()), e); + } catch (Exception e) { + s_logger.error("Failed to register systemVM template(s)"); + } finally { + SystemVmTemplateRegistration.unmountStore(filePath); + txn.close(); } } - } catch (SQLException e) { - s_logger.error("Failed to register systemVM template(s)"); - } finally { - SystemVmTemplateRegistration.unmountStore(); - txn.close(); } - } + }); } } From 7196216185e30ac3810aa8315b1abddf2b6cd6ef Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 26 Aug 2021 10:43:17 +0530 Subject: [PATCH 107/117] Address Review comments - part2: 1. extract repeated code to a common location - wrt adding encoded public key to the cmdline args 2. changed name of folder containing systemvm templates on MS 3. Use Files for creating folders as opposed to bash cmds 4. 
extract code --- debian/rules | 6 +- .../java/com/cloud/vm/VirtualMachineGuru.java | 12 ++ .../upgrade/SystemVmTemplateRegistration.java | 13 +- engine/schema/templateConfig.sh | 1 - packaging/centos7/cloud.spec | 6 +- packaging/centos8/cloud.spec | 6 +- packaging/suse15/cloud.spec | 6 +- .../lb/ElasticLoadBalancerManagerImpl.java | 11 +- .../lb/InternalLoadBalancerVMManagerImpl.java | 12 +- .../consoleproxy/ConsoleProxyManagerImpl.java | 11 +- .../VirtualNetworkApplianceManagerImpl.java | 11 +- .../com/cloud/storage/StorageManagerImpl.java | 116 +++++++++--------- .../cloud/storage/VolumeApiServiceImpl.java | 12 +- .../SecondaryStorageManagerImpl.java | 11 +- .../opt/cloud/bin/setup/cloud-early-config | 4 +- 15 files changed, 112 insertions(+), 126 deletions(-) diff --git a/debian/rules b/debian/rules index 7139ee85ada2..ed1559a46dfe 100755 --- a/debian/rules +++ b/debian/rules @@ -66,7 +66,7 @@ override_dh_auto_install: mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/lib mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/setup - mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/templates + mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm mkdir $(DESTDIR)/var/log/$(PACKAGE)/management mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management mkdir $(DESTDIR)/var/log/$(PACKAGE)/ipallocator @@ -79,8 +79,8 @@ override_dh_auto_install: cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/ cp client/target/cloud-client-ui-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/cloudstack-$(VERSION).jar cp client/target/lib/*jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/ - cp -r engine/schema/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/ - rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/md5sum.txt + cp -r engine/schema/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/ + rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/md5sum.txt # nast hack for a couple of configuration files mv $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/cloudstack-limits.conf $(DESTDIR)/$(SYSCONFDIR)/security/limits.d/ diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineGuru.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineGuru.java index d6d123cd2434..7611df820c80 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineGuru.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineGuru.java @@ -20,6 +20,10 @@ import com.cloud.agent.manager.Commands; import com.cloud.deploy.DeployDestination; import com.cloud.exception.ResourceUnavailableException; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + /** * A VirtualMachineGuru knows how to process a certain type of virtual machine. 
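The new static VirtualMachineGuru.getEncodedMsPublicKey centralizes the Base64 encoding of the management server's SSH public key that several gurus previously duplicated before appending it to the system VM boot args. A small round-trip sketch of what ends up behind authorized_key= and how the guest side can recover the key (the key literal is illustrative only):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    import com.cloud.vm.VirtualMachineGuru;

    public class EncodedKeyExample {
        public static void main(String[] args) {
            String pubKey = "ssh-rsa AAAAB3... management-server"; // illustrative key material
            String encoded = VirtualMachineGuru.getEncodedMsPublicKey(pubKey);
            // Callers append " authorized_key=" + encoded to the boot args.
            String decoded = new String(Base64.getDecoder().decode(encoded), StandardCharsets.UTF_8);
            assert pubKey.equals(decoded);
            // A null key stays null; StringBuilder.append(null) would then emit the literal text "null".
            assert VirtualMachineGuru.getEncodedMsPublicKey(null) == null;
        }
    }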
* @@ -60,4 +64,12 @@ public interface VirtualMachineGuru { void prepareStop(VirtualMachineProfile profile); void finalizeUnmanage(VirtualMachine vm); + + static String getEncodedMsPublicKey(String pubKey) { + String base64EncodedPublicKey = null; + if (pubKey != null) { + base64EncodedPublicKey = Base64.getEncoder().encodeToString(pubKey.getBytes(StandardCharsets.UTF_8)); + } + return base64EncodedPublicKey; + } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 6bddbb7f7533..056657eda988 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -83,14 +83,12 @@ public class SystemVmTemplateRegistration { private static final Logger LOGGER = Logger.getLogger(SystemVmTemplateRegistration.class); private static final String MOUNT_COMMAND = "sudo mount -t nfs %s %s"; private static final String UMOUNT_COMMAND = "sudo umount %s"; - private static final String HASH_ALGORITHM = "MD5"; private static final String RELATIVE_TEMPLATE_PATH = "./engine/schema/dist/systemvm-templates/"; - private static final String ABSOLUTE_TEMPLATE_PATH = "/usr/share/cloudstack-management/templates/"; + private static final String ABSOLUTE_TEMPLATE_PATH = "/usr/share/cloudstack-management/templates/systemvm"; private static final String TEMPLATES_PATH = fetchTemplatesPath(); private static final String METADATA_FILE_NAME = "metadata.ini"; private static final String METADATA_FILE = TEMPLATES_PATH + METADATA_FILE_NAME; public static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; - // private static final String PARENT_TEMPLATE_FOLDER = TEMPORARY_SECONDARY_STORE; private static final String PARTIAL_TEMPLATE_FOLDER = String.format("/template/tmpl/%d/", Account.ACCOUNT_ID_SYSTEM); private static final String FETCH_REGISTERED_TEMPLATE_INSTALL_PATH = "SELECT install_path FROM `cloud`.`template_store_ref` where template_id = ? 
LIMIT 1"; private static final String storageScriptsDir = "scripts/storage/secondary"; @@ -98,8 +96,8 @@ public class SystemVmTemplateRegistration { private static final Integer LOCK_WAIT_TIMEOUT = 1200; private static final Integer TOKEN_LENGTH = 10; - public static String CS_MAJOR_VERSION = "4.16"; - public static String CS_TINY_VERSION = "0"; + public static String CS_MAJOR_VERSION = null; + public static String CS_TINY_VERSION = null; @Inject DataCenterDao dataCenterDao; @@ -422,7 +420,10 @@ public static void mountStore(String storeUrl, String path) { URI uri = new URI(UriUtils.encodeURIComponent(storeUrl)); String host = uri.getHost(); String mountPath = uri.getPath(); - Script.runSimpleBashScript("mkdir -p " + path); + boolean fileCreated = new File(path).mkdirs(); + if (!fileCreated) { + throw new CloudRuntimeException("Failed to created file for mounting store to copy systemVM templates"); + } String mount = String.format(MOUNT_COMMAND, host + ":" + mountPath, path); Script.runSimpleBashScript(mount); } diff --git a/engine/schema/templateConfig.sh b/engine/schema/templateConfig.sh index d981ccd49b52..d39eb124fef6 100644 --- a/engine/schema/templateConfig.sh +++ b/engine/schema/templateConfig.sh @@ -62,7 +62,6 @@ function createMetadataFile() { filename=$(echo ${downloadurl##*'/'}) echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\n" >> $METADATAFILE done - } declare -A templates diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec index 56091e8af268..dd2c658ae63b 100644 --- a/packaging/centos7/cloud.spec +++ b/packaging/centos7/cloud.spec @@ -299,9 +299,9 @@ touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina # SystemVM template -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/md5sum.txt +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui diff --git a/packaging/centos8/cloud.spec b/packaging/centos8/cloud.spec index 46c98b9048fb..6fbb58f6a342 100644 --- a/packaging/centos8/cloud.spec +++ b/packaging/centos8/cloud.spec @@ -292,9 +292,9 @@ touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina # SystemVM template -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/md5sum.txt +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui diff --git a/packaging/suse15/cloud.spec b/packaging/suse15/cloud.spec 
index 77125ec607ce..30300c6be471 100644 --- a/packaging/suse15/cloud.spec +++ b/packaging/suse15/cloud.spec @@ -294,9 +294,9 @@ touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina # SystemVM template -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/ -rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/md5sum.txt +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index 7968d55f4470..6975f76e9681 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -16,9 +16,7 @@ // under the License. package com.cloud.network.lb; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Base64; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -478,13 +476,8 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl if (defaultDns2 != null) { buf.append(" dns2=").append(defaultDns2); } - String MsPublicKey = _configDao.getValue("ssh.publickey"); - String base64EncodedPublicKey = null; - if (MsPublicKey != null) { - base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); - } - - buf.append(" authorized_key=").append(base64EncodedPublicKey); + String msPublicKey = _configDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); if (s_logger.isDebugEnabled()) { s_logger.debug("Boot Args for " + profile + ": " + buf.toString()); } diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java index d65aa01ab05a..ee880cb6b666 100644 --- a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java +++ b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java @@ -16,10 +16,8 @@ // under the License. 
package org.apache.cloudstack.network.lb; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; @@ -220,14 +218,8 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile buf.append(" localgw=").append(dest.getPod().getGateway()); } } - - String MsPublicKey = _configDao.getValue("ssh.publickey"); - String base64EncodedPublicKey = null; - if (MsPublicKey != null) { - base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); - } - - buf.append(" authorized_key=").append(base64EncodedPublicKey); + String msPublicKey = _configDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); } if (controlNic == null) { diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 59a729124285..a3177fa77054 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -17,10 +17,8 @@ package com.cloud.consoleproxy; import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.Date; import java.util.HashMap; import java.util.Iterator; @@ -1227,13 +1225,8 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl buf.append(" disable_rp_filter=true"); } - String MsPublicKey = configurationDao.getValue("ssh.publickey"); - String base64EncodedPublicKey = null; - if (MsPublicKey != null) { - base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); - } - - buf.append(" authorized_key=").append(base64EncodedPublicKey); + String msPublicKey = configurationDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); boolean externalDhcp = false; String externalDhcpStr = configurationDao.getValue("direct.attach.network.externalIpAllocator.enabled"); diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index ed0dca1e5568..3e2e3b37c65b 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -20,14 +20,12 @@ import java.lang.reflect.Type; import java.math.BigInteger; import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.Calendar; import java.util.Collections; import java.util.Date; @@ -1941,13 +1939,8 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile if (Boolean.valueOf(_configDao.getValue("system.vm.random.password"))) { buf.append(" vmpassword=").append(_configDao.getValue("system.vm.password")); } - String MsPublicKey = _configDao.getValue("ssh.publickey"); - String base64EncodedPublicKey = null; - if 
(MsPublicKey != null) { - base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); - } - - buf.append(" authorized_key=").append(base64EncodedPublicKey); + String msPublicKey = _configDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); NicProfile controlNic = null; String defaultDns1 = null; diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 03b581916480..8db8fa3e65c7 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -2748,63 +2748,7 @@ public ImageStore discoverImageStore(String name, String url, String providerNam // populate template_store_ref table _imageSrv.addSystemVMTemplatesToSecondary(store); _imageSrv.handleTemplateSync(store); - if (DataStoreProvider.NFS_IMAGE.equals(providerName) && zoneId != null) { - String finalProviderName = providerName; - Transaction.execute(new TransactionCallbackNoReturn() { - @Override - public void doInTransactionWithoutResult(final TransactionStatus status) { - List stores = _imageStoreDao.listAllStoresInZone(zoneId, finalProviderName, DataStoreRole.Image); - stores = stores.stream().filter(str -> str.getId() != store.getId()).collect(Collectors.toList()); - // Check if it's the only/first store in the zone - if (stores.size() == 0) { - List hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId); - Set hypSet = new HashSet(hypervisorTypes); - TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); - SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); - String filePath = SystemVmTemplateRegistration.TEMPORARY_SECONDARY_STORE + SystemVmTemplateRegistration.generateToken(10); - try { - Pair storeUrlAndId = new Pair<>(url, store.getId()); - for (HypervisorType hypervisorType : hypSet) { - try { - String templateName = getValidTemplateName(zoneId, hypervisorType); - Pair hypervisorAndTemplateName = - new Pair<>(hypervisorType, templateName); - long templateId = systemVmTemplateRegistration.getRegisteredTemplateId(hypervisorAndTemplateName); - VMTemplateVO vmTemplateVO = _templateDao.findById(templateId); - TemplateDataStoreVO templateVO = null; - if (templateId != -1) { - templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); - if (templateVO != null) { - try { - if (SystemVmTemplateRegistration.validateIfSeeded(url, templateVO.getInstallPath())) { - continue; - } - } catch (Exception e) { - s_logger.error("Failed to validated if template is seeded", e); - } - } - } - SystemVmTemplateRegistration.mountStore(storeUrlAndId.first(), filePath); - if (templateVO != null && vmTemplateVO != null) { - systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, vmTemplateVO, filePath); - } else { - systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, filePath); - } - } catch (CloudRuntimeException e) { - SystemVmTemplateRegistration.unmountStore(filePath); - s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name()), e); - } - } - } catch (Exception e) { - s_logger.error("Failed to register systemVM template(s)"); - } finally { - SystemVmTemplateRegistration.unmountStore(filePath); - txn.close(); - } - } - } - }); - } + 
registerSystemVmTemplateOnFirstNfsStore(zoneId, providerName, url, store); } // associate builtin template with zones associated with this image store @@ -2818,6 +2762,64 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { return (ImageStore)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Image); } + private void registerSystemVmTemplateOnFirstNfsStore(Long zoneId, String providerName, String url, DataStore store) { + if (DataStoreProvider.NFS_IMAGE.equals(providerName) && zoneId != null) { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(final TransactionStatus status) { + List stores = _imageStoreDao.listAllStoresInZone(zoneId, providerName, DataStoreRole.Image); + stores = stores.stream().filter(str -> str.getId() != store.getId()).collect(Collectors.toList()); + // Check if it's the only/first store in the zone + if (stores.size() == 0) { + List hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId); + Set hypSet = new HashSet(hypervisorTypes); + TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); + SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); + String filePath = SystemVmTemplateRegistration.TEMPORARY_SECONDARY_STORE + SystemVmTemplateRegistration.generateToken(10); + try { + Pair storeUrlAndId = new Pair<>(url, store.getId()); + for (HypervisorType hypervisorType : hypSet) { + try { + String templateName = getValidTemplateName(zoneId, hypervisorType); + Pair hypervisorAndTemplateName = + new Pair<>(hypervisorType, templateName); + long templateId = systemVmTemplateRegistration.getRegisteredTemplateId(hypervisorAndTemplateName); + VMTemplateVO vmTemplateVO = _templateDao.findById(templateId); + TemplateDataStoreVO templateVO = null; + if (templateId != -1) { + templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); + if (templateVO != null) { + try { + if (SystemVmTemplateRegistration.validateIfSeeded(url, templateVO.getInstallPath())) { + continue; + } + } catch (Exception e) { + s_logger.error("Failed to validated if template is seeded", e); + } + } + } + SystemVmTemplateRegistration.mountStore(storeUrlAndId.first(), filePath); + if (templateVO != null && vmTemplateVO != null) { + systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, vmTemplateVO, filePath); + } else { + systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, filePath); + } + } catch (CloudRuntimeException e) { + SystemVmTemplateRegistration.unmountStore(filePath); + s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name()), e); + } + } + } catch (Exception e) { + s_logger.error("Failed to register systemVM template(s)"); + } finally { + SystemVmTemplateRegistration.unmountStore(filePath); + txn.close(); + } + } + } + }); + } + } @Override public ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws DiscoveryException, InvalidParameterValueException { // check if current cloud is ready to migrate, we only support cloud with only NFS secondary storages diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 70a1520ca781..da2b46462568 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ 
b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -2778,6 +2778,14 @@ private Snapshot orchestrateTakeVolumeSnapshot(Long volumeId, Long policyId, Lon return volService.takeSnapshot(volume); } + private boolean isOperationSupported(VMTemplateVO template, UserVmVO userVm) { + if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && + (userVm == null || !UserVmManager.CKS_NODE.equals(userVm.getUserVmType()))) { + return false; + } + return true; + } + @Override @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "allocating snapshot", create = true) public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType) throws ResourceAllocationException { @@ -2811,7 +2819,7 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, if (instanceId != null) { userVmVO = _userVmDao.findById(instanceId); } - if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()))) { + if (!isOperationSupported(template, userVmVO)) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); } } @@ -2873,7 +2881,7 @@ public Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName if (instanceId != null) { userVmVO = _userVmDao.findById(instanceId); } - if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()))) { + if (!isOperationSupported(template, userVmVO)) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); } } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index 77529f235d72..99539a29dcb3 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -18,10 +18,8 @@ import java.net.URI; import java.net.URISyntaxException; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import java.util.Date; import java.util.HashMap; @@ -1086,13 +1084,8 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl buf.append(" guid=").append(profile.getVirtualMachine().getHostName()); buf.append(" workers=").append(_configDao.getValue("workers")); - String MsPublicKey = _configDao.getValue("ssh.publickey"); - String base64EncodedPublicKey = null; - if (MsPublicKey != null) { - base64EncodedPublicKey = Base64.getEncoder().encodeToString(MsPublicKey.getBytes(StandardCharsets.UTF_8)); - } - - buf.append(" authorized_key=").append(base64EncodedPublicKey); + String msPublicKey = _configDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); if (_configDao.isPremium()) { s_logger.debug("VMWare hypervisor was configured, informing secondary storage VM to load the 
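VolumeApiServiceImpl now funnels both snapshot entry points through a single isOperationSupported(template, userVm) guard: snapshots of volumes backed by a SYSTEM template are refused unless the owning VM is a CKS (Kubernetes) node. A hedged sketch of the guard as a caller sees it; the volume accessors and dao fields are assumed to match the surrounding class:

    // Sketch: shared precondition used by allocSnapshot and allocSnapshotForVm.
    VMTemplateVO template = _templateDao.findById(volume.getTemplateId());
    UserVmVO userVmVO = (volume.getInstanceId() != null) ? _userVmDao.findById(volume.getInstanceId()) : null;
    if (!isOperationSupported(template, userVmVO)) {
        throw new InvalidParameterValueException("VolumeId: " + volumeId
                + " is for System VM , Creating snapshot against System VM volumes is not supported");
    }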
PremiumSecondaryStorageResource."); diff --git a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config index 917fa632cd42..d0ebd0b68146 100755 --- a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config +++ b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config @@ -89,8 +89,8 @@ start() { echo "" > /root/.ssh/known_hosts if which growpart > /dev/null; then - ROOT_MOUNT_POINT=`df -h / | tail -n 1 | cut -d' ' -f1` - ROOT_DISK=`echo $ROOT_MOUNT_POINT | sed 's/[0-9]*$//g'` + ROOT_MOUNT_POINT=$(df -h / | tail -n 1 | cut -d' ' -f1) + ROOT_DISK=$(echo $ROOT_MOUNT_POINT | sed 's/[0-9]*$//g') growpart $ROOT_DISK 2 growpart $ROOT_DISK 6 resize2fs $ROOT_MOUNT_POINT From 788247e1aef237d66fc33eb9febc610b7550d02d Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 26 Aug 2021 13:53:49 +0530 Subject: [PATCH 108/117] fix filepath --- .../java/com/cloud/upgrade/SystemVmTemplateRegistration.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 056657eda988..3c69d20ea1b8 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -84,7 +84,7 @@ public class SystemVmTemplateRegistration { private static final String MOUNT_COMMAND = "sudo mount -t nfs %s %s"; private static final String UMOUNT_COMMAND = "sudo umount %s"; private static final String RELATIVE_TEMPLATE_PATH = "./engine/schema/dist/systemvm-templates/"; - private static final String ABSOLUTE_TEMPLATE_PATH = "/usr/share/cloudstack-management/templates/systemvm"; + private static final String ABSOLUTE_TEMPLATE_PATH = "/usr/share/cloudstack-management/templates/systemvm/"; private static final String TEMPLATES_PATH = fetchTemplatesPath(); private static final String METADATA_FILE_NAME = "metadata.ini"; private static final String METADATA_FILE = TEMPLATES_PATH + METADATA_FILE_NAME; From 3249c8b7c45c39dc6a84321e46cf0c9789a03f94 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 26 Aug 2021 15:52:10 +0530 Subject: [PATCH 109/117] register template with same name in the 2 stores --- .../com/cloud/upgrade/SystemVmTemplateRegistration.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 3c69d20ea1b8..ac7d2000a6d1 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -578,10 +578,9 @@ private void setupTemplate(String templateName, Pair hypervisorAndTemplateName, + private Long performTemplateRegistrationOperations(String templateName, Pair hypervisorAndTemplateName, String url, String checksum, ImageFormat format, long guestOsId, Long storeId, Long templateId, String filePath, boolean updateTmpltDetails) { - final String templateName = UUID.randomUUID().toString(); Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); Date created = new Date(DateUtil.currentGMTTime().getTime()); SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, hypervisorAndTemplateName.second(), created, @@ -609,7 +608,8 @@ public void registerTemplate(Pair 
hypervisorA Long templateId = null; try { templateId = templateVO.getId(); - performTemplateRegistrationOperations(hypervisorAndTemplateName, templateVO.getUrl(), templateVO.getChecksum(), + String templateUniqName = templateVO.getUniqueName(); + performTemplateRegistrationOperations(templateUniqName, hypervisorAndTemplateName, templateVO.getUrl(), templateVO.getChecksum(), templateVO.getFormat(), templateVO.getGuestOSId(), storeUrlAndId.second(), templateId, filePath, false); } catch (Exception e) { String errMsg = String.format("Failed to register template for hypervisor: %s", hypervisorAndTemplateName.first()); @@ -626,7 +626,8 @@ public void registerTemplate(Pair hypervisorA Long templateId = null; try { Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); - templateId = performTemplateRegistrationOperations(hypervisorAndTemplateName, NewTemplateUrl.get(hypervisor), NewTemplateChecksum.get(hypervisor), + String templateUniqName = UUID.randomUUID().toString(); + templateId = performTemplateRegistrationOperations(templateUniqName, hypervisorAndTemplateName, NewTemplateUrl.get(hypervisor), NewTemplateChecksum.get(hypervisor), hypervisorImageFormat.get(hypervisor), hypervisorGuestOsMap.get(hypervisor), storeUrlAndId.second(), null, filePath, true); Map configParams = new HashMap<>(); configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); From 3c3b33683cba1c81dbb816124a081d0bc784051e Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 26 Aug 2021 19:20:39 +0530 Subject: [PATCH 110/117] Not to use uuid as unique name to avoid unnecessary re-syncs + list templates refactor --- .../upgrade/SystemVmTemplateRegistration.java | 14 +++++++++----- .../upgrade/dao/BasicTemplateDataStoreDaoImpl.java | 2 +- .../storage/datastore/db/TemplateDataStoreDao.java | 2 +- .../storage/image/db/TemplateDataStoreDaoImpl.java | 4 +++- scripts/storage/secondary/setup-sysvm-tmplt | 2 +- .../cloud/api/query/dao/TemplateJoinDaoImpl.java | 6 ++++-- 6 files changed, 19 insertions(+), 11 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index ac7d2000a6d1..5b436ab46bba 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -466,6 +466,11 @@ private Long createTemplateObjectInDB(SystemVMTemplateDetails details) { if (template == null) { return null; } + template.setUniqueName(String.format("routing-%s" , String.valueOf(template.getId()))); + boolean updated = vmTemplateDao.update(template.getId(), template); + if (!updated) { + throw new CloudRuntimeException("Failed to add template details to database"); + } return template.getId(); } @@ -578,10 +583,11 @@ private void setupTemplate(String templateName, Pair hypervisorAndTemplateName, + private Long performTemplateRegistrationOperations(Pair hypervisorAndTemplateName, String url, String checksum, ImageFormat format, long guestOsId, Long storeId, Long templateId, String filePath, boolean updateTmpltDetails) { Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); + String templateName = UUID.randomUUID().toString(); Date created = new Date(DateUtil.currentGMTTime().getTime()); SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, 
hypervisorAndTemplateName.second(), created, url, checksum, format, (int) guestOsId, hypervisor, storeId); @@ -608,8 +614,7 @@ public void registerTemplate(Pair hypervisorA Long templateId = null; try { templateId = templateVO.getId(); - String templateUniqName = templateVO.getUniqueName(); - performTemplateRegistrationOperations(templateUniqName, hypervisorAndTemplateName, templateVO.getUrl(), templateVO.getChecksum(), + performTemplateRegistrationOperations(hypervisorAndTemplateName, templateVO.getUrl(), templateVO.getChecksum(), templateVO.getFormat(), templateVO.getGuestOSId(), storeUrlAndId.second(), templateId, filePath, false); } catch (Exception e) { String errMsg = String.format("Failed to register template for hypervisor: %s", hypervisorAndTemplateName.first()); @@ -626,8 +631,7 @@ public void registerTemplate(Pair hypervisorA Long templateId = null; try { Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); - String templateUniqName = UUID.randomUUID().toString(); - templateId = performTemplateRegistrationOperations(templateUniqName, hypervisorAndTemplateName, NewTemplateUrl.get(hypervisor), NewTemplateChecksum.get(hypervisor), + templateId = performTemplateRegistrationOperations(hypervisorAndTemplateName, NewTemplateUrl.get(hypervisor), NewTemplateChecksum.get(hypervisor), hypervisorImageFormat.get(hypervisor), hypervisorGuestOsMap.get(hypervisor), storeUrlAndId.second(), null, filePath, true); Map configParams = new HashMap<>(); configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java index 1f9dc77fdc4e..3ea63d059a68 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java @@ -160,7 +160,7 @@ public List listByTemplate(long templateId) { } @Override - public List listByTemplateNotBypassed(long templateId) { + public List listByTemplateNotBypassed(long templateId, Long... storeIds) { return null; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java index 77e88a9466e3..f8e210ac326f 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java @@ -66,7 +66,7 @@ public interface TemplateDataStoreDao extends GenericDao listByTemplate(long templateId); - List listByTemplateNotBypassed(long templateId); + List listByTemplateNotBypassed(long templateId, Long... 
storeIds); TemplateDataStoreVO findByTemplateZoneReady(long templateId, Long zoneId); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java index ea53825eab0e..8aa411412b34 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java @@ -99,6 +99,7 @@ public boolean configure(String name, Map params) throws Configu templateSearch.and("template_id", templateSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); templateSearch.and("download_state", templateSearch.entity().getDownloadState(), SearchCriteria.Op.NEQ); templateSearch.and("destroyed", templateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ); + templateSearch.and("storeids", templateSearch.entity().getDataStoreId(), Op.IN); templateSearch.done(); templateRoleSearch = createSearchBuilder(); @@ -421,11 +422,12 @@ public List listByTemplate(long templateId) { } @Override - public List listByTemplateNotBypassed(long templateId) { + public List listByTemplateNotBypassed(long templateId, Long... storeIds) { SearchCriteria sc = templateSearch.create(); sc.setParameters("template_id", templateId); sc.setParameters("download_state", Status.BYPASSED); sc.setParameters("destroyed", false); + sc.setParameters("storeids", storeIds); return search(sc, null); } diff --git a/scripts/storage/secondary/setup-sysvm-tmplt b/scripts/storage/secondary/setup-sysvm-tmplt index 9c33e5fb6ff4..fa33f33f0408 100755 --- a/scripts/storage/secondary/setup-sysvm-tmplt +++ b/scripts/storage/secondary/setup-sysvm-tmplt @@ -157,7 +157,7 @@ echo "$ext=true" >> $tmpdestdir/template.properties echo "id=$templateId" >> $tmpdestdir/template.properties echo "public=true" >> $tmpdestdir/template.properties echo "$ext.filename=$localfile" >> $tmpdestdir/template.properties -echo "uniquename=$uuid" >> $tmpdestdir/template.properties +echo "uniquename=routing-$templateId" >> $tmpdestdir/template.properties echo "$ext.virtualsize=$vrtmpltsize" >> $tmpdestdir/template.properties echo "virtualsize=$vrtmpltsize" >> $tmpdestdir/template.properties echo "$ext.size=$tmpltsize" >> $tmpdestdir/template.properties diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index d733489f0faf..6d3ac64b7ac5 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -156,8 +156,10 @@ private String getTemplateStatus(TemplateJoinVO template) { @Override public TemplateResponse newTemplateResponse(EnumSet detailsView, ResponseView view, TemplateJoinVO template) { - List templatesInStore = _templateStoreDao.listByTemplateNotBypassed(template.getId()); - List> downloadProgressDetails = new ArrayList(); + List storesInZone = dataStoreDao.listStoresByZoneId(template.getDataCenterId()); + Long[] storeIds = storesInZone.stream().map(ImageStoreVO::getId).toArray(Long[]::new); + List templatesInStore = _templateStoreDao.listByTemplateNotBypassed(template.getId(), storeIds); + List> downloadProgressDetails = new ArrayList<>(); HashMap downloadDetailInImageStores = null; for (TemplateDataStoreVO templateInStore : templatesInStore) { downloadDetailInImageStores = new HashMap<>(); From 
5fa86f5a1a1c90971cd1676f6f6aa0e27b281183 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 27 Aug 2021 11:51:01 +0530 Subject: [PATCH 111/117] reduce no of db updates + revert vmwareResource change --- .../upgrade/SystemVmTemplateRegistration.java | 8 ++------ .../vmware/resource/VmwareResource.java | 20 ++++++++++++++----- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 5b436ab46bba..f093d16bced6 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -442,9 +442,10 @@ private List fetchAllHypervisors(Long zoneId) { } private Long createTemplateObjectInDB(SystemVMTemplateDetails details) { + Long templateId = vmTemplateDao.getNextInSequence(Long.class, "id"); VMTemplateVO template = new VMTemplateVO(); template.setUuid(details.getUuid()); - template.setUniqueName(details.getUuid()); + template.setUniqueName(String.format("routing-%s" , String.valueOf(templateId))); template.setName(details.getName()); template.setPublicTemplate(false); template.setFeatured(false); @@ -466,11 +467,6 @@ private Long createTemplateObjectInDB(SystemVMTemplateDetails details) { if (template == null) { return null; } - template.setUniqueName(String.format("routing-%s" , String.valueOf(template.getId()))); - boolean updated = vmTemplateDao.update(template.getId(), template); - if (!updated) { - throw new CloudRuntimeException("Failed to add template details to database"); - } return template.getId(); } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 51f5b9ac52c8..ae2b8230fac4 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -25,6 +25,7 @@ import java.net.ConnectException; import java.net.InetSocketAddress; import java.net.URI; +import java.net.URL; import java.nio.channels.SocketChannel; import java.rmi.RemoteException; import java.util.ArrayList; @@ -47,6 +48,7 @@ import javax.naming.ConfigurationException; import javax.xml.datatype.XMLGregorianCalendar; +import com.cloud.utils.script.Script; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; @@ -423,8 +425,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected static File s_systemVmKeyFile = null; private static final Object s_syncLockObjectFetchKeyFile = new Object(); - private static final String homeDir = System.getProperty("user.home"); - protected static final String s_defaultPathSystemVmKeyFile = homeDir + "/.ssh/id_rsa"; + protected static final String s_relativePathSystemVmKeyFileInstallDir = "scripts/vm/systemvm/id_rsa.cloud"; + protected static final String s_defaultPathSystemVmKeyFile = "/usr/share/cloudstack-common/scripts/vm/systemvm/id_rsa.cloud"; public Gson getGson() { return _gson; @@ -7049,10 +7051,18 @@ private static void syncFetchSystemVmKeyFile() { } private static File fetchSystemVmKeyFile() { - String filePath = 
s_defaultPathSystemVmKeyFile; - File keyFile = new File(filePath); + String filePath = s_relativePathSystemVmKeyFileInstallDir; s_logger.debug("Looking for file [" + filePath + "] in the classpath."); - + URL url = Script.class.getClassLoader().getResource(filePath); + File keyFile = null; + if (url != null) { + keyFile = new File(url.getPath()); + } + if (keyFile == null || !keyFile.exists()) { + filePath = s_defaultPathSystemVmKeyFile; + keyFile = new File(filePath); + s_logger.debug("Looking for file [" + filePath + "] in the classpath."); + } if (!keyFile.exists()) { s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); } From e1532adaecab523a584f890deb9dfdd721235ff3 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 31 Aug 2021 11:26:52 +0530 Subject: [PATCH 112/117] Address comments: 1. Move upgrade logic completely to new class 2. Use Files for creation of temp directories 3. Use Daos for existing sql queries in the upgrade path 4. Define constants for guest os ids --- .../java/com/cloud/dc/dao/ClusterDao.java | 3 + .../java/com/cloud/dc/dao/ClusterDaoImpl.java | 18 ++ .../com/cloud/storage/dao/VMTemplateDao.java | 3 + .../cloud/storage/dao/VMTemplateDaoImpl.java | 19 ++ .../upgrade/SystemVmTemplateRegistration.java | 185 ++++++++++++------ .../upgrade/dao/Upgrade41510to41600.java | 122 +----------- .../com/cloud/storage/StorageManagerImpl.java | 14 +- tools/marvin/marvin/config/test_data.py | 2 +- 8 files changed, 186 insertions(+), 180 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java index de8d604fb4ce..ab9c5cab8c4a 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; +import java.util.Set; public interface ClusterDao extends GenericDao { List listByPodId(long podId); @@ -34,6 +35,8 @@ public interface ClusterDao extends GenericDao { List getAvailableHypervisorInZone(Long zoneId); + Set getDistictAvailableHypervisorsAcrossClusters(); + List listByDcHyType(long dcId, String hyType); Map> getPodClusterIdMap(List clusterIds); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java index b1fce6195ba5..4d9bedba9669 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java @@ -39,8 +39,10 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; @Component public class ClusterDaoImpl extends GenericDaoBase implements ClusterDao { @@ -51,6 +53,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C protected final SearchBuilder ZoneSearch; protected final SearchBuilder ZoneHyTypeSearch; protected final SearchBuilder ZoneClusterSearch; + protected final SearchBuilder ClusterSearch; protected GenericSearchBuilder ClusterIdSearch; @@ -97,6 +100,10 @@ public ClusterDaoImpl() { ClusterIdSearch.selectFields(ClusterIdSearch.entity().getId()); ClusterIdSearch.and("dataCenterId", ClusterIdSearch.entity().getDataCenterId(), Op.EQ); ClusterIdSearch.done(); + + ClusterSearch = createSearchBuilder(); + ClusterSearch.select(null, Func.DISTINCT, ClusterSearch.entity().getHypervisorType()); + 
ClusterIdSearch.done(); } @Override @@ -154,6 +161,17 @@ public List getAvailableHypervisorInZone(Long zoneId) { return hypers; } + @Override + public Set getDistictAvailableHypervisorsAcrossClusters() { + SearchCriteria sc = ClusterSearch.create(); + List clusters = listBy(sc); + Set hypers = new HashSet<>(); + for (ClusterVO cluster : clusters) { + hypers.add(cluster.getHypervisorType()); + } + return hypers; + } + @Override public Map> getPodClusterIdMap(List clusterIds) { TransactionLegacy txn = TransactionLegacy.currentTxn(); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index e8e9208b4ce9..63221e745d8d 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -20,6 +20,7 @@ import java.util.Map; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Storage; import com.cloud.storage.VMTemplateVO; import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.db.GenericDao; @@ -72,6 +73,8 @@ public interface VMTemplateDao extends GenericDao, StateDao< VMTemplateVO findRoutingTemplate(HypervisorType type, String templateName); + VMTemplateVO findLatestTemplateByTypeAndHypervisor(HypervisorType hypervisorType, Storage.TemplateType type); + public Long countTemplatesForAccount(long accountId); public List listUnRemovedTemplatesByStates(VirtualMachineTemplate.State ...states); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index 927193f35377..74d210be0deb 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -97,6 +97,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem private SearchBuilder AllFieldsSearch; protected SearchBuilder ParentTemplateIdSearch; private SearchBuilder InactiveUnremovedTmpltSearch; + private SearchBuilder LatestTemplateByHypervisorTypeSearch; @Inject ResourceTagDao _tagsDao; @@ -105,6 +106,11 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem private String consoleProxyTmpltName; public VMTemplateDaoImpl() { + super(); + LatestTemplateByHypervisorTypeSearch = createSearchBuilder(); + LatestTemplateByHypervisorTypeSearch.and("hypervisorType", LatestTemplateByHypervisorTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); + LatestTemplateByHypervisorTypeSearch.and("templateType", LatestTemplateByHypervisorTypeSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); + LatestTemplateByHypervisorTypeSearch.and("removed", LatestTemplateByHypervisorTypeSearch.entity().getRemoved(), SearchCriteria.Op.NULL); } @Override @@ -601,6 +607,19 @@ public VMTemplateVO findRoutingTemplate(HypervisorType hType, String templateNam } } + @Override + public VMTemplateVO findLatestTemplateByTypeAndHypervisor(HypervisorType hypervisorType, TemplateType type) { + SearchCriteria sc = LatestTemplateByHypervisorTypeSearch.create(); + sc.setParameters("hypervisorType", hypervisorType); + sc.setParameters("templateType", type); + Filter filter = new Filter(VMTemplateVO.class, "id", false, null, 1L); + List templates = listBy(sc, filter); + if (templates != null && !templates.isEmpty()) { + return templates.get(0); + } + return null; + } + @Override public Long countTemplatesForAccount(long 
accountId) { SearchCriteria sc = CountTemplatesByAccount.create(); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index f093d16bced6..0616537a60b4 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -62,19 +62,17 @@ import java.io.InputStream; import java.net.URI; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.sql.Connection; import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; @@ -88,13 +86,15 @@ public class SystemVmTemplateRegistration { private static final String TEMPLATES_PATH = fetchTemplatesPath(); private static final String METADATA_FILE_NAME = "metadata.ini"; private static final String METADATA_FILE = TEMPLATES_PATH + METADATA_FILE_NAME; - public static final String TEMPORARY_SECONDARY_STORE = "/tmp/tmpSecStorage"; + public static final String TEMPORARY_SECONDARY_STORE = "tmp"; private static final String PARTIAL_TEMPLATE_FOLDER = String.format("/template/tmpl/%d/", Account.ACCOUNT_ID_SYSTEM); - private static final String FETCH_REGISTERED_TEMPLATE_INSTALL_PATH = "SELECT install_path FROM `cloud`.`template_store_ref` where template_id = ? LIMIT 1"; private static final String storageScriptsDir = "scripts/storage/secondary"; + private static final Integer OTHER_LINUX_ID = 99; + private static final Integer LINUX_5_ID = 15; + private static final Integer LINUX_7_ID = 183; private static final Integer SCRIPT_TIMEOUT = 1800000; private static final Integer LOCK_WAIT_TIMEOUT = 1200; - private static final Integer TOKEN_LENGTH = 10; + public static String CS_MAJOR_VERSION = null; public static String CS_TINY_VERSION = null; @@ -262,7 +262,7 @@ public void setUpdated(Date updated) { public static final Map NewTemplateUrl = new HashMap(); public static final Map NewTemplateChecksum = new HashMap(); - public static final Map routerTemplateConfigurationNames = new HashMap() { + public static final Map RouterTemplateConfigurationNames = new HashMap() { { put(Hypervisor.HypervisorType.KVM, "router.template.kvm"); put(Hypervisor.HypervisorType.VMware, "router.template.vmware"); @@ -275,12 +275,12 @@ public void setUpdated(Date updated) { public static final Map hypervisorGuestOsMap = new HashMap() { { - put(Hypervisor.HypervisorType.KVM, 15); - put(Hypervisor.HypervisorType.XenServer, 99); - put(Hypervisor.HypervisorType.VMware, 99); - put(Hypervisor.HypervisorType.Hyperv, 15); - put(Hypervisor.HypervisorType.LXC, 15); - put(Hypervisor.HypervisorType.Ovm3, 183); + put(Hypervisor.HypervisorType.KVM, LINUX_5_ID); + put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID); + put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID); + put(Hypervisor.HypervisorType.Hyperv, LINUX_5_ID); + put(Hypervisor.HypervisorType.LXC, LINUX_5_ID); + put(Hypervisor.HypervisorType.Ovm3, LINUX_7_ID); } }; @@ -295,20 +295,13 @@ public void setUpdated(Date updated) { } }; - public static String generateToken(int length) { - String charset = 
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - Random rand = new Random(System.currentTimeMillis()); - StringBuffer sb = new StringBuffer(); - for (int i = 0; i < length; i++) { - int pos = rand.nextInt(charset.length()); - sb.append(charset.charAt(pos)); - } - return sb.toString(); - } - public static boolean validateIfSeeded(String url, String path) { - String filePath = TEMPORARY_SECONDARY_STORE + generateToken(TOKEN_LENGTH); + String filePath = null; try { + filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString(); + if (filePath == null) { + throw new CloudRuntimeException("Failed to create temporary directory to mount secondary store"); + } mountStore(url, filePath); int lastIdx = path.lastIndexOf(File.separator); String partialDirPath = path.substring(0, lastIdx); @@ -325,6 +318,11 @@ public static boolean validateIfSeeded(String url, String path) { throw new CloudRuntimeException("Failed to verify if the template is seeded", e); } finally { unmountStore(filePath); + try { + Files.delete(Path.of(filePath)); + } catch (IOException e) { + LOGGER.error(String.format("Failed to delete temporary directory: %s", filePath)); + } } } @@ -338,9 +336,9 @@ private String calculateChecksum(File file) { } } - public long getRegisteredTemplateId(Pair hypervisorAndTemplateName) { + public Long getRegisteredTemplateId(Pair hypervisorAndTemplateName) { VMTemplateVO vmTemplate = vmTemplateDao.findLatestTemplateByName(hypervisorAndTemplateName.second()); - long templateId = -1; + Long templateId = null; if (vmTemplate != null) { templateId = vmTemplate.getId(); } @@ -420,10 +418,6 @@ public static void mountStore(String storeUrl, String path) { URI uri = new URI(UriUtils.encodeURIComponent(storeUrl)); String host = uri.getHost(); String mountPath = uri.getPath(); - boolean fileCreated = new File(path).mkdirs(); - if (!fileCreated) { - throw new CloudRuntimeException("Failed to created file for mounting store to copy systemVM templates"); - } String mount = String.format(MOUNT_COMMAND, host + ":" + mountPath, path); Script.runSimpleBashScript(mount); } @@ -506,7 +500,10 @@ public void updateSystemVMEntries(Long templateId, Hypervisor.HypervisorType hyp public void updateConfigurationParams(Map configParams) { for (Map.Entry config : configParams.entrySet()) { - configurationDao.update(config.getKey(), config.getValue()); + boolean updated = configurationDao.update(config.getKey(), config.getValue()); + if (!updated) { + throw new CloudRuntimeException(String.format("Failed to update configuration parameter %s", config.getKey())); + } } } @@ -630,7 +627,7 @@ public void registerTemplate(Pair hypervisorA templateId = performTemplateRegistrationOperations(hypervisorAndTemplateName, NewTemplateUrl.get(hypervisor), NewTemplateChecksum.get(hypervisor), hypervisorImageFormat.get(hypervisor), hypervisorGuestOsMap.get(hypervisor), storeUrlAndId.second(), null, filePath, true); Map configParams = new HashMap<>(); - configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); + configParams.put(RouterTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." 
+ CS_TINY_VERSION); updateConfigurationParams(configParams); updateSystemVMEntries(templateId, hypervisorAndTemplateName.first()); @@ -664,23 +661,6 @@ public static void parseMetadataFile() { } } - private static String getTemplateStoreDetails(Connection conn, Long templateId) { - String installPath = null; - try { - PreparedStatement pstmt = conn.prepareStatement(FETCH_REGISTERED_TEMPLATE_INSTALL_PATH); - pstmt.setLong(1, templateId); - ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - installPath = rs.getString(1); - } - } catch (SQLException e) { - String errMsg = String.format("Failed to fetch template store record for template with id: %s", templateId); - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); - } - return installPath; - } - private static void cleanupStore(Long templateId, String filePath) { String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + String.valueOf(templateId); try { @@ -722,7 +702,7 @@ private void validateTemplates(Set hypervisorsInUse) } } - public void registerTemplates(Connection conn, Set hypervisorsInUse) { + public void registerTemplates(Set hypervisorsInUse) { GlobalLock lock = GlobalLock.getInternLock("UpgradeDatabase-Lock"); try { LOGGER.info("Grabbing lock to register templates."); @@ -737,8 +717,12 @@ public void registerTemplates(Connection conn, Set hy public void doInTransactionWithoutResult(final TransactionStatus status) { List zoneIds = getEligibleZoneIds(); for (Long zoneId : zoneIds) { - String filePath = TEMPORARY_SECONDARY_STORE + generateToken(TOKEN_LENGTH); + String filePath = null; try { + filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString(); + if (filePath == null) { + throw new CloudRuntimeException("Failed to create temporary file path to mount the store"); + } Pair storeUrlAndId = getNfsStoreInZone(zoneId); mountStore(storeUrlAndId.first(), filePath); List hypervisorList = fetchAllHypervisors(zoneId); @@ -746,11 +730,12 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { Hypervisor.HypervisorType name = Hypervisor.HypervisorType.getType(hypervisor); String templateName = NewTemplateNameList.get(name); Pair hypervisorAndTemplateName = new Pair(name, templateName); - long templateId = getRegisteredTemplateId(hypervisorAndTemplateName); - if (templateId != -1) { + Long templateId = getRegisteredTemplateId(hypervisorAndTemplateName); + if (templateId != null) { VMTemplateVO templateVO = vmTemplateDao.findById(templateId); - String installPath = getTemplateStoreDetails(conn, templateId); - if (SystemVmTemplateRegistration.validateIfSeeded(storeUrlAndId.first(), installPath)) { + TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(templateId, DataStoreRole.Image); + String installPath = templateDataStoreVO.getInstallPath(); + if (validateIfSeeded(storeUrlAndId.first(), installPath)) { continue; } else if (templateVO != null) { registerTemplate(hypervisorAndTemplateName, storeUrlAndId, templateVO, filePath); @@ -775,4 +760,90 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { lock.releaseRef(); } } + + private void updateRegisteredTemplateDetails(Long templateId, Map.Entry hypervisorAndTemplateName) { + VMTemplateVO templateVO = vmTemplateDao.findById(templateId); + templateVO.setTemplateType(Storage.TemplateType.SYSTEM); + if (Hypervisor.HypervisorType.VMware == templateVO.getHypervisorType()) { + templateVO.setDeployAsIs(true); + } + boolean updated = vmTemplateDao.update(templateVO.getId(), 
templateVO); + if (!updated) { + String errMsg = String.format("updateSystemVmTemplates:Exception while updating template with id %s to be marked as 'system'", templateId); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + + updateSystemVMEntries(templateId, hypervisorAndTemplateName.getKey()); + + // Change value of global configuration parameter router.template.* for the corresponding hypervisor and minreq.sysvmtemplate.version for the ACS version + Map configParams = new HashMap<>(); + configParams.put(RouterTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()), hypervisorAndTemplateName.getValue()); + configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." + CS_TINY_VERSION); + updateConfigurationParams(configParams); + } + + private void updateTemplateUrlAndChecksum(VMTemplateVO templateVO, Map.Entry hypervisorAndTemplateName) { + templateVO.setUrl(NewTemplateUrl.get(hypervisorAndTemplateName.getKey())); + templateVO.setChecksum(NewTemplateChecksum.get(hypervisorAndTemplateName.getKey())); + if (Hypervisor.HypervisorType.VMware == templateVO.getHypervisorType()) { + templateVO.setDeployAsIs(true); + } + boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO); + if (!updated) { + String errMsg = String.format("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type %s", hypervisorAndTemplateName.getKey().name()); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + + public void updateSystemVmTemplates(final Connection conn) { + LOGGER.debug("Updating System Vm template IDs"); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(final TransactionStatus status) { + Set hypervisorsListInUse = new HashSet(); + try { + hypervisorsListInUse = clusterDao.getDistictAvailableHypervisorsAcrossClusters(); + + } catch (final Exception e) { + LOGGER.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); + throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e); + } + + for (final Map.Entry hypervisorAndTemplateName : NewTemplateNameList.entrySet()) { + LOGGER.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms"); + Long templateId = getRegisteredTemplateId(new Pair<>(hypervisorAndTemplateName.getKey(), hypervisorAndTemplateName.getValue())); + try { + // change template type to SYSTEM + if (templateId != null) { + updateRegisteredTemplateDetails(templateId, hypervisorAndTemplateName); + } else { + if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) { + try { + registerTemplates(hypervisorsListInUse); + break; + } catch (final Exception e) { + throw new CloudRuntimeException(String.format("%s.%s %s SystemVm template not found. Cannot upgrade system Vms", CS_MAJOR_VERSION, CS_TINY_VERSION, hypervisorAndTemplateName.getKey())); + } + } else { + LOGGER.warn(String.format("%s.%s %s SystemVm template not found. 
Cannot upgrade system Vms hypervisor is not used, so not failing upgrade", + CS_MAJOR_VERSION, CS_TINY_VERSION, hypervisorAndTemplateName.getKey())); + // Update the latest template URLs for corresponding hypervisor + VMTemplateVO templateVO = vmTemplateDao.findLatestTemplateByTypeAndHypervisor(hypervisorAndTemplateName.getKey(), Storage.TemplateType.SYSTEM); + if (templateVO != null) { + updateTemplateUrlAndChecksum(templateVO, hypervisorAndTemplateName); + } + } + } + } catch (final Exception e) { + String errMsg = "updateSystemVmTemplates:Exception while getting ids of templates"; + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); + } + } + LOGGER.debug("Updating System Vm Template IDs Complete"); + } + }); + } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java index d8ab2ee3ac43..cb80e7263d10 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41600.java @@ -19,18 +19,10 @@ import java.io.InputStream; import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; import com.cloud.upgrade.SystemVmTemplateRegistration; import org.apache.log4j.Logger; -import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade41510to41600 implements DbUpgrade, DbUpgradeSystemVmTemplate { @@ -80,116 +72,10 @@ private void initSystemVmTemplateRegistration() { public void updateSystemVmTemplates(final Connection conn) { LOG.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); - final Set hypervisorsListInUse = new HashSet(); - try (PreparedStatement pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); ResultSet rs = pstmt.executeQuery()) { - while (rs.next()) { - switch (Hypervisor.HypervisorType.getType(rs.getString(1))) { - case XenServer: - hypervisorsListInUse.add(Hypervisor.HypervisorType.XenServer); - break; - case KVM: - hypervisorsListInUse.add(Hypervisor.HypervisorType.KVM); - break; - case VMware: - hypervisorsListInUse.add(Hypervisor.HypervisorType.VMware); - break; - case Hyperv: - hypervisorsListInUse.add(Hypervisor.HypervisorType.Hyperv); - break; - case LXC: - hypervisorsListInUse.add(Hypervisor.HypervisorType.LXC); - break; - case Ovm3: - hypervisorsListInUse.add(Hypervisor.HypervisorType.Ovm3); - break; - default: - break; - } - } - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e); - } - - for (final Map.Entry hypervisorAndTemplateName : systemVmTemplateRegistration.NewTemplateNameList.entrySet()) { - LOG.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms"); - try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? 
and removed is null order by id desc limit 1")) { - // Get systemvm template id for corresponding hypervisor - long templateId = -1; - pstmt.setString(1, hypervisorAndTemplateName.getValue()); - try (ResultSet rs = pstmt.executeQuery()) { - if (rs.next()) { - templateId = rs.getLong(1); - } - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates: Exception caught while getting ids of templates: " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates: Exception caught while getting ids of templates", e); - } - - // change template type to SYSTEM - if (templateId != -1) { - try (PreparedStatement templ_type_pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?");) { - templ_type_pstmt.setLong(1, templateId); - templ_type_pstmt.executeUpdate(); - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e); - } - - updateVMwareSystemvVMTemplateField(conn, SystemVmTemplateRegistration.NewTemplateNameList.get(Hypervisor.HypervisorType.VMware)); - // update template ID of system Vms - systemVmTemplateRegistration.updateSystemVMEntries(templateId, hypervisorAndTemplateName.getKey()); - - // Change value of global configuration parameter router.template.* for the corresponding hypervisor - // Change value of global configuration parameter - minreq.sysvmtemplate.version for the ACS version - Map configParams = new HashMap<>(); - configParams.put(SystemVmTemplateRegistration.routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()), hypervisorAndTemplateName.getValue()); - configParams.put("minreq.sysvmtemplate.version", SystemVmTemplateRegistration.CS_MAJOR_VERSION + "." + SystemVmTemplateRegistration.CS_TINY_VERSION); - - systemVmTemplateRegistration.updateConfigurationParams(configParams); - } else { - if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) { - try { - systemVmTemplateRegistration.registerTemplates(conn, hypervisorsListInUse); - break; - } catch (final Exception e) { - throw new CloudRuntimeException(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms"); - } - } else { - LOG.warn(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. " + hypervisorAndTemplateName.getKey() - + " hypervisor is not used, so not failing upgrade"); - // Update the latest template URLs for corresponding - // hypervisor - try (PreparedStatement update_templ_url_pstmt = conn - .prepareStatement("UPDATE `cloud`.`vm_template` SET url = ? , checksum = ? WHERE hypervisor_type = ? 
AND type = 'SYSTEM' AND removed is null order by id desc limit 1");) { - update_templ_url_pstmt.setString(1, SystemVmTemplateRegistration.NewTemplateUrl.get(hypervisorAndTemplateName.getKey())); - update_templ_url_pstmt.setString(2, SystemVmTemplateRegistration.NewTemplateChecksum.get(hypervisorAndTemplateName.getKey())); - update_templ_url_pstmt.setString(3, hypervisorAndTemplateName.getKey().toString()); - update_templ_url_pstmt.executeUpdate(); - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " - + hypervisorAndTemplateName.getKey().toString() + ": " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " - + hypervisorAndTemplateName.getKey().toString(), e); - } - updateVMwareSystemvVMTemplateField(conn, SystemVmTemplateRegistration.NewTemplateNameList.get(Hypervisor.HypervisorType.VMware)); - } - } - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while getting ids of templates: " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting ids of templates", e); - } - } - LOG.debug("Updating System Vm Template IDs Complete"); - } - - private void updateVMwareSystemvVMTemplateField(final Connection conn, String templateName) { - try (PreparedStatement update_templ_vmware_pstmt = conn - .prepareStatement("UPDATE `cloud`.`vm_template` SET deploy_as_is = 1 WHERE name = '"+ templateName +"' AND removed is null order by id desc limit 1");) { - update_templ_vmware_pstmt.executeUpdate(); - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while updating 'deploy_as_is' for VMWare hypervisor type : " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating deploy_as_is for VMware hypervisor type ", e); + try { + systemVmTemplateRegistration.updateSystemVmTemplates(conn); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to find / register SystemVM template(s)"); } } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 8db8fa3e65c7..213957faac85 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -22,6 +22,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; +import java.nio.file.Files; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.util.ArrayList; @@ -2775,18 +2776,23 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { Set hypSet = new HashSet(hypervisorTypes); TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); - String filePath = SystemVmTemplateRegistration.TEMPORARY_SECONDARY_STORE + SystemVmTemplateRegistration.generateToken(10); + String filePath = null; try { + filePath = Files.createTempDirectory(SystemVmTemplateRegistration.TEMPORARY_SECONDARY_STORE).toString(); + if (filePath == null) { + throw new CloudRuntimeException("Failed to create temporary file path to mount the store"); + } Pair storeUrlAndId = new Pair<>(url, store.getId()); for (HypervisorType hypervisorType : hypSet) { try { String templateName = getValidTemplateName(zoneId, 
hypervisorType); Pair hypervisorAndTemplateName = new Pair<>(hypervisorType, templateName); - long templateId = systemVmTemplateRegistration.getRegisteredTemplateId(hypervisorAndTemplateName); - VMTemplateVO vmTemplateVO = _templateDao.findById(templateId); + Long templateId = systemVmTemplateRegistration.getRegisteredTemplateId(hypervisorAndTemplateName); + VMTemplateVO vmTemplateVO = null; TemplateDataStoreVO templateVO = null; - if (templateId != -1) { + if (templateId != null) { + vmTemplateVO = _templateDao.findById(templateId); templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); if (templateVO != null) { try { diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index 48169aaf853d..f39ca4f90b4a 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -2052,7 +2052,7 @@ }, "1.16.3": { "semanticversion": "1.16.3", - "url": "http://download.cloudstack.org/cks/as-1.16.3.iso", + "url": "http://download.cloudstack.org/cks/setup-1.16.3.iso", "mincpunumber": 2, "minmemory": 2048 } From 9ba1d6a7ca91a5f0ad13da27b14aa55e8d303cf9 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 21 Sep 2021 16:52:50 +0530 Subject: [PATCH 113/117] Fixing missing label --- ui/public/locales/en.json | 1 + 1 file changed, 1 insertion(+) diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 5cd54cf14ca3..a56908ac6571 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -1443,6 +1443,7 @@ "label.miniops": "Min IOPS", "label.minmaxiops": "Min IOPS / Max IOPS", "label.minmemory": "Min Memory (in MB)", +"label.minsize": "Minimum size", "label.minute.past.hour": "minute(s) past the hour", "label.minutes.past.hour": "minutes(s) past the hour", "label.monday": "Monday", From 75abaf9920ade43c9fe3e4f8667db7a46a68d3ef Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 21 Sep 2021 16:55:23 +0530 Subject: [PATCH 114/117] Fix confusing error message --- .../cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index c9ce5344f7c5..aec96fe67785 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -869,7 +869,7 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd throw new InvalidParameterValueException("Minsize must be at least than 1"); } if (maxSize <= minSize) { - throw new InvalidParameterValueException("Maxsize must be greater than or equal to minsize"); + throw new InvalidParameterValueException("Maxsize must be greater than minsize"); } if (maxSize + kubernetesCluster.getControlNodeCount() > maxClusterSize) { throw new InvalidParameterValueException( From 6deb6f05376aa817671810e4e55920248b46d449 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 28 Sep 2021 16:22:37 +0530 Subject: [PATCH 115/117] Add taint on control nodes --- .../KubernetesClusterActionWorker.java | 27 +++++++++++++++++++ .../KubernetesClusterStartWorker.java | 1 + 2 files changed, 28 insertions(+) diff --git 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 503b49670e3f..4f865a1dd84b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -479,6 +479,33 @@ protected void copyScriptFile(String nodeAddress, final int sshPort, File file, } } + protected boolean taintControlNodes() { + StringBuilder commands = new StringBuilder(); + List vmMapVOList = getKubernetesClusterVMMaps(); + for(KubernetesClusterVmMapVO vmMap :vmMapVOList) { + if(!vmMap.isControlNode()) { + continue; + } + String name = userVmDao.findById(vmMap.getVmId()).getName(); + String command = String.format("sudo /opt/bin/kubectl annotate node %s cluster-autoscaler.kubernetes.io/scale-down-disabled=true ; ", name); + commands.append(command); + } + try { + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, commands.toString(), 10000, 10000, 60000); + return result.first(); + } catch (Exception e) { + String msg = String.format("Failed to taint control nodes on : %s : %s", kubernetesCluster.getName(), e.getMessage()); + logMessage(Level.ERROR, msg, e); + return false; + } + } + protected boolean deployProvider() { Network network = networkDao.findById(kubernetesCluster.getNetworkId()); // Since the provider creates IP addresses, don't deploy it unless the underlying network supports it diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 1fc41ee0dcd5..b6da75dba81b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -582,6 +582,7 @@ public boolean startKubernetesClusterOnCreate() { if (!isKubernetesClusterDashboardServiceRunning(true, startTimeoutTime)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } + taintControlNodes(); deployProvider(); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); return true; From e8b93ddc950748124ff1956536567029b673fae7 Mon Sep 17 00:00:00 2001 From: davidjumani Date: Tue, 28 Sep 2021 18:44:32 +0530 Subject: [PATCH 116/117] Fix vm name --- .../cluster/actionworkers/KubernetesClusterActionWorker.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 4f865a1dd84b..8e6c49efb920 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -486,7 +486,7 @@ protected boolean taintControlNodes() { if(!vmMap.isControlNode()) { continue; } - String name = userVmDao.findById(vmMap.getVmId()).getName(); + String name = userVmDao.findById(vmMap.getVmId()).getDisplayName(); String command = String.format("sudo /opt/bin/kubectl annotate node %s cluster-autoscaler.kubernetes.io/scale-down-disabled=true ; ", name); commands.append(command); } From c49ba7debfef736bd758287c49cb2afc04d3af7c Mon Sep 17 00:00:00 2001 From: davidjumani Date: Wed, 29 Sep 2021 09:17:42 +0530 Subject: [PATCH 117/117] Fix name again --- .../cluster/actionworkers/KubernetesClusterActionWorker.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 8e6c49efb920..62d45a3e028d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -486,7 +486,7 @@ protected boolean taintControlNodes() { if(!vmMap.isControlNode()) { continue; } - String name = userVmDao.findById(vmMap.getVmId()).getDisplayName(); + String name = userVmDao.findById(vmMap.getVmId()).getDisplayName().toLowerCase(); String command = String.format("sudo /opt/bin/kubectl annotate node %s cluster-autoscaler.kubernetes.io/scale-down-disabled=true ; ", name); commands.append(command); }
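
For reference, the chained command that taintControlNodes() sends over SSH in the patches above takes the following shape. This is a minimal standalone sketch for illustration only, not part of the patch series; the class name and sample node names are invented. It mirrors the last two fixes: one cluster-autoscaler.kubernetes.io/scale-down-disabled=true annotation per control node, with the VM display name lowercased first, since Kubernetes node names are lowercase DNS-1123 labels.

import java.util.List;
import java.util.stream.Collectors;

public class ControlNodeAnnotationSketch {

    // Build the same kind of chained kubectl invocation the worker executes in a
    // single SSH round trip: one "annotate node" command per control node, joined
    // with ';' so later commands still run even if an earlier one fails.
    static String buildAnnotateCommand(List<String> controlNodeDisplayNames) {
        return controlNodeDisplayNames.stream()
                .map(name -> String.format(
                        "sudo /opt/bin/kubectl annotate node %s cluster-autoscaler.kubernetes.io/scale-down-disabled=true",
                        name.toLowerCase()))
                .collect(Collectors.joining(" ; "));
    }

    public static void main(String[] args) {
        // Hypothetical display names as CloudStack might report them.
        System.out.println(buildAnnotateCommand(List.of("K8s-Control-1", "K8s-Control-2")));
    }
}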