diff --git a/api/src/main/java/com/cloud/agent/api/StoragePoolInfo.java b/api/src/main/java/com/cloud/agent/api/StoragePoolInfo.java index 7d1e2389070e..d923694a854d 100644 --- a/api/src/main/java/com/cloud/agent/api/StoragePoolInfo.java +++ b/api/src/main/java/com/cloud/agent/api/StoragePoolInfo.java @@ -28,6 +28,7 @@ public class StoragePoolInfo { StoragePoolType poolType; long capacityBytes; long availableBytes; + String name; Map details; protected StoragePoolInfo() { @@ -67,14 +68,34 @@ public String getHost() { return host; } + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public void setLocalPath(String localPath) { + this.localPath = localPath; + } + public String getLocalPath() { return localPath; } + public void setHostPath(String hostPath) { + this.hostPath = hostPath; + } + public String getHostPath() { return hostPath; } + public void setPoolType(StoragePoolType poolType) { + this.poolType = poolType; + } + public StoragePoolType getPoolType() { return poolType; } diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFConfigurationTO.java b/api/src/main/java/com/cloud/agent/api/storage/OVFConfigurationTO.java new file mode 100644 index 000000000000..28176807ee18 --- /dev/null +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFConfigurationTO.java @@ -0,0 +1,55 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
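// Hypothetical usage sketch (illustration only): the OVFConfigurationTO that follows is
// meant to hold one Configuration entry of an OVF DeploymentOptionSection and is later
// filled with the virtual hardware items whose ovf:configuration attribute matches its
// (lower-cased) id. The class name and all literal values in this sketch are invented.
class OVFConfigurationTOUsageSketch {
    static OVFConfigurationTO buildSampleConfiguration() {
        // the constructor lower-cases the id so it can be compared with the
        // ovf:configuration attribute found on VirtualHardwareSection items
        OVFConfigurationTO small = new OVFConfigurationTO("Small", "Small profile",
                "1 vCPU / 1 GB RAM deployment option");
        OVFVirtualHardwareItemTO cpu = new OVFVirtualHardwareItemTO();
        cpu.setConfigurationIds("small");
        cpu.setElementName("1 virtual CPU");
        small.setHardwareItems(java.util.Collections.singletonList(cpu));
        return small;
    }
}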
+// +package com.cloud.agent.api.storage; + +import java.util.List; + +public class OVFConfigurationTO { + + private final String id; + private final String label; + private final String description; + private List hardwareItems; + + public OVFConfigurationTO(String id, String label, String description) { + this.id = id.toLowerCase(); + this.label = label; + this.description = description; + } + + public String getId() { + return id; + } + + public String getLabel() { + return label; + } + + public String getDescription() { + return description; + } + + public void setHardwareItems(List items) { + this.hardwareItems = items; + } + + public List getHardwareItems() { + return hardwareItems; + } +} diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java b/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java index 15d63587490a..9c1a727bf3d3 100644 --- a/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java @@ -22,8 +22,14 @@ import java.io.StringReader; import java.io.StringWriter; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import javax.xml.transform.Transformer; @@ -34,6 +40,8 @@ import com.cloud.configuration.Resource.ResourceType; import com.cloud.exception.InternalErrorException; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.math.NumberUtils; import org.apache.log4j.Logger; @@ -41,6 +49,9 @@ import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; +import org.w3c.dom.traversal.DocumentTraversal; +import org.w3c.dom.traversal.NodeFilter; +import org.w3c.dom.traversal.NodeIterator; import org.xml.sax.InputSource; import org.xml.sax.SAXException; @@ -48,7 +59,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class OVFHelper { - private static final Logger s_logger = Logger.getLogger(OVFHelper.class); + private static final Logger LOGGER = Logger.getLogger(OVFHelper.class); /** * Get disk virtual size given its values on fields: 'ovf:capacity' and 'ovf:capacityAllocationUnits' @@ -84,7 +95,9 @@ private String getChildNodeValue(Node node, String childNodeName) { NodeList childNodes = node.getChildNodes(); for (int i = 0; i < childNodes.getLength(); i++) { Node value = childNodes.item(i); - if (value != null && value.getNodeName().equals(childNodeName)) { + // Also match if the child's qualified name carries a namespace prefix. + // Example: a child named <rasd:Connection> matches childNodeName "Connection" + if (value != null && (value.getNodeName().equals(childNodeName) || value.getNodeName().endsWith(":" + childNodeName))) { return value.getTextContent(); } } @@ -119,8 +132,11 @@ protected OVFPropertyTO createOVFPropertyFromNode(Node node) { /** * Retrieve OVF properties from a parsed OVF file, with attribute 'ovf:userConfigurable' set to true */ - private List getConfigurableOVFPropertiesFromDocument(Document doc) { + public List getConfigurableOVFPropertiesFromDocument(Document doc) { List props = new ArrayList<>(); + if (doc == null) { + return props; + } NodeList properties = doc.getElementsByTagName("Property"); if (properties != null) { for (int i = 0; i < properties.getLength(); i++) { @@
-138,103 +154,83 @@ private List getConfigurableOVFPropertiesFromDocument(Document do } /** - * Get properties from OVF file located on ovfFilePath + * Get properties from OVF XML string */ - public List getOVFPropertiesFromFile(String ovfFilePath) throws ParserConfigurationException, IOException, SAXException { - if (StringUtils.isBlank(ovfFilePath)) { - return new ArrayList<>(); - } - File ovfFile = new File(ovfFilePath); - final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(ovfFile); + protected List getOVFPropertiesFromXmlString(final String ovfString) throws ParserConfigurationException, IOException, SAXException { + InputSource is = new InputSource(new StringReader(ovfString)); + final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(is); return getConfigurableOVFPropertiesFromDocument(doc); } - /** - * Get properties from OVF XML string - */ - protected List getOVFPropertiesXmlString(final String ovfFilePath) throws ParserConfigurationException, IOException, SAXException { - InputSource is = new InputSource(new StringReader(ovfFilePath)); + protected List getOVFDeploymentOptionsFromXmlString(final String ovfString) throws ParserConfigurationException, IOException, SAXException { + InputSource is = new InputSource(new StringReader(ovfString)); final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(is); - return getConfigurableOVFPropertiesFromDocument(doc); + return getDeploymentOptionsFromDocumentTree(doc); } - public List getOVFVolumeInfo(final String ovfFilePath) { + protected List getOVFVirtualHardwareSectionFromXmlString(final String ovfString) throws ParserConfigurationException, IOException, SAXException { + InputSource is = new InputSource(new StringReader(ovfString)); + final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(is); + return getVirtualHardwareItemsFromDocumentTree(doc); + } + + public List getOVFVolumeInfoFromFile(final String ovfFilePath) throws InternalErrorException { if (StringUtils.isBlank(ovfFilePath)) { - return new ArrayList(); + return new ArrayList<>(); } - ArrayList vf = new ArrayList(); - ArrayList vd = new ArrayList(); + Document doc = getDocumentFromFile(ovfFilePath); + + return getOVFVolumeInfoFromFile(ovfFilePath, doc); + } + + public List getOVFVolumeInfoFromFile(String ovfFilePath, Document doc) throws InternalErrorException { + if (org.apache.commons.lang.StringUtils.isBlank(ovfFilePath)) { + return null; + } File ovfFile = new File(ovfFilePath); - try { - final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFilePath)); - NodeList disks = doc.getElementsByTagName("Disk"); - NodeList files = doc.getElementsByTagName("File"); - NodeList items = doc.getElementsByTagName("Item"); - boolean toggle = true; - for (int j = 0; j < files.getLength(); j++) { - Element file = (Element)files.item(j); - OVFFile of = new OVFFile(); - of._href = file.getAttribute("ovf:href"); - if (of._href.endsWith("vmdk") || of._href.endsWith("iso")) { - of._id = file.getAttribute("ovf:id"); - String size = file.getAttribute("ovf:size"); - if (StringUtils.isNotBlank(size)) { - of._size = Long.parseLong(size); - } else { - String dataDiskPath = ovfFile.getParent() + File.separator + of._href; - File this_file = new File(dataDiskPath); - of._size = this_file.length(); - } - of.isIso = of._href.endsWith("iso"); - if (toggle && !of.isIso) { - of._bootable = true; - toggle = !toggle; - } - vf.add(of); - } - } - for 
(int i = 0; i < disks.getLength(); i++) { - Element disk = (Element)disks.item(i); - OVFDisk od = new OVFDisk(); - String virtualSize = disk.getAttribute("ovf:capacity"); - od._capacity = NumberUtils.toLong(virtualSize, 0L); - String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits"); - od._diskId = disk.getAttribute("ovf:diskId"); - od._fileRef = disk.getAttribute("ovf:fileRef"); - od._populatedSize = NumberUtils.toLong(disk.getAttribute("ovf:populatedSize")); - - if ((od._capacity != 0) && (allocationUnits != null)) { - - long units = 1; - if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) { - units = ResourceType.bytesToKiB; - } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) { - units = ResourceType.bytesToMiB; - } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) { - units = ResourceType.bytesToGiB; - } - od._capacity = od._capacity * units; - } - od._controller = getControllerType(items, od._diskId); - vd.add(od); - } + NodeList disks = doc.getElementsByTagName("Disk"); + NodeList files = doc.getElementsByTagName("File"); + NodeList items = doc.getElementsByTagName("Item"); - } catch (SAXException | IOException | ParserConfigurationException e) { - s_logger.error("Unexpected exception caught while parsing ovf file:" + ovfFilePath, e); - throw new CloudRuntimeException(e); + List vf = extractFilesFromOvfDocumentTree(ovfFile, files); + + List vd = extractDisksFromOvfDocumentTree(disks, items); + + List diskTOs = matchDisksToFilesAndGenerateDiskTOs(ovfFile, vf, vd); + + moveFirstIsoToEndOfDiskList(diskTOs); + + return diskTOs; + } + + /** + * check if first disk is an iso move it to the end. the semantics of this are not complete as more than one ISO may be there and theoretically an OVA may only contain ISOs + * + */ + private void moveFirstIsoToEndOfDiskList(List diskTOs) { + DatadiskTO fd = diskTOs.get(0); + if (fd.isIso()) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("moving first disk to the end as it is an ISO"); + } + diskTOs.remove(0); + diskTOs.add(fd); } + } - List disksTO = new ArrayList(); + private List matchDisksToFilesAndGenerateDiskTOs(File ovfFile, List vf, List vd) throws InternalErrorException { + List diskTOs = new ArrayList<>(); + int diskNumber = 0; for (OVFFile of : vf) { if (StringUtils.isBlank(of._id)){ - s_logger.error("The ovf file info is incomplete file info"); - throw new CloudRuntimeException("The ovf file info has incomplete file info"); + LOGGER.error("The ovf file info is incomplete file info"); + throw new InternalErrorException("The ovf file info has incomplete file info"); } OVFDisk cdisk = getDisk(of._id, vd); if (cdisk == null && !of.isIso){ - s_logger.error("The ovf file info has incomplete disk info"); - throw new CloudRuntimeException("The ovf file info has incomplete disk info"); + LOGGER.error("The ovf file info has incomplete disk info"); + throw new InternalErrorException("The ovf file info has incomplete disk info"); } Long capacity = cdisk == null ? 
of._size : cdisk._capacity; String controller = ""; @@ -248,18 +244,94 @@ public List getOVFVolumeInfo(final String ovfFilePath) { String dataDiskPath = ovfFile.getParent() + File.separator + of._href; File f = new File(dataDiskPath); if (!f.exists() || f.isDirectory()) { - s_logger.error("One of the attached disk or iso does not exists " + dataDiskPath); - throw new CloudRuntimeException("One of the attached disk or iso as stated on OVF does not exists " + dataDiskPath); + LOGGER.error("One of the attached disk or iso does not exists " + dataDiskPath); + throw new InternalErrorException("One of the attached disk or iso as stated on OVF does not exists " + dataDiskPath); } - disksTO.add(new DatadiskTO(dataDiskPath, capacity, of._size, of._id, of.isIso, of._bootable, controller, controllerSubType)); + diskTOs.add(new DatadiskTO(dataDiskPath, capacity, of._size, of._id, of.isIso, of._bootable, controller, controllerSubType, diskNumber)); + diskNumber++; } - //check if first disk is an iso move it to the end - DatadiskTO fd = disksTO.get(0); - if (fd.isIso()) { - disksTO.remove(0); - disksTO.add(fd); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("found %d file definitions in %s",diskTOs.size(), ovfFile.getPath())); + } + return diskTOs; + } + + private List extractDisksFromOvfDocumentTree(NodeList disks, NodeList items) { + ArrayList vd = new ArrayList<>(); + for (int i = 0; i < disks.getLength(); i++) { + Element disk = (Element)disks.item(i); + OVFDisk od = new OVFDisk(); + String virtualSize = disk.getAttribute("ovf:capacity"); + od._capacity = NumberUtils.toLong(virtualSize, 0L); + String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits"); + od._diskId = disk.getAttribute("ovf:diskId"); + od._fileRef = disk.getAttribute("ovf:fileRef"); + od._populatedSize = NumberUtils.toLong(disk.getAttribute("ovf:populatedSize")); + + if ((od._capacity != 0) && (allocationUnits != null)) { + + long units = 1; + if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) { + units = ResourceType.bytesToKiB; + } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) { + units = ResourceType.bytesToMiB; + } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) { + units = ResourceType.bytesToGiB; + } + od._capacity = od._capacity * units; + } + od._controller = getControllerType(items, od._diskId); + vd.add(od); + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("found %d disk definitions",vd.size())); + } + return vd; + } + + private List extractFilesFromOvfDocumentTree( File ovfFile, NodeList files) { + ArrayList vf = new ArrayList<>(); + boolean toggle = true; + for (int j = 0; j < files.getLength(); j++) { + Element file = (Element)files.item(j); + OVFFile of = new OVFFile(); + of._href = file.getAttribute("ovf:href"); + if (of._href.endsWith("vmdk") || of._href.endsWith("iso")) { + of._id = file.getAttribute("ovf:id"); + String size = file.getAttribute("ovf:size"); + if (StringUtils.isNotBlank(size)) { + of._size = Long.parseLong(size); + } else { + String dataDiskPath = ovfFile.getParent() + File.separator + of._href; + File this_file = new File(dataDiskPath); + of._size = this_file.length(); + } + of.isIso = of._href.endsWith("iso"); + if (toggle && !of.isIso) { + of._bootable = true; 
+ toggle = !toggle; + } + vf.add(of); + } + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("found %d file definitions in %s",vf.size(), ovfFile.getPath())); + } + return vf; + } + + public Document getDocumentFromFile(String ovfFilePath) { + if (org.apache.commons.lang.StringUtils.isBlank(ovfFilePath)) { + return null; + } + DocumentBuilderFactory documentBuilderFactory = DocumentBuilderFactory.newDefaultInstance(); + try { + DocumentBuilder builder = documentBuilderFactory.newDocumentBuilder(); + return builder.parse(new File(ovfFilePath)); + } catch (SAXException | IOException | ParserConfigurationException e) { + LOGGER.error("Unexpected exception caught while parsing ovf file:" + ovfFilePath, e); + throw new CloudRuntimeException(e); } - return disksTO; } private OVFDiskController getControllerType(final NodeList itemList, final String diskId) { @@ -330,56 +402,61 @@ private OVFDiskController getController(Element controllerItem) { return dc; } - public void rewriteOVFFile(final String origOvfFilePath, final String newOvfFilePath, final String diskName) { - try { - final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(origOvfFilePath)); - NodeList disks = doc.getElementsByTagName("Disk"); - NodeList files = doc.getElementsByTagName("File"); - NodeList items = doc.getElementsByTagName("Item"); - String keepfile = null; - List toremove = new ArrayList(); - for (int j = 0; j < files.getLength(); j++) { - Element file = (Element)files.item(j); - String href = file.getAttribute("ovf:href"); - if (diskName.equals(href)) { - keepfile = file.getAttribute("ovf:id"); - } else { - toremove.add(file); - } + public void rewriteOVFFileForSingleDisk(final String origOvfFilePath, final String newOvfFilePath, final String diskName) { + final Document doc = getDocumentFromFile(origOvfFilePath); + + NodeList disks = doc.getElementsByTagName("Disk"); + NodeList files = doc.getElementsByTagName("File"); + NodeList items = doc.getElementsByTagName("Item"); + String keepfile = null; + List toremove = new ArrayList<>(); + for (int j = 0; j < files.getLength(); j++) { + Element file = (Element)files.item(j); + String href = file.getAttribute("ovf:href"); + if (diskName.equals(href)) { + keepfile = file.getAttribute("ovf:id"); + } else { + toremove.add(file); } - String keepdisk = null; - for (int i = 0; i < disks.getLength(); i++) { - Element disk = (Element)disks.item(i); - String fileRef = disk.getAttribute("ovf:fileRef"); - if (keepfile == null) { - s_logger.info("FATAL: OVA format error"); - } else if (keepfile.equals(fileRef)) { - keepdisk = disk.getAttribute("ovf:diskId"); - } else { - toremove.add(disk); - } + } + String keepdisk = null; + for (int i = 0; i < disks.getLength(); i++) { + Element disk = (Element)disks.item(i); + String fileRef = disk.getAttribute("ovf:fileRef"); + if (keepfile == null) { + LOGGER.info("FATAL: OVA format error"); + } else if (keepfile.equals(fileRef)) { + keepdisk = disk.getAttribute("ovf:diskId"); + } else { + toremove.add(disk); } - for (int k = 0; k < items.getLength(); k++) { - Element item = (Element)items.item(k); - NodeList cn = item.getChildNodes(); - for (int l = 0; l < cn.getLength(); l++) { - if (cn.item(l) instanceof Element) { - Element el = (Element)cn.item(l); - if ("rasd:HostResource".equals(el.getNodeName()) - && !(el.getTextContent().contains("ovf:/file/" + keepdisk) || el.getTextContent().contains("ovf:/disk/" + keepdisk))) { - toremove.add(item); - break; - } + } + for (int k = 0; k < 
items.getLength(); k++) { + Element item = (Element)items.item(k); + NodeList cn = item.getChildNodes(); + for (int l = 0; l < cn.getLength(); l++) { + if (cn.item(l) instanceof Element) { + Element el = (Element)cn.item(l); + if ("rasd:HostResource".equals(el.getNodeName()) + && !(el.getTextContent().contains("ovf:/file/" + keepdisk) || el.getTextContent().contains("ovf:/disk/" + keepdisk))) { + toremove.add(item); + break; } } } + } - for (Element rme : toremove) { - if (rme.getParentNode() != null) { - rme.getParentNode().removeChild(rme); - } + for (Element rme : toremove) { + if (rme.getParentNode() != null) { + rme.getParentNode().removeChild(rme); } + } + writeDocumentToFile(newOvfFilePath, doc); + } + + private void writeDocumentToFile(String newOvfFilePath, Document doc) { + try { final StringWriter writer = new StringWriter(); final StreamResult result = new StreamResult(writer); final TransformerFactory tf = TransformerFactory.newInstance(); @@ -389,8 +466,8 @@ public void rewriteOVFFile(final String origOvfFilePath, final String newOvfFile PrintWriter outfile = new PrintWriter(newOvfFilePath); outfile.write(writer.toString()); outfile.close(); - } catch (SAXException | IOException | ParserConfigurationException | TransformerException e) { - s_logger.info("Unexpected exception caught while removing network elements from OVF:" + e.getMessage(), e); + } catch (IOException | TransformerException e) { + LOGGER.info("Unexpected exception caught while rewriting OVF:" + e.getMessage(), e); throw new CloudRuntimeException(e); } } @@ -404,6 +481,231 @@ OVFDisk getDisk(String fileRef, List disks) { return null; } + public List getNetPrerequisitesFromDocument(Document doc) throws InternalErrorException { + if (doc == null) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("no document to parse; returning no prerequisite networks"); + } + return Collections.emptyList(); + } + + Map nets = getNetworksFromDocumentTree(doc); + + checkForOnlyOneSystemNode(doc); + + matchNicsToNets(nets, doc); + + return new ArrayList<>(nets.values()); + } + + private void matchNicsToNets(Map nets, Node systemElement) { + final DocumentTraversal traversal = (DocumentTraversal) systemElement; + final NodeIterator iterator = traversal.createNodeIterator(systemElement, NodeFilter.SHOW_ELEMENT, null, true); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("starting out with %d network-prerequisites, parsing hardware",nets.size())); + } + int nicCount = 0; + for (Node n = iterator.nextNode(); n != null; n = iterator.nextNode()) { + final Element e = (Element) n; + if ("rasd:Connection".equals(e.getTagName())) { + nicCount++; + String name = e.getTextContent(); // should be in our nets + if(nets.get(name) == null) { + if(LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("found a nic definition without a network definition by name %s, adding it to the list.", name)); + } + nets.put(name, new NetworkPrerequisiteTO()); + } + NetworkPrerequisiteTO thisNet = nets.get(name); + if (e.getParentNode() != null) { + fillNicPrerequisites(thisNet,e.getParentNode()); + } + } + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("ending up with %d network-prerequisites, parsed %d nics", nets.size(), nicCount)); + } + } + + /** + * fill the nic prerequisite with the values found in its parent hardware item node + * TODO check for completeness and optionality + * + * @param nic the object to carry through the system + * @param parentNode the xml container node for nic data + */ + private void fillNicPrerequisites(NetworkPrerequisiteTO nic, Node
parentNode) { +// * <rasd:AddressOnParent>7</rasd:AddressOnParent> + try { + nic.setAddressOnParent(Integer.parseInt(getChildNodeValue(parentNode, "rasd:AddressOnParent"))); + } catch (NumberFormatException e) { + LOGGER.warn("encountered element of type \"rasd:AddressOnParent\", that could not be parsed to an integer number: " + getChildNodeValue(parentNode, "rasd:AddressOnParent")); + } +// * <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation> + nic.setAutomaticAllocation(Boolean.parseBoolean(getChildNodeValue(parentNode, "rasd:AutomaticAllocation"))); +// * <rasd:Connection>Management0-0</rasd:Connection> + // covered in parent +// * <rasd:Description>E1000 Ethernet adapter on "Management Network"</rasd:Description> + nic.setNicDescription(getChildNodeValue(parentNode, "rasd:Description")); +// * <rasd:ElementName>Network adapter 1</rasd:ElementName> + nic.setElementName(getChildNodeValue(parentNode, "rasd:ElementName")); +// * <rasd:InstanceID>6</rasd:InstanceID> + try { + nic.setInstanceID(Integer.parseInt(getChildNodeValue(parentNode, "rasd:InstanceID"))); + } catch (NumberFormatException e) { + LOGGER.warn("encountered element of type \"rasd:InstanceID\", that could not be parsed to an integer number: " + getChildNodeValue(parentNode, "rasd:InstanceID")); + } +// * <rasd:ResourceSubType>E1000</rasd:ResourceSubType> + nic.setResourceSubType(getChildNodeValue(parentNode, "rasd:ResourceSubType")); +// * <rasd:ResourceType>10</rasd:ResourceType> + nic.setResourceType(getChildNodeValue(parentNode, "rasd:ResourceType")); + } + + private void checkForOnlyOneSystemNode(Document doc) throws InternalErrorException { + // get hardware VirtualSystem, for now we support only one of those + NodeList systemElements = doc.getElementsByTagName("VirtualSystem"); + if (systemElements.getLength() != 1) { + String msg = "found " + systemElements.getLength() + " system definitions in OVA, can only handle exactly one."; + LOGGER.warn(msg); + throw new InternalErrorException(msg); + } + } + + private Map getNetworksFromDocumentTree(Document doc) { + NodeList networkElements = doc.getElementsByTagName("Network"); + Map nets = new HashMap<>(); + for (int i = 0; i < networkElements.getLength(); i++) { + + Element networkElement = (Element)networkElements.item(i); + String networkName = networkElement.getAttribute("ovf:name"); + + String description = getChildNodeValue(networkElement, "Description"); + + NetworkPrerequisiteTO network = new NetworkPrerequisiteTO(); + network.setName(networkName); + network.setNetworkDescription(description); + + nets.put(networkName,network); + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("found %d networks in template", nets.size())); + } + return nets; + } + + /** + * Retrieve the virtual hardware section and its deployment options as configurations + */ + public OVFVirtualHardwareSectionTO getVirtualHardwareSectionFromDocument(Document doc) { + List configurations = getDeploymentOptionsFromDocumentTree(doc); + List items = getVirtualHardwareItemsFromDocumentTree(doc); + if (CollectionUtils.isNotEmpty(configurations)) { + for (OVFConfigurationTO configuration : configurations) { + List confItems = items.stream().
+ filter(x -> StringUtils.isNotBlank(x.getConfigurationIds()) + && x.getConfigurationIds().toLowerCase().contains(configuration.getId())) + .collect(Collectors.toList()); + configuration.setHardwareItems(confItems); + } + } + List commonItems = null; + if (CollectionUtils.isNotEmpty(items)) { + commonItems = items.stream().filter(x -> StringUtils.isBlank(x.getConfigurationIds())).collect(Collectors.toList()); + } + return new OVFVirtualHardwareSectionTO(configurations, commonItems); + } + + private List getDeploymentOptionsFromDocumentTree(Document doc) { + List options = new ArrayList<>(); + if (doc == null) { + return options; + } + NodeList deploymentOptionSection = doc.getElementsByTagName("DeploymentOptionSection"); + if (deploymentOptionSection.getLength() == 0) { + return options; + } + Node hardwareSectionNode = deploymentOptionSection.item(0); + NodeList childNodes = hardwareSectionNode.getChildNodes(); + for (int i = 0; i < childNodes.getLength(); i++) { + Node node = childNodes.item(i); + if (node != null && node.getNodeName().equals("Configuration")) { + Element configuration = (Element) node; + String configurationId = configuration.getAttribute("ovf:id"); + String description = getChildNodeValue(configuration, "Description"); + String label = getChildNodeValue(configuration, "Label"); + //getVirtualHardwareItemsFromDocumentTree(doc); + OVFConfigurationTO option = new OVFConfigurationTO(configurationId, label, description); + options.add(option); + } + } + return options; + } + + private List getVirtualHardwareItemsFromDocumentTree(Document doc) { + List items = new LinkedList<>(); + if (doc == null) { + return items; + } + NodeList hardwareSection = doc.getElementsByTagName("VirtualHardwareSection"); + if (hardwareSection.getLength() == 0) { + return items; + } + Node hardwareSectionNode = hardwareSection.item(0); + NodeList childNodes = hardwareSectionNode.getChildNodes(); + for (int i = 0; i < childNodes.getLength(); i++) { + Node node = childNodes.item(i); + if (node != null && node.getNodeName().equals("Item")) { + Element configuration = (Element) node; + String configurationIds = configuration.getAttribute("ovf:configuration"); + String allocationUnits = getChildNodeValue(configuration, "AllocationUnits"); + String description = getChildNodeValue(configuration, "Description"); + String elementName = getChildNodeValue(configuration, "ElementName"); + String instanceID = getChildNodeValue(configuration, "InstanceID"); + String limit = getChildNodeValue(configuration, "Limit"); + String reservation = getChildNodeValue(configuration, "Reservation"); + String resourceType = getChildNodeValue(configuration, "ResourceType"); + String virtualQuantity = getChildNodeValue(configuration, "VirtualQuantity"); + OVFVirtualHardwareItemTO item = new OVFVirtualHardwareItemTO(); + item.setConfigurationIds(configurationIds); + item.setAllocationUnits(allocationUnits); + item.setDescription(description); + item.setElementName(elementName); + item.setInstanceId(instanceID); + item.setLimit(getLongValueFromString(limit)); + item.setReservation(getLongValueFromString(reservation)); + Integer resType = getIntValueFromString(resourceType); + if (resType != null) { + item.setResourceType(OVFVirtualHardwareItemTO.getResourceTypeFromId(resType)); + } + item.setVirtualQuantity(getLongValueFromString(virtualQuantity)); + items.add(item); + } + } + return items; + } + + private Long getLongValueFromString(String value) { + if (StringUtils.isNotBlank(value)) { + try { + return Long.parseLong(value); + 
} catch (NumberFormatException e) { + LOGGER.debug("Could not parse the value: " + value + ", ignoring it"); + } + } + return null; + } + + private Integer getIntValueFromString(String value) { + if (StringUtils.isNotBlank(value)) { + try { + return Integer.parseInt(value); + } catch (NumberFormatException e) { + LOGGER.debug("Could not parse the value: " + value + ", ignoring it"); + } + } + return null; + } + class OVFFile { // public String _href; diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFProperty.java b/api/src/main/java/com/cloud/agent/api/storage/OVFProperty.java index ac9ae7721b0f..6946781efcea 100644 --- a/api/src/main/java/com/cloud/agent/api/storage/OVFProperty.java +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFProperty.java @@ -19,6 +19,7 @@ package com.cloud.agent.api.storage; +// FR37 rename public interface OVFProperty { Long getTemplateId(); diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFPropertyTO.java b/api/src/main/java/com/cloud/agent/api/storage/OVFPropertyTO.java index abf743ae713d..f9a5965f9876 100644 --- a/api/src/main/java/com/cloud/agent/api/storage/OVFPropertyTO.java +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFPropertyTO.java @@ -23,6 +23,7 @@ /** * Used to represent travel objects like: + // FR37 rename * * * Select the route/gateway type. diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFVirtualHardwareItemTO.java b/api/src/main/java/com/cloud/agent/api/storage/OVFVirtualHardwareItemTO.java new file mode 100644 index 000000000000..4a8670b13098 --- /dev/null +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFVirtualHardwareItemTO.java @@ -0,0 +1,365 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
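// Hypothetical illustration of the resource-type mapping provided by the
// OVFVirtualHardwareItemTO class that follows: the numeric value of an OVF
// rasd:ResourceType element is translated to a HardwareResourceType constant via
// getResourceTypeFromId(). The class name, method and literal values here are invented.
class HardwareResourceTypeSketch {
    static void describeResourceType(String resourceTypeText) {
        try {
            int id = Integer.parseInt(resourceTypeText); // e.g. "10" for an Ethernet adapter
            OVFVirtualHardwareItemTO.HardwareResourceType type =
                    OVFVirtualHardwareItemTO.getResourceTypeFromId(id);
            System.out.println(resourceTypeText + " -> " + type.getName()); // 10 -> Ethernet Adaptor
        } catch (NumberFormatException e) {
            // mirrors OVFHelper.getIntValueFromString(): unparsable values are simply ignored
        }
    }
}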
+package com.cloud.agent.api.storage; + +// From: https://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData.xsd +public class OVFVirtualHardwareItemTO { + + //From: https://schemas.dmtf.org/wbem/cim-html/2/CIM_ResourceAllocationSettingData.html + public enum HardwareResourceType { + Other("Other", 1), + ComputerSystem ("Computer System", 2), + Processor("Processor", 3), + Memory("Memory", 4), + IDEController("IDE Controller", 5), + ParallelSCSIHBA("Parallel SCSI HBA", 6), + FC_HBA("FC HBA", 7), + iSCSI_HBA("iSCSI HBA", 8), + IB_HCA("IB HCA", 9), + EthernetAdapter("Ethernet Adaptor", 10), + OtherNetworkAdapter("Other Network Adaptor", 11), + IO_Slot("I/O Slot", 12), + IO_Device("I/O Device", 13), + FloppyDrive("Floppy Drive", 14), + CD_Drive("CD Drive", 15), + DVD_Drive("DVD Drive", 16), + DiskDrive("Disk Drive", 17), + TapeDrive("Tape Drive", 18), + StorageExtent("Storage Extent", 19), + OtherStorageDevice("Other Storage Device", 20), + SerialPort("Serial Port", 21), + ParallelPort("Parallel Port", 22), + USBController("USB Controller", 23), + GraphicsController("Graphics Controller", 24), + IEEE_1394_Controller("IEEE 1394 Controller", 25), + PartitionableUnit("Partitionable Unit", 26), + BasePartitionableUnit("base Partitionable Unit", 27), + PowerSupply("Power", 28), + CoolingCapacity("Cooling Capacity", 29), + EthernetSwitchPort("Ethernet Switch Port", 30), + LogicalDisk("Logical Disk", 31), + StorageVolume("Storage Volume", 32), + EthernetConnection("Ethernet Connection", 33), + DMTF_reserved("DMTF Reserved", 35), + VendorReserved("Vendor Reserved", 32768); + + private String name; + private int id; + + HardwareResourceType(String name, int id) { + this.name = name; + this.id = id; + } + + public String getName() { + return name; + } + } + + public static HardwareResourceType getResourceTypeFromId(int id) { + if (id <= 33) { + for (HardwareResourceType type : HardwareResourceType.values()) { + if (type.id == id) { + return type; + } + } + } else if (id <= 32767) { + return HardwareResourceType.DMTF_reserved; + } + return HardwareResourceType.VendorReserved; + } + + public enum CustomerVisibility { + Unknown, PassedThrough, Virtualized, NotRepresented, DMTFReserved, VendorReserved; + } + + public enum MappingBehavior { + Unknown, NotSupported, Dedicated, SoftAffinity, HardAffinity, DMTFReserved, VendorReserved; + } + + private String address; + private String addressOnParent; + private String allocationUnits; + private boolean automaticAllocation; + private boolean automaticDeallocation; + private String caption; + private String changeableType; + private String componentSetting; + private String configurationName; + private String connection; + private CustomerVisibility customerVisibility; + private String description; + private String elementName; + private Long generation; + private String hostResource; + private String instanceId; + private Long limit; + private MappingBehavior mappingBehavior; + private String otherResourceType; + private String parent; + private String poolId; + private Long reservation; + private String resourceSubtype; + private HardwareResourceType resourceType; + private String soId; + private String soOrgId; + private Long virtualQuantity; + private String virtualQuantityUnits; + private int weight; + + private String configurationIds; + + public String getConfigurationIds() { + return configurationIds; + } + + public void setConfigurationIds(String configurationIds) { + this.configurationIds = configurationIds; + } + + public 
String getAddress() { + return address; + } + + public void setAddress(String address) { + this.address = address; + } + + public String getAddressOnParent() { + return addressOnParent; + } + + public void setAddressOnParent(String addressOnParent) { + this.addressOnParent = addressOnParent; + } + + public String getAllocationUnits() { + return allocationUnits; + } + + public void setAllocationUnits(String allocationUnits) { + this.allocationUnits = allocationUnits; + } + + public boolean isAutomaticAllocation() { + return automaticAllocation; + } + + public void setAutomaticAllocation(boolean automaticAllocation) { + this.automaticAllocation = automaticAllocation; + } + + public boolean isAutomaticDeallocation() { + return automaticDeallocation; + } + + public void setAutomaticDeallocation(boolean automaticDeallocation) { + this.automaticDeallocation = automaticDeallocation; + } + + public String getCaption() { + return caption; + } + + public void setCaption(String caption) { + this.caption = caption; + } + + public String getChangeableType() { + return changeableType; + } + + public void setChangeableType(String changeableType) { + this.changeableType = changeableType; + } + + public String getComponentSetting() { + return componentSetting; + } + + public void setComponentSetting(String componentSetting) { + this.componentSetting = componentSetting; + } + + public String getConfigurationName() { + return configurationName; + } + + public void setConfigurationName(String configurationName) { + this.configurationName = configurationName; + } + + public String getConnection() { + return connection; + } + + public void setConnection(String connection) { + this.connection = connection; + } + + public CustomerVisibility getCustomerVisibility() { + return customerVisibility; + } + + public void setCustomerVisibility(CustomerVisibility customerVisibility) { + this.customerVisibility = customerVisibility; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getElementName() { + return elementName; + } + + public void setElementName(String elementName) { + this.elementName = elementName; + } + + public Long getGeneration() { + return generation; + } + + public void setGeneration(Long generation) { + this.generation = generation; + } + + public String getHostResource() { + return hostResource; + } + + public void setHostResource(String hostResource) { + this.hostResource = hostResource; + } + + public String getInstanceId() { + return instanceId; + } + + public void setInstanceId(String instanceId) { + this.instanceId = instanceId; + } + + public Long getLimit() { + return limit; + } + + public void setLimit(Long limit) { + this.limit = limit; + } + + public MappingBehavior getMappingBehavior() { + return mappingBehavior; + } + + public void setMappingBehavior(MappingBehavior mappingBehavior) { + this.mappingBehavior = mappingBehavior; + } + + public String getOtherResourceType() { + return otherResourceType; + } + + public void setOtherResourceType(String otherResourceType) { + this.otherResourceType = otherResourceType; + } + + public String getParent() { + return parent; + } + + public void setParent(String parent) { + this.parent = parent; + } + + public String getPoolId() { + return poolId; + } + + public void setPoolId(String poolId) { + this.poolId = poolId; + } + + public Long getReservation() { + return reservation; + } + + public void setReservation(Long reservation) 
{ + this.reservation = reservation; + } + + public String getResourceSubtype() { + return resourceSubtype; + } + + public void setResourceSubtype(String resourceSubtype) { + this.resourceSubtype = resourceSubtype; + } + + public HardwareResourceType getResourceType() { + return resourceType; + } + + public void setResourceType(HardwareResourceType resourceType) { + this.resourceType = resourceType; + } + + public String getSoId() { + return soId; + } + + public void setSoId(String soId) { + this.soId = soId; + } + + public String getSoOrgId() { + return soOrgId; + } + + public void setSoOrgId(String soOrgId) { + this.soOrgId = soOrgId; + } + + public Long getVirtualQuantity() { + return virtualQuantity; + } + + public void setVirtualQuantity(Long virtualQuantity) { + this.virtualQuantity = virtualQuantity; + } + + public String getVirtualQuantityUnits() { + return virtualQuantityUnits; + } + + public void setVirtualQuantityUnits(String virtualQuantityUnits) { + this.virtualQuantityUnits = virtualQuantityUnits; + } + + public int getWeight() { + return weight; + } + + public void setWeight(int weight) { + this.weight = weight; + } +} diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFVirtualHardwareSectionTO.java b/api/src/main/java/com/cloud/agent/api/storage/OVFVirtualHardwareSectionTO.java new file mode 100644 index 000000000000..76503ddb787e --- /dev/null +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFVirtualHardwareSectionTO.java @@ -0,0 +1,43 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
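// Hypothetical sketch of how the OVFVirtualHardwareSectionTO defined next is obtained
// from OVFHelper.getVirtualHardwareSectionFromDocument(): items carrying an
// ovf:configuration attribute are grouped under the matching OVFConfigurationTO, the
// remaining items end up in the common hardware list. The class name and file path are invented.
class VirtualHardwareSectionSketch {
    static void printDeploymentOptions(OVFHelper helper) {
        org.w3c.dom.Document doc = helper.getDocumentFromFile("/tmp/appliance/appliance.ovf");
        OVFVirtualHardwareSectionTO section = helper.getVirtualHardwareSectionFromDocument(doc);
        if (section.getConfigurations() != null) {
            for (OVFConfigurationTO configuration : section.getConfigurations()) {
                System.out.println(configuration.getId() + ": " + configuration.getLabel());
            }
        }
    }
}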
+// +package com.cloud.agent.api.storage; + +import java.util.List; + +public class OVFVirtualHardwareSectionTO { + + public OVFVirtualHardwareSectionTO() { + } + + private List configurations; + private List commonHardwareItems; + + public OVFVirtualHardwareSectionTO(List configurations, List commonHardwareItems) { + this.configurations = configurations; + this.commonHardwareItems = commonHardwareItems; + } + + public List getConfigurations() { + return configurations; + } + + public List getCommonHardwareItems() { + return commonHardwareItems; + } +} diff --git a/api/src/main/java/com/cloud/agent/api/to/DatadiskTO.java b/api/src/main/java/com/cloud/agent/api/to/DatadiskTO.java index 1d3f91e25dbe..490b56e40119 100644 --- a/api/src/main/java/com/cloud/agent/api/to/DatadiskTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/DatadiskTO.java @@ -27,6 +27,7 @@ public class DatadiskTO { private boolean isIso; private String diskController; private String diskControllerSubType; + private int diskNumber; public DatadiskTO() { } @@ -38,7 +39,8 @@ public DatadiskTO(String path, long virtualSize, long fileSize, boolean bootable this.bootable = bootable; } - public DatadiskTO(String path, long virtualSize, long fileSize, String diskId, boolean isIso, boolean bootable, String controller, String controllerSubType) { + public DatadiskTO(String path, long virtualSize, long fileSize, String diskId, boolean isIso, boolean bootable, + String controller, String controllerSubType, int diskNumber) { this.path = path; this.virtualSize = virtualSize; this.fileSize = fileSize; @@ -47,6 +49,7 @@ public DatadiskTO(String path, long virtualSize, long fileSize, String diskId, b this.isIso = isIso; this.diskController = controller; this.diskControllerSubType = controllerSubType; + this.diskNumber = diskNumber; } public String getPath() { @@ -105,4 +108,7 @@ public void setDiskControllerSubType(String diskControllerSubType) { this.diskControllerSubType = diskControllerSubType; } + public int getDiskNumber() { + return this.diskNumber; + } } \ No newline at end of file diff --git a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java index dceacf0e65bc..9d671d2ba1fb 100644 --- a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java @@ -83,6 +83,12 @@ public class VirtualMachineTO { Map extraConfig = new HashMap<>(); @LogLevel(LogLevel.Log4jLevel.Off) Pair> ovfProperties; + /** + * to locate the template on primary storage to use for deploy as is + */ + String templateLocation = null; + String templateName = null; + String templatePrimaryStoreUuid; public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) { @@ -402,4 +408,28 @@ public boolean isEnterHardwareSetup() { public void setEnterHardwareSetup(boolean enterHardwareSetup) { this.enterHardwareSetup = enterHardwareSetup; } + + public String getTemplateLocation() { + return templateLocation; + } + + public void setTemplateLocation(String templateLocation) { + this.templateLocation = templateLocation; + } + + public String getTemplateName() { + return templateName; + } + + public void setTemplateName(String templateName) { + this.templateName = templateName; + } + + public String getTemplatePrimaryStoreUuid() { + return templatePrimaryStoreUuid; + } 
+ + public void setTemplatePrimaryStoreUuid(String templatePrimaryStoreUuid) { + this.templatePrimaryStoreUuid = templatePrimaryStoreUuid; + } } diff --git a/api/src/main/java/com/cloud/dc/VsphereStoragePolicy.java b/api/src/main/java/com/cloud/dc/VsphereStoragePolicy.java new file mode 100644 index 000000000000..ca0ed5447003 --- /dev/null +++ b/api/src/main/java/com/cloud/dc/VsphereStoragePolicy.java @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.dc; + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +public interface VsphereStoragePolicy extends Identity, InternalIdentity { + + long getZoneId(); + + String getPolicyId(); + + String getName(); + + String getDescription(); +} diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 5ea31a781786..19de913d9d9a 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -605,6 +605,9 @@ public class EventTypes { public static final String EVENT_POD_ROLLING_MAINTENANCE = "POD.ROLLING.MAINTENANCE"; public static final String EVENT_ZONE_ROLLING_MAINTENANCE = "ZONE.ROLLING.MAINTENANCE"; + // Storage Policies + public static final String EVENT_IMPORT_VCENTER_STORAGE_POLICIES = "IMPORT.VCENTER.STORAGE.POLICIES"; + static { // TODO: need a way to force author adding event types to declare the entity details as well, with out braking @@ -1011,6 +1014,8 @@ public class EventTypes { entityEventDetails.put(EVENT_POD_ROLLING_MAINTENANCE, PodResponse.class); entityEventDetails.put(EVENT_CLUSTER_ROLLING_MAINTENANCE, ClusterResponse.class); entityEventDetails.put(EVENT_HOST_ROLLING_MAINTENANCE, HostResponse.class); + + entityEventDetails.put(EVENT_IMPORT_VCENTER_STORAGE_POLICIES, "StoragePolicies"); } public static String getEntityForEvent(String eventName) { diff --git a/api/src/main/java/com/cloud/storage/ImageStore.java b/api/src/main/java/com/cloud/storage/ImageStore.java index c019b17421dd..58c6d859afcd 100644 --- a/api/src/main/java/com/cloud/storage/ImageStore.java +++ b/api/src/main/java/com/cloud/storage/ImageStore.java @@ -21,6 +21,12 @@ public interface ImageStore extends Identity, InternalIdentity { + String ACS_PROPERTY_PREFIX = "ACS-property-"; + String REQUIRED_NETWORK_PREFIX = "ACS-network-"; + String DISK_DEFINITION_PREFIX = "ACS-disk-"; + String OVF_HARDWARE_CONFIGURATION_PREFIX = "ACS-configuration-"; + String OVF_HARDWARE_ITEM_PREFIX = "ACS-hardware-item-"; + /** * @return name of the object store. 
*/ diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index 82bc5f6d4e5a..73f5b42c316c 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -16,11 +16,11 @@ // under the License. package com.cloud.storage; +import org.apache.commons.lang.NotImplementedException; + import java.util.ArrayList; import java.util.List; -import org.apache.commons.lang.NotImplementedException; - public class Storage { public static enum ImageFormat { QCOW2(true, true, false, "qcow2"), @@ -41,13 +41,6 @@ public static enum ImageFormat { private final boolean supportSnapshot; private final String fileExtension; - private ImageFormat(boolean supportThinProvisioning, boolean supportSparse, boolean supportSnapshot) { - this.supportThinProvisioning = supportThinProvisioning; - this.supportSparse = supportSparse; - this.supportSnapshot = supportSnapshot; - fileExtension = null; - } - private ImageFormat(boolean supportThinProvisioning, boolean supportSparse, boolean supportSnapshot, String fileExtension) { this.supportThinProvisioning = supportThinProvisioning; this.supportSparse = supportSparse; @@ -135,7 +128,8 @@ public static enum StoragePoolType { OCFS2(true, false), SMB(true, false), Gluster(true, false), - ManagedNFS(true, false); + ManagedNFS(true, false), + DatastoreCluster(true, true); // for VMware, to abstract pool of clusters private final boolean shared; private final boolean overprovisioning; diff --git a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java index 5177e51d4015..4e6c5e2a056b 100644 --- a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java +++ b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java @@ -138,4 +138,7 @@ public enum TemplateFilter { void incrUpdatedCount(); Date getUpdated(); + + boolean isDeployAsIs(); + } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 073c94049106..38f13f4e96e1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -260,7 +260,8 @@ public class ApiConstants { public static final String OUTOFBANDMANAGEMENT_POWERSTATE = "outofbandmanagementpowerstate"; public static final String OUTOFBANDMANAGEMENT_ENABLED = "outofbandmanagementenabled"; public static final String OUTPUT = "output"; - public static final String OVF_PROPERTIES = "ovfproperties"; + public static final String PROPERTIES = "properties"; + public static final String ACS_PROPERTY = "ACS-property"; public static final String PARAMS = "params"; public static final String PARENT_ID = "parentid"; public static final String PARENT_DOMAIN_ID = "parentdomainid"; @@ -813,6 +814,10 @@ public class ApiConstants { public static final String BOOT_TYPE = "boottype"; public static final String BOOT_MODE = "bootmode"; public static final String BOOT_INTO_SETUP = "bootintosetup"; + public static final String DEPLOY_AS_IS = "deployasis"; + public static final String CROSS_ZONES = "crossZones"; + public static final String TEMPLATETYPE = "templatetype"; + public static final String SOURCETEMPLATEID = "sourcetemplateid"; public enum BootType { UEFI, BIOS; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java index f0ca5fb851a1..6b9ddce75c26 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java @@ -28,6 +28,7 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.VsphereStoragePoliciesResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Logger; @@ -151,6 +152,9 @@ public class CreateDiskOfferingCmd extends BaseCmd { since = "4.14") private String cacheMode; + @Parameter(name = ApiConstants.STORAGE_POLICY, type = CommandType.UUID, entityType = VsphereStoragePoliciesResponse.class,required = false, description = "Name of the storage policy defined at vCenter, this is applicable only for VMware") + private Long storagePolicy; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -273,6 +277,9 @@ public String getCacheMode() { return cacheMode; } + public Long getStoragePolicy() { + return storagePolicy; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/template/RegisterTemplateCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/template/RegisterTemplateCmdByAdmin.java index 28593755c115..13c0d5b3a12b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/template/RegisterTemplateCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/template/RegisterTemplateCmdByAdmin.java @@ -17,10 +17,32 @@ package org.apache.cloudstack.api.command.admin.template; import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; import org.apache.cloudstack.api.response.TemplateResponse; @APICommand(name = "registerTemplate", description = "Registers an existing template into the CloudStack cloud.", responseObject = TemplateResponse.class, responseView = ResponseView.Full, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class RegisterTemplateCmdByAdmin extends RegisterTemplateCmd {} +public class RegisterTemplateCmdByAdmin extends RegisterTemplateCmd { + + ///////////////////////////////////////////////////// + //////////////// +API parameter ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name= ApiConstants.DEPLOY_AS_IS, + type = CommandType.BOOLEAN, + description = "true if template should not strip and define disks and networks but leave those to the template definition", + since = "4.15" + ) + private Boolean deployAsIs; + + ///////////////////////////////////////////////////// + /////////////////// +Accessor /////////////////////// + ///////////////////////////////////////////////////// + + public Boolean isDeployAsIs() { + return (deployAsIs == null) ? 
true : deployAsIs; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java index 10321cc035d4..7127bac8d785 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java @@ -204,6 +204,9 @@ public Map getNicNetworkList() { for (Map entry : (Collection>)nicNetworkList.values()) { String nic = entry.get(VmDetailConstants.NIC); String networkUuid = entry.get(VmDetailConstants.NETWORK); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); + } if (Strings.isNullOrEmpty(nic) || Strings.isNullOrEmpty(networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) { throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic)); } @@ -219,11 +222,14 @@ public Map getNicIpAddressList() { for (Map entry : (Collection>)nicIpAddressList.values()) { String nic = entry.get(VmDetailConstants.NIC); String ipAddress = Strings.emptyToNull(entry.get(VmDetailConstants.IP4_ADDRESS)); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress)); + } if (Strings.isNullOrEmpty(nic)) { throw new InvalidParameterValueException(String.format("NIC ID: '%s' is invalid for IP address mapping", nic)); } if (Strings.isNullOrEmpty(ipAddress)) { - throw new InvalidParameterValueException(String.format("IP address '%s' for NIC ID: %s is invalid", ipAddress, nic)); + throw new InvalidParameterValueException(String.format("Empty IP address for NIC ID: %s is invalid", nic)); } if (!Strings.isNullOrEmpty(ipAddress) && !ipAddress.equals("auto") && !NetUtils.isValidIp4(ipAddress)) { throw new InvalidParameterValueException(String.format("IP address '%s' for NIC ID: %s is invalid", ipAddress, nic)); @@ -239,12 +245,15 @@ public Map getDataDiskToDiskOfferingList() { Map dataDiskToDiskOfferingMap = new HashMap<>(); if (MapUtils.isNotEmpty(dataDiskToDiskOfferingList)) { for (Map entry : (Collection>)dataDiskToDiskOfferingList.values()) { - String nic = entry.get(VmDetailConstants.DISK); + String disk = entry.get(VmDetailConstants.DISK); String offeringUuid = entry.get(VmDetailConstants.DISK_OFFERING); - if (Strings.isNullOrEmpty(nic) || Strings.isNullOrEmpty(offeringUuid) || _entityMgr.findByUuid(DiskOffering.class, offeringUuid) == null) { - throw new InvalidParameterValueException(String.format("Disk offering ID: %s for disk ID: %s is invalid", offeringUuid, nic)); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("disk, '%s', gets offering, '%s'", disk, offeringUuid)); + } + if (Strings.isNullOrEmpty(disk) || Strings.isNullOrEmpty(offeringUuid) || _entityMgr.findByUuid(DiskOffering.class, offeringUuid) == null) { + throw new InvalidParameterValueException(String.format("Disk offering ID: %s for disk ID: %s is invalid", offeringUuid, disk)); } - dataDiskToDiskOfferingMap.put(nic, _entityMgr.findByUuid(DiskOffering.class, offeringUuid).getId()); + dataDiskToDiskOfferingMap.put(disk, _entityMgr.findByUuid(DiskOffering.class, offeringUuid).getId()); } } return dataDiskToDiskOfferingMap; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index 
b4514b1c759b..ecdcfd58be81 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -27,8 +27,6 @@ import javax.annotation.Nonnull; -import com.cloud.utils.StringUtils; - import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.ACL; @@ -69,6 +67,7 @@ import com.cloud.offering.DiskOffering; import com.cloud.template.VirtualMachineTemplate; import com.cloud.uservm.UserVm; +import com.cloud.utils.StringUtils; import com.cloud.utils.net.Dhcp; import com.cloud.utils.net.NetUtils; import com.cloud.vm.VirtualMachine; @@ -221,10 +220,10 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG @Parameter(name = ApiConstants.COPY_IMAGE_TAGS, type = CommandType.BOOLEAN, since = "4.13", description = "if true the image tags (if any) will be copied to the VM, default value is false") private Boolean copyImageTags; - @Parameter(name = ApiConstants.OVF_PROPERTIES, type = CommandType.MAP, since = "4.13", - description = "used to specify the OVF properties.") + @Parameter(name = ApiConstants.PROPERTIES, type = CommandType.MAP, since = "4.15", + description = "used to specify the vApp properties.") @LogLevel(LogLevel.Log4jLevel.Off) - private Map vmOvfProperties; + private Map vAppProperties; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -312,10 +311,10 @@ public ApiConstants.BootMode getBootMode() { } - public Map getVmOVFProperties() { + public Map getVmProperties() { Map map = new HashMap<>(); - if (MapUtils.isNotEmpty(vmOvfProperties)) { - Collection parameterCollection = vmOvfProperties.values(); + if (MapUtils.isNotEmpty(vAppProperties)) { + Collection parameterCollection = vAppProperties.values(); Iterator iterator = parameterCollection.iterator(); while (iterator.hasNext()) { HashMap entry = (HashMap)iterator.next(); diff --git a/api/src/main/java/org/apache/cloudstack/api/net/NetworkPrerequisite.java b/api/src/main/java/org/apache/cloudstack/api/net/NetworkPrerequisite.java new file mode 100644 index 000000000000..60872cff910f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/net/NetworkPrerequisite.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
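Note on the map-style parameters touched above: both ImportUnmanagedInstanceCmd (getNicNetworkList(), getNicIpAddressList(), getDataDiskToDiskOfferingList()) and the renamed vApp "properties" parameter on DeployVMCmd receive CommandType.MAP values, i.e. a map of indexed sub-maps that the command flattens into plain key/value pairs. A minimal, self-contained sketch of that flattening, not taken from the patch (the "key"/"value" entry names and the sample property are assumptions):

import java.util.HashMap;
import java.util.Map;

public class MapParameterFlattenSketch {
    // Turns { "0" -> {key=..., value=...}, "1" -> {...} } into a flat key/value map,
    // roughly what DeployVMCmd.getVmProperties() does with vAppProperties.
    static Map<String, String> flatten(Map<String, Map<String, String>> raw) {
        Map<String, String> flat = new HashMap<>();
        for (Map<String, String> entry : raw.values()) {
            flat.put(entry.get("key"), entry.get("value"));
        }
        return flat;
    }

    public static void main(String[] args) {
        Map<String, Map<String, String>> raw = new HashMap<>();
        Map<String, String> first = new HashMap<>();
        first.put("key", "vami.hostname");   // hypothetical vApp property name
        first.put("value", "appliance-01");
        raw.put("0", first);
        System.out.println(flatten(raw));    // {vami.hostname=appliance-01}
    }
}
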
+package org.apache.cloudstack.api.net; + +public interface NetworkPrerequisite { + int getAddressOnParent(); + + boolean isAutomaticAllocation(); + + String getNicDescription(); + + String getElementName(); + + int getInstanceID(); + + String getResourceSubType(); + + String getResourceType(); + + String getName(); + + String getNetworkDescription(); +} diff --git a/api/src/main/java/org/apache/cloudstack/api/net/NetworkPrerequisiteTO.java b/api/src/main/java/org/apache/cloudstack/api/net/NetworkPrerequisiteTO.java new file mode 100644 index 000000000000..39b45957e495 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/net/NetworkPrerequisiteTO.java @@ -0,0 +1,124 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.net; + +/** + * container for the network prerequisites as found in the appliance template + * + * for OVA: + * {code} + * + * Management Network Interface + * + * {code} + * {code} + * + * 7 + * true + * Management0-0 + * E1000 Ethernet adapter on "Management Network" + * Network adapter 1 + * 6 + * E1000 + * 10 + * + * {code} + */ +public class NetworkPrerequisiteTO implements NetworkPrerequisite { + String name; // attribute on Network should match on Item (virtual hardware) + String networkDescription; + + int addressOnParent; // or String? + boolean automaticAllocation; + String nicDescription; + String elementName; + int InstanceID; // or String? + String resourceSubType; + String resourceType; // or int? 
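The {code} samples in the javadoc above lost their XML markup during extraction; only the element values survive (they reappear in the OVFHelperTest fixture later in this patch). As a rough, hand-written illustration of how one such OVF network adapter Item would populate this TO, with the network name and the exact field mapping being assumptions and the literal values taken from that fixture:

import org.apache.cloudstack.api.net.NetworkPrerequisiteTO;

public class NetworkPrerequisiteExample {
    // Builds the TO for the "Network adapter 1" Item attached to the management network.
    static NetworkPrerequisiteTO managementNic() {
        NetworkPrerequisiteTO nic = new NetworkPrerequisiteTO();
        nic.setName("Management0-0");                 // ovf:name of the Network element (assumed)
        nic.setNetworkDescription("Management Network Interface");
        nic.setAddressOnParent(7);
        nic.setAutomaticAllocation(true);
        nic.setNicDescription("E1000 Ethernet adapter on \"Management Network\"");
        nic.setElementName("Network adapter 1");
        nic.setInstanceID(6);
        nic.setResourceSubType("E1000");
        nic.setResourceType("10");                    // CIM resource type 10 = ethernet adapter
        return nic;
    }

    public static void main(String[] args) {
        System.out.println(managementNic().getElementName());
    }
}
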
+ + @Override public int getAddressOnParent() { + return addressOnParent; + } + + public void setAddressOnParent(int addressOnParent) { + this.addressOnParent = addressOnParent; + } + + @Override public boolean isAutomaticAllocation() { + return automaticAllocation; + } + + public void setAutomaticAllocation(boolean automaticAllocation) { + this.automaticAllocation = automaticAllocation; + } + + @Override public String getNicDescription() { + return nicDescription; + } + + public void setNicDescription(String nicDescription) { + this.nicDescription = nicDescription; + } + + @Override public String getElementName() { + return elementName; + } + + public void setElementName(String elementName) { + this.elementName = elementName; + } + + @Override public int getInstanceID() { + return InstanceID; + } + + public void setInstanceID(int instanceID) { + InstanceID = instanceID; + } + + @Override public String getResourceSubType() { + return resourceSubType; + } + + public void setResourceSubType(String resourceSubType) { + this.resourceSubType = resourceSubType; + } + + @Override public String getResourceType() { + return resourceType; + } + + public void setResourceType(String resourceType) { + this.resourceType = resourceType; + } + + @Override public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override public String getNetworkDescription() { + return networkDescription; + } + + public void setNetworkDescription(String networkDescription) { + this.networkDescription = networkDescription; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TemplateOVFPropertyResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TemplateOVFPropertyResponse.java index 83455a3fe6e2..668bbe789259 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/TemplateOVFPropertyResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/TemplateOVFPropertyResponse.java @@ -16,13 +16,18 @@ // under the License. 
package org.apache.cloudstack.api.response; -import com.cloud.agent.api.storage.OVFProperty; -import com.cloud.serializer.Param; -import com.google.gson.annotations.SerializedName; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.EntityReference; +import com.cloud.agent.api.storage.OVFProperty; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +/** + * the placeholder of parameters to fill for deployment + // FR37 TODO remname for generic use + */ @EntityReference(value = OVFProperty.class) public class TemplateOVFPropertyResponse extends BaseResponse { @@ -58,6 +63,19 @@ public class TemplateOVFPropertyResponse extends BaseResponse { @Param(description = "the ovf property label") private String description; + @Override + public boolean equals(Object other) { + if (!(other instanceof TemplateOVFPropertyResponse)) { + return false; + } + return key != null && key.equals(((TemplateOVFPropertyResponse)other).key); + } + + @Override + public int hashCode() { + return key.hashCode(); + } + public String getKey() { return key; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java index 81fc2f37b0d6..ed843d9dd42e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java @@ -17,6 +17,8 @@ package org.apache.cloudstack.api.response; import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; @@ -54,7 +56,7 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "the date this template was created") private Date created; - @SerializedName("removed") + @SerializedName(ApiConstants.REMOVED) @Param(description = "the date this template was removed") private Date removed; @@ -79,7 +81,7 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "true if this template is a featured template, false otherwise") private boolean featured; - @SerializedName("crossZones") + @SerializedName(ApiConstants.CROSS_ZONES) @Param(description = "true if the template is managed across all Zones, false otherwise") private boolean crossZones; @@ -121,7 +123,7 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "the physical size of the template") private Long physicalSize; - @SerializedName("templatetype") + @SerializedName(ApiConstants.TEMPLATETYPE) @Param(description = "the type of the template") private String templateType; @@ -145,7 +147,7 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "checksum of the template") private String checksum; - @SerializedName("sourcetemplateid") + @SerializedName(ApiConstants.SOURCETEMPLATEID) @Param(description = "the template ID of the parent template if present") private String sourcetemplateId; @@ -153,7 +155,7 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "the ID of the secondary storage host for the template") private String hostId; - @SerializedName("hostname") + @SerializedName(ApiConstants.HOST_NAME) @Param(description = "the name of the secondary storage host for the template") private String 
hostName; @@ -171,7 +173,7 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @SerializedName(ApiConstants.DETAILS) @Param(description = "additional key/value details tied with template") - private Map details; + private Map details; @SerializedName(ApiConstants.BITS) @Param(description = "the processor bit size", since = "4.10") @@ -189,18 +191,28 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "KVM Only: true if template is directly downloaded to Primary Storage bypassing Secondary Storage") private Boolean directDownload; + @SerializedName(ApiConstants.DEPLOY_AS_IS) + @Param(description = "Vmware Only: true if template is deployed without orchestrating disks and networks but \"as-is\" defined in the template.") + private Boolean deployAsIs; + @SerializedName("parenttemplateid") @Param(description = "if Datadisk template, then id of the root disk template this template belongs to") + @Deprecated(since = "4.15") private String parentTemplateId; @SerializedName("childtemplates") @Param(description = "if root disk template, then ids of the datas disk templates this template owns") + @Deprecated(since = "4.15") private Set childTemplates; @SerializedName(ApiConstants.REQUIRES_HVM) @Param(description = "true if template requires HVM enabled, false otherwise") private Boolean requiresHvm; + @SerializedName(ApiConstants.PROPERTIES) + @Param(description = "the list required properties for deployment", responseObject = TemplateOVFPropertyResponse.class) + private Set properties; + public TemplateResponse() { tags = new LinkedHashSet<>(); } @@ -351,14 +363,32 @@ public void setProjectName(String projectName) { this.projectName = projectName; } - public Map getDetails() { + public Map getDetails() { return this.details; } - public void setDetails(Map details) { + public void setDetails(Map details) { this.details = details; } + public void addDetail(String key, String value) { + if (this.details == null) { + setDetails(new HashMap<>()); + } + this.details.put(key,value); + } + + public Set getProperties() { + return this.properties; + } + + public void addProperty(TemplateOVFPropertyResponse property) { + if (this.properties == null) { + this.properties = new HashSet(); + } + this.properties.add(property); + } + public void setTags(Set tags) { this.tags = tags; } @@ -387,6 +417,10 @@ public Boolean getDirectDownload() { return directDownload; } + public void setDeployAsIs(Boolean deployAsIs) { + this.deployAsIs = deployAsIs; + } + public void setParentTemplateId(String parentTemplateId) { this.parentTemplateId = parentTemplateId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VsphereStoragePoliciesResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VsphereStoragePoliciesResponse.java new file mode 100644 index 000000000000..63c49f148790 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/VsphereStoragePoliciesResponse.java @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import com.cloud.dc.VsphereStoragePolicy; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + + +@EntityReference(value = VsphereStoragePolicy.class) +public class VsphereStoragePoliciesResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "the ID of the Storage Policy") + private String id; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "the ID of the Zone") + private String zoneId; + + @SerializedName(ApiConstants.POLICY_ID) + @Param(description = "the identifier of the Storage Policy in vSphere DataCenter") + private String policyId; + + @SerializedName(ApiConstants.NAME) + @Param(description = "the name of the Storage Policy") + private String name; + + @SerializedName(ApiConstants.DESCRIPTION) + @Param(description = "the description of the Storage Policy") + private String description; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getZoneId() { + return zoneId; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public String getPolicyId() { + return policyId; + } + + public void setPolicyId(String policyId) { + this.policyId = policyId; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } +} diff --git a/api/src/test/java/com/cloud/agent/api/storage/OVFHelperTest.java b/api/src/test/java/com/cloud/agent/api/storage/OVFHelperTest.java index 8aa9852fb257..f1bbb6346cb6 100644 --- a/api/src/test/java/com/cloud/agent/api/storage/OVFHelperTest.java +++ b/api/src/test/java/com/cloud/agent/api/storage/OVFHelperTest.java @@ -40,16 +40,245 @@ public class OVFHelperTest { "" + ""; + private String ovfFileDeploymentOptionsSection = + "\n" + + " Deployment Configuration information\n" + + " \n" + + " \n" + + " Use this option to deploy an ASAv with a maximum throughput of 100 Mbps (uses 1 vCPU and 2 GB of memory).\n" + + " \n" + + " \n" + + " \n" + + " Use this option to deploy an ASAv with a maximum throughput of 1 Gbps (uses 1 vCPU and 2 GB of memory).\n" + + " \n" + + " \n" + + " \n" + + " Use this option to deploy an ASAv with a maximum throughput of 2 Gbps (uses 4 vCPUs and 8 GB of memory).\n" + + " \n" + + " "; + + private String ovfFileVirtualHardwareSection = + "\n" + + " Virtual hardware requirements\n" + + " \n" + + " Virtual Hardware Family\n" + + " 0\n" + + " ASAv\n" + + " vmx-08,vmx-09\n" + + " \n" + + " \n" + + " hertz * 10^6\n" + + " Number of Virtual CPUs\n" + + " 1 virtual CPU(s)\n" + + " 1\n" + + " 5000\n" + + " 1000\n" + + " 3\n" + + " 1\n" + + " \n" + + " \n" + + " hertz * 10^6\n" + + " Number of Virtual CPUs\n" + + " 4 virtual CPU(s)\n" + + " 1\n" + + " 20000\n" + + 
" 1000\n" + + " 3\n" + + " 4\n" + + " \n" + + " \n" + + " byte * 2^20\n" + + " Memory Size\n" + + " 2048MB of memory\n" + + " 2\n" + + " 2048\n" + + " 2048\n" + + " 4\n" + + " 2048\n" + + " \n" + + " \n" + + " byte * 2^20\n" + + " Memory Size\n" + + " 8192MB of memory\n" + + " 2\n" + + " 8192\n" + + " 8192\n" + + " 4\n" + + " 8192\n" + + " \n" + + " \n" + + " 0\n" + + " SCSI Controller\n" + + " SCSI controller 0\n" + + " 3\n" + + " lsilogic\n" + + " 6\n" + + " \n" + + " \n" + + " 0\n" + + " IDE Controller\n" + + " IDE 0\n" + + " 4\n" + + " 5\n" + + " \n" + + " \n" + + " 0\n" + + " true\n" + + " CD/DVD Drive\n" + + " 5\n" + + " 4\n" + + " 15\n" + + " \n" + + " \n" + + " 1\n" + + " true\n" + + " CD/DVD Drive\n" + + " ovf:/file/file3\n" + + " 18\n" + + " 4\n" + + " 15\n" + + " \n" + + " \n" + + " 7\n" + + " true\n" + + " Management0-0\n" + + " E1000 Ethernet adapter on \"Management Network\"\n" + + " Network adapter 1\n" + + " 6\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " 0\n" + + " Hard disk 1\n" + + " ovf:/disk/vmdisk1\n" + + " 7\n" + + " 3\n" + + " 17\n" + + " \n" + + " \n" + + " 1\n" + + " Hard disk 2\n" + + " ovf:/disk/vmdisk2\n" + + " 8\n" + + " 3\n" + + " 17\n" + + " \n" + + " \n" + + " 8\n" + + " true\n" + + " GigabitEthernet0-0\n" + + " General purpose E1000 Ethernet adapter\n" + + " Network adapter 2\n" + + " 9\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " 9\n" + + " true\n" + + " GigabitEthernet0-1\n" + + " General purpose E1000 Ethernet adapter\n" + + " Network adapter 3\n" + + " 10\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " 10\n" + + " true\n" + + " GigabitEthernet0-2\n" + + " General purpose E1000 Ethernet adapter\n" + + " Network adapter 4\n" + + " 11\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " 11\n" + + " true\n" + + " GigabitEthernet0-3\n" + + " General purpose E1000 Ethernet adapter\n" + + " Network adapter 5\n" + + " 12\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " 12\n" + + " true\n" + + " GigabitEthernet0-4\n" + + " General purpose E1000 Ethernet adapter\n" + + " Network adapter 6\n" + + " 13\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " 13\n" + + " true\n" + + " GigabitEthernet0-5\n" + + " General purpose E1000 Ethernet adapter\n" + + " Network adapter 7\n" + + " 14\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " 14\n" + + " true\n" + + " GigabitEthernet0-6\n" + + " General purpose E1000 Ethernet adapter\n" + + " Network adapter 8\n" + + " 15\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " 15\n" + + " true\n" + + " GigabitEthernet0-7\n" + + " General purpose E1000 Ethernet adapter\n" + + " Network adapter 9\n" + + " 16\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " 16\n" + + " true\n" + + " GigabitEthernet0-8\n" + + " Default HA failover E1000 Ethernet adapter, or additional standalone general purpose adapter\n" + + " Network adapter 10\n" + + " 17\n" + + " E1000\n" + + " 10\n" + + " \n" + + " \n" + + " "; + private OVFHelper ovfHelper = new OVFHelper(); @Test public void testGetOVFPropertiesValidOVF() throws IOException, SAXException, ParserConfigurationException { - List props = ovfHelper.getOVFPropertiesXmlString(ovfFileProductSection); + List props = ovfHelper.getOVFPropertiesFromXmlString(ovfFileProductSection); Assert.assertEquals(2, props.size()); } @Test(expected = SAXParseException.class) public void testGetOVFPropertiesInvalidOVF() throws IOException, SAXException, ParserConfigurationException { - ovfHelper.getOVFPropertiesXmlString(ovfFileProductSection + 
"xxxxxxxxxxxxxxxxx"); + ovfHelper.getOVFPropertiesFromXmlString(ovfFileProductSection + "xxxxxxxxxxxxxxxxx"); + } + + @Test + public void testGetOVFDeploymentOptionsValidOVF() throws IOException, SAXException, ParserConfigurationException { + List options = ovfHelper.getOVFDeploymentOptionsFromXmlString(ovfFileDeploymentOptionsSection); + Assert.assertEquals(3, options.size()); + } + + @Test + public void testGetOVFVirtualHardwareSectionValidOVF() throws IOException, SAXException, ParserConfigurationException { + List items = ovfHelper.getOVFVirtualHardwareSectionFromXmlString(ovfFileVirtualHardwareSection); + Assert.assertEquals(20, items.size()); } } diff --git a/api/src/test/java/com/cloud/storage/StorageTest.java b/api/src/test/java/com/cloud/storage/StorageTest.java index 332a8060d08d..61909e72e96e 100644 --- a/api/src/test/java/com/cloud/storage/StorageTest.java +++ b/api/src/test/java/com/cloud/storage/StorageTest.java @@ -45,6 +45,7 @@ public void isSharedStoragePool() { Assert.assertTrue(StoragePoolType.SMB.isShared()); Assert.assertTrue(StoragePoolType.Gluster.isShared()); Assert.assertTrue(StoragePoolType.ManagedNFS.isShared()); + Assert.assertTrue(StoragePoolType.DatastoreCluster.isShared()); } @Test @@ -66,5 +67,6 @@ public void supportsOverprovisioningStoragePool() { Assert.assertFalse(StoragePoolType.SMB.supportsOverProvisioning()); Assert.assertFalse(StoragePoolType.Gluster.supportsOverProvisioning()); Assert.assertFalse(StoragePoolType.ManagedNFS.supportsOverProvisioning()); + Assert.assertTrue(StoragePoolType.DatastoreCluster.supportsOverProvisioning()); } } diff --git a/check_diff b/check_diff new file mode 100644 index 000000000000..78b27ecd7559 --- /dev/null +++ b/check_diff @@ -0,0 +1,103 @@ +diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +index 47d3013f22..2dbf257fb2 100644 +--- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java ++++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +@@ -5551,11 +5551,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir + throw new InvalidParameterValueException("Unable to find the VM by id=" + vmId); + } + +- if (vm.getState() != State.Stopped) { +- InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Stopped, unable to migrate the vm having the specified id"); +- ex.addProxyObject(vm.getUuid(), "vmId"); +- throw ex; +- } ++// if (vm.getState() != State.Stopped) { ++// InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Stopped, unable to migrate the vm having the specified id"); ++// ex.addProxyObject(vm.getUuid(), "vmId"); ++// throw ex; ++// } + + if (vm.getType() != VirtualMachine.Type.User) { + // OffLineVmwareMigration: *WHY* ? 
+@@ -5634,14 +5634,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir + throw new InvalidParameterValueException("Unable to find the VM by id=" + vmId); + } + // business logic +- if (vm.getState() != State.Running) { +- if (s_logger.isDebugEnabled()) { +- s_logger.debug("VM is not Running, unable to migrate the vm " + vm); +- } +- InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, unable to migrate the vm with specified id"); +- ex.addProxyObject(vm.getUuid(), "vmId"); +- throw ex; +- } ++// if (vm.getState() != State.Running) { ++// if (s_logger.isDebugEnabled()) { ++// s_logger.debug("VM is not Running, unable to migrate the vm " + vm); ++// } ++// InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, unable to migrate the vm with specified id"); ++// ex.addProxyObject(vm.getUuid(), "vmId"); ++// throw ex; ++// } + + checkIfHostOfVMIsInPrepareForMaintenanceState(vm.getHostId(), vmId, "Migrate"); + +@@ -5987,15 +5987,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir + } + + // OfflineVmwareMigration: this would be it ;) if multiple paths exist: unify +- if (vm.getState() != State.Running) { +- // OfflineVmwareMigration: and not vmware +- if (s_logger.isDebugEnabled()) { +- s_logger.debug("VM is not Running, unable to migrate the vm " + vm); +- } +- CloudRuntimeException ex = new CloudRuntimeException("VM is not Running, unable to migrate the vm with" + " specified id"); +- ex.addProxyObject(vm.getUuid(), "vmId"); +- throw ex; +- } ++// if (vm.getState() != State.Running) { ++// // OfflineVmwareMigration: and not vmware ++// if (s_logger.isDebugEnabled()) { ++// s_logger.debug("VM is not Running, unable to migrate the vm " + vm); ++// } ++// CloudRuntimeException ex = new CloudRuntimeException("VM is not Running, unable to migrate the vm with" + " specified id"); ++// ex.addProxyObject(vm.getUuid(), "vmId"); ++// throw ex; ++// } + + if(serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { + throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported"); +diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +index 62ecc9a5d5..36e102cb20 100644 +--- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java ++++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +@@ -34,8 +34,6 @@ import java.util.concurrent.ExecutorService; + import java.util.concurrent.Executors; + import java.util.concurrent.Future; + +-import com.vmware.vim25.VStorageObject; +-import com.vmware.vim25.VStorageObjectConfigInfo; + import org.apache.commons.collections.CollectionUtils; + import org.apache.log4j.Logger; + import org.apache.commons.lang.StringUtils; +@@ -2430,13 +2428,13 @@ public class VirtualMachineMO extends BaseMO { + String deviceNumbering = getDeviceBusName(devices, device); + + s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering); +- if (((VirtualDisk) device).getVDiskId() == null) { +- s_logger.debug("vDiskid does not exist for volume " + vmdkDatastorePath + " registering the disk now"); +- VirtualStorageObjectManagerMO vStorageObjectManagerMO = new VirtualStorageObjectManagerMO(getOwnerDatacenter().first().getContext()); +- VStorageObject vStorageObject = 
vStorageObjectManagerMO.registerVirtualDisk(dsBackingFile, null, getOwnerDatacenter().first().getName()); +- VStorageObjectConfigInfo diskConfigInfo = vStorageObject.getConfig(); +- ((VirtualDisk) device).setVDiskId(diskConfigInfo.getId()); +- } ++// if (((VirtualDisk) device).getVDiskId() == null) { ++// s_logger.debug("vDiskid does not exist for volume " + vmdkDatastorePath + " registering the disk now"); ++// VirtualStorageObjectManagerMO vStorageObjectManagerMO = new VirtualStorageObjectManagerMO(getOwnerDatacenter().first().getContext()); ++// VStorageObject vStorageObject = vStorageObjectManagerMO.registerVirtualDisk(dsBackingFile, null, getOwnerDatacenter().first().getName()); ++// VStorageObjectConfigInfo diskConfigInfo = vStorageObject.getConfig(); ++// ((VirtualDisk) device).setVDiskId(diskConfigInfo.getId()); ++// } + return new Pair<>((VirtualDisk)device, deviceNumbering); + } + diff --git a/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolAnswer.java b/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolAnswer.java index 6e6dadc67f7a..f7a7e1ced067 100644 --- a/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolAnswer.java @@ -19,6 +19,7 @@ package com.cloud.agent.api; +import java.util.List; import java.util.Map; import com.cloud.storage.template.TemplateProp; @@ -27,6 +28,8 @@ public class ModifyStoragePoolAnswer extends Answer { private StoragePoolInfo poolInfo; private Map templateInfo; private String localDatastoreName; + private String poolType; + private List datastoreClusterChildren; public ModifyStoragePoolAnswer(ModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map tInfo) { super(cmd); @@ -61,4 +64,20 @@ public void setLocalDatastoreName(String localDatastoreName) { public String getLocalDatastoreName() { return localDatastoreName; } + + public String getPoolType() { + return poolType; + } + + public void setPoolType(String poolType) { + this.poolType = poolType; + } + + public List getDatastoreClusterChildren() { + return datastoreClusterChildren; + } + + public void setDatastoreClusterChildren(List datastoreClusterChildren) { + this.datastoreClusterChildren = datastoreClusterChildren; + } } diff --git a/core/src/main/java/com/cloud/agent/api/storage/DownloadAnswer.java b/core/src/main/java/com/cloud/agent/api/storage/DownloadAnswer.java index 9859c3f83d01..bf77b227c5de 100644 --- a/core/src/main/java/com/cloud/agent/api/storage/DownloadAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/storage/DownloadAnswer.java @@ -25,8 +25,10 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; import com.cloud.agent.api.LogLevel; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; public class DownloadAnswer extends Answer { private String jobId; @@ -38,8 +40,15 @@ public class DownloadAnswer extends Answer { private long templateSize = 0L; private long templatePhySicalSize = 0L; private String checkSum; + @LogLevel(LogLevel.Log4jLevel.Off) private List ovfProperties; + @LogLevel(LogLevel.Log4jLevel.Off) + private List networkRequirements; + @LogLevel(LogLevel.Log4jLevel.Off) + private List disks; + @LogLevel(LogLevel.Log4jLevel.Off) + private OVFVirtualHardwareSectionTO ovfHardwareSection; public String getCheckSum() { return checkSum; @@ -157,4 +166,28 @@ public List 
getOvfProperties() { public void setOvfProperties(List ovfProperties) { this.ovfProperties = ovfProperties; } + + public List getNetworkRequirements() { + return networkRequirements; + } + + public void setNetworkRequirements(List networkRequirements) { + this.networkRequirements = networkRequirements; + } + + public List getDisks() { + return disks; + } + + public void setDisks(List disks) { + this.disks = disks; + } + + public OVFVirtualHardwareSectionTO getOvfHardwareSection() { + return ovfHardwareSection; + } + + public void setOvfHardwareSection(OVFVirtualHardwareSectionTO ovfHardwareSection) { + this.ovfHardwareSection = ovfHardwareSection; + } } diff --git a/core/src/main/java/com/cloud/resource/ServerResource.java b/core/src/main/java/com/cloud/resource/ServerResource.java index 9030db72f00b..6aa04ea8d7bc 100644 --- a/core/src/main/java/com/cloud/resource/ServerResource.java +++ b/core/src/main/java/com/cloud/resource/ServerResource.java @@ -31,6 +31,8 @@ * ServerResource is a generic container to execute commands sent */ public interface ServerResource extends Manager { + String ORIGINAL_FILE_EXTENSION = ".orig"; + /** * @return Host.Type type of the computing server we have. */ diff --git a/core/src/main/java/com/cloud/storage/resource/StorageProcessor.java b/core/src/main/java/com/cloud/storage/resource/StorageProcessor.java index f940e22f45a1..c81d492d8cdc 100644 --- a/core/src/main/java/com/cloud/storage/resource/StorageProcessor.java +++ b/core/src/main/java/com/cloud/storage/resource/StorageProcessor.java @@ -21,6 +21,7 @@ import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.cloudstack.storage.command.AttachCommand; +import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; @@ -33,6 +34,9 @@ import com.cloud.agent.api.Answer; public interface StorageProcessor { + String REQUEST_TEMPLATE_RELOAD = "request template reload"; + String COPY_NOT_NEEDED_FOR_DEPLOY_AS_IS = "copy volume not needed for deploy as is"; + public Answer copyTemplateToPrimaryStorage(CopyCommand cmd); public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd); @@ -76,4 +80,6 @@ public interface StorageProcessor { public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd); Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd); + + public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd); } diff --git a/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java b/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java index 17b9b700d6c0..c9212a19b734 100644 --- a/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java +++ b/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java @@ -21,6 +21,7 @@ import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; import org.apache.log4j.Logger; import org.apache.cloudstack.storage.command.AttachCommand; @@ -71,6 +72,8 @@ public Answer handleStorageCommands(StorageSubSystemCommand command) { return processor.resignature((ResignatureCommand) command); } else if (command 
instanceof DirectDownloadCommand) { return processor.handleDownloadTemplateToPrimaryStorage((DirectDownloadCommand) command); + } else if (command instanceof CheckDataStoreStoragePolicyComplainceCommand) { + return processor.CheckDataStoreStoragePolicyComplaince((CheckDataStoreStoragePolicyComplainceCommand) command); } return new Answer((Command)command, false, "not implemented yet"); @@ -89,7 +92,7 @@ protected Answer execute(CopyCommand cmd) { return processor.copyTemplateToPrimaryStorage(cmd); } else if (srcData.getObjectType() == DataObjectType.TEMPLATE && srcDataStore.getRole() == DataStoreRole.Primary && destDataStore.getRole() == DataStoreRole.Primary) { - //clone template to a volume + // FR37 pretend to clone template to a volume but actually create a cloned vm return processor.cloneVolumeFromBaseTemplate(cmd); } else if (srcData.getObjectType() == DataObjectType.VOLUME && (srcData.getDataStore().getRole() == DataStoreRole.ImageCache || srcDataStore.getRole() == DataStoreRole.Image)) { diff --git a/core/src/main/java/com/cloud/storage/template/OVAProcessor.java b/core/src/main/java/com/cloud/storage/template/OVAProcessor.java index d771c67acec6..e110a91f8c90 100644 --- a/core/src/main/java/com/cloud/storage/template/OVAProcessor.java +++ b/core/src/main/java/com/cloud/storage/template/OVAProcessor.java @@ -20,13 +20,19 @@ package com.cloud.storage.template; import java.io.File; +import java.io.IOException; import java.util.List; import java.util.Map; import javax.naming.ConfigurationException; import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import com.cloud.agent.api.storage.OVFConfigurationTO; import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.agent.api.storage.OVFVirtualHardwareItemTO; +import com.cloud.agent.api.storage.OVFVirtualHardwareSectionTO; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Logger; import org.w3c.dom.Document; @@ -41,9 +47,13 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.script.Script; +import org.xml.sax.SAXException; +/** + * processes the content of an OVA for registration of a template + */ public class OVAProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(OVAProcessor.class); + private static final Logger LOGGER = Logger.getLogger(OVAProcessor.class); StorageLayer _storage; @Override @@ -53,73 +63,140 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException { - if (format != null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("We currently don't handle conversion from " + format + " to OVA."); - } + if (! conversionChecks(format)){ return null; } - s_logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); + LOGGER.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); String templateFilePath = templatePath + File.separator + templateName + "." 
+ ImageFormat.OVA.getFileExtension(); if (!_storage.exists(templateFilePath)) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to find the vmware template file: " + templateFilePath); + if (LOGGER.isInfoEnabled()) { + LOGGER.info("Unable to find the vmware template file: " + templateFilePath); } return null; } - s_logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName); - String templateFileFullPath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension(); - File templateFile = new File(templateFileFullPath); - Script command = new Script("tar", processTimeout, s_logger); - command.add("--no-same-owner"); - command.add("--no-same-permissions"); - command.add("-xf", templateFileFullPath); - command.setWorkDir(templateFile.getParent()); - String result = command.execute(); - if (result != null) { - s_logger.info("failed to untar OVA package due to " + result + ". templatePath: " + templatePath + ", templateName: " + templateName); - throw new InternalErrorException("failed to untar OVA package"); + String templateFileFullPath = unpackOva(templatePath, templateName, processTimeout); + + setFileSystemAccessRights(templatePath); + + FormatInfo info = createFormatInfo(templatePath, templateName, templateFilePath, templateFileFullPath); + + // The intention is to use the ova file as is for deployment and use processing result only for + // - property assessment and + // - reconsiliation of + // - - disks, + // - - networks and + // - - compute dimensions. + return info; + } + + private FormatInfo createFormatInfo(String templatePath, String templateName, String templateFilePath, String templateFileFullPath) throws InternalErrorException { + FormatInfo info = new FormatInfo(); + info.format = ImageFormat.OVA; + info.filename = templateName + "." + ImageFormat.OVA.getFileExtension(); + info.size = _storage.getSize(templateFilePath); + info.virtualSize = getTemplateVirtualSize(templatePath, info.filename); + validateOva(templateFileFullPath, info); + + return info; + } + + /** + * side effect; properties are added to the info + * + * @throws InternalErrorException on an invalid ova contents + */ + private void validateOva(String templateFileFullPath, FormatInfo info) throws InternalErrorException { + String ovfFilePath = getOVFFilePath(templateFileFullPath); + OVFHelper ovfHelper = new OVFHelper(); + Document doc = ovfHelper.getDocumentFromFile(ovfFilePath); + + List disks = ovfHelper.getOVFVolumeInfoFromFile(ovfFilePath, doc); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("Found %d disks in template %s", CollectionUtils.isNotEmpty(disks) ? 
disks.size() : 0, ovfFilePath)); + } + if (CollectionUtils.isNotEmpty(disks)) { + info.disks = disks; + } + + List nets = ovfHelper.getNetPrerequisitesFromDocument(doc); + if (CollectionUtils.isNotEmpty(nets)) { + LOGGER.info("Found " + nets.size() + " prerequisite networks"); + info.networks = nets; + } else if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("no net prerequisites found in template %s", ovfFilePath)); + } + + List ovfProperties = ovfHelper.getConfigurableOVFPropertiesFromDocument(doc); + if (CollectionUtils.isNotEmpty(ovfProperties)) { + LOGGER.info("Found " + ovfProperties.size() + " configurable OVF properties"); + info.ovfProperties = ovfProperties; + } else if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("no ovf properties found in template %s", ovfFilePath)); } - command = new Script("chmod", 0, s_logger); + OVFVirtualHardwareSectionTO hardwareSection = ovfHelper.getVirtualHardwareSectionFromDocument(doc); + List configurations = hardwareSection.getConfigurations(); + if (CollectionUtils.isNotEmpty(configurations)) { + LOGGER.info("Found " + configurations.size() + " deployment option configurations"); + } + List hardwareItems = hardwareSection.getCommonHardwareItems(); + if (CollectionUtils.isNotEmpty(hardwareItems)) { + LOGGER.info("Found " + hardwareItems.size() + " virtual hardware items"); + } + info.hardwareSection = hardwareSection; + + // FR37 TODO add any user queries that are required for this OVA + } + + private void setFileSystemAccessRights(String templatePath) { + Script command; + String result; + + command = new Script("chmod", 0, LOGGER); command.add("-R"); command.add("666", templatePath); result = command.execute(); if (result != null) { - s_logger.warn("Unable to set permissions for files in " + templatePath + " due to " + result); + LOGGER.warn("Unable to set permissions for files in " + templatePath + " due to " + result); } - command = new Script("chmod", 0, s_logger); + command = new Script("chmod", 0, LOGGER); command.add("777", templatePath); result = command.execute(); if (result != null) { - s_logger.warn("Unable to set permissions for " + templatePath + " due to " + result); + LOGGER.warn("Unable to set permissions for " + templatePath + " due to " + result); } + } - FormatInfo info = new FormatInfo(); - info.format = ImageFormat.OVA; - info.filename = templateName + "." + ImageFormat.OVA.getFileExtension(); - info.size = _storage.getSize(templateFilePath); - info.virtualSize = getTemplateVirtualSize(templatePath, info.filename); + private String unpackOva(String templatePath, String templateName, long processTimeout) throws InternalErrorException { + LOGGER.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName); + String templateFileFullPath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension(); + File templateFile = new File(templateFileFullPath); + Script command = new Script("tar", processTimeout, LOGGER); + command.add("--no-same-owner"); + command.add("--no-same-permissions"); + command.add("-xf", templateFileFullPath); + command.setWorkDir(templateFile.getParent()); + String result = command.execute(); + if (result != null) { + LOGGER.info("failed to untar OVA package due to " + result + ". 
templatePath: " + templatePath + ", templateName: " + templateName); + throw new InternalErrorException("failed to untar OVA package"); + } + return templateFileFullPath; + } - //vaidate ova - String ovfFile = getOVFFilePath(templateFileFullPath); - try { - OVFHelper ovfHelper = new OVFHelper(); - List disks = ovfHelper.getOVFVolumeInfo(ovfFile); - List ovfProperties = ovfHelper.getOVFPropertiesFromFile(ovfFile); - if (CollectionUtils.isNotEmpty(ovfProperties)) { - s_logger.info("Found " + ovfProperties.size() + " configurable OVF properties"); - info.ovfProperties = ovfProperties; + private boolean conversionChecks(ImageFormat format) { + if (format != null) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info("We currently don't handle conversion from " + format + " to OVA."); } - } catch (Exception e) { - s_logger.info("The ovf file " + ovfFile + " is invalid ", e); - throw new InternalErrorException("OVA package has bad ovf file " + e.getMessage(), e); + return false; } - // delete original OVA file - // templateFile.delete(); - return info; + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("We are handling format " + format + "."); + } + return true; } @Override @@ -128,34 +205,43 @@ public long getVirtualSize(File file) { long size = getTemplateVirtualSize(file.getParent(), file.getName()); return size; } catch (Exception e) { - s_logger.info("[ignored]" + LOGGER.info("[ignored]" + "failed to get virtual template size for ova: " + e.getLocalizedMessage()); } return file.length(); } + /** + * gets the virtual size from the OVF file meta data. + * + * @return the accumulative virtual size of the disk definitions in the OVF + * @throws InternalErrorException + */ public long getTemplateVirtualSize(String templatePath, String templateName) throws InternalErrorException { - // get the virtual size from the OVF file meta data long virtualSize = 0; String templateFileFullPath = templatePath.endsWith(File.separator) ? templatePath : templatePath + File.separator; templateFileFullPath += templateName.endsWith(ImageFormat.OVA.getFileExtension()) ? templateName : templateName + "." 
+ ImageFormat.OVA.getFileExtension(); String ovfFileName = getOVFFilePath(templateFileFullPath); if (ovfFileName == null) { String msg = "Unable to locate OVF file in template package directory: " + templatePath; - s_logger.error(msg); + LOGGER.error(msg); throw new InternalErrorException(msg); } try { Document ovfDoc = null; ovfDoc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFileName)); - Element disk = (Element)ovfDoc.getElementsByTagName("Disk").item(0); - virtualSize = Long.parseLong(disk.getAttribute("ovf:capacity")); - String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits"); - virtualSize = OVFHelper.getDiskVirtualSize(virtualSize, allocationUnits, ovfFileName); + NodeList diskElements = ovfDoc.getElementsByTagName("Disk"); + for (int i = 0; i < diskElements.getLength(); i++) { + Element disk = (Element)diskElements.item(i); + long diskSize = Long.parseLong(disk.getAttribute("ovf:capacity")); + String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits"); + diskSize = OVFHelper.getDiskVirtualSize(diskSize, allocationUnits, ovfFileName); + virtualSize += diskSize; + } return virtualSize; - } catch (Exception e) { + } catch (InternalErrorException | IOException | NumberFormatException | ParserConfigurationException | SAXException e) { String msg = "getTemplateVirtualSize: Unable to parse OVF XML document " + templatePath + " to get the virtual disk " + templateName + " size due to " + e; - s_logger.error(msg); + LOGGER.error(msg); throw new InternalErrorException(msg); } } @@ -187,9 +273,9 @@ public Pair getDiskDetails(String ovfFilePath, String diskName) thro } } return new Pair(virtualSize, fileSize); - } catch (Exception e) { + } catch (InternalErrorException | IOException | NumberFormatException | ParserConfigurationException | SAXException e) { String msg = "getDiskDetails: Unable to parse OVF XML document " + ovfFilePath + " to get the virtual disk " + diskName + " size due to " + e; - s_logger.error(msg); + LOGGER.error(msg); throw new InternalErrorException(msg); } } diff --git a/core/src/main/java/com/cloud/storage/template/Processor.java b/core/src/main/java/com/cloud/storage/template/Processor.java index 4bb714a7ab9d..d17d371bc9ed 100644 --- a/core/src/main/java/com/cloud/storage/template/Processor.java +++ b/core/src/main/java/com/cloud/storage/template/Processor.java @@ -24,9 +24,12 @@ import java.util.List; import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.agent.api.storage.OVFVirtualHardwareSectionTO; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.exception.InternalErrorException; import com.cloud.storage.Storage.ImageFormat; import com.cloud.utils.component.Adapter; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; /** * Generic interface to process different types of image formats @@ -48,13 +51,16 @@ public interface Processor extends Adapter { FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException; - public static class FormatInfo { + class FormatInfo { public ImageFormat format; public long size; public long virtualSize; public String filename; public boolean isCorrupted; public List ovfProperties; + public List networks; + public List disks; + public OVFVirtualHardwareSectionTO hardwareSection; } long getVirtualSize(File file) throws IOException; diff --git a/core/src/main/java/org/apache/cloudstack/storage/command/CheckDataStoreStoragePolicyComplainceCommand.java 
b/core/src/main/java/org/apache/cloudstack/storage/command/CheckDataStoreStoragePolicyComplainceCommand.java new file mode 100644 index 000000000000..f9544b873ef2 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/storage/command/CheckDataStoreStoragePolicyComplainceCommand.java @@ -0,0 +1,61 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.storage.command; + +import com.cloud.agent.api.to.StorageFilerTO; + +public class CheckDataStoreStoragePolicyComplainceCommand extends StorageSubSystemCommand { + + String storagePolicyId; + private StorageFilerTO storagePool; + + public CheckDataStoreStoragePolicyComplainceCommand(String storagePolicyId, StorageFilerTO storagePool) { + super(); + + this.storagePolicyId = storagePolicyId; + this.storagePool = storagePool; + } + + @Override + public void setExecuteInSequence(boolean inSeq) { + } + + @Override + public boolean executeInSequence() { + return false; + } + + + public String getStoragePolicyId() { + return storagePolicyId; + } + + public void setStoragePolicyId(String storagePolicyId) { + this.storagePolicyId = storagePolicyId; + } + + public StorageFilerTO getStoragePool() { + return storagePool; + } + + public void setStoragePool(StorageFilerTO storagePool) { + this.storagePool = storagePool; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java index cc2eaadea073..16a5e4ccecd1 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java @@ -47,6 +47,7 @@ public class TemplateObjectTO implements DataTO { private boolean bootable; private String uniqueName; private boolean directDownload; + private boolean deployAsIs; public TemplateObjectTO() { @@ -82,6 +83,7 @@ public TemplateObjectTO(TemplateInfo template) { this.imageDataStore = template.getDataStore().getTO(); } this.hypervisorType = template.getHypervisorType(); + this.deployAsIs = template.isDeployAsIs(); } @Override @@ -244,6 +246,14 @@ public void setDirectDownload(boolean directDownload) { this.directDownload = directDownload; } + public boolean isDeployAsIs() { + return deployAsIs; + } + + public void setDeployAsIs(boolean deployAsIs) { + this.deployAsIs = deployAsIs; + } + @Override public String toString() { return new StringBuilder("TemplateTO[id=").append(id).append("|origUrl=").append(origUrl).append("|name").append(name).append("]").toString(); diff --git a/core/src/test/java/com/cloud/agent/api/storage/DownloadAnswerTest.java b/core/src/test/java/com/cloud/agent/api/storage/DownloadAnswerTest.java new file mode 100644 index 000000000000..62bb3d65c83f --- 
/dev/null +++ b/core/src/test/java/com/cloud/agent/api/storage/DownloadAnswerTest.java @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api.storage; + +import com.cloud.agent.api.Answer; +import com.cloud.serializer.GsonHelper; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.google.gson.Gson; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +public class DownloadAnswerTest { + Gson gson = GsonHelper.getGson(); + + VMTemplateStorageResourceAssoc.Status status = VMTemplateStorageResourceAssoc.Status.DOWNLOADED; + DownloadAnswer answer = new DownloadAnswer("nothin wrong", status); + + @Test + public void redeserialise () + { + String json = gson.toJson(answer); + DownloadAnswer received = gson.fromJson(json, DownloadAnswer.class); + Assert.assertEquals(received,answer); + } + @Test + public void properties () + { + List properties = new ArrayList<>(); + properties.add(new OVFPropertyTO()); + List networks = new ArrayList<>(); + networks.add(new NetworkPrerequisiteTO()); + + answer.setOvfProperties(properties); + answer.setNetworkRequirements(networks); + + String json = gson.toJson(answer); + Answer received = gson.fromJson(json, Answer.class); + Assert.assertEquals(received,answer); + } +} \ No newline at end of file diff --git a/core/src/test/java/com/cloud/agent/transport/RequestTest.java b/core/src/test/java/com/cloud/agent/transport/RequestTest.java index 21766ba038fa..7fc1754126f3 100644 --- a/core/src/test/java/com/cloud/agent/transport/RequestTest.java +++ b/core/src/test/java/com/cloud/agent/transport/RequestTest.java @@ -56,7 +56,6 @@ * * */ - public class RequestTest extends TestCase { private static final Logger s_logger = Logger.getLogger(RequestTest.class); diff --git a/deps/install-non-oss.sh b/deps/install-non-oss.sh index a387050d5390..d73f8c5d6b87 100755 --- a/deps/install-non-oss.sh +++ b/deps/install-non-oss.sh @@ -39,3 +39,18 @@ mvn install:install-file -Dfile=vim25_65.jar -DgroupId=com.cloud.com.vmwa # From https://my.vmware.com/group/vmware/details?downloadGroup=WEBCLIENTSDK67U2&productId=742 mvn install:install-file -Dfile=vim25_67.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim25 -Dversion=6.7 -Dpackaging=jar + +# From https://my.vmware.com/group/vmware/get-download?downloadGroup=VS-MGMT-SDK65 +mvn install:install-file -Dfile=pbm_65.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-pbm -Dversion=6.5 -Dpackaging=jar + +# From https://my.vmware.com/group/vmware/details?downloadGroup=WEBCLIENTSDK67U2&productId=742 +mvn install:install-file -Dfile=pbm_67.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-pbm -Dversion=6.7 
-Dpackaging=jar + +# From https://github.com/vmware/vsphere-automation-sdk-java/tree/master/lib +mvn install:install-file -Dfile=vapi-runtime-2.15.0.jar -DgroupId=com.vmware.vapi -DartifactId=vapi-runtime -Dversion=2.15.0 -Dpackaging=jar + +# From https://github.com/vmware/vsphere-automation-sdk-java/tree/master/lib +mvn install:install-file -Dfile=vapi-authentication-2.15.0.jar -DgroupId=com.vmware.vapi -DartifactId=vapi-authentication -Dversion=2.15.0 -Dpackaging=jar + +# From https://github.com/vmware/vsphere-automation-sdk-java/tree/master/lib +mvn install:install-file -Dfile=vsphereautomation-client-sdk-3.3.0.jar -DgroupId=com.vmware.vsphereautomation.client -DartifactId=vsphereautomation-client-sdk -Dversion=3.3.0 -Dpackaging=jar \ No newline at end of file diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 9458de763538..356ec17c30bf 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -18,6 +18,7 @@ */ package org.apache.cloudstack.engine.orchestration.service; +import java.util.List; import java.util.Map; import java.util.Set; @@ -117,8 +118,11 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon boolean canVmRestartOnAnotherServer(long vmId); - DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm, - Account owner); + /** + * Allocate a volume or multiple volumes in case of template is registered with the 'deploy-as-is' option, allowing multiple disks + */ + List allocateTemplatedVolumes(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm, + Account owner); String getVmNameFromVolumeId(long volumeId); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java index 0f7cc6f9de56..0e7c90bccd2f 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java @@ -27,4 +27,6 @@ public interface TemplateInfo extends DataObject, VirtualMachineTemplate { String getInstallPath(); boolean isDirectDownload(); + + boolean isDeployAsIs(); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java index f70a7813ae09..0856de47ee7d 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java @@ -18,6 +18,7 @@ */ package org.apache.cloudstack.engine.subsystem.api.storage; +import com.cloud.agent.api.to.DatadiskTO; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.command.CommandResult; @@ 
-25,6 +26,8 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.StoragePool; +import java.util.List; + public interface TemplateService { class TemplateApiResult extends CommandResult { @@ -47,7 +50,7 @@ public TemplateInfo getTemplate() { AsyncCallFuture createTemplateFromVolumeAsync(VolumeInfo volume, TemplateInfo template, DataStore store); - boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate); + boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate, boolean deployAsIs); AsyncCallFuture deleteTemplateAsync(TemplateInfo template); @@ -72,4 +75,6 @@ public TemplateInfo getTemplate() { void associateCrosszoneTemplatesToZone(long dcId); AsyncCallFuture createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable); + + List getTemplateDatadisksOnImageStore(TemplateInfo templateInfo); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java index f4a738109013..1a7b22446b37 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java @@ -83,4 +83,6 @@ public interface VolumeInfo extends DataObject, Volume { boolean isDirectDownload(); void setDirectDownload(boolean directDownload); + + boolean isDeployAsIs(); } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 62a241be4345..87c58afcfd39 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -206,6 +206,8 @@ public interface StorageManager extends StorageService { boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long currentSize, long newSiz); + boolean isStoragePoolComplaintWithStoragePolicy(List volumes, StoragePool pool) throws StorageUnavailableException; + boolean registerHostListener(String providerUuid, HypervisorHostListener listener); void connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; @@ -230,4 +232,6 @@ public interface StorageManager extends StorageService { DiskTO getDiskWithThrottling(DataTO volTO, Volume.Type volumeType, long deviceId, String path, long offeringId, long diskOfferingId); + boolean isStoragePoolDatastoreClusterParent(StoragePool pool); + } diff --git a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java index 2dc6296fc51f..7b1633c0355f 100644 --- a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java +++ b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java @@ -18,7 +18,9 @@ import java.util.List; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.deploy.DeployDestination; +import com.cloud.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.framework.config.ConfigKey; @@ -133,4 +135,5 @@ public interface TemplateManager { public static final String MESSAGE_REGISTER_PUBLIC_TEMPLATE_EVENT = 
"Message.RegisterPublicTemplate.Event"; public static final String MESSAGE_RESET_TEMPLATE_PERMISSION_EVENT = "Message.ResetTemplatePermission.Event"; + List getTemplateDisksOnImageStore(Long templateId, DataStoreRole role); } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index 7a16971ee71d..c158cc510188 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -614,8 +614,7 @@ public ClusteredAgentHandler(final Task.Type type, final Link link, final byte[] @Override protected void doTask(final Task task) throws TaskExecutionException { - final TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); - try { + try (final TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);) { if (task.getType() != Task.Type.DATA) { super.doTask(task); return; @@ -723,8 +722,6 @@ protected void doTask(final Task task) throws TaskExecutionException { final String message = String.format("UnsupportedVersionException occured when executing taks! Error '%s'", e.getMessage()); s_logger.error(message); throw new TaskExecutionException(message, e); - } finally { - txn.close(); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 0ae249c584f8..942d3cf11141 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -413,6 +413,8 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t final LinkedHashMap> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map> extraDhcpOptions, final Map datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { + s_logger.info(String.format("allocating virtual machine from template:%s with hostname:%s and %d networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size())); + final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName); final Account owner = _entityMgr.findById(Account.class, vm.getAccountId()); @@ -454,7 +456,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws } else if (template.getFormat() == ImageFormat.BAREMETAL) { // Do nothing } else { - volumeMgr.allocateTemplatedVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), + volumeMgr.allocateTemplatedVolumes(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), template, vmFinal, owner); } @@ -1096,7 +1098,7 @@ public void orchestrateStart(final String vmUuid, final Map volumes = _volsDao.findCreatedByInstance(vm.getId()); for (final VolumeVO volume : volumes) { if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) { diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java index 91e9b6f57bdb..6d9ac41d990d 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.service.api.OrchestrationService; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DeploymentPlan; @@ -64,6 +65,7 @@ @Component public class CloudOrchestrator implements OrchestrationService { + private static final Logger LOGGER = Logger.getLogger(CloudOrchestrator.class); @Inject private VMEntityManager vmEntityManager; @@ -99,6 +101,7 @@ public class CloudOrchestrator implements OrchestrationService { VolumeOrchestrationService _volumeMgr; public CloudOrchestrator() { + LOGGER.info("We are starting to use the cloud orchestrator \\o/"); } public VirtualMachineEntity createFromScratch(String uuid, String iso, String os, String hypervisor, String hostName, int cpu, int speed, long memory, @@ -159,8 +162,9 @@ public VirtualMachineEntity createVirtualMachine(String id, String owner, String int speed, long memory, Long diskSize, List computeTags, List rootDiskTags, Map networkNicMap, DeploymentPlan plan, Long rootDiskSize, Map> extraDhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { - // VirtualMachineEntityImpl vmEntity = new VirtualMachineEntityImpl(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, networks, - // vmEntityManager); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("creating virtual machine using template %s with hostname %s", templateId, hostName)); + } LinkedHashMap> networkIpMap = new LinkedHashMap>(); for (String uuid : networkNicMap.keySet()) { diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index e16bde62b3ff..0234aa456c5c 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -43,8 +43,10 @@ import com.cloud.event.UsageEventUtils; import com.cloud.network.dao.NetworkDetailVO; import com.cloud.network.dao.NetworkDetailsDao; +import com.cloud.storage.dao.VMTemplateDetailsDao; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.cloud.entity.api.db.VMNetworkMapVO; import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMNetworkMapDao; @@ -299,6 +301,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra RemoteAccessVpnDao _remoteAccessVpnDao; @Inject VpcVirtualNetworkApplianceService _routerService; + @Inject + VMTemplateDetailsDao templateDetailsDao; List networkGurus; @@ -750,16 +754,12 @@ public void allocate(final VirtualMachineProfile vm, final LinkedHashMap() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) throws InsufficientCapacityException { - int deviceId = 0; - int size = 0; - for (final Network ntwk : 
networks.keySet()) { - final List profiles = networks.get(ntwk); - if (profiles != null && !profiles.isEmpty()) { - size = size + profiles.size(); - } else { - size = size + 1; - } + if (s_logger.isTraceEnabled()) { + s_logger.trace(String.format("allocating networks for %s(template %s); %d networks",vm.getInstanceName(), vm.getTemplate().getUuid(), networks.size())); } + int deviceId = 0; + int size; + size = determineNumberOfNicsRequired(); final boolean[] deviceIds = new boolean[size]; Arrays.fill(deviceIds, false); @@ -767,8 +767,9 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws final List nics = new ArrayList(size); NicProfile defaultNic = null; + Network nextNetwork = null; for (final Map.Entry> network : networks.entrySet()) { - final Network config = network.getKey(); + nextNetwork = network.getKey(); List requestedProfiles = network.getValue(); if (requestedProfiles == null) { requestedProfiles = new ArrayList(); @@ -778,54 +779,113 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws } for (final NicProfile requested : requestedProfiles) { - Boolean isDefaultNic = false; - if (vm != null && requested != null && requested.isDefaultNic()) { - isDefaultNic = true; - } + Pair newDeviceInfo = addRequestedNicToNicListWithDeviceNumberAndRetrieveDefaultDevice( requested, deviceIds, deviceId, nextNetwork, nics, defaultNic); + defaultNic = newDeviceInfo.first(); + deviceId = newDeviceInfo.second(); + } + } + createExtraNics(size, nics, nextNetwork); - while (deviceIds[deviceId] && deviceId < deviceIds.length) { - deviceId++; - } + if (nics.size() == 1) { + nics.get(0).setDefaultNic(true); + } + } - final Pair vmNicPair = allocateNic(requested, config, isDefaultNic, deviceId, vm); - NicProfile vmNic = null; - if (vmNicPair != null) { - vmNic = vmNicPair.first(); - if (vmNic == null) { - continue; - } - deviceId = vmNicPair.second(); - } + /** + * private transaction method to check and add devices to the nic list and update the info + */ + Pair addRequestedNicToNicListWithDeviceNumberAndRetrieveDefaultDevice(NicProfile requested, boolean[] deviceIds, int deviceId, Network nextNetwork, List nics, NicProfile defaultNic) + throws InsufficientAddressCapacityException, InsufficientVirtualNetworkCapacityException { + Pair rc = new Pair<>(null,null); + Boolean isDefaultNic = false; + if (vm != null && requested != null && requested.isDefaultNic()) { + isDefaultNic = true; + } - final int devId = vmNic.getDeviceId(); - if (devId >= deviceIds.length) { - throw new IllegalArgumentException("Device id for nic is too large: " + vmNic); - } - if (deviceIds[devId]) { - throw new IllegalArgumentException("Conflicting device id for two different nics: " + vmNic); - } + while (deviceIds[deviceId] && deviceId < deviceIds.length) { + deviceId++; + } + + final Pair vmNicPair = allocateNic(requested, nextNetwork, isDefaultNic, deviceId, vm); + NicProfile vmNic = null; + if (vmNicPair != null) { + vmNic = vmNicPair.first(); + if (vmNic == null) { + return rc; + } + deviceId = vmNicPair.second(); + } - deviceIds[devId] = true; + final int devId = vmNic.getDeviceId(); + if (devId >= deviceIds.length) { + throw new IllegalArgumentException("Device id for nic is too large: " + vmNic); + } + if (deviceIds[devId]) { + throw new IllegalArgumentException("Conflicting device id for two different nics: " + vmNic); + } - if (vmNic.isDefaultNic()) { - if (defaultNic != null) { - throw new IllegalArgumentException("You cannot specify two nics as default nics: 
nic 1 = " + defaultNic + "; nic 2 = " + vmNic); - } - defaultNic = vmNic; - } + deviceIds[devId] = true; - nics.add(vmNic); - vm.addNic(vmNic); - saveExtraDhcpOptions(config.getUuid(), vmNic.getId(), extraDhcpOptions); + if (vmNic.isDefaultNic()) { + if (defaultNic != null) { + throw new IllegalArgumentException("You cannot specify two nics as default nics: nic 1 = " + defaultNic + "; nic 2 = " + vmNic); } + defaultNic = vmNic; } - if (nics.size() != size) { - s_logger.warn("Number of nics " + nics.size() + " doesn't match number of requested nics " + size); - throw new CloudRuntimeException("Number of nics " + nics.size() + " doesn't match number of requested networks " + size); + + nics.add(vmNic); + vm.addNic(vmNic); + saveExtraDhcpOptions(nextNetwork.getUuid(), vmNic.getId(), extraDhcpOptions); + rc.first(defaultNic); + rc.second(deviceId); + return rc; + } + + /** + * private transaction method to run over the objects and determine nic requirements + * @return the total numer of nics required + */ + private int determineNumberOfNicsRequired() { + int size = 0; + for (final Network ntwk : networks.keySet()) { + final List profiles = networks.get(ntwk); + if (profiles != null && !profiles.isEmpty()) { + size = size + profiles.size(); + } else { + size = size + 1; + } } - if (nics.size() == 1) { - nics.get(0).setDefaultNic(true); + List netprereqs = templateDetailsDao.listNetworkRequirementsByTemplateId(vm.getTemplate().getId()); + // FR37 hack: add last network untill enough ids + if (size < netprereqs.size()) { + size = netprereqs.size(); + } + return size; + } + + /** + * private transaction method to add nics as required + * @param size the number needed + * @param nics the list of nics present + * @param finalNetwork the network to add the nics to + * @throws InsufficientVirtualNetworkCapacityException great + * @throws InsufficientAddressCapacityException also magnificent, as the name sugests + */ + private void createExtraNics(int size, List nics, Network finalNetwork) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException { + if (nics.size() != size) { + s_logger.warn("Number of nics " + nics.size() + " doesn't match number of requested nics " + size); + if (nics.size() > size) { + throw new CloudRuntimeException("Number of nics " + nics.size() + " doesn't match number of requested networks " + size); + } else { + if (finalNetwork == null) { + throw new CloudRuntimeException(String.format("can not assign network to %d remaining required NICs", size - nics.size())); + } + // create extra + for ( int extraNicNum = nics.size() ; extraNicNum < size; extraNicNum ++) { + final Pair vmNicPair = allocateNic(new NicProfile(), finalNetwork, false, extraNicNum, vm); + } + } } } }); @@ -2347,12 +2407,12 @@ private Network createGuestNetwork(final long networkOfferingId, final String na URI secondaryUri = isNotBlank(isolatedPvlan) ? 
BroadcastDomainType.fromString(isolatedPvlan) : null; //don't allow to specify vlan tag used by physical network for dynamic vlan allocation if (!(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) && _dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(uri)).size() > 0) { - throw new InvalidParameterValueException("The VLAN tag " + vlanId + " is already being used for dynamic vlan allocation for the guest network in zone " + throw new InvalidParameterValueException("The VLAN tag to use for new guest network, " + vlanId + " is already being used for dynamic vlan allocation for the guest network in zone " + zone.getName()); } if (secondaryUri != null && !(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) && _dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(secondaryUri)).size() > 0) { - throw new InvalidParameterValueException("The VLAN tag " + isolatedPvlan + " is already being used for dynamic vlan allocation for the guest network in zone " + throw new InvalidParameterValueException("The VLAN tag for isolated PVLAN " + isolatedPvlan + " is already being used for dynamic vlan allocation for the guest network in zone " + zone.getName()); } if (! UuidUtils.validateUUID(vlanId)){ diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index c18167509377..39e57d6d319b 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.engine.orchestration; import java.util.ArrayList; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -26,10 +27,13 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.agent.api.to.DatadiskTO; +import com.cloud.storage.dao.VMTemplateDetailsDao; import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin; import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; @@ -68,6 +72,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Logger; import com.cloud.agent.api.to.DataTO; @@ -140,6 +145,8 @@ import com.cloud.vm.dao.UserVmCloneSettingDao; import com.cloud.vm.dao.UserVmDao; +import static com.cloud.storage.resource.StorageProcessor.REQUEST_TEMPLATE_RELOAD; + public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable { public enum UserVmCloneType { @@ -196,6 +203,8 @@ public enum UserVmCloneType { protected UserVmCloneSettingDao _vmCloneSettingDao; @Inject StorageStrategyFactory _storageStrategyFactory; + @Inject + VMTemplateDetailsDao templateDetailsDao; private final StateMachine2 _volStateMachine; protected List _storagePoolAllocators; @@ -562,7 +571,7 @@ public VolumeInfo createVolume(VolumeInfo volume, VirtualMachine vm, VirtualMach try 
{ VolumeApiResult result = future.get(); if (result.isFailed()) { - if (result.getResult().contains("request template reload") && (i == 0)) { + if (result.getResult().contains(REQUEST_TEMPLATE_RELOAD) && (i == 0)) { s_logger.debug("Retry template re-deploy for vmware"); continue; } else { @@ -705,19 +714,23 @@ public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offeri return toDiskProfile(vol, offering); } - @Override - public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm, - Account owner) { + private DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm, + Account owner, long deviceId) { assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template really...."; Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId()); if (rootDisksize != null) { - rootDisksize = rootDisksize * 1024 * 1024 * 1024; - if (rootDisksize > size) { - s_logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name); + if (template.isDeployAsIs()) { + // Volume size specified from template deploy-as-is size = rootDisksize; } else { - s_logger.debug("Using root disk size of " + size + " Bytes for volume " + name + "since specified root disk size of " + rootDisksize + " Bytes is smaller than template"); + rootDisksize = rootDisksize * 1024 * 1024 * 1024; + if (rootDisksize > size) { + s_logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name); + size = rootDisksize; + } else { + s_logger.debug("Using root disk size of " + size + " Bytes for volume " + name + "since specified root disk size of " + rootDisksize + " Bytes is smaller than template"); + } } } @@ -731,13 +744,9 @@ public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering } vol.setTemplateId(template.getId()); - if (type.equals(Type.ROOT)) { - vol.setDeviceId(0l); - if (!vm.getType().equals(VirtualMachine.Type.User)) { - vol.setRecreatable(true); - } - } else { - vol.setDeviceId(1l); + vol.setDeviceId(deviceId); + if (type.equals(Type.ROOT) && !vm.getType().equals(VirtualMachine.Type.User)) { + vol.setRecreatable(true); } if (vm.getType() == VirtualMachine.Type.User) { @@ -765,6 +774,44 @@ public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering return toDiskProfile(vol, offering); } + @Override + public List allocateTemplatedVolumes(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm, + Account owner) { + int volumesNumber = 1; + List templateAsIsDisks = null; + if (template.isDeployAsIs()) { + templateAsIsDisks = _tmpltMgr.getTemplateDisksOnImageStore(template.getId(), DataStoreRole.Image); + if (CollectionUtils.isNotEmpty(templateAsIsDisks)) { + templateAsIsDisks = templateAsIsDisks.stream() + .filter(x -> !x.isIso()) + .sorted(Comparator.comparing(DatadiskTO::getDiskNumber)) + .collect(Collectors.toList()); + } + volumesNumber = templateAsIsDisks.size(); + } + + if (volumesNumber < 1) { + throw new CloudRuntimeException("Unable to create any volume from template " + template.getName()); + } + + List profiles = new ArrayList<>(); + + for (int number = 0; number < volumesNumber; number++) { + String volumeName = name; + Long volumeSize = rootDisksize; + 
long deviceId = type.equals(Type.ROOT) ? 0L : 1L; + if (template.isDeployAsIs()) { + int volumeNameSuffix = templateAsIsDisks.get(number).getDiskNumber(); + volumeName = String.format("%s-%d", volumeName, volumeNameSuffix); + volumeSize = templateAsIsDisks.get(number).getVirtualSize(); + deviceId = templateAsIsDisks.get(number).getDiskNumber(); + } + s_logger.info(String.format("adding disk object %s to %s", volumeName, vm.getInstanceName())); + profiles.add(allocateTemplatedVolume(type, volumeName, offering, volumeSize, minIops, maxIops, template, vm, owner, deviceId)); + } + return profiles; + } + private ImageFormat getSupportedImageFormatForCluster(HypervisorType hyperType) { if (hyperType == HypervisorType.XenServer) { return ImageFormat.VHD; @@ -1174,6 +1221,10 @@ private static class VolumeTask { private List getTasks(List vols, Map destVols, VirtualMachineProfile vm) throws StorageUnavailableException { boolean recreate = RecreatableSystemVmEnabled.value(); List tasks = new ArrayList(); + // FR37 TODO: is it this easy? +// if (vm.getTemplate().isDeployAsIs()) { +// return tasks; +// } for (VolumeVO vol : vols) { StoragePoolVO assignedPool = null; if (destVols != null) { @@ -1327,7 +1378,7 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro try { result = future.get(); if (result.isFailed()) { - if (result.getResult().contains("request template reload") && (i == 0)) { + if (result.getResult().contains(REQUEST_TEMPLATE_RELOAD) && (i == 0)) { s_logger.debug("Retry template re-deploy for vmware"); continue; } else { diff --git a/engine/schema/src/main/java/com/cloud/dc/VsphereStoragePolicyVO.java b/engine/schema/src/main/java/com/cloud/dc/VsphereStoragePolicyVO.java new file mode 100644 index 000000000000..1415d15ec776 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/dc/VsphereStoragePolicyVO.java @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
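For reference, a small standalone sketch of the per-disk scheme that allocateTemplatedVolumes above applies to deploy-as-is templates: non-ISO disks are kept, ordered by OVF disk number, and each one becomes a volume whose name suffix, size and device id come from the disk definition. The Disk class is a hypothetical stand-in for DatadiskTO; the values are illustrative only.

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;

    public class DeployAsIsVolumeNamingSketch {

        /** Stand-in for DatadiskTO; only the fields the allocation loop reads. */
        static class Disk {
            final int diskNumber;
            final long virtualSize;
            final boolean iso;
            Disk(int diskNumber, long virtualSize, boolean iso) {
                this.diskNumber = diskNumber;
                this.virtualSize = virtualSize;
                this.iso = iso;
            }
        }

        public static void main(String[] args) {
            List<Disk> ovfDisks = new ArrayList<>();
            ovfDisks.add(new Disk(1, 8L * 1024 * 1024 * 1024, false));
            ovfDisks.add(new Disk(0, 20L * 1024 * 1024 * 1024, false));
            ovfDisks.add(new Disk(2, 0, true)); // an attached ISO is skipped

            // Same filtering and ordering as allocateTemplatedVolumes: drop ISOs, sort by disk number.
            List<Disk> disks = ovfDisks.stream()
                    .filter(d -> !d.iso)
                    .sorted(Comparator.comparingInt(d -> d.diskNumber))
                    .collect(Collectors.toList());

            String baseName = "ROOT-42";
            for (Disk d : disks) {
                // One volume per OVF disk: name suffixed with the disk number, size and
                // device id taken straight from the OVF disk definition.
                System.out.printf("%s-%d size=%d deviceId=%d%n", baseName, d.diskNumber, d.virtualSize, d.diskNumber);
            }
        }
    }

Run as-is it prints one line per non-ISO disk (ROOT-42-0, ROOT-42-1), mirroring the volume names the orchestrator would allocate.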
+package com.cloud.dc;; + +import com.cloud.utils.DateUtil; +import com.cloud.utils.db.GenericDao; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; +import java.util.Date; +import java.util.UUID; + +@Entity +@Table(name = "vsphere_storage_policy") +public class VsphereStoragePolicyVO implements VsphereStoragePolicy { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "zone_id") + private long zoneId; + + @Column(name = "policy_id") + private String policyId; + + @Column(name = "name") + private String name; + + @Column(name = "description") + private String description; + + @Column(name = "update_time", updatable = true) + @Temporal(value = TemporalType.TIMESTAMP) + private Date updateTime; + + @Column(name = GenericDao.REMOVED_COLUMN) + private Date removed; + + public VsphereStoragePolicyVO(long zoneId, String policyId, String name, String description) { + this.uuid = UUID.randomUUID().toString(); + this.zoneId = zoneId; + this.policyId = policyId; + this.name = name; + this.description = description; + this.updateTime = DateUtil.currentGMTTime(); + } + + public VsphereStoragePolicyVO() { + uuid = UUID.randomUUID().toString(); + } + public VsphereStoragePolicyVO(long id) { + this.id = id; + uuid = UUID.randomUUID().toString(); + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public long getZoneId() { + return zoneId; + } + + @Override + public String getPolicyId() { + return policyId; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public Date getUpdateTime() { + return updateTime; + } + + public Date getRemoved() { + return removed; + } +} diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDao.java new file mode 100644 index 000000000000..6e79b5e6f6ee --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDao.java @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
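The new VsphereStoragePolicyVO above and the VsphereStoragePolicyDao declared just below are typically used together when policies are imported from vCenter. A hedged sketch of such an upsert follows; the importer class is hypothetical, and only the constructor, setters and DAO methods shown in this diff plus GenericDao's persist()/update() are assumed.

    import com.cloud.dc.VsphereStoragePolicyVO;
    import com.cloud.dc.dao.VsphereStoragePolicyDao;

    public class StoragePolicyImportSketch {

        private final VsphereStoragePolicyDao storagePolicyDao;

        public StoragePolicyImportSketch(VsphereStoragePolicyDao storagePolicyDao) {
            this.storagePolicyDao = storagePolicyDao;
        }

        /** Insert the policy when it is new for the zone, otherwise refresh its name and description. */
        public VsphereStoragePolicyVO upsert(long zoneId, String policyId, String name, String description) {
            VsphereStoragePolicyVO existing = storagePolicyDao.findByPolicyId(zoneId, policyId);
            if (existing == null) {
                // persist() is inherited from GenericDao and returns the stored entity
                return storagePolicyDao.persist(new VsphereStoragePolicyVO(zoneId, policyId, name, description));
            }
            existing.setName(name);
            existing.setDescription(description);
            storagePolicyDao.update(existing.getId(), existing);
            return existing;
        }
    }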
+package com.cloud.dc.dao; + +import com.cloud.dc.VsphereStoragePolicyVO; +import com.cloud.utils.db.GenericDao; + +import java.util.List; + +public interface VsphereStoragePolicyDao extends GenericDao { + + public VsphereStoragePolicyVO findByPolicyId(Long zoneId, String policyId); + + public List findByZoneId(Long zoneId); + +} diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDaoImpl.java new file mode 100644 index 000000000000..0cdb6ad74220 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDaoImpl.java @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.dc.dao; + +import com.cloud.dc.VsphereStoragePolicyVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import java.util.List; + +@Component +public class VsphereStoragePolicyDaoImpl extends GenericDaoBase implements VsphereStoragePolicyDao { + + protected static final Logger LOGGER = Logger.getLogger(VsphereStoragePolicyDaoImpl.class); + + private final SearchBuilder zoneSearch; + private final SearchBuilder policySearch; + + public VsphereStoragePolicyDaoImpl() { + super(); + + zoneSearch = createSearchBuilder(); + zoneSearch.and("zoneId", zoneSearch.entity().getZoneId(), SearchCriteria.Op.EQ); + zoneSearch.done(); + + policySearch = createSearchBuilder(); + policySearch.and("zoneId", policySearch.entity().getZoneId(), SearchCriteria.Op.EQ); + policySearch.and("policyId", policySearch.entity().getPolicyId(), SearchCriteria.Op.EQ); + policySearch.done(); + } + + @Override + public VsphereStoragePolicyVO findByPolicyId(Long zoneId, String policyId) { + SearchCriteria sc = policySearch.create(); + sc.setParameters("zoneId", zoneId); + sc.setParameters("policyId", policyId); + return findOneBy(sc); + } + + @Override + public List findByZoneId(Long zoneId) { + SearchCriteria sc = zoneSearch.create(); + sc.setParameters("zoneId", zoneId); + + return listBy(sc); + } +} diff --git a/engine/schema/src/main/java/com/cloud/storage/TemplateOVFPropertyVO.java b/engine/schema/src/main/java/com/cloud/storage/TemplateOVFPropertyVO.java index 425b1f22e453..cd7f04b71d6e 100644 --- a/engine/schema/src/main/java/com/cloud/storage/TemplateOVFPropertyVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/TemplateOVFPropertyVO.java @@ -28,6 +28,7 @@ @Entity @Table(name = "template_ovf_properties") +@Deprecated(since = "now" , forRemoval = true) public class TemplateOVFPropertyVO implements OVFProperty { @Id diff --git 
a/engine/schema/src/main/java/com/cloud/storage/VMTemplateDetailVO.java b/engine/schema/src/main/java/com/cloud/storage/VMTemplateDetailVO.java index 5010edfa762c..68cf45a41da3 100755 --- a/engine/schema/src/main/java/com/cloud/storage/VMTemplateDetailVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VMTemplateDetailVO.java @@ -39,7 +39,7 @@ public class VMTemplateDetailVO implements ResourceDetail { @Column(name = "name") private String name; - @Column(name = "value", length = 1024) + @Column(name = "value", length = 8191) private String value; @Column(name = "display") diff --git a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java index af04099f9a2a..5bf8cd2a573a 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java @@ -152,6 +152,9 @@ public class VMTemplateVO implements VirtualMachineTemplate { @Column(name = "parent_template_id") private Long parentTemplateId; + @Column(name = "deploy_as_is") + private boolean deployAsIs; + @Override public String getUniqueName() { return uniqueName; @@ -192,9 +195,9 @@ private VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, uuid = UUID.randomUUID().toString(); } - public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, boolean featured, boolean isExtractable, TemplateType type, String url, - boolean requiresHvm, int bits, long accountId, String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable, - HypervisorType hyperType, String templateTag, Map details, boolean sshKeyEnabled, boolean isDynamicallyScalable, boolean directDownload) { + public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, boolean featured, boolean isExtractable, TemplateType type, String url, boolean requiresHvm, int bits, long accountId, String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable, + HypervisorType hyperType, String templateTag, Map details, boolean sshKeyEnabled, boolean isDynamicallyScalable, boolean directDownload, + boolean deployAsIs) { this(id, name, format, @@ -219,6 +222,7 @@ public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, dynamicallyScalable = isDynamicallyScalable; state = State.Active; this.directDownload = directDownload; + this.deployAsIs = deployAsIs; } public static VMTemplateVO createPreHostIso(Long id, String uniqueName, String name, ImageFormat format, boolean isPublic, boolean featured, TemplateType type, @@ -637,4 +641,11 @@ public void setParentTemplateId(Long parentTemplateId) { this.parentTemplateId = parentTemplateId; } + @Override public boolean isDeployAsIs() { + return deployAsIs; + } + + public void setDeployAsIs(boolean deployAsIs) { + this.deployAsIs = deployAsIs; + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java index 1e74d25632b7..0c39a8c581aa 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java @@ -21,11 +21,19 @@ import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; +import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + +import javax.inject.Inject; +import java.util.List; public class StoragePoolDetailsDaoImpl extends ResourceDetailsDaoBase implements StoragePoolDetailsDao, ScopedConfigStorage { + @Inject + PrimaryDataStoreDao _storagePoolDao; + public StoragePoolDetailsDaoImpl() { } @@ -42,6 +50,10 @@ public String getConfigValue(long id, ConfigKey key) { @Override public void addDetail(long resourceId, String key, String value, boolean display) { + List childPools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(resourceId); + for (StoragePoolVO childPool : childPools) { + super.addDetail(new StoragePoolDetailVO(childPool.getId(), key, value, display)); + } super.addDetail(new StoragePoolDetailVO(resourceId, key, value, display)); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/TemplateOVFPropertiesDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/TemplateOVFPropertiesDao.java index eb78f2023acc..bf8fabced0de 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/TemplateOVFPropertiesDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/TemplateOVFPropertiesDao.java @@ -22,6 +22,7 @@ import java.util.List; +@Deprecated public interface TemplateOVFPropertiesDao extends GenericDao { boolean existsOption(long templateId, String key); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/TemplateOVFPropertiesDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/TemplateOVFPropertiesDaoImpl.java index cf6a280b0348..ef50d4df6cb1 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/TemplateOVFPropertiesDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/TemplateOVFPropertiesDaoImpl.java @@ -29,6 +29,7 @@ import java.util.List; @Component +@Deprecated public class TemplateOVFPropertiesDaoImpl extends GenericDaoBase implements TemplateOVFPropertiesDao { private final static Logger s_logger = Logger.getLogger(TemplateOVFPropertiesDaoImpl.class); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDetailsDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDetailsDao.java index fe69630ae2e5..b067a1df2523 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDetailsDao.java @@ -16,11 +16,24 @@ // under the License.
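The addDetail change above fans a detail written on a datastore-cluster parent pool out to every child pool, so pool-scoped settings resolve identically on each member datastore. A minimal sketch of the intended call pattern follows; the wiring class is hypothetical, the setting key is only an example, and the findDetail accessor is the ResourceDetailsDao method already used elsewhere in this diff.

    import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
    import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
    import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
    import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;

    public class DatastoreClusterDetailSketch {

        /** Write one detail on the parent pool and show that every child pool now carries it too. */
        public void tagCluster(StoragePoolDetailsDao detailsDao, PrimaryDataStoreDao poolDao, long clusterPoolId) {
            // With the change above, this single call also persists one StoragePoolDetailVO per child pool.
            detailsDao.addDetail(clusterPoolId, "storage.overprovisioning.factor", "2.0", true);

            for (StoragePoolVO child : poolDao.listChildStoragePoolsInDatastoreCluster(clusterPoolId)) {
                StoragePoolDetailVO detail = detailsDao.findDetail(child.getId(), "storage.overprovisioning.factor");
                System.out.println(child.getId() + " -> " + (detail == null ? "unset" : detail.getValue()));
            }
        }
    }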
package com.cloud.storage.dao; +import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.agent.api.to.DatadiskTO; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; import com.cloud.storage.VMTemplateDetailVO; import com.cloud.utils.db.GenericDao; +import java.util.List; + public interface VMTemplateDetailsDao extends GenericDao, ResourceDetailsDao { + boolean existsOption(long templateId, String key); + OVFPropertyTO findPropertyByTemplateAndKey(long templateId, String key); + void saveOptions(List opts); + List listPropertiesByTemplateId(long templateId); + List listNetworkRequirementsByTemplateId(long templateId); + List listDisksByTemplateId(long templateId); + + List listDetailsByTemplateId(long templateId, String prefix); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDetailsDaoImpl.java index 3e7072f6bf01..32de5cd34acf 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDetailsDaoImpl.java @@ -17,17 +17,120 @@ package com.cloud.storage.dao; +import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.agent.api.to.DatadiskTO; +import com.cloud.storage.ImageStore; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionLegacy; +import com.google.gson.Gson; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import com.cloud.storage.VMTemplateDetailVO; +import java.util.ArrayList; +import java.util.List; + @Component public class VMTemplateDetailsDaoImpl extends ResourceDetailsDaoBase implements VMTemplateDetailsDao { + private final static Logger LOGGER = Logger.getLogger(VMTemplateDetailsDaoImpl.class); + + Gson gson = new Gson(); + + SearchBuilder OptionsSearchBuilder; + + public VMTemplateDetailsDaoImpl() { + super(); + OptionsSearchBuilder = createSearchBuilder(); + OptionsSearchBuilder.and("resourceId", OptionsSearchBuilder.entity().getResourceId(), SearchCriteria.Op.EQ); + OptionsSearchBuilder.and("name", OptionsSearchBuilder.entity().getName(), SearchCriteria.Op.EQ); + OptionsSearchBuilder.done(); + } + @Override public void addDetail(long resourceId, String key, String value, boolean display) { super.addDetail(new VMTemplateDetailVO(resourceId, key, value, display)); } + + @Override + public boolean existsOption(long templateId, String key) { + return findPropertyByTemplateAndKey(templateId, key) != null; + } + + @Override + public OVFPropertyTO findPropertyByTemplateAndKey(long templateId, String key) { + SearchCriteria sc = OptionsSearchBuilder.create(); + sc.setParameters("resourceId", templateId); + sc.setParameters("name", key.startsWith(ApiConstants.ACS_PROPERTY) ? 
key : ApiConstants.ACS_PROPERTY + "-" + key); + OVFPropertyTO property = null; + VMTemplateDetailVO detail = findOneBy(sc); + if (detail != null) { + property = gson.fromJson(detail.getValue(), OVFPropertyTO.class); + } + return property; + } + + @Override + public void saveOptions(List opts) { + if (CollectionUtils.isEmpty(opts)) { + return; + } + TransactionLegacy txn = TransactionLegacy.currentTxn(); + txn.start(); + for (OVFPropertyTO opt : opts) { + String json = gson.toJson(opt); + VMTemplateDetailVO templateDetailVO = new VMTemplateDetailVO(opt.getTemplateId(), ApiConstants.ACS_PROPERTY + "-" + opt.getKey(), json, opt.isUserConfigurable()); + persist(templateDetailVO); + } + txn.commit(); + } + + @Override + public List listPropertiesByTemplateId(long templateId) { + List ovfProperties = listDetailsByTemplateId(templateId, ImageStore.ACS_PROPERTY_PREFIX); + List properties = new ArrayList<>(); + for (VMTemplateDetailVO property : ovfProperties) { + OVFPropertyTO ovfPropertyTO = gson.fromJson(property.getValue(), OVFPropertyTO.class); + properties.add(ovfPropertyTO); + } + return properties; + } + + @Override + public List listNetworkRequirementsByTemplateId(long templateId) { + List networkDetails = listDetailsByTemplateId(templateId, ImageStore.REQUIRED_NETWORK_PREFIX); + List networkPrereqs = new ArrayList<>(); + for (VMTemplateDetailVO property : networkDetails) { + NetworkPrerequisiteTO ovfPropertyTO = gson.fromJson(property.getValue(), NetworkPrerequisiteTO.class); + networkPrereqs.add(ovfPropertyTO); + } + return networkPrereqs; + } + + @Override + public List listDisksByTemplateId(long templateId) { + List diskDefinitions = listDetailsByTemplateId(templateId, ImageStore.DISK_DEFINITION_PREFIX); + List disks = new ArrayList<>(); + for (VMTemplateDetailVO detail : diskDefinitions) { + DatadiskTO datadiskTO = gson.fromJson(detail.getValue(), DatadiskTO.class); + disks.add(datadiskTO); + } + return disks; + } + + @Override + public List listDetailsByTemplateId(long templateId, String prefix) { + SearchCriteria ssc = createSearchCriteria(); + ssc.addAnd("resourceId", SearchCriteria.Op.EQ, templateId); + ssc.addAnd("name", SearchCriteria.Op.LIKE, prefix + "%"); + + return search(ssc, null); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java index 32874701128d..023139947377 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java @@ -264,7 +264,7 @@ public List listByTemplateStates(long templateId, VMTem @Override public VMTemplateStoragePoolVO findByHostTemplate(Long hostId, Long templateId) { List result = listByHostTemplate(hostId, templateId); - return (result.size() == 0) ? null : result.get(1); + return (result.size() == 0) ? 
null : result.get(0); // result.get(1) threw an ArrayIndexOutOfBoundsException whenever only a single mapping exists; return the first entry instead } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDao.java index e201ae27fdc8..815f1693ee54 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDao.java @@ -26,4 +26,5 @@ public interface DiskOfferingDetailsDao extends GenericDao, ResourceDetailsDao { List findDomainIds(final long resourceId); List findZoneIds(final long resourceId); + String getDetail(Long diskOfferingId, String key); } \ No newline at end of file diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDaoImpl.java index da0ec5bc580d..5408f2d7f036 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/DiskOfferingDetailsDaoImpl.java @@ -56,4 +56,14 @@ public List findZoneIds(long resourceId) { } return zoneIds; } + + @Override + public String getDetail(Long diskOfferingId, String key) { + String detailValue = null; + DiskOfferingDetailVO diskOfferingDetail = findDetail(diskOfferingId, key); + if (diskOfferingDetail != null) { + detailValue = diskOfferingDetail.getValue(); + } + return detailValue; + } } \ No newline at end of file diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java index 2398e91c90c7..5712411a48e6 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java @@ -123,4 +123,8 @@ public interface PrimaryDataStoreDao extends GenericDao { List listLocalStoragePoolByPath(long datacenterId, String path); void deletePoolTags(long poolId); + + List listChildStoragePoolsInDatastoreCluster(long poolId); + + Integer countAll(); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index fee9dc28bd02..1d1e0a0198db 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -552,4 +552,19 @@ public List findZoneWideStoragePoolsByHypervisor(long dataCenterI public void deletePoolTags(long poolId) { _tagsDao.deleteTags(poolId); } + + @Override + public List listChildStoragePoolsInDatastoreCluster(long poolId) { + QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); + sc.and(sc.entity().getParent(), Op.EQ, poolId); + return sc.list(); + } + + @Override + public Integer countAll() { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("parent", SearchCriteria.Op.EQ, 0); + sc.addAnd("removed", SearchCriteria.Op.NULL); + return getCount(sc); + } } diff --git
a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index 24fcaa03f56c..c7eb72b469e1 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -119,6 +119,9 @@ public class StoragePoolVO implements StoragePool { @Enumerated(value = EnumType.STRING) private HypervisorType hypervisor; + @Column(name = "parent") + private Long parent = 0L; + @Override public long getId() { return id; @@ -373,6 +376,14 @@ public boolean isLocal() { return !isShared(); } + public Long getParent() { + return parent; + } + + public void setParent(Long parent) { + this.parent = parent; + } + @Override public boolean isInMaintenance() { return status == StoragePoolStatus.PrepareForMaintenance || status == StoragePoolStatus.Maintenance || status == StoragePoolStatus.ErrorInMaintenance || diff --git a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml index 7faf85cef4b5..98e6c4584959 100644 --- a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml +++ b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml @@ -293,4 +293,5 @@ + diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41400to41500.sql b/engine/schema/src/main/resources/META-INF/db/schema-41400to41500.sql index 6ec5dcd5b818..32c2fc8e4558 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41400to41500.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41400to41500.sql @@ -31,4 +31,255 @@ UPDATE `cloud`.`roles` SET `is_default` = 1 WHERE id IN (1, 2, 3, 4); INSERT INTO `cloud`.`roles` (`uuid`, `name`, `role_type`, `description`, `is_default`) VALUES (UUID(), 'Read-Only Admin - Default', 'Admin', 'Default read-only admin role', 1); INSERT INTO `cloud`.`roles` (`uuid`, `name`, `role_type`, `description`, `is_default`) VALUES (UUID(), 'Read-Only User - Default', 'User', 'Default read-only user role', 1); INSERT INTO `cloud`.`roles` (`uuid`, `name`, `role_type`, `description`, `is_default`) VALUES (UUID(), 'Support Admin - Default', 'Admin', 'Default support admin role', 1); -INSERT INTO `cloud`.`roles` (`uuid`, `name`, `role_type`, `description`, `is_default`) VALUES (UUID(), 'Support User - Default', 'User', 'Default support user role', 1); \ No newline at end of file +INSERT INTO `cloud`.`roles` (`uuid`, `name`, `role_type`, `description`, `is_default`) VALUES (UUID(), 'Support User - Default', 'User', 'Default support user role', 1); + +CREATE TABLE IF NOT EXISTS `cloud`.`vsphere_storage_policy` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `uuid` varchar(255) UNIQUE, + `zone_id` bigint(20) unsigned NOT NULL COMMENT 'id of the zone', + `policy_id` varchar(255) NOT NULL COMMENT 'the identifier of the Storage Policy in vSphere DataCenter', + `name` varchar(255) NOT NULL COMMENT 'name of the storage policy', + `description` text COMMENT 'description of the storage policy', + `update_time` datetime COMMENT 'last updated when policy imported', + `removed` datetime COMMENT 'date removed', + PRIMARY KEY (`id`), + KEY `fk_vsphere_storage_policy__zone_id` (`zone_id`), + UNIQUE KEY (`zone_id`, `policy_id`), 
+ CONSTRAINT `fk_vsphere_storage_policy__zone_id` FOREIGN KEY (`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `cloud`.`storage_pool` ADD COLUMN `parent` BIGINT(20) UNSIGNED NOT NULL DEFAULT 0 COMMENT 'ID of the Datastore cluster (storage pool) if this is a child in that Datastore cluster'; + +-- Added parent column to support datastore clusters in vmware vsphere +DROP VIEW IF EXISTS `cloud`.`storage_pool_view`; +CREATE VIEW `cloud`.`storage_pool_view` AS + SELECT + `storage_pool`.`id` AS `id`, + `storage_pool`.`uuid` AS `uuid`, + `storage_pool`.`name` AS `name`, + `storage_pool`.`status` AS `status`, + `storage_pool`.`path` AS `path`, + `storage_pool`.`pool_type` AS `pool_type`, + `storage_pool`.`host_address` AS `host_address`, + `storage_pool`.`created` AS `created`, + `storage_pool`.`removed` AS `removed`, + `storage_pool`.`capacity_bytes` AS `capacity_bytes`, + `storage_pool`.`capacity_iops` AS `capacity_iops`, + `storage_pool`.`scope` AS `scope`, + `storage_pool`.`hypervisor` AS `hypervisor`, + `storage_pool`.`storage_provider_name` AS `storage_provider_name`, + `storage_pool`.`parent` AS `parent`, + `cluster`.`id` AS `cluster_id`, + `cluster`.`uuid` AS `cluster_uuid`, + `cluster`.`name` AS `cluster_name`, + `cluster`.`cluster_type` AS `cluster_type`, + `data_center`.`id` AS `data_center_id`, + `data_center`.`uuid` AS `data_center_uuid`, + `data_center`.`name` AS `data_center_name`, + `data_center`.`networktype` AS `data_center_type`, + `host_pod_ref`.`id` AS `pod_id`, + `host_pod_ref`.`uuid` AS `pod_uuid`, + `host_pod_ref`.`name` AS `pod_name`, + `storage_pool_tags`.`tag` AS `tag`, + `op_host_capacity`.`used_capacity` AS `disk_used_capacity`, + `op_host_capacity`.`reserved_capacity` AS `disk_reserved_capacity`, + `async_job`.`id` AS `job_id`, + `async_job`.`uuid` AS `job_uuid`, + `async_job`.`job_status` AS `job_status`, + `async_job`.`account_id` AS `job_account_id` + FROM + ((((((`storage_pool` + LEFT JOIN `cluster` ON ((`storage_pool`.`cluster_id` = `cluster`.`id`))) + LEFT JOIN `data_center` ON ((`storage_pool`.`data_center_id` = `data_center`.`id`))) + LEFT JOIN `host_pod_ref` ON ((`storage_pool`.`pod_id` = `host_pod_ref`.`id`))) + LEFT JOIN `storage_pool_tags` ON (((`storage_pool_tags`.`pool_id` = `storage_pool`.`id`)))) + LEFT JOIN `op_host_capacity` ON (((`storage_pool`.`id` = `op_host_capacity`.`host_id`) + AND (`op_host_capacity`.`capacity_type` IN (3 , 9))))) + LEFT JOIN `async_job` ON (((`async_job`.`instance_id` = `storage_pool`.`id`) + AND (`async_job`.`instance_type` = 'StoragePool') + AND (`async_job`.`job_status` = 0)))); + +-- Add passthrough instruction for appliance deployments +ALTER TABLE `cloud`.`vm_template` ADD COLUMN `deploy_as_is` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'True if the template should be deployed with disks and networks as defined'; + +ALTER TABLE `cloud`.`vm_template_details` MODIFY COLUMN `value` varchar(8192) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL; + +-- Changes to template_view for both deploying multidisk OVA/vApp as is +DROP VIEW IF EXISTS `cloud`.`template_view`; +CREATE VIEW `cloud`.`template_view` AS + SELECT + `vm_template`.`id` AS `id`, + `vm_template`.`uuid` AS `uuid`, + `vm_template`.`unique_name` AS `unique_name`, + `vm_template`.`name` AS `name`, + `vm_template`.`public` AS `public`, + `vm_template`.`featured` AS `featured`, + `vm_template`.`type` AS `type`, + `vm_template`.`hvm` AS `hvm`, + `vm_template`.`bits` AS `bits`, + `vm_template`.`url` AS 
`url`, + `vm_template`.`format` AS `format`, + `vm_template`.`created` AS `created`, + `vm_template`.`checksum` AS `checksum`, + `vm_template`.`display_text` AS `display_text`, + `vm_template`.`enable_password` AS `enable_password`, + `vm_template`.`dynamically_scalable` AS `dynamically_scalable`, + `vm_template`.`state` AS `template_state`, + `vm_template`.`guest_os_id` AS `guest_os_id`, + `guest_os`.`uuid` AS `guest_os_uuid`, + `guest_os`.`display_name` AS `guest_os_name`, + `vm_template`.`bootable` AS `bootable`, + `vm_template`.`prepopulate` AS `prepopulate`, + `vm_template`.`cross_zones` AS `cross_zones`, + `vm_template`.`hypervisor_type` AS `hypervisor_type`, + `vm_template`.`extractable` AS `extractable`, + `vm_template`.`template_tag` AS `template_tag`, + `vm_template`.`sort_key` AS `sort_key`, + `vm_template`.`removed` AS `removed`, + `vm_template`.`enable_sshkey` AS `enable_sshkey`, + `parent_template`.`id` AS `parent_template_id`, + `parent_template`.`uuid` AS `parent_template_uuid`, + `source_template`.`id` AS `source_template_id`, + `source_template`.`uuid` AS `source_template_uuid`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name`, + `data_center`.`id` AS `data_center_id`, + `data_center`.`uuid` AS `data_center_uuid`, + `data_center`.`name` AS `data_center_name`, + `launch_permission`.`account_id` AS `lp_account_id`, + `template_store_ref`.`store_id` AS `store_id`, + `image_store`.`scope` AS `store_scope`, + `template_store_ref`.`state` AS `state`, + `template_store_ref`.`download_state` AS `download_state`, + `template_store_ref`.`download_pct` AS `download_pct`, + `template_store_ref`.`error_str` AS `error_str`, + `template_store_ref`.`size` AS `size`, + `template_store_ref`.physical_size AS `physical_size`, + `template_store_ref`.`destroyed` AS `destroyed`, + `template_store_ref`.`created` AS `created_on_store`, + `vm_template_details`.`name` AS `detail_name`, + `vm_template_details`.`value` AS `detail_value`, + `resource_tags`.`id` AS `tag_id`, + `resource_tags`.`uuid` AS `tag_uuid`, + `resource_tags`.`key` AS `tag_key`, + `resource_tags`.`value` AS `tag_value`, + `resource_tags`.`domain_id` AS `tag_domain_id`, + `domain`.`uuid` AS `tag_domain_uuid`, + `domain`.`name` AS `tag_domain_name`, + `resource_tags`.`account_id` AS `tag_account_id`, + `account`.`account_name` AS `tag_account_name`, + `resource_tags`.`resource_id` AS `tag_resource_id`, + `resource_tags`.`resource_uuid` AS `tag_resource_uuid`, + `resource_tags`.`resource_type` AS `tag_resource_type`, + `resource_tags`.`customer` AS `tag_customer`, + CONCAT(`vm_template`.`id`, + '_', + IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`, + `vm_template`.`direct_download` AS `direct_download`, + `vm_template`.`deploy_as_is` AS `deploy_as_is` + FROM + (((((((((((((`vm_template` + JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`))) + JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`))) + JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`))) + LEFT JOIN `projects` ON ((`projects`.`project_account_id` = `account`.`id`))) + LEFT JOIN `vm_template_details` ON ((`vm_template_details`.`template_id` = `vm_template`.`id`))) + LEFT JOIN 
`vm_template` `source_template` ON ((`source_template`.`id` = `vm_template`.`source_template_id`))) + LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`) + AND (`template_store_ref`.`store_role` = 'Image') + AND (`template_store_ref`.`destroyed` = 0)))) + LEFT JOIN `vm_template` `parent_template` ON ((`parent_template`.`id` = `vm_template`.`parent_template_id`))) + LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`) + AND (`template_store_ref`.`store_id` IS NOT NULL) + AND (`image_store`.`id` = `template_store_ref`.`store_id`)))) + LEFT JOIN `template_zone_ref` ON (((`template_zone_ref`.`template_id` = `vm_template`.`id`) + AND ISNULL(`template_store_ref`.`store_id`) + AND ISNULL(`template_zone_ref`.`removed`)))) + LEFT JOIN `data_center` ON (((`image_store`.`data_center_id` = `data_center`.`id`) + OR (`template_zone_ref`.`zone_id` = `data_center`.`id`)))) + LEFT JOIN `launch_permission` ON ((`launch_permission`.`template_id` = `vm_template`.`id`))) + LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_template`.`id`) + AND ((`resource_tags`.`resource_type` = 'Template') + OR (`resource_tags`.`resource_type` = 'ISO'))))); + +-- Add the default offering for deploy-as-is templates. Not visible to the user +ALTER TABLE `cloud`.`disk_offering` MODIFY COLUMN `unique_name` varchar(255); +INSERT IGNORE INTO `cloud`.`disk_offering` (name, uuid, display_text, customized, unique_name, disk_size, system_use, type, display_offering) +VALUES ('Custom Deploy-as-is Instance', UUID(), 'Custom Deploy-as-is Instance', 1, 'ApacheCloudStack.org-Custom Deploy-as-is Instance', 0, 0, 'Service', 0); +INSERT IGNORE INTO `cloud`.`service_offering` (`id`, `nw_rate`, `mc_rate`) VALUES +((SELECT `id` FROM `cloud`.`disk_offering` WHERE `unique_name` = 'ApacheCloudStack.org-Custom Deploy-as-is Instance'), null, null); + +ALTER VIEW `cloud`.`service_offering_view` AS + SELECT + `service_offering`.`id` AS `id`, + `disk_offering`.`uuid` AS `uuid`, + `disk_offering`.`name` AS `name`, + `disk_offering`.`display_text` AS `display_text`, + `disk_offering`.`provisioning_type` AS `provisioning_type`, + `disk_offering`.`created` AS `created`, + `disk_offering`.`tags` AS `tags`, + `disk_offering`.`removed` AS `removed`, + `disk_offering`.`use_local_storage` AS `use_local_storage`, + `disk_offering`.`system_use` AS `system_use`, + `disk_offering`.`customized_iops` AS `customized_iops`, + `disk_offering`.`min_iops` AS `min_iops`, + `disk_offering`.`max_iops` AS `max_iops`, + `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, + `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, + `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, + `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, + `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, + `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, + `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, + `disk_offering`.`iops_read_rate` AS `iops_read_rate`, + `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, + `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, + `disk_offering`.`iops_write_rate` AS `iops_write_rate`, + `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, + `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, + `disk_offering`.`cache_mode` AS `cache_mode`, + `service_offering`.`cpu` AS `cpu`, + `service_offering`.`speed` AS `speed`, + 
`service_offering`.`ram_size` AS `ram_size`, + `service_offering`.`nw_rate` AS `nw_rate`, + `service_offering`.`mc_rate` AS `mc_rate`, + `service_offering`.`ha_enabled` AS `ha_enabled`, + `service_offering`.`limit_cpu_use` AS `limit_cpu_use`, + `service_offering`.`host_tag` AS `host_tag`, + `service_offering`.`default_use` AS `default_use`, + `service_offering`.`vm_type` AS `vm_type`, + `service_offering`.`sort_key` AS `sort_key`, + `service_offering`.`is_volatile` AS `is_volatile`, + `service_offering`.`deployment_planner` AS `deployment_planner`, + GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, + GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, + GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, + GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, + GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, + GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, + GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name + FROM + `cloud`.`service_offering` + INNER JOIN + `cloud`.`disk_offering_view` AS `disk_offering` ON service_offering.id = disk_offering.id + LEFT JOIN + `cloud`.`service_offering_details` AS `domain_details` ON `domain_details`.`service_offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid' + LEFT JOIN + `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) + LEFT JOIN + `cloud`.`service_offering_details` AS `zone_details` ON `zone_details`.`service_offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid' + LEFT JOIN + `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + WHERE + `disk_offering`.`state`='Active' + GROUP BY + `service_offering`.`id`; \ No newline at end of file diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index edf824403e17..15bf3e0117d0 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -412,7 +412,7 @@ public void handleTemplateSync(DataStore store) { VirtualMachineTemplate.Event event = VirtualMachineTemplate.Event.OperationSucceeded; // For multi-disk OVA, check and create data disk templates if (tmplt.getFormat().equals(ImageFormat.OVA)) { - if (!createOvaDataDiskTemplates(_templateFactory.getTemplate(tmlpt.getId(), store))) { + if (!createOvaDataDiskTemplates(_templateFactory.getTemplate(tmlpt.getId(), store), tmplt.isDeployAsIs())) { event = VirtualMachineTemplate.Event.OperationFailed; } } @@ -710,7 +710,7 @@ protected Void createTemplateCallback(AsyncCallbackDispatcher getTemplateDatadisksOnImageStore(TemplateInfo templateInfo) { + ImageStoreEntity tmpltStore = (ImageStoreEntity)templateInfo.getDataStore(); + return tmpltStore.getDataDiskTemplates(templateInfo); + } + + @Override + public boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate, boolean deployAsIs) { try { // Get Datadisk template (if any) for OVA List dataDiskTemplates = new ArrayList(); @@ -754,23 +760,27 @@ public boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate) { details = new HashMap<>(); } } + for (DatadiskTO diskTemplate : dataDiskTemplates) { - if (!diskTemplate.isBootable()) { - createChildDataDiskTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++); - if (!diskTemplate.isIso() && 
Strings.isNullOrEmpty(details.get(VmDetailConstants.DATA_DISK_CONTROLLER))){ - details.put(VmDetailConstants.DATA_DISK_CONTROLLER, getOvaDiskControllerDetails(diskTemplate, false)); - details.put(VmDetailConstants.DATA_DISK_CONTROLLER + diskTemplate.getDiskId(), getOvaDiskControllerDetails(diskTemplate, false)); - } - } else { - finalizeParentTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++); - if (Strings.isNullOrEmpty(VmDetailConstants.ROOT_DISK_CONTROLLER)) { - final String rootDiskController = getOvaDiskControllerDetails(diskTemplate, true); - if (!Strings.isNullOrEmpty(rootDiskController)) { - details.put(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDiskController); + if (!deployAsIs) { + if (!diskTemplate.isBootable()) { + createChildDataDiskTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++); + if (!diskTemplate.isIso() && Strings.isNullOrEmpty(details.get(VmDetailConstants.DATA_DISK_CONTROLLER))){ + details.put(VmDetailConstants.DATA_DISK_CONTROLLER, getOvaDiskControllerDetails(diskTemplate, false)); + details.put(VmDetailConstants.DATA_DISK_CONTROLLER + diskTemplate.getDiskId(), getOvaDiskControllerDetails(diskTemplate, false)); + } + } else { + finalizeParentTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++); + if (Strings.isNullOrEmpty(VmDetailConstants.ROOT_DISK_CONTROLLER)) { + final String rootDiskController = getOvaDiskControllerDetails(diskTemplate, true); + if (!Strings.isNullOrEmpty(rootDiskController)) { + details.put(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDiskController); + } } } } } + templateVO.setDetails(details); _templateDao.saveDetails(templateVO); return true; @@ -789,7 +799,7 @@ private boolean createChildDataDiskTemplate(DatadiskTO dataDiskTemplate, VMTempl String templateName = dataDiskTemplate.isIso() ? 
dataDiskTemplate.getPath().substring(dataDiskTemplate.getPath().lastIndexOf(File.separator) + 1) : template.getName() + suffix + diskCount; VMTemplateVO templateVO = new VMTemplateVO(templateId, templateName, format, false, false, false, ttype, template.getUrl(), template.requiresHvm(), template.getBits(), template.getAccountId(), null, templateName, false, guestOsId, false, template.getHypervisorType(), null, - null, false, false, false); + null, false, false, false, false); if (dataDiskTemplate.isIso()){ templateVO.setUniqueName(templateName); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index 25f27a23c1ed..cd7d76b31379 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -363,6 +363,14 @@ public boolean isDirectDownload() { return this.imageVO.isDirectDownload(); } + @Override + public boolean isDeployAsIs() { + if (this.imageVO == null) { + return false; + } + return this.imageVO.isDeployAsIs(); + } + public void setInstallPath(String installPath) { this.installPath = installPath; } diff --git a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/VolumeServiceTest.java b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/VolumeServiceTest.java index 9a680ed7fd38..ffa5c5692dd7 100644 --- a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/VolumeServiceTest.java +++ b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/VolumeServiceTest.java @@ -231,23 +231,7 @@ private TemplateInfo createTemplate() { DataStore store = createImageStore(); VMTemplateVO image = createImageData(); TemplateInfo template = imageDataFactory.getTemplate(image.getId(), store); - // AsyncCallFuture future = - // imageService.createTemplateAsync(template, store); - // future.get(); template = imageDataFactory.getTemplate(image.getId(), store); - /* - * imageProviderMgr.configure("image Provider", new HashMap()); VMTemplateVO image = createImageData(); - * ImageDataStoreProvider defaultProvider = - * imageProviderMgr.getProvider("DefaultProvider"); - * ImageDataStoreLifeCycle lifeCycle = - * defaultProvider.getLifeCycle(); ImageDataStore store = - * lifeCycle.registerDataStore("defaultHttpStore", new - * HashMap()); - * imageService.registerTemplate(image.getId(), - * store.getImageDataStoreId()); TemplateEntity te = - * imageService.getTemplateEntity(image.getId()); return te; - */ return template; } catch (Exception e) { Assert.fail("failed", e); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 727d10af1300..68efe16fd7a2 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -26,6 +26,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.exception.StorageUnavailableException; import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ 
-215,6 +216,22 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, Volume volume = volumeDao.findById(dskCh.getVolumeId()); List requestVolumes = new ArrayList<>(); requestVolumes.add(volume); + if (dskCh.getHypervisorType() == HypervisorType.VMware) { + // Skip the parent datastore cluster, consider only child storage pools in it + if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster && storageMgr.isStoragePoolDatastoreClusterParent(pool)) { + return false; + } + + try { + boolean isStoragePoolStoragepolicyComplaince = storageMgr.isStoragePoolComplaintWithStoragePolicy(requestVolumes, pool); + if (!isStoragePoolStoragepolicyComplaince) { + return false; + } + } catch (StorageUnavailableException e) { + s_logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", pool.getUuid(), e.getMessage())); + return false; + } + } return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) && storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool, plan.getClusterId()); } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index dec9b76dbc84..0bed019dca1e 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -24,15 +24,23 @@ import java.util.Date; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import javax.inject.Inject; +import com.cloud.agent.api.storage.OVFConfigurationTO; import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.agent.api.storage.OVFVirtualHardwareItemTO; +import com.cloud.agent.api.storage.OVFVirtualHardwareItemTO.HardwareResourceType; +import com.cloud.agent.api.storage.OVFVirtualHardwareSectionTO; +import com.cloud.storage.ImageStore; import com.cloud.storage.Upload; -import com.cloud.storage.dao.TemplateOVFPropertiesDao; -import com.cloud.storage.TemplateOVFPropertyVO; -import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.storage.VMTemplateDetailVO; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -77,7 +85,8 @@ import com.cloud.utils.exception.CloudRuntimeException; public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { - private static final Logger s_logger = Logger.getLogger(BaseImageStoreDriverImpl.class); + private static final Logger LOGGER = Logger.getLogger(BaseImageStoreDriverImpl.class); + @Inject protected VMTemplateDao _templateDao; @Inject @@ -97,18 +106,24 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { @Inject AlertManager _alertMgr; @Inject - VMTemplateDetailsDao _templateDetailsDao; + VMTemplateDetailsDao templateDetailsDao; @Inject DefaultEndPointSelector _defaultEpSelector; @Inject AccountDao _accountDao; @Inject ResourceLimitService _resourceLimitMgr; - @Inject - TemplateOVFPropertiesDao templateOvfPropertiesDao; protected String _proxy = null; + private static Gson gson; + + static { + GsonBuilder builder = new GsonBuilder(); + 
builder.disableHtmlEscaping(); + gson = builder.create(); + } + protected Proxy getHttpProxy() { if (_proxy == null) { return null; @@ -156,14 +171,14 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal caller.setContext(context); if (data.getType() == DataObjectType.TEMPLATE) { caller.setCallback(caller.getTarget().createTemplateAsyncCallback(null, null)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Downloading template to data store " + dataStore.getId()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Downloading template to data store " + dataStore.getId()); } _downloadMonitor.downloadTemplateToStorage(data, caller); } else if (data.getType() == DataObjectType.VOLUME) { caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Downloading volume to data store " + dataStore.getId()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Downloading volume to data store " + dataStore.getId()); } _downloadMonitor.downloadVolumeToStorage(data, caller); } @@ -173,43 +188,107 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal * Persist OVF properties as template details for template with id = templateId */ private void persistOVFProperties(List ovfProperties, long templateId) { - List listToPersist = new ArrayList<>(); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("saving properties for template %d as details", templateId)); + } for (OVFPropertyTO property : ovfProperties) { - if (!templateOvfPropertiesDao.existsOption(templateId, property.getKey())) { - TemplateOVFPropertyVO option = new TemplateOVFPropertyVO(templateId, property.getKey(), property.getType(), - property.getValue(), property.getQualifiers(), property.isUserConfigurable(), - property.getLabel(), property.getDescription(), property.isPassword()); - if (property.isPassword()) { - String encryptedPassword = DBEncryptionUtil.encrypt(property.getValue()); - option.setValue(encryptedPassword); - } - listToPersist.add(option); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("saving property %s for template %d as detail", property.getKey(), templateId)); + } + persistOvfPropertyAsSetOfTemplateDetails(templateId, property); + } + } + + private void persistOvfPropertyAsSetOfTemplateDetails(long templateId, OVFPropertyTO property) { + String key = property.getKey(); + String propKey = ImageStore.ACS_PROPERTY_PREFIX + key; + try { + String propValue = gson.toJson(property); + savePropertyAttribute(templateId, propKey, propValue); + } catch (RuntimeException re) { + LOGGER.error("gson marshalling of property object fails: " + propKey,re); + } + } + + private void persistNetworkRequirements(List networkRequirements, long templateId) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("saving network requirements for template %d as details", templateId)); + } + for (NetworkPrerequisiteTO network : networkRequirements) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("saving property %s for template %d as detail", network.getName(), templateId)); + } + persistRequiredNetworkAsASingleTemplateDetail(templateId, network); + } + } + + private void persistDiskDefinitions(List disks, long templateId) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("saving disk definitionsn for template %d as details", templateId)); + } + for (DatadiskTO disk : disks) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("saving property %s 
for template %d as detail", disk.getDiskId(), templateId)); } + persistDiskDefinitionAsASingleTemplateDetail(templateId, disk); + } + } + + private void persistRequiredNetworkAsASingleTemplateDetail(long templateId, NetworkPrerequisiteTO network) { + String key = network.getName(); + String propKey = ImageStore.REQUIRED_NETWORK_PREFIX + key; + try { + String propValue = gson.toJson(network); + savePropertyAttribute(templateId, propKey, propValue); + } catch (RuntimeException re) { + LOGGER.warn("gson marshalling of network object fails: " + propKey,re); + } + } + + private void persistDiskDefinitionAsASingleTemplateDetail(long templateId, DatadiskTO disk) { + String key = disk.getDiskId(); + String propKey = ImageStore.DISK_DEFINITION_PREFIX + key; + try { + String propValue = gson.toJson(disk); + savePropertyAttribute(templateId, propKey, propValue); + } catch (RuntimeException re) { + LOGGER.warn("gson marshalling of disk definition object fails: " + propKey,re); + } + } + + private void savePropertyAttribute(long templateId, String key, String value) { + if ( templateDetailsDao.findDetail(templateId,key) != null) { + LOGGER.debug(String.format("detail '%s' existed for template %d, deleting.", key, templateId)); + templateDetailsDao.removeDetail(templateId,key); } - if (CollectionUtils.isNotEmpty(listToPersist)) { - s_logger.debug("Persisting " + listToPersist.size() + " OVF properties for template " + templateId); - templateOvfPropertiesDao.saveOptions(listToPersist); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("template detail for template %d to save is '%s': '%s'", templateId, key, value)); } + VMTemplateDetailVO detailVO = new VMTemplateDetailVO(templateId, key, value, false); + LOGGER.debug("Persisting template details " + detailVO.getName() + " from OVF properties for template " + templateId); + templateDetailsDao.persist(detailVO); } protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher callback, CreateContext context) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Performing image store createTemplate async callback"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Performing image store createTemplate async callback"); } DownloadAnswer answer = callback.getResult(); DataObject obj = context.data; DataStore store = obj.getDataStore(); List ovfProperties = answer.getOvfProperties(); + List networkRequirements = answer.getNetworkRequirements(); + List disks = answer.getDisks(); + OVFVirtualHardwareSectionTO ovfHardwareSection = answer.getOvfHardwareSection(); TemplateDataStoreVO tmpltStoreVO = _templateStoreDao.findByStoreTemplate(store.getId(), obj.getId()); if (tmpltStoreVO != null) { if (tmpltStoreVO.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - if (CollectionUtils.isNotEmpty(ovfProperties)) { - persistOVFProperties(ovfProperties, obj.getId()); - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template is already in DOWNLOADED state, ignore further incoming DownloadAnswer"); + persistExtraDetails(obj, ovfProperties, networkRequirements, disks, ovfHardwareSection); + processOVFHardwareSection(ovfHardwareSection, obj.getId()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Template is already in DOWNLOADED state, ignore further incoming DownloadAnswer"); } return null; } @@ -240,16 +319,15 @@ protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher ovfProperties, List networkRequirements, List disks, OVFVirtualHardwareSectionTO ovfHardwareSection) { + if (LOGGER.isTraceEnabled()) { + 
LOGGER.trace(String.format("saving %d ovf properties for template '%s' as details", ovfProperties != null ? ovfProperties.size() : 0, obj.getUuid())); + } + if (CollectionUtils.isNotEmpty(ovfProperties)) { + persistOVFProperties(ovfProperties, obj.getId()); + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("saving %d required network requirements for template '%s' as details", networkRequirements != null ? networkRequirements.size() : 0, obj.getUuid())); + } + if (CollectionUtils.isNotEmpty(networkRequirements)) { + persistNetworkRequirements(networkRequirements, obj.getId()); + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("saving %d disks definitions for template '%s' as details", disks != null ? disks.size() : 0, obj.getUuid())); + } + if (CollectionUtils.isNotEmpty(disks)) { + persistDiskDefinitions(disks, obj.getId()); + } + persistOVFHardwareSectionAsTemplateDetails(ovfHardwareSection, obj.getId()); + } + + /** + * Process the OVF hardware section containing available deployment options (configuration) by matching them to service offerings + */ + private void processOVFHardwareSection(OVFVirtualHardwareSectionTO hardwareSectionTO, long templateId) { + if (hardwareSectionTO != null) { + LOGGER.debug("Processing the OVF hardware section for template with ID " + templateId); + List configurations = hardwareSectionTO.getConfigurations(); + processOVFConfigurations(configurations, templateId); + } + } + + private long calculateUnitsMultiplier(String allocationUnits, HardwareResourceType resourceType) { + long unitsMultiplier = 1; + if (StringUtils.isNotBlank(allocationUnits)) { + String[] split = allocationUnits.split("\\*"); + if (split.length > 1) { + String unit = split[0].trim(); + String prefix = split[1].trim(); + if (resourceType == HardwareResourceType.Processor && unit.equalsIgnoreCase("hertz")) { + if (prefix.equals("10^9")) { + // GHz - multiply by 1000 to get MHz + unitsMultiplier = 1000; + } + } else if (resourceType == HardwareResourceType.Memory && unit.equalsIgnoreCase("byte")) { + if (prefix.equals("2^20")) { + //MB - multiply by 1024 * 1024 to get bytes + unitsMultiplier = 1024 * 1024; + } + } + } + } + return unitsMultiplier; + } + + private void processOVFConfigurationItem(OVFVirtualHardwareItemTO item) { + String elementName = item.getElementName(); + Long limit = item.getLimit(); + Long reservation = item.getReservation(); + String allocationUnits = item.getAllocationUnits(); + Long virtualQuantity = item.getVirtualQuantity(); + long unitsMultiplier = calculateUnitsMultiplier(allocationUnits, item.getResourceType()); + long limitValue = limit * unitsMultiplier; + long reservationValue = reservation * unitsMultiplier; + LOGGER.info(String.format("Configuration name %s: - quantity: %s - limit: %s - reservation: %s", + elementName, virtualQuantity, limitValue, reservationValue)); + } + + private void processOVFConfigurations(List configurations, long templateId) { + if (CollectionUtils.isNotEmpty(configurations)) { + LOGGER.debug("Processing the OVF configurations for template with ID " + templateId); + for (OVFConfigurationTO configuration : configurations) { + LOGGER.debug(String.format("Processing the OVF configuration: %s (%s)", configuration.getId(), configuration.getLabel())); + List hardwareItems = configuration.getHardwareItems(); + if (CollectionUtils.isNotEmpty(hardwareItems)) { + LOGGER.debug("Found " + hardwareItems.size() + " hardware items for the configuration " + configuration.getId() + + ", filtering CPU and memory 
items"); + List filteredItems = hardwareItems.stream() + .filter(x -> x.getResourceType() == HardwareResourceType.Processor + || x.getResourceType() == HardwareResourceType.Memory) + .collect(Collectors.toList()); + for (OVFVirtualHardwareItemTO item : filteredItems) { + processOVFConfigurationItem(item); + } + } + } + } + } + + /** + * Persist template details for template with ID=templateId, with name=key and value=json(object) + */ + private void persistTemplateDetailGsonEncoded(long templateId, String key, Object object) { + try { + String propValue = gson.toJson(object); + savePropertyAttribute(templateId, key, propValue); + } catch (RuntimeException re) { + LOGGER.error("gson marshalling of property object fails: " + key, re); + } + } + + private void persistOVFHardwareSectionAsTemplateDetails(OVFVirtualHardwareSectionTO ovfHardwareSection, long templateId) { + if (ovfHardwareSection != null) { + if (CollectionUtils.isNotEmpty(ovfHardwareSection.getConfigurations())) { + for (OVFConfigurationTO configuration : ovfHardwareSection.getConfigurations()) { + String key = configuration.getId(); + String propKey = ImageStore.OVF_HARDWARE_CONFIGURATION_PREFIX + key; + persistTemplateDetailGsonEncoded(templateId, propKey, configuration); + } + } + if (CollectionUtils.isNotEmpty(ovfHardwareSection.getCommonHardwareItems())) { + for (OVFVirtualHardwareItemTO item : ovfHardwareSection.getCommonHardwareItems()) { + String key = item.getResourceType().getName().trim().replaceAll("\\s","") + "-" + item.getInstanceId(); + String propKey = ImageStore.OVF_HARDWARE_ITEM_PREFIX + key; + persistTemplateDetailGsonEncoded(templateId, propKey, item); + } + } + } + } + protected Void createVolumeAsyncCallback(AsyncCallbackDispatcher callback, CreateContext context) { DownloadAnswer answer = callback.getResult(); @@ -266,8 +465,8 @@ protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher getDataDiskTemplates(DataObject obj) { List dataDiskDetails = new ArrayList(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Get the data disks present in the OVA template"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Get the data disks present in the OVA template"); } DataStore store = obj.getDataStore(); GetDatadisksCommand cmd = new GetDatadisksCommand(obj.getTO()); @@ -360,7 +559,7 @@ public List getDataDiskTemplates(DataObject obj) { Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + LOGGER.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -379,14 +578,14 @@ public List getDataDiskTemplates(DataObject obj) { public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, boolean bootable, long fileSize, AsyncCompletionCallback callback) { Answer answer = null; String errMsg = null; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Create Datadisk template: " + dataDiskTemplate.getId()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Create Datadisk template: " + dataDiskTemplate.getId()); } CreateDatadiskTemplateCommand cmd = new CreateDatadiskTemplateCommand(dataDiskTemplate.getTO(), path, diskId, fileSize, bootable); EndPoint ep = _defaultEpSelector.select(dataDiskTemplate.getDataStore()); if (ep == null) { errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + LOGGER.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = 
ep.sendMessage(cmd); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/TemplateEntityImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/TemplateEntityImpl.java deleted file mode 100644 index b027c42a86e3..000000000000 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/TemplateEntityImpl.java +++ /dev/null @@ -1,314 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image; - -import java.lang.reflect.Method; -import java.util.Date; -import java.util.List; -import java.util.Map; - -import org.apache.cloudstack.engine.cloud.entity.api.TemplateEntity; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; -import org.apache.cloudstack.storage.image.datastore.ImageStoreInfo; - -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.Storage.TemplateType; -import com.cloud.template.VirtualMachineTemplate; - -public class TemplateEntityImpl implements TemplateEntity { - protected TemplateInfo templateInfo; - - @Override - public State getState() { - return templateInfo.getState(); - } - - public TemplateEntityImpl(TemplateInfo templateInfo) { - this.templateInfo = templateInfo; - } - - public ImageStoreInfo getImageDataStore() { - return (ImageStoreInfo)templateInfo.getDataStore(); - } - - public long getImageDataStoreId() { - return getImageDataStore().getImageStoreId(); - } - - public TemplateInfo getTemplateInfo() { - return templateInfo; - } - - @Override - public String getUuid() { - return templateInfo.getUuid(); - } - - @Override - public long getId() { - return templateInfo.getId(); - } - - public String getExternalId() { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getCurrentState() { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getDesiredState() { - // TODO Auto-generated method stub - return null; - } - - @Override - public Date getCreatedTime() { - // TODO Auto-generated method stub - return null; - } - - @Override - public Date getLastUpdatedTime() { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getOwner() { - // TODO Auto-generated method stub - return null; - } - - @Override - public Map getDetails() { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean isDynamicallyScalable() { - return false; - } - - @Override - public void addDetail(String name, String value) { - // TODO Auto-generated method stub - - } - - @Override - public void delDetail(String name, String value) { - // TODO Auto-generated method stub - - } - - @Override - public void updateDetail(String name, 
String value) { - // TODO Auto-generated method stub - - } - - @Override - public List getApplicableActions() { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean isFeatured() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean isPublicTemplate() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean isExtractable() { - // TODO Auto-generated method stub - return false; - } - - @Override - public String getName() { - // TODO Auto-generated method stub - return null; - } - - @Override - public ImageFormat getFormat() { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean isRequiresHvm() { - // TODO Auto-generated method stub - return false; - } - - @Override - public String getDisplayText() { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean isEnablePassword() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean isEnableSshKey() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean isCrossZones() { - // TODO Auto-generated method stub - return false; - } - - @Override - public Date getCreated() { - // TODO Auto-generated method stub - return null; - } - - @Override - public long getGuestOSId() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public boolean isBootable() { - // TODO Auto-generated method stub - return false; - } - - @Override - public TemplateType getTemplateType() { - // TODO Auto-generated method stub - return null; - } - - @Override - public HypervisorType getHypervisorType() { - // TODO Auto-generated method stub - return null; - } - - @Override - public int getBits() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public String getUniqueName() { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getUrl() { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getChecksum() { - // TODO Auto-generated method stub - return null; - } - - @Override - public Long getSourceTemplateId() { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getTemplateTag() { - // TODO Auto-generated method stub - return null; - } - - @Override - public long getAccountId() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public long getDomainId() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public long getPhysicalSize() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public long getVirtualSize() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public Class getEntityType() { - return VirtualMachineTemplate.class; - } - - @Override - public long getUpdatedCount() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public void incrUpdatedCount() { - // TODO Auto-generated method stub - } - - @Override - public Date getUpdated() { - return null; - } - - @Override - public Long getParentTemplateId() { - return null; - } -} diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index 31b57080aec6..ac2c93567c33 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -161,6 +161,13 @@ public DataStore attachCluster(DataStore store) { pool.setScope(ScopeType.CLUSTER); pool.setStatus(StoragePoolStatus.Up); this.dataStoreDao.update(pool.getId(), pool); + if(pool.getPoolType() == StoragePoolType.DatastoreCluster && pool.getParent() == 0) { + List childDatastores = dataStoreDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); + for (StoragePoolVO child : childDatastores) { + child.setScope(ScopeType.CLUSTER); + this.dataStoreDao.update(child.getId(), child); + } + } return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); } @@ -178,6 +185,13 @@ public DataStore attachZone(DataStore store, HypervisorType hypervisor) { pool.setHypervisor(hypervisor); pool.setStatus(StoragePoolStatus.Up); this.dataStoreDao.update(pool.getId(), pool); + if(pool.getPoolType() == StoragePoolType.DatastoreCluster && pool.getParent() == 0) { + List childDatastores = dataStoreDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); + for (StoragePoolVO child : childDatastores) { + child.setScope(ScopeType.ZONE); + this.dataStoreDao.update(child.getId(), child); + } + } return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index 64533d54d2f8..5c55a36c5b5b 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -18,28 +18,34 @@ */ package org.apache.cloudstack.storage.datastore.provider; -import java.util.List; - -import javax.inject.Inject; - -import org.apache.log4j.Logger; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; - import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ModifyStoragePoolAnswer; import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; import com.cloud.alert.AlertManager; import com.cloud.exception.StorageConflictException; import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Storage; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolTagsDao; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.lang.StringUtils; +import org.apache.log4j.Logger; + +import javax.inject.Inject; +import java.util.HashMap; +import java.util.List; 
+import java.util.Map; public class DefaultHostListener implements HypervisorHostListener { private static final Logger s_logger = Logger.getLogger(DefaultHostListener.class); @@ -53,6 +59,10 @@ public class DefaultHostListener implements HypervisorHostListener { StoragePoolHostDao storagePoolHostDao; @Inject PrimaryDataStoreDao primaryStoreDao; + @Inject + StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + StoragePoolTagsDao storagePoolTagsDao; @Override public boolean hostAdded(long hostId) { @@ -90,7 +100,52 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep } } } + StoragePoolVO poolVO = this.primaryStoreDao.findById(poolId); + updateStoragePoolHostVOAndDetails(poolVO, hostId, mspAnswer); + + for (ModifyStoragePoolAnswer childDataStoreAnswer : ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren()) { + StoragePoolInfo childStoragePoolInfo = childDataStoreAnswer.getPoolInfo(); + StoragePoolVO dataStoreVO = primaryStoreDao.findPoolByUUID(childStoragePoolInfo.getUuid()); + if (dataStoreVO != null) { + continue; + } + dataStoreVO = new StoragePoolVO(); + dataStoreVO.setStorageProviderName(poolVO.getStorageProviderName()); + dataStoreVO.setHostAddress(childStoragePoolInfo.getHost()); + dataStoreVO.setPoolType(Storage.StoragePoolType.PreSetup); + dataStoreVO.setPath(childStoragePoolInfo.getHostPath()); + dataStoreVO.setPort(poolVO.getPort()); + dataStoreVO.setName(childStoragePoolInfo.getName()); + dataStoreVO.setUuid(childStoragePoolInfo.getUuid()); + dataStoreVO.setDataCenterId(poolVO.getDataCenterId()); + dataStoreVO.setPodId(poolVO.getPodId()); + dataStoreVO.setClusterId(poolVO.getClusterId()); + dataStoreVO.setStatus(StoragePoolStatus.Up); + dataStoreVO.setUserInfo(poolVO.getUserInfo()); + dataStoreVO.setManaged(poolVO.isManaged()); + dataStoreVO.setCapacityIops(poolVO.getCapacityIops()); + dataStoreVO.setCapacityBytes(childDataStoreAnswer.getPoolInfo().getCapacityBytes()); + dataStoreVO.setUsedBytes(childDataStoreAnswer.getPoolInfo().getCapacityBytes() - childDataStoreAnswer.getPoolInfo().getAvailableBytes()); + dataStoreVO.setHypervisor(poolVO.getHypervisor()); + dataStoreVO.setScope(poolVO.getScope()); + dataStoreVO.setParent(poolVO.getId()); + + Map details = new HashMap<>(); + if(StringUtils.isNotEmpty(childDataStoreAnswer.getPoolType())) { + details.put("pool_type", childDataStoreAnswer.getPoolType()); + } + + List storageTags = storagePoolTagsDao.getStoragePoolTags(poolId); + primaryStoreDao.persist(dataStoreVO, details, storageTags); + + updateStoragePoolHostVOAndDetails(dataStoreVO, hostId, childDataStoreAnswer); + } + s_logger.info("Connection established between storage pool " + pool + " and host " + hostId); + return true; + } + + private void updateStoragePoolHostVOAndDetails(StoragePool pool, long hostId, ModifyStoragePoolAnswer mspAnswer) { StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(pool.getId(), hostId); if (poolHost == null) { poolHost = new StoragePoolHostVO(pool.getId(), hostId, mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); @@ -99,13 +154,17 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); } - StoragePoolVO poolVO = this.primaryStoreDao.findById(poolId); + StoragePoolVO poolVO = this.primaryStoreDao.findById(pool.getId()); poolVO.setUsedBytes(mspAnswer.getPoolInfo().getCapacityBytes() - mspAnswer.getPoolInfo().getAvailableBytes()); 
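+        // This helper runs for the datastore cluster pool itself and once per child datastore answer,
+        // so each pool gets its own StoragePoolHostVO entry and refreshed capacity figures.
+        // Used bytes above are derived from the host report as capacity minus available
+        // (for example, 10 TiB capacity with 4 TiB free is recorded as 6 TiB used);
+        // the total capacity is refreshed just below before the pool row is updated.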
poolVO.setCapacityBytes(mspAnswer.getPoolInfo().getCapacityBytes()); + if(StringUtils.isNotEmpty(mspAnswer.getPoolType())) { + StoragePoolDetailVO poolType = storagePoolDetailsDao.findDetail(pool.getId(), "pool_type"); + if (poolType == null) { + StoragePoolDetailVO storagePoolDetailVO = new StoragePoolDetailVO(pool.getId(), "pool_type", mspAnswer.getPoolType(), false); + storagePoolDetailsDao.persist(storagePoolDetailVO); + } + } primaryStoreDao.update(pool.getId(), poolVO); - - s_logger.info("Connection established between storage pool " + pool + " and host " + hostId); - return true; } @Override diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 690a1124402d..505cbf3b3621 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -21,6 +21,8 @@ import javax.inject.Inject; import com.cloud.storage.MigrationOptions; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; @@ -72,6 +74,9 @@ public class VolumeObject implements VolumeInfo { VMInstanceDao vmInstanceDao; @Inject DiskOfferingDao diskOfferingDao; + @Inject + VMTemplateDao templateDao; + private Object payload; private MigrationOptions migrationOptions; private boolean directDownload; @@ -705,6 +710,12 @@ public Long getRefCount() { return null; } + @Override + public boolean isDeployAsIs() { + VMTemplateVO template = templateDao.findById(getTemplateId()); + return template != null && template.isDeployAsIs(); + } + @Override public void processEventOnly(ObjectInDataStoreStateMachine.Event event, Answer answer) { try { diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 77413ad6c2b6..9bcb131e12f4 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -121,6 +121,8 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; +import static com.cloud.storage.resource.StorageProcessor.REQUEST_TEMPLATE_RELOAD; + @Component public class VolumeServiceImpl implements VolumeService { private static final Logger s_logger = Logger.getLogger(VolumeServiceImpl.class); @@ -572,7 +574,7 @@ protected void createBaseImageAsync(VolumeInfo volume, PrimaryDataStore dataStor s_logger.info("Unable to acquire lock on VMTemplateStoragePool " + templatePoolRefId); } templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId()); - if (templatePoolRef != null && templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { + if (templatePoolRef != null && templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready && !template.isDeployAsIs()) { s_logger.info( "Unable to acquire lock on VMTemplateStoragePool " + templatePoolRefId + ", But Template " + template.getUniqueName() + " is already copied to primary storage, skip copying"); createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future); @@ -585,7 +587,7 
@@ protected void createBaseImageAsync(VolumeInfo volume, PrimaryDataStore dataStor s_logger.info("lock is acquired for VMTemplateStoragePool " + templatePoolRefId); } try { - if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { + if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready && !template.isDeployAsIs()) { s_logger.info("Template " + template.getUniqueName() + " is already copied to primary storage, skip copying"); createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future); return; @@ -705,6 +707,7 @@ private class CreateVolumeFromBaseImageContext extends AsyncRpcContext { private final AsyncCallFuture future; private final DataObject templateOnStore; private final SnapshotInfo snapshot; + private boolean deployAsIs; public CreateVolumeFromBaseImageContext(AsyncCompletionCallback callback, DataObject vo, DataStore primaryStore, DataObject templateOnStore, AsyncCallFuture future, SnapshotInfo snapshot) { @@ -721,7 +724,8 @@ public AsyncCallFuture getFuture() { } @DB - protected void createVolumeFromBaseImageAsync(VolumeInfo volume, DataObject templateOnPrimaryStore, PrimaryDataStore pd, AsyncCallFuture future) { + protected void createVolumeFromBaseImageAsync(VolumeInfo volume, DataObject templateOnPrimaryStore, PrimaryDataStore pd, + AsyncCallFuture future) { DataObject volumeOnPrimaryStorage = pd.create(volume); volumeOnPrimaryStorage.processEvent(Event.CreateOnlyRequested); @@ -730,10 +734,23 @@ protected void createVolumeFromBaseImageAsync(VolumeInfo volume, DataObject temp caller.setCallback(caller.getTarget().createVolumeFromBaseImageCallBack(null, null)); caller.setContext(context); - motionSrv.copyAsync(context.templateOnStore, volumeOnPrimaryStorage, caller); + if (!volume.isDeployAsIs()) { + motionSrv.copyAsync(context.templateOnStore, volumeOnPrimaryStorage, caller); + } else { + Answer answer = new Answer(null); + CopyCommandResult result = new CopyCommandResult(null, null); + result.setSuccess(true); + caller.complete(result); + } + return; } + @DB + protected Void createTemplateAsIsCallback(AsyncCallbackDispatcher callback, AsyncCallFuture context) { + return null; + } + @DB protected Void createVolumeFromBaseImageCallBack(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { DataObject vo = context.vo; @@ -750,7 +767,7 @@ protected Void createVolumeFromBaseImageCallBack(AsyncCallbackDispatcher createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template) { PrimaryDataStore pd = dataStoreMgr.getPrimaryDataStore(dataStoreId); TemplateInfo templateOnPrimaryStore = pd.getTemplate(template.getId()); - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); if (templateOnPrimaryStore == null) { createBaseImageAsync(volume, pd, template, future); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 1df72de517c3..c761af139aca 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -45,6 +45,7 @@ import org.apache.cloudstack.agent.directdownload.NfsDirectDownloadCommand; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; +import 
org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.CreateObjectAnswer; @@ -1832,4 +1833,10 @@ protected boolean isEnoughSpaceForDownloadTemplateOnTemporaryLocation(Long templ } return availableBytes >= templateSize; } + + @Override + public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) { + s_logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not currently applicable for KVMStorageProcessor"); + return new Answer(cmd,false,"Not currently applicable for KVMStorageProcessor"); + } } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java index 7915586fca3f..dd58bb573d94 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java @@ -24,6 +24,7 @@ import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; +import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.CreateObjectAnswer; @@ -826,6 +827,12 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) return null; } + @Override + public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) { + LOGGER.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for Ovm3StorageProcessor"); + return new Answer(cmd,false,"Not applicable used for Ovm3StorageProcessor"); + } + @Override public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { return null; diff --git a/plugins/hypervisors/vmware/pom.xml b/plugins/hypervisors/vmware/pom.xml index f5489488f355..d17bb68bdfff 100644 --- a/plugins/hypervisors/vmware/pom.xml +++ b/plugins/hypervisors/vmware/pom.xml @@ -55,6 +55,24 @@ ${cs.vmware.api.version} compile + + com.vmware.vapi + vapi-runtime + ${vapi.version} + compile + + + com.vmware.vapi + vapi-authentication + ${vapi.version} + compile + + + com.vmware.vsphereautomation.client + vsphereautomation-client-sdk + ${vsphereautomationsdk.version} + compile + com.sun.org.apache.xml.internal resolver @@ -72,5 +90,11 @@ wsdl4j wsdl4j + + com.cloud.com.vmware + vmware-pbm + ${cs.vmware.api.version} + compile + diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java index a2a086b0b6c7..07bdb520cb0c 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java @@ -197,6 +197,9 @@ protected VMwareGuru() { public static final ConfigKey VmwareEnableNestedVirtualizationPerVM = new ConfigKey(Boolean.class, "vmware.nested.virtualization.perVM", "Advanced", "false", "When set to true this will enable nested virtualization per vm", true, 
ConfigKey.Scope.Global, null); + public static final ConfigKey VmwareImplementAsIsAndReconsiliate = new ConfigKey(Boolean.class, "vmware.dont.orchestrate.but.reconsiliate", "Advanced", "false", + "When set to true OVAs will be deployed as is to then discover disk/net/etc", true, ConfigKey.Scope.Global, null); + @Override public HypervisorType getHypervisorType() { return HypervisorType.VMware; } @@ -204,7 +207,7 @@ protected VMwareGuru() { @Override public VirtualMachineTO implement(VirtualMachineProfile vm) { vmwareVmImplementer.setGlobalNestedVirtualisationEnabled(VmwareEnableNestedVirtualization.value()); vmwareVmImplementer.setGlobalNestedVPerVMEnabled(VmwareEnableNestedVirtualizationPerVM.value()); - return vmwareVmImplementer.implement(vm, toVirtualMachineTO(vm), getClusterId(vm.getId())); + return vmwareVmImplementer.implement(vm, toVirtualMachineTO(vm), getClusterId(vm.getId()), VmwareImplementAsIsAndReconsiliate.value()); } long getClusterId(long vmId) { @@ -261,8 +264,13 @@ long getClusterId(long vmId) { return new Pair(Boolean.FALSE, new Long(hostId)); } - if (destData.getObjectType() == DataObjectType.VOLUME && destStoreTO.getRole() == DataStoreRole.Primary && srcData.getObjectType() == DataObjectType.TEMPLATE - && srcStoreTO.getRole() == DataStoreRole.Primary) { + if (destData.getObjectType() == DataObjectType.VOLUME && destStoreTO.getRole() == DataStoreRole.Primary + && srcData.getObjectType() == DataObjectType.TEMPLATE && srcStoreTO.getRole() == DataStoreRole.Primary) { + needDelegation = false; + } else + // FR37 TODO remove or as possible improvement: check if the template is meant to be deployed as is and delegate if it isn't + if (destData.getObjectType() == DataObjectType.TEMPLATE && destStoreTO.getRole() == DataStoreRole.Primary + && srcData.getObjectType() == DataObjectType.TEMPLATE && srcStoreTO.getRole() == DataStoreRole.Image) { needDelegation = false; } else { needDelegation = true; @@ -373,7 +381,7 @@ private static String resolveNameInGuid(String guid) { } @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {VmwareReserveCpu, VmwareReserveMemory, VmwareEnableNestedVirtualization, VmwareEnableNestedVirtualizationPerVM}; + return new ConfigKey[] {VmwareReserveCpu, VmwareReserveMemory, VmwareEnableNestedVirtualization, VmwareEnableNestedVirtualizationPerVM, VmwareImplementAsIsAndReconsiliate}; } @Override public List finalizeExpungeVolumes(VirtualMachine vm) { @@ -596,7 +604,7 @@ private Long getTemplateSize(VirtualMachineMO template, String vmInternalName, M private VMTemplateVO createVMTemplateRecord(String vmInternalName, long guestOsId, long accountId) { Long nextTemplateId = vmTemplateDao.getNextInSequence(Long.class, "id"); VMTemplateVO templateVO = new VMTemplateVO(nextTemplateId, "Imported-from-" + vmInternalName, Storage.ImageFormat.OVA, false, false, false, Storage.TemplateType.USER, null, - false, 64, accountId, null, "Template imported from VM " + vmInternalName, false, guestOsId, false, HypervisorType.VMware, null, null, false, false, false); + false, 64, accountId, null, "Template imported from VM " + vmInternalName, false, guestOsId, false, HypervisorType.VMware, null, null, false, false, false, false); return vmTemplateDao.persist(templateVO); } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java index c9f8b0c337a2..c3bff663814d 100644 --- 
a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java @@ -33,15 +33,15 @@ import com.cloud.network.Networks; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.storage.DataStoreRole; import com.cloud.storage.GuestOSHypervisorVO; import com.cloud.storage.GuestOSVO; -import com.cloud.storage.TemplateOVFPropertyVO; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.Volume; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.GuestOSHypervisorDao; -import com.cloud.storage.dao.TemplateOVFPropertiesDao; +import com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.Pair; @@ -56,6 +56,7 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.BooleanUtils; import org.apache.log4j.Logger; @@ -70,7 +71,7 @@ import java.util.stream.Collectors; class VmwareVmImplementer { - private static final Logger LOG = Logger.getLogger(VmwareVmImplementer.class); + private static final Logger LOGGER = Logger.getLogger(VmwareVmImplementer.class); @Inject DomainRouterDao domainRouterDao; @@ -89,10 +90,10 @@ class VmwareVmImplementer { @Inject PrimaryDataStoreDao storagePoolDao; @Inject - TemplateOVFPropertiesDao templateOVFPropertiesDao; - @Inject VMTemplatePoolDao templateStoragePoolDao; @Inject + VMTemplateDetailsDao templateDetailsDao; + @Inject VmwareManager vmwareMgr; private Boolean globalNestedVirtualisationEnabled; @@ -114,12 +115,19 @@ void setGlobalNestedVPerVMEnabled(Boolean globalNestedVPerVMEnabled) { this.globalNestedVPerVMEnabled = globalNestedVPerVMEnabled; } - VirtualMachineTO implement(VirtualMachineProfile vm, VirtualMachineTO to, long clusterId) { - to.setBootloader(VirtualMachineTemplate.BootloaderType.HVM); - + VirtualMachineTO implement(VirtualMachineProfile vm, VirtualMachineTO to, long clusterId, boolean deployAsIs) { + to.setBootloader(VirtualMachineTemplate.BootloaderType.HVM); + deployAsIs |= vm.getTemplate().isDeployAsIs(); + HostVO host = hostDao.findById(vm.getVirtualMachine().getHostId()); + // FR37 if VmwareImplementAsIsAndReconsiliate add secondary storage or some other encoding of the OVA file to the start command, + // FR37 so the url for the original OVA can be used for deployment + if (deployAsIs) { + // FR37 we need to make sure the primary storage for the template is known and whether this is a new deployment + storeTemplateLocationInTO(vm, to, host.getId()); + } Map details = to.getDetails(); if (details == null) - details = new HashMap(); + details = new HashMap<>(); VirtualMachine.Type vmType = vm.getType(); boolean userVm = !(vmType.equals(VirtualMachine.Type.DomainRouter) || vmType.equals(VirtualMachine.Type.ConsoleProxy) || vmType.equals(VirtualMachine.Type.SecondaryStorageVm)); @@ -133,7 +141,7 @@ VirtualMachineTO implement(VirtualMachineProfile vm, VirtualMachineTO to, long c try { VirtualEthernetCardType.valueOf(nicDeviceType); } catch (Exception e) { - LOG.warn("Invalid NIC device type " + nicDeviceType + " 
is specified in VM details, switch to default E1000"); + LOGGER.warn("Invalid NIC device type " + nicDeviceType + " is specified in VM details, switch to default E1000"); details.put(VmDetailConstants.NIC_ADAPTER, VirtualEthernetCardType.E1000.toString()); } } @@ -145,7 +153,7 @@ VirtualMachineTO implement(VirtualMachineProfile vm, VirtualMachineTO to, long c try { VirtualEthernetCardType.valueOf(nicDeviceType); } catch (Exception e) { - LOG.warn("Invalid NIC device type " + nicDeviceType + " is specified in VM details, switch to default E1000"); + LOGGER.warn("Invalid NIC device type " + nicDeviceType + " is specified in VM details, switch to default E1000"); details.put(VmDetailConstants.NIC_ADAPTER, VirtualEthernetCardType.E1000.toString()); } } @@ -172,7 +180,7 @@ VirtualMachineTO implement(VirtualMachineProfile vm, VirtualMachineTO to, long c GuestOSVO guestOS = guestOsDao.findByIdIncludingRemoved(vm.getVirtualMachine().getGuestOSId()); to.setOs(guestOS.getDisplayName()); to.setHostName(vm.getHostName()); - HostVO host = hostDao.findById(vm.getVirtualMachine().getHostId()); + GuestOSHypervisorVO guestOsMapping = null; if (host != null) { guestOsMapping = guestOsHypervisorDao.findByOsIdAndHypervisor(guestOS.getId(), Hypervisor.HypervisorType.VMware.toString(), host.getHypervisorVersion()); @@ -184,18 +192,66 @@ VirtualMachineTO implement(VirtualMachineProfile vm, VirtualMachineTO to, long c } List ovfProperties = getOvfPropertyList(vm, details); - handleOvfProperties(vm, to, details, ovfProperties); + // FR37 TODO add required nics here or let the start executor copy them from the base template? + setDetails(to, details); return to; } + private void storeTemplateLocationInTO(VirtualMachineProfile vm, VirtualMachineTO to, long hostId) { + VMTemplateStoragePoolVO templateStoragePoolVO = templateStoragePoolDao.findByHostTemplate(hostId, vm.getTemplate().getId()); + if (templateStoragePoolVO != null) { + long storePoolId = templateStoragePoolVO.getDataStoreId(); + + StoragePoolVO storagePoolVO = storagePoolDao.findById(storePoolId); + String relativeLocation = storagePoolVO.getUuid(); + + String templateName = templateStoragePoolVO.getInstallPath(); + createDiskTOForTemplateOVA(vm, storagePoolVO); + + to.setTemplateName(templateName); + to.setTemplateLocation(relativeLocation); + to.setTemplatePrimaryStoreUuid(storagePoolVO.getUuid()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("deploying '%s' OVA as is from %s.", templateName, relativeLocation)); + } + } + } + + private void createDiskTOForTemplateOVA(VirtualMachineProfile vm, StoragePoolVO storagePoolVO) { + // FR37 store template in diskto with the pool as location + DiskTO disk = new DiskTO(); + TemplateObjectTO data = new TemplateObjectTO(vm.getTemplate()); + DataStoreTO store = new DataStoreTO() { + @Override public DataStoreRole getRole() { + return DataStoreRole.ImageCache; + } + + @Override public String getUuid() { + return storagePoolVO.getUuid(); + } + + @Override public String getUrl() { + return null; + } + + @Override public String getPathSeparator() { + return "/"; + } + }; + data.setDataStore(store); + disk.setData(data); + + vm.addDisk(disk); + } + private void setDetails(VirtualMachineTO to, Map details) { - if (LOG.isTraceEnabled()) { + if (LOGGER.isTraceEnabled()) { for (String key: details.keySet()) { - LOG.trace(String.format("Detail for VM %s: %s => %s",to.getName(), key, details.get(key))); + LOGGER.trace(String.format("Detail for VM %s: %s => %s",to.getName(), key, details.get(key))); } } 
to.setDetails(details); @@ -290,12 +346,8 @@ private void handleOvfProperties(VirtualMachineProfile vm, VirtualMachineTO to, if (CollectionUtils.isNotEmpty(ovfProperties)) { removeOvfPropertiesFromDetails(ovfProperties, details); String templateInstallPath = null; - List rootDiskList = vm.getDisks().stream().filter(x -> x.getType() == Volume.Type.ROOT).collect(Collectors.toList()); - if (rootDiskList.size() != 1) { - throw new CloudRuntimeException("Did not find only one root disk for VM " + vm.getHostName()); - } + DiskTO rootDiskTO = getRootDiskTOFromVM(vm); - DiskTO rootDiskTO = rootDiskList.get(0); DataStoreTO dataStore = rootDiskTO.getData().getDataStore(); StoragePoolVO storagePoolVO = storagePoolDao.findByUuid(dataStore.getUuid()); long dataCenterId = storagePoolVO.getDataCenterId(); @@ -317,19 +369,39 @@ private void handleOvfProperties(VirtualMachineProfile vm, VirtualMachineTO to, } } + private DiskTO getRootDiskTOFromVM(VirtualMachineProfile vm) { + DiskTO rootDiskTO; + List rootDiskList; + rootDiskList = vm.getDisks().stream().filter(x -> x.getType() == Volume.Type.ROOT).collect(Collectors.toList()); + if (rootDiskList.size() != 1) { + if (vm.getTemplate().isDeployAsIs()) { // FR37 dirty hack to avoid ISOs, the start command should have added a root disk to + rootDiskList = vm.getDisks().stream().filter(x -> x.getType() == null).collect(Collectors.toList()); + if (rootDiskList.size() < 1) { + throw new CloudRuntimeException("Did not find a template to serve as root disk for VM " + vm.getHostName()); + } + } else { + throw new CloudRuntimeException("Did not find only one root disk for VM " + vm.getHostName()); + } + } + rootDiskTO = rootDiskList.get(0); + return rootDiskTO; + } + + // TODO FR37 phase out ovf properties in favor of template details; propertyTO remains private List getOvfPropertyList(VirtualMachineProfile vm, Map details) { List ovfProperties = new ArrayList(); for (String detailKey : details.keySet()) { - if (detailKey.startsWith(ApiConstants.OVF_PROPERTIES)) { - String ovfPropKey = detailKey.replace(ApiConstants.OVF_PROPERTIES + "-", ""); - TemplateOVFPropertyVO templateOVFPropertyVO = templateOVFPropertiesDao.findByTemplateAndKey(vm.getTemplateId(), ovfPropKey); - if (templateOVFPropertyVO == null) { - LOG.warn(String.format("OVF property %s not found on template, discarding", ovfPropKey)); + if (detailKey.startsWith(ApiConstants.ACS_PROPERTY)) { + OVFPropertyTO propertyTO = templateDetailsDao.findPropertyByTemplateAndKey(vm.getTemplateId(), detailKey); + String vmPropertyKey = detailKey.replace(ApiConstants.ACS_PROPERTY + "-", ""); + if (propertyTO == null) { + LOGGER.warn(String.format("OVF property %s not found on template, discarding", vmPropertyKey)); continue; } - String ovfValue = details.get(detailKey); - boolean isPassword = templateOVFPropertyVO.isPassword(); - OVFPropertyTO propertyTO = new OVFPropertyTO(ovfPropKey, ovfValue, isPassword); + // FR37 the key is without acs prefix (in the TO) + propertyTO.setKey(vmPropertyKey); + // FR37 if the UI send the whole json we should just copy it otherwise take the json from the template and set the value on it + propertyTO.setValue(details.get(detailKey)); ovfProperties.add(propertyTO); } } @@ -389,7 +461,7 @@ Remove OVF properties from details to be sent to hypervisor (avoid duplicate dat private void removeOvfPropertiesFromDetails(List ovfProperties, Map details) { for (OVFPropertyTO propertyTO : ovfProperties) { String key = propertyTO.getKey(); - details.remove(ApiConstants.OVF_PROPERTIES + "-" + key); + 
details.remove(ApiConstants.PROPERTIES + "-" + key); } } @@ -405,8 +477,8 @@ protected void configureNestedVirtualization(Map details, Virtua Boolean globalNestedVPerVMEnabled = getGlobalNestedVPerVMEnabled(); Boolean shouldEnableNestedVirtualization = shouldEnableNestedVirtualization(globalNestedVirtualisationEnabled, globalNestedVPerVMEnabled, localNestedV); - if(LOG.isDebugEnabled()) { - LOG.debug(String.format( + if(LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format( "Due to '%B'(globalNestedVirtualisationEnabled) and '%B'(globalNestedVPerVMEnabled) I'm adding a flag with value %B to the vm configuration for Nested Virtualisation.", globalNestedVirtualisationEnabled, globalNestedVPerVMEnabled, diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareDatacenterService.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareDatacenterService.java index 53792539ee8e..2e3f98d795a5 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareDatacenterService.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareDatacenterService.java @@ -17,17 +17,19 @@ package com.cloud.hypervisor.vmware; -import java.util.List; - +import com.cloud.dc.VsphereStoragePolicy; +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.ResourceInUseException; +import com.cloud.utils.component.PluggableService; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.api.command.admin.zone.AddVmwareDcCmd; +import org.apache.cloudstack.api.command.admin.zone.ImportVsphereStoragePoliciesCmd; import org.apache.cloudstack.api.command.admin.zone.ListVmwareDcsCmd; +import org.apache.cloudstack.api.command.admin.zone.ListVsphereStoragePoliciesCmd; import org.apache.cloudstack.api.command.admin.zone.RemoveVmwareDcCmd; import org.apache.cloudstack.api.command.admin.zone.UpdateVmwareDcCmd; -import com.cloud.exception.DiscoveryException; -import com.cloud.exception.ResourceInUseException; -import com.cloud.utils.component.PluggableService; -import com.cloud.utils.exception.CloudRuntimeException; +import java.util.List; public interface VmwareDatacenterService extends PluggableService { @@ -38,4 +40,9 @@ public interface VmwareDatacenterService extends PluggableService { boolean removeVmwareDatacenter(RemoveVmwareDcCmd cmd) throws IllegalArgumentException, ResourceInUseException; List listVmwareDatacenters(ListVmwareDcsCmd cmd) throws IllegalArgumentException, CloudRuntimeException; + + List importVsphereStoragePolicies(ImportVsphereStoragePoliciesCmd cmd); + + List listVsphereStoragePolicies(ListVsphereStoragePoliciesCmd cmd); + } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/ContentLibraryService.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/ContentLibraryService.java new file mode 100644 index 000000000000..e262fc91edc7 --- /dev/null +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/ContentLibraryService.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.hypervisor.vmware.manager; + +import com.cloud.hypervisor.vmware.mo.DatastoreMO; +import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; +import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; +import com.cloud.hypervisor.vmware.util.VmwareContext; + +public interface ContentLibraryService { + boolean createContentLibrary(VmwareContext context, String primaryDatastoreName) throws Exception; + + boolean deleteContentLibrary(VmwareContext context, String primaryDatastoreName) throws Exception; + + boolean importOvf(VmwareContext context, String sourceOvfTemplateUri, String sourceOvfTemplateName, String targetDatastoreName, String targetOvfTemplateName) throws Exception; + + VirtualMachineMO deployOvf(VmwareContext context, String sourceovfTemplateName, String vmNameToDeploy, VmwareHypervisorHost targetHypervisorHost, DatastoreMO primaryDataStoreMO) throws Exception; +} diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/ContentLibraryServiceImpl.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/ContentLibraryServiceImpl.java new file mode 100644 index 000000000000..ef27edbfbe69 --- /dev/null +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/ContentLibraryServiceImpl.java @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
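+// Delegates content library operations to ContentLibraryHelper: the library is named after the primary datastore,
+// importOvf treats an AlreadyExists error as success (see the TODO below), and deployOvf wraps the deployed VM's
+// ManagedObjectReference in a VirtualMachineMO.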
+package com.cloud.hypervisor.vmware.manager; + +import com.vmware.vapi.std.errors.AlreadyExists; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.hypervisor.vmware.mo.DatastoreMO; +import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; +import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; +import com.cloud.hypervisor.vmware.util.VmwareContext; +import com.cloud.hypervisor.vmware.util.ContentLibraryHelper; +import com.cloud.utils.Pair; + +import com.vmware.vim25.ManagedObjectReference; + +@Component +public class ContentLibraryServiceImpl implements ContentLibraryService { + private static final Logger LOGGER = Logger.getLogger(ContentLibraryServiceImpl.class); + + public ContentLibraryServiceImpl() { + } + + public boolean createContentLibrary(VmwareContext context, String primaryDatastoreName) throws Exception { + return ContentLibraryHelper.createContentLibrary(context, primaryDatastoreName, primaryDatastoreName); + } + + public boolean deleteContentLibrary(VmwareContext context, String primaryDatastoreName) throws Exception { + return ContentLibraryHelper.deleteContentLibrary(context, primaryDatastoreName, primaryDatastoreName); + } + + /** + * Install an OVF in a content library if it doesn't already exist. + */ + public boolean importOvf(VmwareContext context, String sourceOvfTemplateUri, String sourceOvfTemplateName, String targetDatastoreName, String targetOvfTemplateName) throws Exception { + try { + return ContentLibraryHelper.importOvfFromDatastore(context, sourceOvfTemplateUri, sourceOvfTemplateName, targetDatastoreName, targetOvfTemplateName); + } catch (AlreadyExists e) { + // TODO this is not safe, the already existing item could be corrupt or not the intended one + return true; + } + } + + public VirtualMachineMO deployOvf(VmwareContext context, String sourceovfTemplateName, String vmNameToDeploy, VmwareHypervisorHost targetHypervisorHost, DatastoreMO primaryDataStoreMO) throws Exception { + String dsName = primaryDataStoreMO.getName(); + ManagedObjectReference morDatastore = primaryDataStoreMO.getMor(); + ManagedObjectReference morHostResourcePool = targetHypervisorHost.getHyperHostOwnerResourcePool(); + Pair deployResult = ContentLibraryHelper.deployOvf(context, dsName, sourceovfTemplateName, vmNameToDeploy, morHostResourcePool, morDatastore); + if (deployResult.first() == null) { + LOGGER.error("Deployment failed for the VM: " + vmNameToDeploy + ", due to error: " + deployResult.second()); + throw new Exception("Deployment failed for the VM: " + vmNameToDeploy + ", due to error: " + deployResult.second()); + } + + VirtualMachineMO vmMo = new VirtualMachineMO(context, deployResult.first()); + return vmMo; + } +} diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index c4b939a11493..6c45bd517be5 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -16,43 +16,6 @@ // under the License. 
package com.cloud.hypervisor.vmware.manager; -import java.io.File; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URL; -import java.rmi.RemoteException; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.Executors; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.api.command.admin.zone.AddVmwareDcCmd; -import org.apache.cloudstack.api.command.admin.zone.ListVmwareDcsCmd; -import org.apache.cloudstack.api.command.admin.zone.RemoveVmwareDcCmd; -import org.apache.cloudstack.api.command.admin.zone.UpdateVmwareDcCmd; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.Configurable; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.jobs.impl.AsyncJobManagerImpl; -import org.apache.cloudstack.management.ManagementServerHost; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; - import com.amazonaws.util.CollectionUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -70,9 +33,12 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.ClusterVSMMapVO; import com.cloud.dc.DataCenterVO; +import com.cloud.dc.VsphereStoragePolicy; +import com.cloud.dc.VsphereStoragePolicyVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.ClusterVSMMapDao; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.VsphereStoragePolicyDao; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; import com.cloud.exception.DiscoveredWithErrorException; @@ -102,6 +68,7 @@ import com.cloud.hypervisor.vmware.mo.HostFirewallSystemMO; import com.cloud.hypervisor.vmware.mo.HostMO; import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; +import com.cloud.hypervisor.vmware.mo.PbmProfileManagerMO; import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; import com.cloud.hypervisor.vmware.mo.VirtualSwitchType; import com.cloud.hypervisor.vmware.mo.VmwareHostType; @@ -142,8 +109,48 @@ import com.cloud.vm.dao.UserVmCloneSettingDao; import com.cloud.vm.dao.VMInstanceDao; import com.google.common.base.Strings; +import com.vmware.pbm.PbmProfile; import com.vmware.vim25.AboutInfo; import com.vmware.vim25.ManagedObjectReference; +import org.apache.cloudstack.api.command.admin.zone.AddVmwareDcCmd; +import org.apache.cloudstack.api.command.admin.zone.ImportVsphereStoragePoliciesCmd; +import org.apache.cloudstack.api.command.admin.zone.ListVmwareDcsCmd; +import org.apache.cloudstack.api.command.admin.zone.ListVsphereStoragePoliciesCmd; +import org.apache.cloudstack.api.command.admin.zone.RemoveVmwareDcCmd; +import org.apache.cloudstack.api.command.admin.zone.UpdateVmwareDcCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import 
org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobManagerImpl; +import org.apache.cloudstack.management.ManagementServerHost; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.log4j.Logger; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.rmi.RemoteException; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; public class VmwareManagerImpl extends ManagerBase implements VmwareManager, VmwareStorageMount, Listener, VmwareDatacenterService, Configurable { private static final Logger s_logger = Logger.getLogger(VmwareManagerImpl.class); @@ -208,6 +215,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw private UserVmCloneSettingDao cloneSettingDao; @Inject private TemplateManager templateManager; + @Inject + private VsphereStoragePolicyDao vsphereStoragePolicyDao; private String _mountParent; private StorageLayer _storage; @@ -1046,6 +1055,8 @@ public List> getCommands() { cmdList.add(UpdateVmwareDcCmd.class); cmdList.add(RemoveVmwareDcCmd.class); cmdList.add(ListVmwareDcsCmd.class); + cmdList.add(ImportVsphereStoragePoliciesCmd.class); + cmdList.add(ListVsphereStoragePoliciesCmd.class); return cmdList; } @@ -1173,6 +1184,7 @@ public VmwareDatacenterVO addVmwareDatacenter(AddVmwareDcCmd cmd) throws Resourc } context = null; } + importVsphereStoragePoliciesInternal(zoneId, vmwareDc.getId()); return vmwareDc; } @@ -1233,6 +1245,7 @@ public VmwareDatacenter doInTransaction(TransactionStatus status) { hostDetailsDao.persist(host.getId(), hostDetails); } } + importVsphereStoragePoliciesInternal(zoneId, vmwareDc.getId()); return vmwareDc; } return null; @@ -1383,6 +1396,79 @@ private void doesZoneExist(Long zoneId) throws InvalidParameterValueException { } } + @Override + public List importVsphereStoragePolicies(ImportVsphereStoragePoliciesCmd cmd) { + Long zoneId = cmd.getZoneId(); + // Validate Id of zone + doesZoneExist(zoneId); + + final VmwareDatacenterZoneMapVO vmwareDcZoneMap = vmwareDatacenterZoneMapDao.findByZoneId(zoneId); + // Check if zone is associated with VMware DC + if (vmwareDcZoneMap == null) { + throw new CloudRuntimeException("Zone " + zoneId + " is not associated with any VMware datacenter."); + } + + final long vmwareDcId = vmwareDcZoneMap.getVmwareDcId(); + return importVsphereStoragePoliciesInternal(zoneId, vmwareDcId); + } + + public List importVsphereStoragePoliciesInternal(Long zoneId, Long vmwareDcId) { + + // Get DC associated with this zone + VmwareDatacenterVO vmwareDatacenter = vmwareDcDao.findById(vmwareDcId); + String vmwareDcName = vmwareDatacenter.getVmwareDatacenterName(); + String vCenterHost = vmwareDatacenter.getVcenterHost(); + String userName = 
vmwareDatacenter.getUser(); + String password = vmwareDatacenter.getPassword(); + List storageProfiles = null; + try { + s_logger.debug(String.format("Importing vSphere Storage Policies for the vmware DC %d in zone %d", vmwareDcId, zoneId)); + VmwareContext context = VmwareContextFactory.getContext(vCenterHost, userName, password); + PbmProfileManagerMO profileManagerMO = new PbmProfileManagerMO(context); + storageProfiles = profileManagerMO.getStorageProfiles(); + s_logger.debug(String.format("Import vSphere Storage Policies for the vmware DC %d in zone %d is successful", vmwareDcId, zoneId)); + } catch (Exception e) { + String msg = String.format("Unable to list storage profiles from DC %s due to : %s", vmwareDcName, VmwareHelper.getExceptionMessage(e)); + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } + + for (PbmProfile storageProfile : storageProfiles) { + VsphereStoragePolicyVO storagePolicyVO = vsphereStoragePolicyDao.findByPolicyId(zoneId, storageProfile.getProfileId().getUniqueId()); + if (storagePolicyVO == null) { + storagePolicyVO = new VsphereStoragePolicyVO(zoneId, storageProfile.getProfileId().getUniqueId(), storageProfile.getName(), storageProfile.getDescription()); + vsphereStoragePolicyDao.persist(storagePolicyVO); + } else { + storagePolicyVO.setDescription(storageProfile.getDescription()); + storagePolicyVO.setName(storageProfile.getName()); + vsphereStoragePolicyDao.update(storagePolicyVO.getId(), storagePolicyVO); + } + } + + List allStoragePolicies = vsphereStoragePolicyDao.listAll(); + List finalStorageProfiles = storageProfiles; + List needToMarkRemoved = allStoragePolicies.stream() + .filter(existingPolicy -> !finalStorageProfiles.stream() + .anyMatch(storageProfile -> storageProfile.getProfileId().getUniqueId().equals(existingPolicy.getPolicyId()))) + .collect(Collectors.toList()); + + for (VsphereStoragePolicyVO storagePolicy : needToMarkRemoved) { + vsphereStoragePolicyDao.remove(storagePolicy.getId()); + } + + List storagePolicies = vsphereStoragePolicyDao.listAll(); + return storagePolicies; + } + + @Override + public List listVsphereStoragePolicies(ListVsphereStoragePoliciesCmd cmd) { + List storagePolicies = vsphereStoragePolicyDao.findByZoneId(cmd.getZoneId()); + if (storagePolicies != null) { + return new ArrayList<>(storagePolicies); + } + return Collections.emptyList(); + } + @Override public boolean hasNexusVSM(Long clusterId) { ClusterVSMMapVO vsmMapVo = null; diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index f17d613125c8..4c453cee19ca 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -21,6 +21,7 @@ import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.OutputStreamWriter; +import java.nio.charset.Charset; import java.rmi.RemoteException; import java.util.ArrayList; import java.util.HashMap; @@ -240,6 +241,7 @@ public String createOvaForVolume(VolumeObjectTO volume, int archiveTimeout) { } @Override + @Deprecated(since = "ages, look at VmwareStorageProcessor for this logic", forRemoval = true) public Answer execute(VmwareHostService hostService, PrimaryStorageDownloadCommand cmd) { String secondaryStorageUrl = cmd.getSecondaryStorageUrl(); 
assert (secondaryStorageUrl != null); @@ -273,7 +275,7 @@ public Answer execute(VmwareHostService hostService, PrimaryStorageDownloadComma try { VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); - String templateUuidName = UUID.nameUUIDFromBytes((templateName + "@" + cmd.getPoolUuid() + "-" + hyperHost.getMor().getValue()).getBytes("UTF-8")).toString(); + String templateUuidName = UUID.nameUUIDFromBytes((templateName + "@" + cmd.getPoolUuid() + "-" + hyperHost.getMor().getValue()).getBytes(Charset.defaultCharset())).toString(); // truncate template name to 32 chars to ensure they work well with vSphere API's. templateUuidName = templateUuidName.replace("-", ""); @@ -591,7 +593,7 @@ private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, } String vmName = templateUuid; - hyperHost.importVmFromOVF(srcFileName, vmName, datastoreMo, "thin"); + hyperHost.importVmFromOVF(srcFileName, vmName, datastoreMo, "thin", true); VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); if (vmMo == null) { @@ -801,7 +803,7 @@ private void postCreatePrivateTemplate(String installFullPath, long templateId, // TODO a bit ugly here BufferedWriter out = null; try { - out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"), "UTF-8")); + out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"), Charset.defaultCharset())); out.write("filename=" + templateName + ".ova"); out.newLine(); out.write("description="); @@ -841,7 +843,7 @@ private void writeMetaOvaForTemplate(String installFullPath, String ovfFilename, // TODO a bit ugly here BufferedWriter out = null; try { - out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"), "UTF-8")); + out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"), Charset.defaultCharset())); out.write("ova.filename=" + templateName + ".ova"); out.newLine(); out.write("version=1.0"); @@ -912,7 +914,7 @@ private void restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, Datasto VirtualMachineMO clonedVm = null; try { - hyperHost.importVmFromOVF(srcOVFFileName, newVolumeName, primaryDsMo, "thin"); + hyperHost.importVmFromOVF(srcOVFFileName, newVolumeName, primaryDsMo, "thin", true); clonedVm = hyperHost.findVmOnHyperHost(newVolumeName); if (clonedVm == null) { throw new Exception("Unable to create container VM for volume creation"); @@ -987,7 +989,7 @@ private void exportVolumeToSecondaryStorage(VirtualMachineMO vmMo, String volume private Pair copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, String vmName, long volumeId, String poolId, String volumePath, String secStorageUrl, String workerVmName, Integer nfsVersion) throws Exception { - String volumeFolder = String.valueOf(volumeId) + "/"; + String volumeFolder = volumeId + "/"; VirtualMachineMO workerVm = null; VirtualMachineMO vmMo = null; String exportName = UUID.randomUUID().toString(); @@ -1052,7 +1054,7 @@ private String getVolumePathInDatastore(DatastoreMO dsMo, String volumeFileName, private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName, Integer nfsVersion) throws Exception { - String volumeFolder = String.valueOf(volumeId) + "/"; + String volumeFolder = volumeId + "/"; String newVolume = 
UUID.randomUUID().toString().replace("-", ""); restoreVolumeFromSecStorage(hyperHost, dsMo, newVolume, secStorageUrl, "volumes/" + volumeFolder, exportName, nfsVersion); @@ -1066,7 +1068,7 @@ private String createOVAFromMetafile(String metafileName, int archiveTimeout) th Properties props = null; String ovaFileName = ""; s_logger.info("Creating OVA using MetaFile: " + metafileName); - try (FileInputStream strm = new FileInputStream(ova_metafile);) { + try (FileInputStream strm = new FileInputStream(ova_metafile)) { s_logger.info("loading properties from ova meta file: " + metafileName); props = new Properties(); @@ -1192,7 +1194,7 @@ public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSna List tasks = context.getVimClient().getDynamicProperty(taskmgr, "recentTask"); for (ManagedObjectReference taskMor : tasks) { - TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info")); + TaskInfo info = context.getVimClient().getDynamicProperty(taskMor, "info"); if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("CreateSnapshot_Task")) { if (!(info.getState().equals(TaskInfoState.SUCCESS) || info.getState().equals(TaskInfoState.ERROR))) { @@ -1407,7 +1409,7 @@ public RevertToVMSnapshotAnswer execute(VmwareHostService hostService, RevertToV List tasks = context.getVimClient().getDynamicProperty(taskmgr, "recentTask"); for (ManagedObjectReference taskMor : tasks) { - TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info")); + TaskInfo info = context.getVimClient().getDynamicProperty(taskMor, "info"); if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("RevertToSnapshot_Task")) { s_logger.debug("There is already a VM snapshot task running, wait for it"); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/StartCommandExecutor.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/StartCommandExecutor.java new file mode 100644 index 000000000000..c46154682945 --- /dev/null +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/StartCommandExecutor.java @@ -0,0 +1,2247 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
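+// Executes the resource-side StartCommand for VmwareResource. When the spec carries a template location
+// (deploy-as-is), the VM is expected to be cloned from the OVA template on primary storage and its spec disks are
+// mapped onto the cloned VM's disks; otherwise the usual disk/nic/ISO device setup below is performed.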
+package com.cloud.hypervisor.vmware.resource; + +import java.io.File; +import java.rmi.RemoteException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import com.cloud.agent.api.to.DataTO; +import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.storage.configdrive.ConfigDrive; +import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.log4j.Logger; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.StartAnswer; +import com.cloud.agent.api.StartCommand; +import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DiskTO; +import com.cloud.agent.api.to.NfsTO; +import com.cloud.agent.api.to.NicTO; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.configuration.Resource; +import com.cloud.hypervisor.vmware.VmwareResourceException; +import com.cloud.hypervisor.vmware.manager.VmwareManager; +import com.cloud.hypervisor.vmware.mo.CustomFieldConstants; +import com.cloud.hypervisor.vmware.mo.DatacenterMO; +import com.cloud.hypervisor.vmware.mo.DatastoreFile; +import com.cloud.hypervisor.vmware.mo.DatastoreMO; +import com.cloud.hypervisor.vmware.mo.DiskControllerType; +import com.cloud.hypervisor.vmware.mo.HostMO; +import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; +import com.cloud.hypervisor.vmware.mo.TaskMO; +import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; +import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder; +import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; +import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; +import com.cloud.hypervisor.vmware.util.VmwareContext; +import com.cloud.hypervisor.vmware.util.VmwareHelper; +import com.cloud.network.Networks; +import com.cloud.storage.Storage; +import com.cloud.storage.Volume; +import com.cloud.storage.resource.VmwareStorageLayoutHelper; +import com.cloud.storage.resource.VmwareStorageProcessor; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.nicira.nvp.plugin.NiciraNvpApiVersion; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VmDetailConstants; +import com.vmware.vim25.BoolPolicy; +import com.vmware.vim25.DVPortConfigInfo; +import com.vmware.vim25.DVPortConfigSpec; +import com.vmware.vim25.DasVmPriority; +import com.vmware.vim25.DistributedVirtualPort; +import com.vmware.vim25.DistributedVirtualSwitchPortConnection; +import com.vmware.vim25.DistributedVirtualSwitchPortCriteria; +import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.OptionValue; +import com.vmware.vim25.VMwareDVSPortSetting; +import com.vmware.vim25.VirtualDevice; +import com.vmware.vim25.VirtualDeviceBackingInfo; +import com.vmware.vim25.VirtualDeviceConfigSpec; +import com.vmware.vim25.VirtualDeviceConfigSpecOperation; +import com.vmware.vim25.VirtualDisk; +import com.vmware.vim25.VirtualEthernetCard; +import 
com.vmware.vim25.VirtualEthernetCardDistributedVirtualPortBackingInfo; +import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo; +import com.vmware.vim25.VirtualEthernetCardOpaqueNetworkBackingInfo; +import com.vmware.vim25.VirtualMachineBootOptions; +import com.vmware.vim25.VirtualMachineConfigSpec; +import com.vmware.vim25.VirtualMachineFileInfo; +import com.vmware.vim25.VirtualMachineFileLayoutEx; +import com.vmware.vim25.VirtualMachineGuestOsIdentifier; +import com.vmware.vim25.VirtualUSBController; +import com.vmware.vim25.VmConfigInfo; +import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec; + +class StartCommandExecutor { + private static final Logger LOGGER = Logger.getLogger(StartCommandExecutor.class); + + private final VmwareResource vmwareResource; + + public StartCommandExecutor(VmwareResource vmwareResource) { + this.vmwareResource = vmwareResource; + } + + // FR37 the monster blob god method: reduce to 320 so far and counting + protected StartAnswer execute(StartCommand cmd) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info("Executing resource StartCommand: " + vmwareResource.getGson().toJson(cmd)); + } + + VirtualMachineTO vmSpec = cmd.getVirtualMachine(); + + VirtualMachineData existingVm = null; + + Pair names = composeVmNames(vmSpec); + String vmInternalCSName = names.first(); + String vmNameOnVcenter = names.second(); + + DiskTO rootDiskTO = null; + + Pair controllerInfo = getDiskControllerInfo(vmSpec); + + Boolean systemVm = vmSpec.getType().isUsedBySystem(); + + VmwareContext context = vmwareResource.getServiceContext(); + DatacenterMO dcMo = null; + try { + VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME); + VmwareHypervisorHost hyperHost = vmwareResource.getHyperHost(context); + dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter()); + + // checkIfVmExistsInVcenter(vmInternalCSName, vmNameOnVcenter, dcMo); + // FR37 - We expect VM to be already cloned and available at this point + VirtualMachineMO vmMo = dcMo.findVm(vmInternalCSName); + if (vmMo == null) { + vmMo = dcMo.findVm(vmNameOnVcenter); + } + + DiskTO[] specDisks = vmSpec.getDisks(); + boolean installAsIs = StringUtils.isNotEmpty(vmSpec.getTemplateLocation()); + // FR37 if startcommand contains enough info: a template url/-location and flag; deploy OVA as is + if (vmMo == null && installAsIs) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("deploying OVA from %s as is", vmSpec.getTemplateLocation())); + } + getStorageProcessor().cloneVMFromTemplate(vmSpec.getTemplateName(), vmInternalCSName, vmSpec.getTemplatePrimaryStoreUuid()); + vmMo = dcMo.findVm(vmInternalCSName); + mapSpecDisksToClonedDisks(vmMo, vmInternalCSName, specDisks); + } + + // VM may not have been on the same host, relocate to expected host + if (vmMo != null) { + vmMo.relocate(hyperHost.getMor()); + // Get updated MO + vmMo = hyperHost.findVmOnHyperHost(vmMo.getVmName()); + } + + String guestOsId = translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs(), vmSpec.getPlatformEmulator()).value(); + DiskTO[] disks = validateDisks(specDisks); + NicTO[] nics = vmSpec.getNics(); + + // FIXME: disks logic here, why is disks/volumes during copy not set with pool ID? 
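+ // For the deploy-as-is path an empty dataStoresDetails map is tolerated below, since the cloned VM presumably
+ // already carries its own disk/datastore references; only the non-as-is path insists on resolving the datastore
+ // that hosts the root volume.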
+ // FR37 TODO if deployasis a new VM no datastores are known (yet) and we need to get the data store from the tvmspec content library / template location + HashMap> dataStoresDetails = inferDatastoreDetailsFromDiskInfo(hyperHost, context, disks, cmd); + if ((dataStoresDetails == null) || (dataStoresDetails.isEmpty())) { + String msg = "Unable to locate datastore details of the volumes to be attached"; + LOGGER.error(msg); + // throw a more specific Exception + // FR37 - this may not be necessary the cloned VM will have disks and knowledge of datastore paths/location? + // throw new Exception(msg); + } + + // FR37 - this may need checking, if the first datastore is the right one - ideally it should be datastore where first disk is hosted + DatastoreMO dsRootVolumeIsOn = null; // + if (! installAsIs) { + dsRootVolumeIsOn = getDatastoreThatRootDiskIsOn(dataStoresDetails, disks); + + if (dsRootVolumeIsOn == null) { + String msg = "Unable to locate datastore details of root volume"; + LOGGER.error(msg); + // throw a more specific Exception + throw new VmwareResourceException(msg); + } + } + + VirtualMachineDiskInfoBuilder diskInfoBuilder = null; + VirtualDevice[] nicDevices = null; + DiskControllerType systemVmScsiControllerType = DiskControllerType.lsilogic; + int firstScsiControllerBusNum = 0; + int numScsiControllerForSystemVm = 1; + boolean hasSnapshot = false; + if (vmMo != null) { + PrepareRunningVMForConfiguration prepareRunningVMForConfiguration = new PrepareRunningVMForConfiguration(vmInternalCSName, controllerInfo, systemVm, vmMo, + systemVmScsiControllerType, firstScsiControllerBusNum, numScsiControllerForSystemVm); + prepareRunningVMForConfiguration.invoke(); + diskInfoBuilder = prepareRunningVMForConfiguration.getDiskInfoBuilder(); + nicDevices = prepareRunningVMForConfiguration.getNicDevices(); + hasSnapshot = prepareRunningVMForConfiguration.isHasSnapshot(); + } else { + ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter(); + assert (morDc != null); + + vmMo = hyperHost.findVmOnPeerHyperHost(vmInternalCSName); + if (vmMo != null) { + VirtualMachineRecycler virtualMachineRecycler = new VirtualMachineRecycler(vmInternalCSName, controllerInfo, systemVm, hyperHost, vmMo, + systemVmScsiControllerType, firstScsiControllerBusNum, numScsiControllerForSystemVm); + virtualMachineRecycler.invoke(); + diskInfoBuilder = virtualMachineRecycler.getDiskInfoBuilder(); + hasSnapshot = virtualMachineRecycler.isHasSnapshot(); + nicDevices = virtualMachineRecycler.getNicDevices(); + } else { + // FR37 we just didn't find a VM by name 'vmInternalCSName', so why is this here? + existingVm = unregisterOnOtherClusterButHoldOnToOldVmData(vmInternalCSName, dcMo); + + createNewVm(context, vmSpec, installAsIs, vmInternalCSName, vmNameOnVcenter, controllerInfo, systemVm, mgr, hyperHost, guestOsId, disks, dataStoresDetails, + dsRootVolumeIsOn); + } + + vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); + if (vmMo == null) { + throw new Exception("Failed to find the newly created or relocated VM. vmName: " + vmInternalCSName); + } + } + // vmMo should now be a stopped VM on the intended host + // The number of disks changed must be 0 for install as is, as the VM is a clone + int disksChanges = !installAsIs ? disks.length : 0; + int totalChangeDevices = disksChanges + nics.length; + int hackDeviceCount = 0; + if (diskInfoBuilder != null) { + hackDeviceCount += diskInfoBuilder.getDiskCount(); + } + hackDeviceCount += nicDevices == null ? 
0 : nicDevices.length; + // vApp cdrom device + // HACK ALERT: ovf properties might not be the only or defining feature of vApps; needs checking + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("current count(s) desired: %d/ found:%d. now adding device to device count for vApp config ISO", totalChangeDevices, hackDeviceCount)); + } + if (vmSpec.getOvfProperties() != null) { + totalChangeDevices++; + } + + DiskTO volIso = null; + if (vmSpec.getType() != VirtualMachine.Type.User) { + // system VM needs a patch ISO + totalChangeDevices++; + } else { + volIso = getIsoDiskTO(disks); + if (volIso == null) + totalChangeDevices++; + } + + VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); + + VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), vmwareResource.getReservedCpuMHZ(vmSpec), (int)(vmSpec.getMaxRam() / (1024 * 1024)), + vmwareResource.getReservedMemoryMb(vmSpec), guestOsId, vmSpec.getLimitCpuUse()); + + int numCoresPerSocket = adjustNumberOfCoresPerSocket(vmSpec, vmMo, vmConfigSpec); + + // Check for hotadd settings + vmConfigSpec.setMemoryHotAddEnabled(vmMo.isMemoryHotAddSupported(guestOsId)); + + String hostApiVersion = ((HostMO)hyperHost).getHostAboutInfo().getApiVersion(); + if (numCoresPerSocket > 1 && hostApiVersion.compareTo("5.0") < 0) { + LOGGER.warn("Dynamic scaling of CPU is not supported for Virtual Machines with multi-core vCPUs in case of ESXi hosts 4.1 and prior. Hence CpuHotAdd will not be" + + " enabled for Virtual Machine: " + vmInternalCSName); + vmConfigSpec.setCpuHotAddEnabled(false); + } else { + vmConfigSpec.setCpuHotAddEnabled(vmMo.isCpuHotAddSupported(guestOsId)); + } + + vmwareResource.configNestedHVSupport(vmMo, vmSpec, vmConfigSpec); + + VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices]; + int deviceCount = 0; + int ideUnitNumber = 0; + int scsiUnitNumber = 0; + int ideControllerKey = vmMo.getIDEDeviceControllerKey(); + int scsiControllerKey = vmMo.getScsiDeviceControllerKeyNoException(); + + IsoSetup isoSetup = new IsoSetup(vmSpec, mgr, hyperHost, vmMo, disks, volIso, deviceConfigSpecArray, deviceCount, ideUnitNumber); + isoSetup.invoke(); + deviceCount = isoSetup.getDeviceCount(); + ideUnitNumber = isoSetup.getIdeUnitNumber(); + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("setting up %d disks with root from %s", diskInfoBuilder.getDiskCount(), vmwareResource.getGson().toJson(rootDiskTO))); + } + + DiskSetup diskSetup = new DiskSetup(vmSpec, rootDiskTO, controllerInfo, context, dcMo, hyperHost, vmMo, disks, dataStoresDetails, diskInfoBuilder, hasSnapshot, + deviceConfigSpecArray, deviceCount, ideUnitNumber, scsiUnitNumber, ideControllerKey, scsiControllerKey, installAsIs); + diskSetup.invoke(); + + rootDiskTO = diskSetup.getRootDiskTO(); + deviceCount = diskSetup.getDeviceCount(); + DiskTO[] sortedDisks = diskSetup.getSortedDisks(); + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("setting up %d disks with root from %s", sortedDisks.length, vmwareResource.getGson().toJson(rootDiskTO))); + } + + deviceCount += setupUsbDevicesAndGetCount(vmInternalCSName, vmMo, guestOsId, deviceConfigSpecArray, deviceCount); + + NicSetup nicSetup = new NicSetup(cmd, vmSpec, vmInternalCSName, context, mgr, hyperHost, vmMo, nics, deviceConfigSpecArray, deviceCount, nicDevices); + nicSetup.invoke(); + + deviceCount = nicSetup.getDeviceCount(); + int nicMask = nicSetup.getNicMask(); + int nicCount = nicSetup.getNicCount(); + Map 
nicUuidToDvSwitchUuid = nicSetup.getNicUuidToDvSwitchUuid(); + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("device count: %d; nic count %d", deviceCount, nicCount)); + } + for (int j = 0; j < deviceCount; j++) + vmConfigSpec.getDeviceChange().add(deviceConfigSpecArray[j]); + + // + // Setup VM options + // + + // pass boot arguments through machine.id & perform customized options to VMX + ArrayList extraOptions = new ArrayList<>(); + configBasicExtraOption(extraOptions, vmSpec); + configNvpExtraOption(extraOptions, vmSpec, nicUuidToDvSwitchUuid); + configCustomExtraOption(extraOptions, vmSpec); + + // config for NCC + VirtualMachine.Type vmType = cmd.getVirtualMachine().getType(); + if (vmType.equals(VirtualMachine.Type.NetScalerVm)) { + NicTO mgmtNic = vmSpec.getNics()[0]; + OptionValue option = new OptionValue(); + option.setKey("machine.id"); + option.setValue("ip=" + mgmtNic.getIp() + "&netmask=" + mgmtNic.getNetmask() + "&gateway=" + mgmtNic.getGateway()); + extraOptions.add(option); + } + + // config VNC + String keyboardLayout = null; + if (vmSpec.getDetails() != null) + keyboardLayout = vmSpec.getDetails().get(VmDetailConstants.KEYBOARD); + vmConfigSpec.getExtraConfig().addAll( + Arrays.asList(vmwareResource.configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout))); + + // config video card + vmwareResource.configureVideoCard(vmMo, vmSpec, vmConfigSpec); + + // Set OVF properties (if available) + Pair> ovfPropsMap = vmSpec.getOvfProperties(); + VmConfigInfo templateVappConfig; + List ovfProperties; + if (ovfPropsMap != null) { + String vmTemplate = ovfPropsMap.first(); + LOGGER.info("Find VM template " + vmTemplate); + VirtualMachineMO vmTemplateMO = dcMo.findVm(vmTemplate); + templateVappConfig = vmTemplateMO.getConfigInfo().getVAppConfig(); + ovfProperties = ovfPropsMap.second(); + // Set OVF properties (if available) + if (CollectionUtils.isNotEmpty(ovfProperties)) { + LOGGER.info("Copying OVF properties from template and setting them to the values the user provided"); + vmwareResource.copyVAppConfigsFromTemplate(templateVappConfig, ovfProperties, vmConfigSpec); + } + } + + checkBootOptions(vmSpec, vmConfigSpec); + + // + // Configure VM + // + if (!vmMo.configureVm(vmConfigSpec)) { + throw new Exception("Failed to configure VM before start. vmName: " + vmInternalCSName); + } + // FR37 TODO reconcile disks now!!! they are configured, check and add them to the returning vmTO + if (vmSpec.getType() == VirtualMachine.Type.DomainRouter) { + hyperHost.setRestartPriorityForVM(vmMo, DasVmPriority.HIGH.value()); + } + + // Resizing root disk only when explicit requested by user + final Map vmDetails = cmd.getVirtualMachine().getDetails(); + if (rootDiskTO != null && !hasSnapshot && (vmDetails != null && vmDetails.containsKey(ApiConstants.ROOT_DISK_SIZE))) { + resizeRootDiskOnVMStart(vmMo, rootDiskTO, hyperHost, context); + } + + // + // Post Configuration + // + + vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, String.valueOf(nicMask)); + postNvpConfigBeforeStart(vmMo, vmSpec); + + Map> iqnToData = new HashMap<>(); + + postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, iqnToData, hyperHost, context, installAsIs); + + // + // Power-on VM + // + if (!vmMo.powerOn()) { + throw new Exception("Failed to start VM. 
vmName: " + vmInternalCSName + " with hostname " + vmNameOnVcenter); + } + + if (installAsIs) { + // Set disks as the disks path and chain is retrieved from the cloned VM disks + cmd.getVirtualMachine().setDisks(disks); + } + + StartAnswer startAnswer = new StartAnswer(cmd); + + startAnswer.setIqnToData(iqnToData); + + deleteOldVersionOfTheStartedVM(existingVm, dcMo, vmMo); + + return startAnswer; + } catch (Throwable e) { + return handleStartFailure(cmd, vmSpec, existingVm, dcMo, e); + } finally { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("finally done with %s", vmwareResource.getGson().toJson(cmd))); + } + } + } + + /** + * Modify the specDisks information to match the cloned VM's disks (from vmMo VM) + */ + private void mapSpecDisksToClonedDisks(VirtualMachineMO vmMo, String vmInternalCSName, DiskTO[] specDisks) { + try { + if (vmMo != null && ArrayUtils.isNotEmpty(specDisks)) { + List vmDisks = vmMo.getVirtualDisks(); + List sortedDisks = Arrays.asList(sortVolumesByDeviceId(specDisks)) + .stream() + .filter(x -> x.getType() == Volume.Type.ROOT) + .collect(Collectors.toList()); + if (sortedDisks.size() != vmDisks.size()) { + LOGGER.error("Different number of root disks spec vs cloned deploy-as-is VM disks: " + sortedDisks.size() + " - " + vmDisks.size()); + return; + } + for (int i = 0; i < sortedDisks.size(); i++) { + DiskTO specDisk = sortedDisks.get(i); + VirtualDisk vmDisk = vmDisks.get(i); + DataTO dataVolume = specDisk.getData(); + if (dataVolume instanceof VolumeObjectTO) { + VolumeObjectTO volumeObjectTO = (VolumeObjectTO) dataVolume; + if (!volumeObjectTO.getSize().equals(vmDisk.getCapacityInBytes())) { + LOGGER.info("Mapped disk size is not the same as the cloned VM disk size: " + + volumeObjectTO.getSize() + " - " + vmDisk.getCapacityInBytes()); + } + VirtualDeviceBackingInfo backingInfo = vmDisk.getBacking(); + if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) { + VirtualDiskFlatVer2BackingInfo backing = (VirtualDiskFlatVer2BackingInfo) backingInfo; + String fileName = backing.getFileName(); + if (StringUtils.isNotBlank(fileName)) { + String[] fileNameParts = fileName.split(" "); + String datastoreUuid = fileNameParts[0].replace("[", "").replace("]", ""); + String relativePath = fileNameParts[1].split("/")[1].replace(".vmdk", ""); + String vmSpecDatastoreUuid = volumeObjectTO.getDataStore().getUuid().replaceAll("-", ""); + if (!datastoreUuid.equals(vmSpecDatastoreUuid)) { + LOGGER.info("Mapped disk datastore UUID is not the same as the cloned VM datastore UUID: " + + datastoreUuid + " - " + vmSpecDatastoreUuid); + } + volumeObjectTO.setPath(relativePath); + specDisk.setPath(relativePath); + } + } + } + } + } + } catch (Exception e) { + String msg = "Error mapping deploy-as-is VM disks from cloned VM " + vmInternalCSName; + LOGGER.error(msg, e); + throw new CloudRuntimeException(e); + } + } + + private StartAnswer handleStartFailure(StartCommand cmd, VirtualMachineTO vmSpec, VirtualMachineData existingVm, DatacenterMO dcMo, Throwable e) { + if (e instanceof RemoteException) { + LOGGER.warn("Encounter remote exception to vCenter, invalidate VMware session context"); + vmwareResource.invalidateServiceContext(); + } + + String msg = "StartCommand failed due to " + VmwareHelper.getExceptionMessage(e); + LOGGER.warn(msg, e); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("didn't start VM %s", vmSpec.getName()), e); + } + StartAnswer startAnswer = new StartAnswer(cmd, msg); + if ( e instanceof VmAlreadyExistsInVcenter) { + 
startAnswer.setContextParam("stopRetry", "true"); + } + reRegisterExistingVm(existingVm, dcMo); + + return startAnswer; + } + + private void deleteOldVersionOfTheStartedVM(VirtualMachineData existingVm, DatacenterMO dcMo, VirtualMachineMO vmMo) throws Exception { + // Since VM was successfully powered-on, if there was an existing VM in a different cluster that was unregistered, delete all the files associated with it. + if (existingVm != null && existingVm.vmName != null && existingVm.vmFileLayout != null) { + List vmDatastoreNames = new ArrayList<>(); + for (DatastoreMO vmDatastore : vmMo.getAllDatastores()) { + vmDatastoreNames.add(vmDatastore.getName()); + } + // Don't delete files that are in a datastore that is being used by the new VM as well (zone-wide datastore). + List skipDatastores = new ArrayList<>(); + for (DatastoreMO existingDatastore : existingVm.datastores) { + if (vmDatastoreNames.contains(existingDatastore.getName())) { + skipDatastores.add(existingDatastore.getName()); + } + } + vmwareResource.deleteUnregisteredVmFiles(existingVm.vmFileLayout, dcMo, true, skipDatastores); + } + } + + private int adjustNumberOfCoresPerSocket(VirtualMachineTO vmSpec, VirtualMachineMO vmMo, VirtualMachineConfigSpec vmConfigSpec) throws Exception { + // Check for multi-cores per socket settings + int numCoresPerSocket = 1; + String coresPerSocket = vmSpec.getDetails().get(VmDetailConstants.CPU_CORE_PER_SOCKET); + if (coresPerSocket != null) { + String apiVersion = HypervisorHostHelper.getVcenterApiVersion(vmMo.getContext()); + // Property 'numCoresPerSocket' is supported since vSphere API 5.0 + if (apiVersion.compareTo("5.0") >= 0) { + numCoresPerSocket = NumbersUtil.parseInt(coresPerSocket, 1); + vmConfigSpec.setNumCoresPerSocket(numCoresPerSocket); + } + } + return numCoresPerSocket; + } + + /** + // Setup USB devices + */ + private int setupUsbDevicesAndGetCount(String vmInternalCSName, VirtualMachineMO vmMo, String guestOsId, VirtualDeviceConfigSpec[] deviceConfigSpecArray, int deviceCount) + throws Exception { + int usbDeviceCount = 0; + if (guestOsId.startsWith("darwin")) { //Mac OS + VirtualDevice[] devices = vmMo.getMatchedDevices(new Class[] {VirtualUSBController.class}); + if (devices.length == 0) { + LOGGER.debug("No USB Controller device on VM Start. Add USB Controller device for Mac OS VM " + vmInternalCSName); + + //For Mac OS X systems, the EHCI+UHCI controller is enabled by default and is required for USB mouse and keyboard access. 
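+ // Add a single USB (EHCI+UHCI) controller spec at the current slot; both the shared device index and the returned USB device count are advanced below.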
+ VirtualDevice usbControllerDevice = VmwareHelper.prepareUSBControllerDevice(); + deviceConfigSpecArray[deviceCount] = new VirtualDeviceConfigSpec(); + deviceConfigSpecArray[deviceCount].setDevice(usbControllerDevice); + deviceConfigSpecArray[deviceCount].setOperation(VirtualDeviceConfigSpecOperation.ADD); + + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare USB controller at new device " + vmwareResource.getGson().toJson(deviceConfigSpecArray[deviceCount])); + + deviceCount++; + usbDeviceCount++; + } else { + LOGGER.debug("USB Controller device exists on VM Start for Mac OS VM " + vmInternalCSName); + } + } + return usbDeviceCount; + } + + private void createNewVm(VmwareContext context, VirtualMachineTO vmSpec, boolean installAsIs, String vmInternalCSName, String vmNameOnVcenter, Pair controllerInfo, Boolean systemVm, + VmwareManager mgr, VmwareHypervisorHost hyperHost, String guestOsId, DiskTO[] disks, HashMap> dataStoresDetails, + DatastoreMO dsRootVolumeIsOn) throws Exception { + VirtualMachineMO vmMo; + Pair rootDiskDataStoreDetails = getRootDiskDataStoreDetails(disks, dataStoresDetails); + + assert (vmSpec.getMinSpeed() != null) && (rootDiskDataStoreDetails != null); + + boolean vmFolderExists = rootDiskDataStoreDetails.second().folderExists(String.format("[%s]", rootDiskDataStoreDetails.second().getName()), vmNameOnVcenter); + String vmxFileFullPath = dsRootVolumeIsOn.searchFileInSubFolders(vmNameOnVcenter + ".vmx", false, VmwareManager.s_vmwareSearchExcludeFolder.value()); + if (vmFolderExists && vmxFileFullPath != null) { // VM can be registered only if .vmx is present. + registerVm(vmNameOnVcenter, dsRootVolumeIsOn, vmwareResource); + vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); + if (vmMo != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Found registered vm " + vmInternalCSName + " at host " + hyperHost.getHyperHostName()); + } + } + tearDownVm(vmMo); + } else if (installAsIs) { +// first get all the MORs ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool(); +// get the base VM vmMo = hyperHost.findVmOnHyperHost(vm.template.getPath()); +// do templateVm.createLinkedClone(vmInternalCSName, morBaseSnapshot, dcMo.getVmFolder(), morPool, morDatastore) +// or hyperHost....createLinkedOrFullClone(templateVm, volume, dcMo, vmMo, morDatastore, dsMo, vmInternalCSName, morPool); + vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); + // At this point vmMo points to the cloned VM + } else { + if (!hyperHost + .createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed(), vmwareResource.getReservedCpuMHZ(vmSpec), vmSpec.getLimitCpuUse(), (int)(vmSpec.getMaxRam() / Resource.ResourceType.bytesToMiB), vmwareResource.getReservedMemoryMb(vmSpec), guestOsId, + rootDiskDataStoreDetails.first(), false, controllerInfo, systemVm)) { + throw new Exception("Failed to create VM. vmName: " + vmInternalCSName); + } + } + } + + /** + * If a VM with the same name is found in a different cluster in the DC, unregister the old VM and configure a new VM (cold-migration). 
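+ * The old VM's name, file info, file layout and datastores are kept so the caller can either delete the stale files after a successful start or re-register the VM if the start fails.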
+ */ + private VirtualMachineData unregisterOnOtherClusterButHoldOnToOldVmData(String vmInternalCSName, DatacenterMO dcMo) throws Exception { + VirtualMachineMO existingVmInDc = dcMo.findVm(vmInternalCSName); + VirtualMachineData existingVm = null; + if (existingVmInDc != null) { + existingVm = new VirtualMachineData(); + existingVm.vmName = existingVmInDc.getName(); + existingVm.vmFileInfo = existingVmInDc.getFileInfo(); + existingVm.vmFileLayout = existingVmInDc.getFileLayout(); + existingVm.datastores = existingVmInDc.getAllDatastores(); + LOGGER.info("Found VM: " + vmInternalCSName + " on a host in a different cluster. Unregistering the existing VM."); + existingVmInDc.unregisterVm(); + } + return existingVm; + } + + private Pair getRootDiskDataStoreDetails(DiskTO[] disks, HashMap> dataStoresDetails) { + Pair rootDiskDataStoreDetails = null; + for (DiskTO vol : disks) { + if (vol.getType() == Volume.Type.ROOT) { + Map details = vol.getDetails(); + boolean managed = false; + + if (details != null) { + managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + } + + if (managed) { + String datastoreName = VmwareResource.getDatastoreName(details.get(DiskTO.IQN)); + + rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName); + } else { + DataStoreTO primaryStore = vol.getData().getDataStore(); + + rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid()); + } + } + } + return rootDiskDataStoreDetails; + } + + /** + * Since VM start failed, if there was an existing VM in a different cluster that was unregistered, register it back. + * + * @param dcMo is guaranteed to be not null since we have noticed there is an existing VM in the dc (using that mo) + */ + private void reRegisterExistingVm(VirtualMachineData existingVm, DatacenterMO dcMo) { + if (existingVm != null && existingVm.vmName != null && existingVm.vmFileInfo != null) { + LOGGER.debug("Since VM start failed, registering back an existing VM: " + existingVm.vmName + " that was unregistered"); + try { + DatastoreFile fileInDatastore = new DatastoreFile(existingVm.vmFileInfo.getVmPathName()); + DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName())); + registerVm(existingVm.vmName, existingVmDsMo, vmwareResource); + } catch (Exception ex) { + String message = "Failed to register an existing VM: " + existingVm.vmName + " due to " + VmwareHelper.getExceptionMessage(ex); + LOGGER.warn(message, ex); + } + } + } + + private void checkIfVmExistsInVcenter(String vmInternalCSName, String vmNameOnVcenter, DatacenterMO dcMo) throws VmAlreadyExistsInVcenter, Exception { + // Validate VM name is unique in Datacenter + VirtualMachineMO vmInVcenter = dcMo.checkIfVmAlreadyExistsInVcenter(vmNameOnVcenter, vmInternalCSName); + if (vmInVcenter != null) { + String msg = "VM with name: " + vmNameOnVcenter + " already exists in vCenter."; + LOGGER.error(msg); + throw new VmAlreadyExistsInVcenter(msg); + } + } + + private Pair getDiskControllerInfo(VirtualMachineTO vmSpec) { + String dataDiskController = vmSpec.getDetails().get(VmDetailConstants.DATA_DISK_CONTROLLER); + String rootDiskController = vmSpec.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER); + // If root disk controller is scsi, then data disk controller would also be scsi instead of using 'osdefault' + // This helps avoid a mix of different scsi subtype controllers in the instance.
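+ // Only the combination of an 'osdefault' data disk controller with an 'lsilogic' root disk controller is flipped here; an explicitly chosen data disk controller is kept as-is.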
+ if (DiskControllerType.osdefault == DiskControllerType.getType(dataDiskController) && DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) { + dataDiskController = DiskControllerType.scsi.toString(); + } + + // Validate the controller types + dataDiskController = DiskControllerType.getType(dataDiskController).toString(); + rootDiskController = DiskControllerType.getType(rootDiskController).toString(); + + if (DiskControllerType.getType(rootDiskController) == DiskControllerType.none) { + throw new CloudRuntimeException("Invalid root disk controller detected : " + rootDiskController); + } + if (DiskControllerType.getType(dataDiskController) == DiskControllerType.none) { + throw new CloudRuntimeException("Invalid data disk controller detected : " + dataDiskController); + } + + return new Pair<>(rootDiskController, dataDiskController); + } + + /** + * Registers the vm to the inventory given the vmx file. + * @param vmName + * @param dsMo + * @param vmwareResource + */ + private void registerVm(String vmName, DatastoreMO dsMo, VmwareResource vmwareResource) throws Exception { + + //1st param + VmwareHypervisorHost hyperHost = vmwareResource.getHyperHost(vmwareResource.getServiceContext()); + ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); + DatacenterMO dataCenterMo = new DatacenterMO(vmwareResource.getServiceContext(), dcMor); + ManagedObjectReference vmFolderMor = dataCenterMo.getVmFolder(); + + //2nd param + String vmxFilePath = dsMo.searchFileInSubFolders(vmName + ".vmx", false, VmwareManager.s_vmwareSearchExcludeFolder.value()); + + // 5th param + ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool(); + + ManagedObjectReference morTask = vmwareResource.getServiceContext().getService().registerVMTask(vmFolderMor, vmxFilePath, vmName, false, morPool, hyperHost.getMor()); + boolean result = vmwareResource.getServiceContext().getVimClient().waitForTask(morTask); + if (!result) { + throw new Exception("Unable to register vm due to " + TaskMO.getTaskFailureInfo(vmwareResource.getServiceContext(), morTask)); + } else { + vmwareResource.getServiceContext().waitForTaskProgressDone(morTask); + } + + } + + // Pair + private Pair composeVmNames(VirtualMachineTO vmSpec) { + String vmInternalCSName = vmSpec.getName(); + String vmNameOnVcenter = vmSpec.getName(); + if (VmwareResource.instanceNameFlag && vmSpec.getHostName() != null) { + vmNameOnVcenter = vmSpec.getHostName(); + } + return new Pair(vmInternalCSName, vmNameOnVcenter); + } + + private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArchitecture, String guestOs, String cloudGuestOs) { + if (cpuArchitecture == null) { + LOGGER.warn("CPU arch is not set, default to i386. guest os: " + guestOs); + cpuArchitecture = "i386"; + } + + if (cloudGuestOs == null) { + LOGGER.warn("Guest OS mapping name is not set for guest os: " + guestOs); + } + + VirtualMachineGuestOsIdentifier identifier = null; + try { + if (cloudGuestOs != null) { + identifier = VirtualMachineGuestOsIdentifier.fromValue(cloudGuestOs); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Using mapping name : " + identifier.toString()); + } + } + } catch (IllegalArgumentException e) { + LOGGER.warn("Unable to find Guest OS Identifier in VMware for mapping name: " + cloudGuestOs + ". 
Continuing with defaults."); + } + if (identifier != null) { + return identifier; + } + + if (cpuArchitecture.equalsIgnoreCase("x86_64")) { + return VirtualMachineGuestOsIdentifier.OTHER_GUEST_64; + } + return VirtualMachineGuestOsIdentifier.OTHER_GUEST; + } + + private DiskTO[] validateDisks(DiskTO[] disks) { + List validatedDisks = new ArrayList(); + + for (DiskTO vol : disks) { + if (vol.getType() != Volume.Type.ISO) { + VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); + DataStoreTO primaryStore = volumeTO.getDataStore(); + if (primaryStore.getUuid() != null && !primaryStore.getUuid().isEmpty()) { + validatedDisks.add(vol); + } + } else if (vol.getType() == Volume.Type.ISO) { + TemplateObjectTO templateTO = (TemplateObjectTO) vol.getData(); + if (templateTO.getPath() != null && !templateTO.getPath().isEmpty()) { + validatedDisks.add(vol); + } + } else { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Drop invalid disk option, volumeTO: " + vmwareResource.getGson().toJson(vol)); + } + } + } + Collections.sort(validatedDisks, (d1, d2) -> d1.getDiskSeq().compareTo(d2.getDiskSeq())); + return validatedDisks.toArray(new DiskTO[0]); + } + + private HashMap> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context, + DiskTO[] disks, Command cmd) throws Exception { + HashMap> mapIdToMors = new HashMap<>(); + + assert (hyperHost != null) && (context != null); + + for (DiskTO vol : disks) { + if (vol.getType() != Volume.Type.ISO) { + VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); + DataStoreTO primaryStore = volumeTO.getDataStore(); + String poolUuid = primaryStore.getUuid(); + + if (mapIdToMors.get(poolUuid) == null) { + boolean isManaged = false; + Map details = vol.getDetails(); + + if (details != null) { + isManaged = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + } + + if (isManaged) { + String iScsiName = details.get(DiskTO.IQN); // details should not be null for managed storage (it may or may not be null for non-managed storage) + String datastoreName = VmwareResource.getDatastoreName(iScsiName); + ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, datastoreName); + + // if the datastore is not present, we need to discover the iSCSI device that will support it, + // create the datastore, and create a VMDK file in the datastore + if (morDatastore == null) { + final String vmdkPath = vmwareResource.getVmdkPath(volumeTO.getPath()); + + morDatastore = getStorageProcessor().prepareManagedStorage(context, hyperHost, null, iScsiName, + details.get(DiskTO.STORAGE_HOST), Integer.parseInt(details.get(DiskTO.STORAGE_PORT)), + vmdkPath, + details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), + details.get(DiskTO.CHAP_TARGET_USERNAME), details.get(DiskTO.CHAP_TARGET_SECRET), + Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd); + + DatastoreMO dsMo = new DatastoreMO(vmwareResource.getServiceContext(), morDatastore); + + final String datastoreVolumePath; + + if (vmdkPath != null) { + datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + VmwareResource.VMDK_EXTENSION); + } else { + datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + VmwareResource.VMDK_EXTENSION); + } + + volumeTO.setPath(datastoreVolumePath); + vol.setPath(datastoreVolumePath); + } + + mapIdToMors.put(datastoreName, new Pair<>(morDatastore, new DatastoreMO(context, morDatastore))); + } else { + ManagedObjectReference morDatastore = 
HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolUuid); + + if (morDatastore == null) { + String msg = "Failed to get the mounted datastore for the volume's pool " + poolUuid; + + LOGGER.error(msg); + + throw new Exception(msg); + } + + mapIdToMors.put(poolUuid, new Pair<>(morDatastore, new DatastoreMO(context, morDatastore))); + } + } + } + } + + return mapIdToMors; + } + + private VmwareStorageProcessor getStorageProcessor() { + return vmwareResource.getStorageProcessor(); + } + + private DatastoreMO getDatastoreThatRootDiskIsOn(HashMap> dataStoresDetails, DiskTO disks[]) { + Pair rootDiskDataStoreDetails = null; + + for (DiskTO vol : disks) { + if (vol.getType() == Volume.Type.ROOT) { + Map details = vol.getDetails(); + boolean managed = false; + + if (details != null) { + managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + } + + if (managed) { + String datastoreName = VmwareResource.getDatastoreName(details.get(DiskTO.IQN)); + + rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName); + + break; + } else { + DataStoreTO primaryStore = vol.getData().getDataStore(); + + rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid()); + + break; + } + } + } + + if (rootDiskDataStoreDetails != null) { + return rootDiskDataStoreDetails.second(); + } + + return null; + } + + private void ensureDiskControllers(VirtualMachineMO vmMo, Pair controllerInfo) throws Exception { + if (vmMo == null) { + return; + } + + String msg; + String rootDiskController = controllerInfo.first(); + String dataDiskController = controllerInfo.second(); + String scsiDiskController; + String recommendedDiskController = null; + + if (VmwareHelper.isControllerOsRecommended(dataDiskController) || VmwareHelper.isControllerOsRecommended(rootDiskController)) { + recommendedDiskController = vmMo.getRecommendedDiskController(null); + } + scsiDiskController = HypervisorHostHelper.getScsiController(new Pair(rootDiskController, dataDiskController), recommendedDiskController); + if (scsiDiskController == null) { + return; + } + + vmMo.getScsiDeviceControllerKeyNoException(); + // This VM needs SCSI controllers. + // Get count of existing scsi controllers. Helps not to attempt to create more than the maximum allowed 4 + // Get maximum among the bus numbers in use by scsi controllers. Safe to pick maximum, because we always go sequential allocating bus numbers. + Ternary scsiControllerInfo = vmMo.getScsiControllerInfo(); + int requiredNumScsiControllers = VmwareHelper.MAX_SCSI_CONTROLLER_COUNT - scsiControllerInfo.first(); + int availableBusNum = scsiControllerInfo.second() + 1; // method returned current max. bus number + + if (requiredNumScsiControllers == 0) { + return; + } + if (scsiControllerInfo.first() > 0) { + // For VMs which already have a SCSI controller, do NOT attempt to add any more SCSI controllers & return the sub type. + // For Legacy VMs would have only 1 LsiLogic Parallel SCSI controller, and doesn't require more. + // For VMs created post device ordering support, 4 SCSI subtype controllers are ensured during deployment itself. No need to add more. + // For fresh VM deployment only, all required controllers should be ensured. 
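+ // Example: one existing controller with max bus number 0 gives requiredNumScsiControllers = 4 - 1 = 3 and availableBusNum = 1, but because a controller already exists we return here without adding any.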
+ return; + } + ensureScsiDiskControllers(vmMo, scsiDiskController, requiredNumScsiControllers, availableBusNum); + } + + private void ensureScsiDiskControllers(VirtualMachineMO vmMo, String scsiDiskController, int requiredNumScsiControllers, int availableBusNum) throws Exception { + // Pick the sub type of scsi + if (DiskControllerType.getType(scsiDiskController) == DiskControllerType.pvscsi) { + if (!vmMo.isPvScsiSupported()) { + String msg = "This VM doesn't support Vmware Paravirtual SCSI controller for virtual disks, because the virtual hardware version is less than 7."; + throw new Exception(msg); + } + vmMo.ensurePvScsiDeviceController(requiredNumScsiControllers, availableBusNum); + } else if (DiskControllerType.getType(scsiDiskController) == DiskControllerType.lsisas1068) { + vmMo.ensureLsiLogicSasDeviceControllers(requiredNumScsiControllers, availableBusNum); + } else if (DiskControllerType.getType(scsiDiskController) == DiskControllerType.buslogic) { + vmMo.ensureBusLogicDeviceControllers(requiredNumScsiControllers, availableBusNum); + } else if (DiskControllerType.getType(scsiDiskController) == DiskControllerType.lsilogic) { + vmMo.ensureLsiLogicDeviceControllers(requiredNumScsiControllers, availableBusNum); + } + } + + private void tearDownVm(VirtualMachineMO vmMo) throws Exception { + + if (vmMo == null) + return; + + boolean hasSnapshot = false; + hasSnapshot = vmMo.hasSnapshot(); + if (!hasSnapshot) + vmMo.tearDownDevices(new Class[]{VirtualDisk.class, VirtualEthernetCard.class}); + else + vmMo.tearDownDevices(new Class[]{VirtualEthernetCard.class}); + vmMo.ensureScsiDeviceController(); + } + + private static DiskTO getIsoDiskTO(DiskTO[] disks) { + for (DiskTO vol : disks) { + if (vol.getType() == Volume.Type.ISO) { + return vol; + } + } + return null; + } + + private static DiskTO[] sortVolumesByDeviceId(DiskTO[] volumes) { + + List listForSort = new ArrayList(); + for (DiskTO vol : volumes) { + listForSort.add(vol); + } + Collections.sort(listForSort, new Comparator() { + + @Override + public int compare(DiskTO arg0, DiskTO arg1) { + if (arg0.getDiskSeq() < arg1.getDiskSeq()) { + return -1; + } else if (arg0.getDiskSeq().equals(arg1.getDiskSeq())) { + return 0; + } + + return 1; + } + }); + + return listForSort.toArray(new DiskTO[0]); + } + + private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, + Map> iqnToData, VmwareHypervisorHost hyperHost, VmwareContext context, boolean installAsIs) + throws Exception { + VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); + + for (DiskTO vol : sortedDisks) { + //TODO: Map existing disks to the ones returned in the answer + if (vol.getType() == Volume.Type.ISO) + continue; + + VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); + + VirtualMachineDiskInfo diskInfo = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context); + assert (diskInfo != null); + + String[] diskChain = diskInfo.getDiskChain(); + assert (diskChain.length > 0); + + Map details = vol.getDetails(); + boolean managed = false; + + if (details != null) { + managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + } + + DatastoreFile file = new DatastoreFile(diskChain[0]); + + if (managed) { + DatastoreFile originalFile = new DatastoreFile(volumeTO.getPath()); + + if (!file.getFileBaseName().equalsIgnoreCase(originalFile.getFileBaseName())) { + if (LOGGER.isInfoEnabled()) + LOGGER.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + 
volumeTO.getPath() + " -> " + diskChain[0]); + } + } else { + if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) { + if (LOGGER.isInfoEnabled()) + LOGGER.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); + } + } + + VolumeObjectTO volInSpec = getVolumeInSpec(vmSpec, volumeTO); + + if (volInSpec != null) { + if (managed) { + Map data = new HashMap<>(); + + String datastoreVolumePath = diskChain[0]; + + data.put(StartAnswer.PATH, datastoreVolumePath); + data.put(StartAnswer.IMAGE_FORMAT, Storage.ImageFormat.OVA.toString()); + + iqnToData.put(details.get(DiskTO.IQN), data); + + vol.setPath(datastoreVolumePath); + volumeTO.setPath(datastoreVolumePath); + volInSpec.setPath(datastoreVolumePath); + } else { + volInSpec.setPath(file.getFileBaseName()); + } + volInSpec.setChainInfo(vmwareResource.getGson().toJson(diskInfo)); + } + } + } + + private void checkBootOptions(VirtualMachineTO vmSpec, VirtualMachineConfigSpec vmConfigSpec) { + String bootMode = null; + if (vmSpec.getDetails().containsKey(VmDetailConstants.BOOT_MODE)) { + bootMode = vmSpec.getDetails().get(VmDetailConstants.BOOT_MODE); + } + if (null == bootMode) { + bootMode = ApiConstants.BootType.BIOS.toString(); + } + + setBootOptions(vmSpec, bootMode, vmConfigSpec); + } + + private void setBootOptions(VirtualMachineTO vmSpec, String bootMode, VirtualMachineConfigSpec vmConfigSpec) { + VirtualMachineBootOptions bootOptions = null; + if (StringUtils.isNotBlank(bootMode) && !bootMode.equalsIgnoreCase("bios")) { + vmConfigSpec.setFirmware("efi"); + if (vmSpec.getDetails().containsKey(ApiConstants.BootType.UEFI.toString()) && "secure".equalsIgnoreCase(vmSpec.getDetails().get(ApiConstants.BootType.UEFI.toString()))) { + if (bootOptions == null) { + bootOptions = new VirtualMachineBootOptions(); + } + bootOptions.setEfiSecureBootEnabled(true); + } + } + if (vmSpec.isEnterHardwareSetup()) { + if (bootOptions == null) { + bootOptions = new VirtualMachineBootOptions(); + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("configuring VM '%s' to enter hardware setup",vmSpec.getName())); + } + bootOptions.setEnterBIOSSetup(vmSpec.isEnterHardwareSetup()); + } + if (bootOptions != null) { + vmConfigSpec.setBootOptions(bootOptions); + } + } + + private void resizeRootDiskOnVMStart(VirtualMachineMO vmMo, DiskTO rootDiskTO, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { + final Pair vdisk = vmwareResource.getVirtualDiskInfo(vmMo, VmwareResource.appendFileType(rootDiskTO.getPath(), VmwareResource.VMDK_EXTENSION)); + assert (vdisk != null); + + Long reqSize = 0L; + final VolumeObjectTO volumeTO = ((VolumeObjectTO) rootDiskTO.getData()); + if (volumeTO != null) { + reqSize = volumeTO.getSize() / 1024; + } + final VirtualDisk disk = vdisk.first(); + if (reqSize > disk.getCapacityInKB()) { + final VirtualMachineDiskInfo diskInfo = getMatchingExistingDisk(vmMo.getDiskInfoBuilder(), rootDiskTO, hyperHost, context); + assert (diskInfo != null); + final String[] diskChain = diskInfo.getDiskChain(); + + if (diskChain != null && diskChain.length > 1) { + LOGGER.warn("Disk chain length for the VM is greater than one, this is not supported"); + throw new CloudRuntimeException("Unsupported VM disk chain length: " + diskChain.length); + } + + boolean resizingSupported = false; + String deviceBusName = diskInfo.getDiskDeviceBusName(); + if (deviceBusName != null && (deviceBusName.toLowerCase().contains("scsi") || 
deviceBusName.toLowerCase().contains("lsi"))) { + resizingSupported = true; + } + if (!resizingSupported) { + LOGGER.warn("Resizing of root disk is only supported for scsi device/bus; the provided VM's disk device bus name is " + diskInfo.getDiskDeviceBusName()); + throw new CloudRuntimeException("Unsupported VM root disk device bus: " + diskInfo.getDiskDeviceBusName()); + } + + disk.setCapacityInKB(reqSize); + VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(disk); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); + if (!vmMo.configureVm(vmConfigSpec)) { + throw new Exception("Failed to configure VM for given root disk size. vmName: " + vmMo.getName()); + } + } + } + + private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec) throws Exception { + /** + * We need to configure the port on the DV switch after the host is + * connected. So make this happen between the configure and start of + * the VM + */ + int nicIndex = 0; + for (NicTO nicTo : sortNicsByDeviceId(vmSpec.getNics())) { + if (nicTo.getBroadcastType() == Networks.BroadcastDomainType.Lswitch) { + // We need to create a port with a unique vlan and pass the key to the nic device + LOGGER.trace("Nic " + nicTo.toString() + " is connected to an NVP logical switch"); + VirtualDevice nicVirtualDevice = vmMo.getNicDeviceByIndex(nicIndex); + if (nicVirtualDevice == null) { + throw new Exception("Failed to find a VirtualDevice for nic " + nicIndex); //FIXME Generic exceptions are bad + } + VirtualDeviceBackingInfo backing = nicVirtualDevice.getBacking(); + if (backing instanceof VirtualEthernetCardDistributedVirtualPortBackingInfo) { + // This NIC is connected to a Distributed Virtual Switch + VirtualEthernetCardDistributedVirtualPortBackingInfo portInfo = (VirtualEthernetCardDistributedVirtualPortBackingInfo) backing; + DistributedVirtualSwitchPortConnection port = portInfo.getPort(); + String portKey = port.getPortKey(); + String portGroupKey = port.getPortgroupKey(); + String dvSwitchUuid = port.getSwitchUuid(); + + LOGGER.debug("NIC " + nicTo.toString() + " is connected to dvSwitch " + dvSwitchUuid + " pg " + portGroupKey + " port " + portKey); + + ManagedObjectReference dvSwitchManager = vmMo.getContext().getVimClient().getServiceContent().getDvSwitchManager(); + ManagedObjectReference dvSwitch = vmMo.getContext().getVimClient().getService().queryDvsByUuid(dvSwitchManager, dvSwitchUuid); + + // Get all ports + DistributedVirtualSwitchPortCriteria criteria = new DistributedVirtualSwitchPortCriteria(); + criteria.setInside(true); + criteria.getPortgroupKey().add(portGroupKey); + List dvPorts = vmMo.getContext().getVimClient().getService().fetchDVPorts(dvSwitch, criteria); + + DistributedVirtualPort vmDvPort = null; + List usedVlans = new ArrayList(); + for (DistributedVirtualPort dvPort : dvPorts) { + // Find the port for this NIC by portkey + if (portKey.equals(dvPort.getKey())) { + vmDvPort = dvPort; + } + VMwareDVSPortSetting settings = (VMwareDVSPortSetting) dvPort.getConfig().getSetting(); + VmwareDistributedVirtualSwitchVlanIdSpec vlanId = (VmwareDistributedVirtualSwitchVlanIdSpec) settings.getVlan(); + LOGGER.trace("Found port " + dvPort.getKey() + " with vlan " + vlanId.getVlanId()); + if (vlanId.getVlanId() > 0 && vlanId.getVlanId() < 4095) { + usedVlans.add(vlanId.getVlanId()); + } + } + + if
(vmDvPort == null) { + throw new Exception("Empty port list from dvSwitch for nic " + nicTo.toString()); + } + + DVPortConfigInfo dvPortConfigInfo = vmDvPort.getConfig(); + VMwareDVSPortSetting settings = (VMwareDVSPortSetting) dvPortConfigInfo.getSetting(); + + VmwareDistributedVirtualSwitchVlanIdSpec vlanId = (VmwareDistributedVirtualSwitchVlanIdSpec) settings.getVlan(); + BoolPolicy blocked = settings.getBlocked(); + if (blocked.isValue() == Boolean.TRUE) { + LOGGER.trace("Port is blocked, set a vlanid and unblock"); + DVPortConfigSpec dvPortConfigSpec = new DVPortConfigSpec(); + VMwareDVSPortSetting edittedSettings = new VMwareDVSPortSetting(); + // Unblock + blocked.setValue(Boolean.FALSE); + blocked.setInherited(Boolean.FALSE); + edittedSettings.setBlocked(blocked); + // Set vlan + int i; + for (i = 1; i < 4095; i++) { + if (!usedVlans.contains(i)) + break; + } + vlanId.setVlanId(i); // FIXME should be a determined + // based on usage + vlanId.setInherited(false); + edittedSettings.setVlan(vlanId); + + dvPortConfigSpec.setSetting(edittedSettings); + dvPortConfigSpec.setOperation("edit"); + dvPortConfigSpec.setKey(portKey); + List dvPortConfigSpecs = new ArrayList(); + dvPortConfigSpecs.add(dvPortConfigSpec); + ManagedObjectReference task = vmMo.getContext().getVimClient().getService().reconfigureDVPortTask(dvSwitch, dvPortConfigSpecs); + if (!vmMo.getContext().getVimClient().waitForTask(task)) { + throw new Exception("Failed to configure the dvSwitch port for nic " + nicTo.toString()); + } + LOGGER.debug("NIC " + nicTo.toString() + " connected to vlan " + i); + } else { + LOGGER.trace("Port already configured and set to vlan " + vlanId.getVlanId()); + } + } else if (backing instanceof VirtualEthernetCardNetworkBackingInfo) { + // This NIC is connected to a Virtual Switch + // Nothing to do + } else if (backing instanceof VirtualEthernetCardOpaqueNetworkBackingInfo) { + //if NSX API VERSION >= 4.2, connect to br-int (nsx.network), do not create portgroup else previous behaviour + //OK, connected to OpaqueNetwork + } else { + LOGGER.error("nic device backing is of type " + backing.getClass().getName()); + throw new Exception("Incompatible backing for a VirtualDevice for nic " + nicIndex); //FIXME Generic exceptions are bad + } + } + nicIndex++; + } + } + + private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol, VmwareHypervisorHost hyperHost, VmwareContext context) + throws Exception { + if (diskInfoBuilder != null) { + VolumeObjectTO volume = (VolumeObjectTO) vol.getData(); + + String dsName = null; + String diskBackingFileBaseName = null; + + Map details = vol.getDetails(); + boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + + if (isManaged) { + String iScsiName = details.get(DiskTO.IQN); + + // if the storage is managed, iScsiName should not be null + dsName = VmwareResource.getDatastoreName(iScsiName); + + diskBackingFileBaseName = new DatastoreFile(volume.getPath()).getFileBaseName(); + } else { + ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, volume.getDataStore().getUuid()); + DatastoreMO dsMo = new DatastoreMO(context, morDs); + + dsName = dsMo.getName(); + + diskBackingFileBaseName = volume.getPath(); + } + + VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName); + if (diskInfo != null) { + LOGGER.info("Found existing disk info from volume path: " + 
volume.getPath()); + return diskInfo; + } else { + String chainInfo = volume.getChainInfo(); + if (chainInfo != null) { + VirtualMachineDiskInfo infoInChain = vmwareResource.getGson().fromJson(chainInfo, VirtualMachineDiskInfo.class); + if (infoInChain != null) { + String[] disks = infoInChain.getDiskChain(); + if (disks.length > 0) { + for (String diskPath : disks) { + DatastoreFile file = new DatastoreFile(diskPath); + diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName); + if (diskInfo != null) { + LOGGER.info("Found existing disk from chain info: " + diskPath); + return diskInfo; + } + } + } + + if (diskInfo == null) { + diskInfo = diskInfoBuilder.getDiskInfoByDeviceBusName(infoInChain.getDiskDeviceBusName()); + if (diskInfo != null) { + LOGGER.info("Found existing disk from from chain device bus information: " + infoInChain.getDiskDeviceBusName()); + return diskInfo; + } + } + } + } + } + } + + return null; + } + + private static VolumeObjectTO getVolumeInSpec(VirtualMachineTO vmSpec, VolumeObjectTO srcVol) { + for (DiskTO disk : vmSpec.getDisks()) { + if (disk.getData() instanceof VolumeObjectTO) { + VolumeObjectTO vol = (VolumeObjectTO) disk.getData(); + if (vol.getId() == srcVol.getId()) + return vol; + } + } + + return null; + } + + /** + * Generate the mac sequence from the nics. + */ + protected String generateMacSequence(NicTO[] nics) { + if (nics.length == 0) { + return ""; + } + + StringBuffer sbMacSequence = new StringBuffer(); + for (NicTO nicTo : sortNicsByDeviceId(nics)) { + sbMacSequence.append(nicTo.getMac()).append("|"); + } + if (!sbMacSequence.toString().isEmpty()) { + sbMacSequence.deleteCharAt(sbMacSequence.length() - 1); //Remove extra '|' char appended at the end + } + + return sbMacSequence.toString(); + } + + static NicTO[] sortNicsByDeviceId(NicTO[] nics) { + + List listForSort = new ArrayList(); + for (NicTO nic : nics) { + listForSort.add(nic); + } + Collections.sort(listForSort, new Comparator() { + + @Override + public int compare(NicTO arg0, NicTO arg1) { + if (arg0.getDeviceId() < arg1.getDeviceId()) { + return -1; + } else if (arg0.getDeviceId() == arg1.getDeviceId()) { + return 0; + } + + return 1; + } + }); + + return listForSort.toArray(new NicTO[0]); + } + + private static void configBasicExtraOption(List extraOptions, VirtualMachineTO vmSpec) { + OptionValue newVal = new OptionValue(); + newVal.setKey("machine.id"); + newVal.setValue(vmSpec.getBootArgs()); + extraOptions.add(newVal); + + newVal = new OptionValue(); + newVal.setKey("devices.hotplug"); + newVal.setValue("true"); + extraOptions.add(newVal); + } + + private static void configNvpExtraOption(List extraOptions, VirtualMachineTO vmSpec, Map nicUuidToDvSwitchUuid) { + /** + * Extra Config : nvp.vm-uuid = uuid + * - Required for Nicira NVP integration + */ + OptionValue newVal = new OptionValue(); + newVal.setKey("nvp.vm-uuid"); + newVal.setValue(vmSpec.getUuid()); + extraOptions.add(newVal); + + /** + * Extra Config : nvp.iface-id. = uuid + * - Required for Nicira NVP integration + */ + int nicNum = 0; + for (NicTO nicTo : sortNicsByDeviceId(vmSpec.getNics())) { + if (nicTo.getUuid() != null) { + newVal = new OptionValue(); + newVal.setKey("nvp.iface-id." 
+ nicNum); + newVal.setValue(nicTo.getUuid()); + extraOptions.add(newVal); + } + nicNum++; + } + } + + private static void configCustomExtraOption(List extraOptions, VirtualMachineTO vmSpec) { + // we no longer do validation here + for (Map.Entry entry : vmSpec.getDetails().entrySet()) { + if (entry.getKey().equalsIgnoreCase(VmDetailConstants.BOOT_MODE)) { + continue; + } + OptionValue newVal = new OptionValue(); + newVal.setKey(entry.getKey()); + newVal.setValue(entry.getValue()); + extraOptions.add(newVal); + } + } + + private class VmAlreadyExistsInVcenter extends Exception { + public VmAlreadyExistsInVcenter(String msg) { + super(msg); + } + } + + private class VirtualMachineData { + String vmName = null; + VirtualMachineFileInfo vmFileInfo = null; + VirtualMachineFileLayoutEx vmFileLayout = null; + List datastores = new ArrayList<>(); + } + + private class VirtualMachineRecycler { + private String vmInternalCSName; + private Pair controllerInfo; + private Boolean systemVm; + private VmwareHypervisorHost hyperHost; + private VirtualMachineMO vmMo; + private DiskControllerType systemVmScsiControllerType; + private int firstScsiControllerBusNum; + private int numScsiControllerForSystemVm; + private VirtualMachineDiskInfoBuilder diskInfoBuilder; + private boolean hasSnapshot; + private VirtualDevice[] nicDevices; + + public VirtualMachineRecycler(String vmInternalCSName, Pair controllerInfo, Boolean systemVm, VmwareHypervisorHost hyperHost, VirtualMachineMO vmMo, + DiskControllerType systemVmScsiControllerType, int firstScsiControllerBusNum, int numScsiControllerForSystemVm) { + this.vmInternalCSName = vmInternalCSName; + this.controllerInfo = controllerInfo; + this.systemVm = systemVm; + this.hyperHost = hyperHost; + this.vmMo = vmMo; + this.systemVmScsiControllerType = systemVmScsiControllerType; + this.firstScsiControllerBusNum = firstScsiControllerBusNum; + this.numScsiControllerForSystemVm = numScsiControllerForSystemVm; + } + + public VirtualMachineDiskInfoBuilder getDiskInfoBuilder() { + return diskInfoBuilder; + } + + public boolean isHasSnapshot() { + return hasSnapshot; + } + + public VirtualMachineRecycler invoke() throws Exception { + if (LOGGER.isInfoEnabled()) { + LOGGER.info("Found vm " + vmInternalCSName + " at other host, relocate to " + hyperHost.getHyperHostName()); + } + + takeVmFromOtherHyperHost(hyperHost, vmInternalCSName); + + if (VmwareResource.getVmPowerState(vmMo) != VirtualMachine.PowerState.PowerOff) + vmMo.safePowerOff(vmwareResource.getShutdownWaitMs()); + + diskInfoBuilder = vmMo.getDiskInfoBuilder(); + hasSnapshot = vmMo.hasSnapshot(); + nicDevices = vmMo.getNicDevices(); + if (!hasSnapshot) + vmMo.tearDownDevices(new Class[] {VirtualDisk.class, VirtualEthernetCard.class}); + else + vmMo.tearDownDevices(new Class[] {VirtualEthernetCard.class}); + + if (systemVm) { + // System volumes don't require more than 1 SCSI controller as there is no requirement for data volumes.
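+ // Recreate only the controllers a system VM needs; user VMs fall through to the full root/data disk controller setup in the else branch.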
+ ensureScsiDiskControllers(vmMo, systemVmScsiControllerType.toString(), numScsiControllerForSystemVm, firstScsiControllerBusNum); + } else { + ensureDiskControllers(vmMo, controllerInfo); + } + return this; + } + + private VirtualMachineMO takeVmFromOtherHyperHost(VmwareHypervisorHost hyperHost, String vmName) throws Exception { + + VirtualMachineMO vmMo = hyperHost.findVmOnPeerHyperHost(vmName); + if (vmMo != null) { + ManagedObjectReference morTargetPhysicalHost = hyperHost.findMigrationTarget(vmMo); + if (morTargetPhysicalHost == null) { + String msg = "VM " + vmName + " is on other host and we have no resource available to migrate and start it here"; + LOGGER.error(msg); + throw new Exception(msg); + } + + if (!vmMo.relocate(morTargetPhysicalHost)) { + String msg = "VM " + vmName + " is on other host and we failed to relocate it here"; + LOGGER.error(msg); + throw new Exception(msg); + } + + return vmMo; + } + return null; + } + + public VirtualDevice[] getNicDevices() { + return nicDevices; + } + } + + private class PrepareSytemVMPatchISOMethod { + private VmwareManager mgr; + private VmwareHypervisorHost hyperHost; + private VirtualMachineMO vmMo; + private VirtualDeviceConfigSpec[] deviceConfigSpecArray; + private int i; + private int ideUnitNumber; + + public PrepareSytemVMPatchISOMethod(VmwareManager mgr, VmwareHypervisorHost hyperHost, VirtualMachineMO vmMo, VirtualDeviceConfigSpec[] deviceConfigSpecArray, int i, int ideUnitNumber) { + this.mgr = mgr; + this.hyperHost = hyperHost; + this.vmMo = vmMo; + this.deviceConfigSpecArray = deviceConfigSpecArray; + this.i = i; + this.ideUnitNumber = ideUnitNumber; + } + + public int getI() { + return i; + } + + public int getIdeUnitNumber() { + return ideUnitNumber; + } + + public PrepareSytemVMPatchISOMethod invoke() throws Exception { + // attach ISO (for patching of system VM) + DatastoreMO secDsMo = getDatastoreMOForSecStore(mgr, hyperHost); + + deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); + Pair isoInfo = VmwareHelper + .prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), true, true, + ideUnitNumber++, i + 1); + deviceConfigSpecArray[i].setDevice(isoInfo.first()); + if (isoInfo.second()) { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare ISO volume at new device " + vmwareResource.getGson().toJson(isoInfo.first())); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); + } else { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare ISO volume at existing device " + vmwareResource.getGson().toJson(isoInfo.first())); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); + } + i++; + return this; + } + + } + private DatastoreMO getDatastoreMOForSecStore(VmwareManager mgr, VmwareHypervisorHost hyperHost) throws Exception { + Pair secStoreUrlAndId = mgr.getSecondaryStorageStoreUrlAndId(Long.parseLong(vmwareResource.getDcId())); + String secStoreUrl = secStoreUrlAndId.first(); + Long secStoreId = secStoreUrlAndId.second(); + if (secStoreUrl == null) { + String msg = "secondary storage for dc " + vmwareResource.getDcId() + " is not ready yet?"; + throw new Exception(msg); + } + mgr.prepareSecondaryStorageStore(secStoreUrl, secStoreId); + + ManagedObjectReference morSecDs = vmwareResource.prepareSecondaryDatastoreOnHost(secStoreUrl); + if (morSecDs == null) { + String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl; + throw new Exception(msg); + } 
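+ // Secondary storage is mounted on the host at this point; wrap the datastore so the systemvm patch ISO can be attached from it.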
+ return new DatastoreMO(hyperHost.getContext(), morSecDs); + } + + /** + // Setup ROOT/DATA disk devices + */ + private class DiskSetup { + private VirtualMachineTO vmSpec; + private DiskTO rootDiskTO; + private Pair controllerInfo; + private VmwareContext context; + private DatacenterMO dcMo; + private VmwareHypervisorHost hyperHost; + private VirtualMachineMO vmMo; + private DiskTO[] disks; + private HashMap> dataStoresDetails; + private VirtualMachineDiskInfoBuilder diskInfoBuilder; + private boolean hasSnapshot; + private VirtualDeviceConfigSpec[] deviceConfigSpecArray; + private int deviceCount; + private int ideUnitNumber; + private int scsiUnitNumber; + private int ideControllerKey; + private int scsiControllerKey; + private DiskTO[] sortedDisks; + private boolean installAsIs; + + public DiskSetup(VirtualMachineTO vmSpec, DiskTO rootDiskTO, Pair controllerInfo, VmwareContext context, DatacenterMO dcMo, VmwareHypervisorHost hyperHost, + VirtualMachineMO vmMo, DiskTO[] disks, HashMap> dataStoresDetails, VirtualMachineDiskInfoBuilder diskInfoBuilder, + boolean hasSnapshot, VirtualDeviceConfigSpec[] deviceConfigSpecArray, int deviceCount, int ideUnitNumber, int scsiUnitNumber, int ideControllerKey, int scsiControllerKey, boolean installAsIs) { + this.vmSpec = vmSpec; + this.rootDiskTO = rootDiskTO; + this.controllerInfo = controllerInfo; + this.context = context; + this.dcMo = dcMo; + this.hyperHost = hyperHost; + this.vmMo = vmMo; + this.disks = disks; + this.dataStoresDetails = dataStoresDetails; + this.diskInfoBuilder = diskInfoBuilder; + this.hasSnapshot = hasSnapshot; + this.deviceConfigSpecArray = deviceConfigSpecArray; + this.deviceCount = deviceCount; + this.ideUnitNumber = ideUnitNumber; + this.scsiUnitNumber = scsiUnitNumber; + this.ideControllerKey = ideControllerKey; + this.scsiControllerKey = scsiControllerKey; + this.installAsIs = installAsIs; + } + + public DiskTO getRootDiskTO() { + return rootDiskTO; + } + + public int getDeviceCount() { + return deviceCount; + } + + public DiskTO[] getSortedDisks() { + return sortedDisks; + } + + public DiskSetup invoke() throws Exception { + int controllerKey; + sortedDisks = sortVolumesByDeviceId(disks); + for (DiskTO vol : sortedDisks) { + if (vol.getType() == Volume.Type.ISO || installAsIs) { + continue; + } + + VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context); + controllerKey = getDiskController(matchingExistingDisk, vol, vmSpec, ideControllerKey, scsiControllerKey); + String diskController = getDiskController(vmMo, matchingExistingDisk, vol, controllerInfo); + + if (DiskControllerType.getType(diskController) == DiskControllerType.osdefault) { + diskController = vmMo.getRecommendedDiskController(null); + } + if (DiskControllerType.getType(diskController) == DiskControllerType.ide) { + controllerKey = vmMo.getIDEControllerKey(ideUnitNumber); + if (vol.getType() == Volume.Type.DATADISK) { + // Could be result of flip due to user configured setting or "osdefault" for data disks + // Ensure maximum of 2 data volumes over IDE controller, 3 includeing root volume + if (vmMo.getNumberOfVirtualDisks() > 3) { + throw new CloudRuntimeException( + "Found more than 3 virtual disks attached to this VM [" + vmMo.getVmName() + "]. 
Unable to implement the disks over " + diskController + " controller, as maximum number of devices supported over IDE controller is 4 includeing CDROM device."); + } + } + } else { + if (VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) { + scsiUnitNumber++; + } + + controllerKey = vmMo.getScsiDiskControllerKeyNoException(diskController, scsiUnitNumber); + if (controllerKey == -1) { + // This may happen for ROOT legacy VMs which doesn't have recommended disk controller when global configuration parameter 'vmware.root.disk.controller' is set to "osdefault" + // Retrieve existing controller and use. + Ternary vmScsiControllerInfo = vmMo.getScsiControllerInfo(); + DiskControllerType existingControllerType = vmScsiControllerInfo.third(); + controllerKey = vmMo.getScsiDiskControllerKeyNoException(existingControllerType.toString(), scsiUnitNumber); + } + } + if (!hasSnapshot) { + deviceConfigSpecArray[deviceCount] = new VirtualDeviceConfigSpec(); + + VolumeObjectTO volumeTO = (VolumeObjectTO)vol.getData(); + DataStoreTO primaryStore = volumeTO.getDataStore(); + Map details = vol.getDetails(); + boolean managed = false; + String iScsiName = null; + + if (details != null) { + managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + iScsiName = details.get(DiskTO.IQN); + } + + // if the storage is managed, iScsiName should not be null + String datastoreName = managed ? VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid(); + Pair volumeDsDetails = dataStoresDetails.get(datastoreName); + + assert (volumeDsDetails != null); + + String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, vol, matchingExistingDisk, dataStoresDetails); + + int deviceNumber = -1; + if (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) { + deviceNumber = ideUnitNumber % VmwareHelper.MAX_ALLOWED_DEVICES_IDE_CONTROLLER; + ideUnitNumber++; + } else { + deviceNumber = scsiUnitNumber % VmwareHelper.MAX_ALLOWED_DEVICES_SCSI_CONTROLLER; + scsiUnitNumber++; + } + VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, diskChain, volumeDsDetails.first(), deviceNumber, deviceCount + 1); + if (vol.getType() == Volume.Type.ROOT) + rootDiskTO = vol; + deviceConfigSpecArray[deviceCount].setDevice(device); + deviceConfigSpecArray[deviceCount].setOperation(VirtualDeviceConfigSpecOperation.ADD); + + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare volume at new device " + vmwareResource.getGson().toJson(device)); + + deviceCount++; + } else { + if (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) + ideUnitNumber++; + else + scsiUnitNumber++; + } + } + return this; + } + + private int getDiskController(VirtualMachineDiskInfo matchingExistingDisk, DiskTO vol, VirtualMachineTO vmSpec, int ideControllerKey, int scsiControllerKey) { + + int controllerKey; + if (matchingExistingDisk != null) { + LOGGER.info("Chose disk controller based on existing information: " + matchingExistingDisk.getDiskDeviceBusName()); + if (matchingExistingDisk.getDiskDeviceBusName().startsWith("ide")) + return ideControllerKey; + else + return scsiControllerKey; + } + + if (vol.getType() == Volume.Type.ROOT) { + Map vmDetails = vmSpec.getDetails(); + if (vmDetails != null && vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER) != null) { + if (vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER).equalsIgnoreCase("scsi")) { + LOGGER.info("Chose disk controller for vol " + vol.getType() + " -> scsi, based on root disk controller settings: " + + vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER)); + 
controllerKey = scsiControllerKey; + } else { + LOGGER.info("Chose disk controller for vol " + vol.getType() + " -> ide, based on root disk controller settings: " + + vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER)); + controllerKey = ideControllerKey; + } + } else { + LOGGER.info("Chose disk controller for vol " + vol.getType() + " -> scsi. due to null root disk controller setting"); + controllerKey = scsiControllerKey; + } + + } else { + // DATA volume always use SCSI device + LOGGER.info("Chose disk controller for vol " + vol.getType() + " -> scsi"); + controllerKey = scsiControllerKey; + } + + return controllerKey; + } + + private String getDiskController(VirtualMachineMO vmMo, VirtualMachineDiskInfo matchingExistingDisk, DiskTO vol, Pair controllerInfo) throws Exception { + int controllerKey; + DiskControllerType controllerType = DiskControllerType.none; + if (matchingExistingDisk != null) { + String currentBusName = matchingExistingDisk.getDiskDeviceBusName(); + if (currentBusName != null) { + LOGGER.info("Chose disk controller based on existing information: " + currentBusName); + if (currentBusName.startsWith("ide")) { + controllerType = DiskControllerType.ide; + } else if (currentBusName.startsWith("scsi")) { + controllerType = DiskControllerType.scsi; + } + } + if (controllerType == DiskControllerType.scsi || controllerType == DiskControllerType.none) { + Ternary vmScsiControllerInfo = vmMo.getScsiControllerInfo(); + controllerType = vmScsiControllerInfo.third(); + } + return controllerType.toString(); + } + + if (vol.getType() == Volume.Type.ROOT) { + LOGGER.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.first() + + ", based on root disk controller settings at global configuration setting."); + return controllerInfo.first(); + } else { + LOGGER.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.second() + + ", based on default data disk controller setting i.e. Operating system recommended."); // Need to bring in global configuration setting & template level setting. + return controllerInfo.second(); + } + } + + // return the finalized disk chain for startup, from top to bottom + private String[] syncDiskChain(DatacenterMO dcMo, VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO vol, VirtualMachineDiskInfo diskInfo, + HashMap> dataStoresDetails) throws Exception { + + VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); + DataStoreTO primaryStore = volumeTO.getDataStore(); + Map details = vol.getDetails(); + boolean isManaged = false; + String iScsiName = null; + + if (details != null) { + isManaged = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + iScsiName = details.get(DiskTO.IQN); + } + + // if the storage is managed, iScsiName should not be null + String datastoreName = isManaged ? 
VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid(); + Pair volumeDsDetails = dataStoresDetails.get(datastoreName); + + if (volumeDsDetails == null) { + throw new Exception("Primary datastore " + primaryStore.getUuid() + " is not mounted on host."); + } + + DatastoreMO dsMo = volumeDsDetails.second(); + + // we will honor vCenter's meta if it exists + if (diskInfo != null) { + // to deal with run-time upgrade to maintain the new datastore folder structure + String disks[] = diskInfo.getDiskChain(); + for (int i = 0; i < disks.length; i++) { + DatastoreFile file = new DatastoreFile(disks[i]); + if (!isManaged && file.getDir() != null && file.getDir().isEmpty()) { + LOGGER.info("Perform run-time datastore folder upgrade. sync " + disks[i] + " to VM folder"); + disks[i] = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, file.getFileBaseName(), VmwareManager.s_vmwareSearchExcludeFolder.value()); + } + } + return disks; + } + + final String datastoreDiskPath; + + if (isManaged) { + String vmdkPath = new DatastoreFile(volumeTO.getPath()).getFileBaseName(); + + if (volumeTO.getVolumeType() == Volume.Type.ROOT) { + if (vmdkPath == null) { + vmdkPath = volumeTO.getName(); + } + + datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, vmdkPath); + } else { + if (vmdkPath == null) { + vmdkPath = dsMo.getName(); + } + + datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + VmwareResource.VMDK_EXTENSION); + } + } else { + datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, volumeTO.getPath(), VmwareManager.s_vmwareSearchExcludeFolder.value()); + } + + if (!dsMo.fileExists(datastoreDiskPath)) { + LOGGER.warn("Volume " + volumeTO.getId() + " does not seem to exist on datastore, out of sync? 
path: " + datastoreDiskPath); + } + + return new String[]{datastoreDiskPath}; + } + } + + /** + // Setup NIC devices + */ + private class NicSetup { + private StartCommand cmd; + private VirtualMachineTO vmSpec; + private String vmInternalCSName; + private VmwareContext context; + private VmwareManager mgr; + private VmwareHypervisorHost hyperHost; + private VirtualMachineMO vmMo; + private NicTO[] nics; + private VirtualDeviceConfigSpec[] deviceConfigSpecArray; + private int deviceCount; + private int nicMask; + private int nicCount; + private Map nicUuidToDvSwitchUuid; + private VirtualDevice[] nicDevices; + + public NicSetup(StartCommand cmd, VirtualMachineTO vmSpec, String vmInternalCSName, VmwareContext context, VmwareManager mgr, VmwareHypervisorHost hyperHost, + VirtualMachineMO vmMo, NicTO[] nics, VirtualDeviceConfigSpec[] deviceConfigSpecArray, int deviceCount, VirtualDevice[] nicDevices) { + this.cmd = cmd; + this.vmSpec = vmSpec; + this.vmInternalCSName = vmInternalCSName; + this.context = context; + this.mgr = mgr; + this.hyperHost = hyperHost; + this.vmMo = vmMo; + this.nics = nics; + this.deviceConfigSpecArray = deviceConfigSpecArray; + this.deviceCount = deviceCount; + this.nicDevices = nicDevices; + } + + public int getDeviceCount() { + return deviceCount; + } + + public int getNicMask() { + return nicMask; + } + + public int getNicCount() { + return nicCount; + } + + public Map getNicUuidToDvSwitchUuid() { + return nicUuidToDvSwitchUuid; + } + + public NicSetup invoke() throws Exception { + VirtualDevice nic; + nicMask = 0; + nicCount = 0; + + if (vmSpec.getType() == VirtualMachine.Type.DomainRouter) { + doDomainRouterSetup(); + } + + VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.valueOf(vmSpec.getDetails().get(VmDetailConstants.NIC_ADAPTER)); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("VM " + vmInternalCSName + " will be started with NIC device type: " + nicDeviceType); + } + + NiciraNvpApiVersion.logNiciraApiVersion(); + + if (LOGGER.isTraceEnabled()) { + LOGGER.debug(String.format("deciding for VM '%s' to use orchestrated NICs or as is.", vmInternalCSName)); + } + nicUuidToDvSwitchUuid = new HashMap<>(); + LOGGER.info(String.format("adding %d nics to VM '%s'", nics.length, vmInternalCSName)); + for (NicTO nicTo : sortNicsByDeviceId(nics)) { + LOGGER.info("Prepare NIC device based on NicTO: " + vmwareResource.getGson().toJson(nicTo)); + + boolean configureVServiceInNexus = (nicTo.getType() == Networks.TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus")); + VirtualMachine.Type vmType = cmd.getVirtualMachine().getType(); + Pair networkInfo = vmwareResource.prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType); + if ((nicTo.getBroadcastType() != Networks.BroadcastDomainType.Lswitch) || (nicTo.getBroadcastType() == Networks.BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))) { + if (VmwareHelper.isDvPortGroup(networkInfo.first())) { + String dvSwitchUuid; + ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); + DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor); + ManagedObjectReference dvsMor = dataCenterMo.getDvSwitchMor(networkInfo.first()); + dvSwitchUuid = dataCenterMo.getDvSwitchUuid(dvsMor); + LOGGER.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid); + nic = VmwareHelper.prepareDvNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), dvSwitchUuid, nicTo.getMac(), deviceCount + 1, true, true); + if 
(nicTo.getUuid() != null) { + nicUuidToDvSwitchUuid.put(nicTo.getUuid(), dvSwitchUuid); + } + } else { + LOGGER.info("Preparing NIC device on network " + networkInfo.second()); + nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), deviceCount + 1, true, true); + } + } else { + //if NSX API VERSION >= 4.2, connect to br-int (nsx.network), do not create portgroup else previous behaviour + nic = VmwareHelper.prepareNicOpaque(vmMo, nicDeviceType, networkInfo.second(), nicTo.getMac(), deviceCount + 1, true, true); + } + + deviceConfigSpecArray[deviceCount] = new VirtualDeviceConfigSpec(); + deviceConfigSpecArray[deviceCount].setDevice(nic); + deviceConfigSpecArray[deviceCount].setOperation(VirtualDeviceConfigSpecOperation.ADD); + + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare NIC at new device " + vmwareResource.getGson().toJson(deviceConfigSpecArray[deviceCount])); + + // this is really a hacking for DomR, upon DomR startup, we will reset all the NIC allocation after eth3 + if (nicCount < 3) + nicMask |= (1 << nicCount); + + deviceCount++; + nicCount++; + } + return this; + } + + private void doDomainRouterSetup() throws Exception { + int extraPublicNics = mgr.getRouterExtraPublicNics(); + if (extraPublicNics > 0 && vmSpec.getDetails().containsKey("PeerRouterInstanceName")) { + //Set identical MAC address for RvR on extra public interfaces + String peerRouterInstanceName = vmSpec.getDetails().get("PeerRouterInstanceName"); + + VirtualMachineMO peerVmMo = hyperHost.findVmOnHyperHost(peerRouterInstanceName); + if (peerVmMo == null) { + peerVmMo = hyperHost.findVmOnPeerHyperHost(peerRouterInstanceName); + } + + if (peerVmMo != null) { + String oldMacSequence = generateMacSequence(nics); + + for (int nicIndex = nics.length - extraPublicNics; nicIndex < nics.length; nicIndex++) { + VirtualDevice nicDevice = peerVmMo.getNicDeviceByIndex(nics[nicIndex].getDeviceId()); + if (nicDevice != null) { + String mac = ((VirtualEthernetCard)nicDevice).getMacAddress(); + if (mac != null) { + LOGGER.info("Use same MAC as previous RvR, the MAC is " + mac + " for extra NIC with device id: " + nics[nicIndex].getDeviceId()); + nics[nicIndex].setMac(mac); + } + } + } + + if (!StringUtils.isBlank(vmSpec.getBootArgs())) { + String newMacSequence = generateMacSequence(nics); + vmSpec.setBootArgs(vmwareResource.replaceNicsMacSequenceInBootArgs(oldMacSequence, newMacSequence, vmSpec)); + } + } + } + } + } + + private class IsoSetup { + private VirtualMachineTO vmSpec; + private VmwareManager mgr; + private VmwareHypervisorHost hyperHost; + private VirtualMachineMO vmMo; + private DiskTO[] disks; + private DiskTO volIso; + private VirtualDeviceConfigSpec[] deviceConfigSpecArray; + private int deviceCount; + private int ideUnitNumber; + + public IsoSetup(VirtualMachineTO vmSpec, VmwareManager mgr, VmwareHypervisorHost hyperHost, VirtualMachineMO vmMo, DiskTO[] disks, DiskTO volIso, + VirtualDeviceConfigSpec[] deviceConfigSpecArray, int deviceCount, int ideUnitNumber) { + this.vmSpec = vmSpec; + this.mgr = mgr; + this.hyperHost = hyperHost; + this.vmMo = vmMo; + this.disks = disks; + this.volIso = volIso; + this.deviceConfigSpecArray = deviceConfigSpecArray; + this.deviceCount = deviceCount; + this.ideUnitNumber = ideUnitNumber; + } + + public int getDeviceCount() { + return deviceCount; + } + + public int getIdeUnitNumber() { + return ideUnitNumber; + } + + public IsoSetup invoke() throws Exception { + // + // Setup ISO device + // + + // vAPP ISO + // 
FR37 the native deploy mechs should create this for us + if (vmSpec.getOvfProperties() != null) { + if (LOGGER.isTraceEnabled()) { + // FR37 TODO add more usefull info (if we keep this bit + LOGGER.trace("adding iso for properties for 'xxx'"); + } + deviceConfigSpecArray[deviceCount] = new VirtualDeviceConfigSpec(); + Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, ideUnitNumber++, deviceCount + 1); + deviceConfigSpecArray[deviceCount].setDevice(isoInfo.first()); + if (isoInfo.second()) { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare vApp ISO volume at existing device " + vmwareResource.getGson().toJson(isoInfo.first())); + + deviceConfigSpecArray[deviceCount].setOperation(VirtualDeviceConfigSpecOperation.ADD); + } else { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare vApp ISO volume at existing device " + vmwareResource.getGson().toJson(isoInfo.first())); + + deviceConfigSpecArray[deviceCount].setOperation(VirtualDeviceConfigSpecOperation.EDIT); + } + deviceCount++; + } + + // prepare systemvm patch ISO + if (vmSpec.getType() != VirtualMachine.Type.User) { + PrepareSytemVMPatchISOMethod prepareSytemVm = new PrepareSytemVMPatchISOMethod(mgr, hyperHost, vmMo, deviceConfigSpecArray, deviceCount, ideUnitNumber); + prepareSytemVm.invoke(); + deviceCount = prepareSytemVm.getI(); + ideUnitNumber = prepareSytemVm.getIdeUnitNumber(); + } else { + // Note: we will always plug a CDROM device + if (volIso != null) { + for (DiskTO vol : disks) { + if (vol.getType() == Volume.Type.ISO) { + + TemplateObjectTO iso = (TemplateObjectTO)vol.getData(); + + if (iso.getPath() != null && !iso.getPath().isEmpty()) { + DataStoreTO imageStore = iso.getDataStore(); + if (!(imageStore instanceof NfsTO)) { + LOGGER.debug("unsupported protocol"); + throw new Exception("unsupported protocol"); + } + NfsTO nfsImageStore = (NfsTO)imageStore; + String isoPath = nfsImageStore.getUrl() + File.separator + iso.getPath(); + Pair isoDatastoreInfo = getIsoDatastoreInfo(hyperHost, isoPath); + assert (isoDatastoreInfo != null); + assert (isoDatastoreInfo.second() != null); + + deviceConfigSpecArray[deviceCount] = new VirtualDeviceConfigSpec(); + Pair isoInfo = VmwareHelper + .prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, deviceCount + 1); + deviceConfigSpecArray[deviceCount].setDevice(isoInfo.first()); + if (isoInfo.second()) { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare ISO volume at new device " + vmwareResource.getGson().toJson(isoInfo.first())); + deviceConfigSpecArray[deviceCount].setOperation(VirtualDeviceConfigSpecOperation.ADD); + } else { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare ISO volume at existing device " + vmwareResource.getGson().toJson(isoInfo.first())); + deviceConfigSpecArray[deviceCount].setOperation(VirtualDeviceConfigSpecOperation.EDIT); + } + } + deviceCount++; + } + } + } else { + deviceConfigSpecArray[deviceCount] = new VirtualDeviceConfigSpec(); + Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, ideUnitNumber++, deviceCount + 1); + deviceConfigSpecArray[deviceCount].setDevice(isoInfo.first()); + if (isoInfo.second()) { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare ISO volume at existing device " + vmwareResource.getGson().toJson(isoInfo.first())); + + deviceConfigSpecArray[deviceCount].setOperation(VirtualDeviceConfigSpecOperation.ADD); + } else { + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Prepare ISO volume at existing device " + 
vmwareResource.getGson().toJson(isoInfo.first())); + + deviceConfigSpecArray[deviceCount].setOperation(VirtualDeviceConfigSpecOperation.EDIT); + } + deviceCount++; + } + } + return this; + } + + // isoUrl sample content : + // nfs://192.168.10.231/export/home/kelven/vmware-test/secondary/template/tmpl/2/200//200-2-80f7ee58-6eff-3a2d-bcb0-59663edf6d26.iso + private Pair getIsoDatastoreInfo(VmwareHypervisorHost hyperHost, String isoUrl) throws Exception { + + assert (isoUrl != null); + int isoFileNameStartPos = isoUrl.lastIndexOf("/"); + if (isoFileNameStartPos < 0) { + throw new Exception("Invalid ISO path info"); + } + + String isoFileName = isoUrl.substring(isoFileNameStartPos); + + int templateRootPos = isoUrl.indexOf("template/tmpl"); + templateRootPos = (templateRootPos < 0 ? isoUrl.indexOf(ConfigDrive.CONFIGDRIVEDIR) : templateRootPos); + if (templateRootPos < 0) { + throw new Exception("Invalid ISO path info"); + } + + String storeUrl = isoUrl.substring(0, templateRootPos - 1); + String isoPath = isoUrl.substring(templateRootPos, isoFileNameStartPos); + + ManagedObjectReference morDs = vmwareResource.prepareSecondaryDatastoreOnHost(storeUrl); + DatastoreMO dsMo = new DatastoreMO(vmwareResource.getServiceContext(), morDs); + + return new Pair(String.format("[%s] %s%s", dsMo.getName(), isoPath, isoFileName), morDs); + } + } + + private class PrepareRunningVMForConfiguration { + private String vmInternalCSName; + private Pair controllerInfo; + private Boolean systemVm; + private VirtualMachineMO vmMo; + private DiskControllerType systemVmScsiControllerType; + private int firstScsiControllerBusNum; + private int numScsiControllerForSystemVm; + private VirtualMachineDiskInfoBuilder diskInfoBuilder; + private VirtualDevice[] nicDevices; + private boolean hasSnapshot; + + public PrepareRunningVMForConfiguration(String vmInternalCSName, Pair controllerInfo, Boolean systemVm, VirtualMachineMO vmMo, + DiskControllerType systemVmScsiControllerType, int firstScsiControllerBusNum, int numScsiControllerForSystemVm) { + this.vmInternalCSName = vmInternalCSName; + this.controllerInfo = controllerInfo; + this.systemVm = systemVm; + this.vmMo = vmMo; + this.systemVmScsiControllerType = systemVmScsiControllerType; + this.firstScsiControllerBusNum = firstScsiControllerBusNum; + this.numScsiControllerForSystemVm = numScsiControllerForSystemVm; + } + + public VirtualMachineDiskInfoBuilder getDiskInfoBuilder() { + return diskInfoBuilder; + } + + public VirtualDevice[] getNicDevices() { + return nicDevices; + } + + public boolean isHasSnapshot() { + return hasSnapshot; + } + + public PrepareRunningVMForConfiguration invoke() throws Exception { + LOGGER.info("VM " + vmInternalCSName + " already exists, tear down devices for reconfiguration"); + if (VmwareResource.getVmPowerState(vmMo) != VirtualMachine.PowerState.PowerOff) { + vmMo.safePowerOff(vmwareResource.getShutdownWaitMs()); + } + + // retrieve disk information before we tear down + diskInfoBuilder = vmMo.getDiskInfoBuilder(); + hasSnapshot = vmMo.hasSnapshot(); + nicDevices = vmMo.getNicDevices(); + // FR37 - only tear nics, and add nics per the provided nics list + if (LOGGER.isTraceEnabled()) { + String netMsg = "tearing down networks :"; + for (VirtualDevice nic : vmMo.getNicDevices()) { + netMsg += nic.getDeviceInfo().getLabel()+":"; + } + LOGGER.trace(netMsg); + } + // FR37 save vmMo.getNicDevices() to ensure recreation? 
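+ // Only NIC devices are torn down here so they can be re-created from the provided nics list;
+ // virtual disks are intentionally left attached (see the commented-out VirtualDisk teardown just below).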
+ vmMo.tearDownDevices(new Class[] {VirtualEthernetCard.class}); + /* + if (!hasSnapshot) { + // FR37 do we need to do this, ever?: + vmMo.tearDownDevices(new Class[] {VirtualDisk.class}); + } + */ + if (systemVm) { + ensureScsiDiskControllers(vmMo, systemVmScsiControllerType.toString(), numScsiControllerForSystemVm, firstScsiControllerBusNum); + } else { + ensureDiskControllers(vmMo, controllerInfo); + } + return this; + } + } +} \ No newline at end of file diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java index 3ed5939aac55..d2306a844a11 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java @@ -60,14 +60,13 @@ public static VmwareContext create(String vCenterAddress, String vCenterUserName assert (vCenterUserName != null); assert (vCenterPassword != null); - String serviceUrl = "https://" + vCenterAddress + "/sdk/vimService"; if (s_logger.isDebugEnabled()) - s_logger.debug("initialize VmwareContext. url: " + serviceUrl + ", username: " + vCenterUserName + ", password: " + + s_logger.debug("initialize VmwareContext. vCenter: " + vCenterAddress + ", username: " + vCenterUserName + ", password: " + StringUtils.getMaskedPasswordForDisplay(vCenterPassword)); VmwareClient vimClient = new VmwareClient(vCenterAddress + "-" + s_seq++); vimClient.setVcenterSessionTimeout(s_vmwareMgr.getVcenterSessionTimeout()); - vimClient.connect(serviceUrl, vCenterUserName, vCenterPassword); + vimClient.connect(vCenterAddress, vCenterUserName, vCenterPassword); VmwareContext context = new VmwareContext(vimClient, vCenterAddress); context.registerStockObject(VmwareManager.CONTEXT_STOCK_NAME, s_vmwareMgr); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 9051981291ff..ffc9f032bb53 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -16,51 +16,6 @@ // under the License. 
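Side note on the VmwareContextFactory hunk above: the caller no longer builds the SDK endpoint itself and now hands the bare vCenter address to VmwareClient.connect(), so the "https://<address>/sdk/vimService" derivation is presumably performed inside VmwareClient, which is outside the lines shown here. A minimal, self-contained sketch of that assumption, mirroring the URL shape the removed line produced; the class and method names below are hypothetical and exist only for illustration:

// Hypothetical sketch: reproduces the endpoint string that VmwareContextFactory used to build
// and that the client is now assumed to derive on its own. Not the actual VmwareClient code.
public final class VimServiceUrlSketch {

    private VimServiceUrlSketch() {
    }

    // Derive the vSphere SDK endpoint from a bare vCenter address, mirroring the removed
    // caller-side concatenation in VmwareContextFactory.create().
    static String toServiceUrl(String vCenterAddress) {
        return "https://" + vCenterAddress + "/sdk/vimService";
    }

    public static void main(String[] args) {
        // Prints: https://vcenter01.lab.local/sdk/vimService
        System.out.println(toServiceUrl("vcenter01.lab.local"));
    }
}

If VmwareClient still expected a full service URL, the connect(vCenterAddress, ...) call in that hunk would rely on a matching change in vmware-base, which is not part of this section.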
package com.cloud.hypervisor.vmware.resource; -import java.io.File; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.URI; -import java.net.URL; -import java.nio.channels.SocketChannel; -import java.rmi.RemoteException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.Date; -import java.util.EnumMap; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.TimeZone; -import java.util.UUID; - -import javax.naming.ConfigurationException; -import javax.xml.datatype.XMLGregorianCalendar; - -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.storage.command.CopyCommand; -import org.apache.cloudstack.storage.command.StorageSubSystemCommand; -import org.apache.cloudstack.storage.configdrive.ConfigDrive; -import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.storage.to.TemplateObjectTO; -import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; -import org.apache.cloudstack.vm.UnmanagedInstanceTO; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.math.NumberUtils; -import org.apache.log4j.Logger; -import org.apache.log4j.NDC; -import org.joda.time.Duration; - import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; import com.cloud.agent.api.AttachIsoAnswer; @@ -146,7 +101,6 @@ import com.cloud.agent.api.SetupAnswer; import com.cloud.agent.api.SetupCommand; import com.cloud.agent.api.SetupGuestNetworkCommand; -import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StartCommand; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; @@ -183,7 +137,6 @@ import com.cloud.agent.api.storage.ResizeVolumeAnswer; import com.cloud.agent.api.storage.ResizeVolumeCommand; import com.cloud.agent.api.to.DataStoreTO; -import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.NicTO; @@ -200,6 +153,8 @@ import com.cloud.host.Host.Type; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.guru.VMwareGuru; +import com.cloud.hypervisor.vmware.manager.ContentLibraryService; +import com.cloud.hypervisor.vmware.manager.ContentLibraryServiceImpl; import com.cloud.hypervisor.vmware.manager.VmwareHostService; import com.cloud.hypervisor.vmware.manager.VmwareManager; import com.cloud.hypervisor.vmware.manager.VmwareStorageMount; @@ -217,7 +172,7 @@ import com.cloud.hypervisor.vmware.mo.HostStorageSystemMO; import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import com.cloud.hypervisor.vmware.mo.NetworkDetails; -import com.cloud.hypervisor.vmware.mo.TaskMO; +import com.cloud.hypervisor.vmware.mo.StoragepodMO; import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder; import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; @@ -256,7 +211,6 @@ import com.cloud.utils.mgmt.JmxUtil; import com.cloud.utils.mgmt.PropertyMapDynamicBean; import 
com.cloud.utils.net.NetUtils; -import com.cloud.utils.nicira.nvp.plugin.NiciraNvpApiVersion; import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.VirtualMachine; @@ -267,12 +221,8 @@ import com.google.gson.Gson; import com.vmware.vim25.AboutInfo; import com.vmware.vim25.ArrayUpdateOperation; -import com.vmware.vim25.BoolPolicy; import com.vmware.vim25.ComputeResourceSummary; import com.vmware.vim25.CustomFieldStringValue; -import com.vmware.vim25.DVPortConfigInfo; -import com.vmware.vim25.DVPortConfigSpec; -import com.vmware.vim25.DasVmPriority; import com.vmware.vim25.DatastoreInfo; import com.vmware.vim25.DatastoreSummary; import com.vmware.vim25.DistributedVirtualPort; @@ -297,6 +247,7 @@ import com.vmware.vim25.PerfMetricSeries; import com.vmware.vim25.PerfQuerySpec; import com.vmware.vim25.RuntimeFaultFaultMsg; +import com.vmware.vim25.StoragePodSummary; import com.vmware.vim25.ToolsUnavailableFaultMsg; import com.vmware.vim25.VAppOvfSectionInfo; import com.vmware.vim25.VAppOvfSectionSpec; @@ -316,11 +267,9 @@ import com.vmware.vim25.VirtualEthernetCard; import com.vmware.vim25.VirtualEthernetCardDistributedVirtualPortBackingInfo; import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo; -import com.vmware.vim25.VirtualEthernetCardOpaqueNetworkBackingInfo; import com.vmware.vim25.VirtualIDEController; -import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualMachineBootOptions; -import com.vmware.vim25.VirtualMachineFileInfo; +import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualMachineFileLayoutEx; import com.vmware.vim25.VirtualMachineFileLayoutExFileInfo; import com.vmware.vim25.VirtualMachineGuestOsIdentifier; @@ -332,7 +281,6 @@ import com.vmware.vim25.VirtualMachineVideoCard; import com.vmware.vim25.VirtualPCNet32; import com.vmware.vim25.VirtualSCSIController; -import com.vmware.vim25.VirtualUSBController; import com.vmware.vim25.VirtualVmxnet2; import com.vmware.vim25.VirtualVmxnet3; import com.vmware.vim25.VmConfigInfo; @@ -340,19 +288,61 @@ import com.vmware.vim25.VmfsDatastoreInfo; import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec; +import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.command.StorageSubSystemCommand; +import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; +import org.apache.cloudstack.vm.UnmanagedInstanceTO; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang.math.NumberUtils; +import org.apache.log4j.Logger; +import org.apache.log4j.NDC; +import org.joda.time.Duration; + +import javax.naming.ConfigurationException; +import javax.xml.datatype.XMLGregorianCalendar; +import java.io.File; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URL; +import java.nio.channels.SocketChannel; +import java.rmi.RemoteException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.EnumMap; +import 
java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.TimeZone; +import java.util.UUID; public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer { private static final Logger s_logger = Logger.getLogger(VmwareResource.class); public static final String VMDK_EXTENSION = ".vmdk"; private static final Random RANDOM = new Random(System.nanoTime()); + private final StartCommandExecutor startCommandExecutor = new StartCommandExecutor(this); + + // FR37 Does this need to be a setting? + protected final int shutdownWaitMs = 300000; // wait up to 5 minutes for shutdown protected String _name; protected final long _opsTimeout = 900000; // 15 minutes time out to time - protected final int _shutdownWaitMs = 300000; // wait up to 5 minutes for shutdown - // out an operation protected final int _retry = 24; protected final int _sleep = 10000; @@ -360,7 +350,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected final int MazCmdMBean = 100; protected String _url; - protected String _dcId; + protected String dcId; protected String _pod; protected String _cluster; protected String _username; @@ -375,18 +365,20 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected Map _vsmCredentials = null; protected int _portsPerDvPortGroup; protected boolean _fullCloneFlag = false; - protected boolean _instanceNameFlag = false; + // FR37 move to global seting(s) + protected static boolean instanceNameFlag = false; protected boolean _recycleHungWorker = false; protected DiskControllerType _rootDiskController = DiskControllerType.ide; + protected final ContentLibraryService contentLibraryService = new ContentLibraryServiceImpl(); protected ManagedObjectReference _morHyperHost; protected final static ThreadLocal s_serviceContext = new ThreadLocal(); protected String _hostName; protected List _cmdMBeans = new ArrayList(); - protected Gson _gson; + protected Gson gson; protected volatile long _cmdSequence = 1; @@ -408,12 +400,24 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected static final String s_relativePathSystemVmKeyFileInstallDir = "scripts/vm/systemvm/id_rsa.cloud"; protected static final String s_defaultPathSystemVmKeyFile = "/usr/share/cloudstack-common/scripts/vm/systemvm/id_rsa.cloud"; - public Gson getGson() { - return _gson; + public VmwareResource() { + gson = GsonHelper.getGsonLogger(); } - public VmwareResource() { - _gson = GsonHelper.getGsonLogger(); + public VmwareStorageProcessor getStorageProcessor() { + return _storageProcessor; + } + + public int getShutdownWaitMs() { + return shutdownWaitMs; + } + + public String getDcId() { + return dcId; + } + + public Gson getGson() { + return gson; } private String getCommandLogTitle(Command cmd) { @@ -443,7 +447,7 @@ public Answer executeRequest(Command cmd) { Date startTime = DateUtil.currentGMTTime(); PropertyMapDynamicBean mbean = new PropertyMapDynamicBean(); mbean.addProp("StartTime", DateUtil.getDateDisplayString(TimeZone.getDefault(), startTime)); - mbean.addProp("Command", _gson.toJson(cmd)); + mbean.addProp("Command", gson.toJson(cmd)); mbean.addProp("Sequence", String.valueOf(cmdSequence)); mbean.addProp("Name", cmd.getClass().getSimpleName()); @@ -529,7 +533,7 @@ public Answer executeRequest(Command cmd) { } else if (clz == NetworkUsageCommand.class) { answer = 
execute((NetworkUsageCommand) cmd); } else if (clz == StartCommand.class) { - answer = execute((StartCommand) cmd); + answer = startCommandExecutor.execute((StartCommand)cmd); } else if (clz == CheckSshCommand.class) { answer = execute((CheckSshCommand) cmd); } else if (clz == CheckNetworkCommand.class) { @@ -575,7 +579,7 @@ public Answer executeRequest(Command cmd) { Date doneTime = DateUtil.currentGMTTime(); mbean.addProp("DoneTime", DateUtil.getDateDisplayString(TimeZone.getDefault(), doneTime)); - mbean.addProp("Answer", _gson.toJson(answer)); + mbean.addProp("Answer", gson.toJson(answer)); synchronized (this) { try { @@ -636,7 +640,7 @@ protected void reconfigureProcessorByHandler(EnumMap validatedDisks = new ArrayList(); - - for (DiskTO vol : disks) { - if (vol.getType() != Volume.Type.ISO) { - VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); - DataStoreTO primaryStore = volumeTO.getDataStore(); - if (primaryStore.getUuid() != null && !primaryStore.getUuid().isEmpty()) { - validatedDisks.add(vol); - } - } else if (vol.getType() == Volume.Type.ISO) { - TemplateObjectTO templateTO = (TemplateObjectTO) vol.getData(); - if (templateTO.getPath() != null && !templateTO.getPath().isEmpty()) { - validatedDisks.add(vol); - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Drop invalid disk option, volumeTO: " + _gson.toJson(vol)); - } - } - } - Collections.sort(validatedDisks, (d1, d2) -> d1.getDiskSeq().compareTo(d2.getDiskSeq())); - return validatedDisks.toArray(new DiskTO[0]); - } - - private static DiskTO getIsoDiskTO(DiskTO[] disks) { - for (DiskTO vol : disks) { - if (vol.getType() == Volume.Type.ISO) { - return vol; - } - } - return null; - } - protected ScaleVmAnswer execute(ScaleVmCommand cmd) { VmwareContext context = getServiceContext(); @@ -1651,770 +1612,6 @@ protected ScaleVmAnswer execute(ScaleVmCommand cmd) { return new ScaleVmAnswer(cmd, true, null); } - protected void ensureDiskControllers(VirtualMachineMO vmMo, Pair controllerInfo) throws Exception { - if (vmMo == null) { - return; - } - - String msg; - String rootDiskController = controllerInfo.first(); - String dataDiskController = controllerInfo.second(); - String scsiDiskController; - String recommendedDiskController = null; - - if (VmwareHelper.isControllerOsRecommended(dataDiskController) || VmwareHelper.isControllerOsRecommended(rootDiskController)) { - recommendedDiskController = vmMo.getRecommendedDiskController(null); - } - scsiDiskController = HypervisorHostHelper.getScsiController(new Pair(rootDiskController, dataDiskController), recommendedDiskController); - if (scsiDiskController == null) { - return; - } - - vmMo.getScsiDeviceControllerKeyNoException(); - // This VM needs SCSI controllers. - // Get count of existing scsi controllers. Helps not to attempt to create more than the maximum allowed 4 - // Get maximum among the bus numbers in use by scsi controllers. Safe to pick maximum, because we always go sequential allocating bus numbers. - Ternary scsiControllerInfo = vmMo.getScsiControllerInfo(); - int requiredNumScsiControllers = VmwareHelper.MAX_SCSI_CONTROLLER_COUNT - scsiControllerInfo.first(); - int availableBusNum = scsiControllerInfo.second() + 1; // method returned current max. bus number - - if (requiredNumScsiControllers == 0) { - return; - } - if (scsiControllerInfo.first() > 0) { - // For VMs which already have a SCSI controller, do NOT attempt to add any more SCSI controllers & return the sub type. 
- // For Legacy VMs would have only 1 LsiLogic Parallel SCSI controller, and doesn't require more. - // For VMs created post device ordering support, 4 SCSI subtype controllers are ensured during deployment itself. No need to add more. - // For fresh VM deployment only, all required controllers should be ensured. - return; - } - ensureScsiDiskControllers(vmMo, scsiDiskController, requiredNumScsiControllers, availableBusNum); - } - - private void ensureScsiDiskControllers(VirtualMachineMO vmMo, String scsiDiskController, int requiredNumScsiControllers, int availableBusNum) throws Exception { - // Pick the sub type of scsi - if (DiskControllerType.getType(scsiDiskController) == DiskControllerType.pvscsi) { - if (!vmMo.isPvScsiSupported()) { - String msg = "This VM doesn't support Vmware Paravirtual SCSI controller for virtual disks, because the virtual hardware version is less than 7."; - throw new Exception(msg); - } - vmMo.ensurePvScsiDeviceController(requiredNumScsiControllers, availableBusNum); - } else if (DiskControllerType.getType(scsiDiskController) == DiskControllerType.lsisas1068) { - vmMo.ensureLsiLogicSasDeviceControllers(requiredNumScsiControllers, availableBusNum); - } else if (DiskControllerType.getType(scsiDiskController) == DiskControllerType.buslogic) { - vmMo.ensureBusLogicDeviceControllers(requiredNumScsiControllers, availableBusNum); - } else if (DiskControllerType.getType(scsiDiskController) == DiskControllerType.lsilogic) { - vmMo.ensureLsiLogicDeviceControllers(requiredNumScsiControllers, availableBusNum); - } - } - - protected StartAnswer execute(StartCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource StartCommand: " + _gson.toJson(cmd)); - } - - VirtualMachineTO vmSpec = cmd.getVirtualMachine(); - boolean vmAlreadyExistsInVcenter = false; - - String existingVmName = null; - VirtualMachineFileInfo existingVmFileInfo = null; - VirtualMachineFileLayoutEx existingVmFileLayout = null; - List existingDatastores = new ArrayList(); - - Pair names = composeVmNames(vmSpec); - String vmInternalCSName = names.first(); - String vmNameOnVcenter = names.second(); - String dataDiskController = vmSpec.getDetails().get(VmDetailConstants.DATA_DISK_CONTROLLER); - String rootDiskController = vmSpec.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER); - DiskTO rootDiskTO = null; - String bootMode = null; - if (vmSpec.getDetails().containsKey(VmDetailConstants.BOOT_MODE)) { - bootMode = vmSpec.getDetails().get(VmDetailConstants.BOOT_MODE); - } - if (null == bootMode) { - bootMode = ApiConstants.BootType.BIOS.toString(); - } - - // If root disk controller is scsi, then data disk controller would also be scsi instead of using 'osdefault' - // This helps avoid mix of different scsi subtype controllers in instance. 
- if (DiskControllerType.osdefault == DiskControllerType.getType(dataDiskController) && DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) { - dataDiskController = DiskControllerType.scsi.toString(); - } - - // Validate the controller types - dataDiskController = DiskControllerType.getType(dataDiskController).toString(); - rootDiskController = DiskControllerType.getType(rootDiskController).toString(); - - if (DiskControllerType.getType(rootDiskController) == DiskControllerType.none) { - throw new CloudRuntimeException("Invalid root disk controller detected : " + rootDiskController); - } - if (DiskControllerType.getType(dataDiskController) == DiskControllerType.none) { - throw new CloudRuntimeException("Invalid data disk controller detected : " + dataDiskController); - } - - Pair controllerInfo = new Pair(rootDiskController, dataDiskController); - - Boolean systemVm = vmSpec.getType().isUsedBySystem(); - // Thus, vmInternalCSName always holds i-x-y, the cloudstack generated internal VM name. - VmwareContext context = getServiceContext(); - DatacenterMO dcMo = null; - try { - VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME); - - VmwareHypervisorHost hyperHost = getHyperHost(context); - dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter()); - - // Validate VM name is unique in Datacenter - VirtualMachineMO vmInVcenter = dcMo.checkIfVmAlreadyExistsInVcenter(vmNameOnVcenter, vmInternalCSName); - if (vmInVcenter != null) { - vmAlreadyExistsInVcenter = true; - String msg = "VM with name: " + vmNameOnVcenter + " already exists in vCenter."; - s_logger.error(msg); - throw new Exception(msg); - } - String guestOsId = translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs(), vmSpec.getPlatformEmulator()).value(); - DiskTO[] disks = validateDisks(vmSpec.getDisks()); - assert (disks.length > 0); - NicTO[] nics = vmSpec.getNics(); - - HashMap> dataStoresDetails = inferDatastoreDetailsFromDiskInfo(hyperHost, context, disks, cmd); - if ((dataStoresDetails == null) || (dataStoresDetails.isEmpty())) { - String msg = "Unable to locate datastore details of the volumes to be attached"; - s_logger.error(msg); - throw new Exception(msg); - } - - DatastoreMO dsRootVolumeIsOn = getDatastoreThatRootDiskIsOn(dataStoresDetails, disks); - if (dsRootVolumeIsOn == null) { - String msg = "Unable to locate datastore details of root volume"; - s_logger.error(msg); - throw new Exception(msg); - } - - VirtualMachineDiskInfoBuilder diskInfoBuilder = null; - VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); - DiskControllerType systemVmScsiControllerType = DiskControllerType.lsilogic; - int firstScsiControllerBusNum = 0; - int numScsiControllerForSystemVm = 1; - boolean hasSnapshot = false; - if (vmMo != null) { - s_logger.info("VM " + vmInternalCSName + " already exists, tear down devices for reconfiguration"); - if (getVmPowerState(vmMo) != PowerState.PowerOff) - vmMo.safePowerOff(_shutdownWaitMs); - - // retrieve disk information before we tear down - diskInfoBuilder = vmMo.getDiskInfoBuilder(); - hasSnapshot = vmMo.hasSnapshot(); - if (!hasSnapshot) - vmMo.tearDownDevices(new Class[]{VirtualDisk.class, VirtualEthernetCard.class}); - else - vmMo.tearDownDevices(new Class[]{VirtualEthernetCard.class}); - if (systemVm) { - ensureScsiDiskControllers(vmMo, systemVmScsiControllerType.toString(), numScsiControllerForSystemVm, firstScsiControllerBusNum); - } else { - ensureDiskControllers(vmMo, controllerInfo); - } - } 
else { - ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter(); - assert (morDc != null); - - vmMo = hyperHost.findVmOnPeerHyperHost(vmInternalCSName); - if (vmMo != null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Found vm " + vmInternalCSName + " at other host, relocate to " + hyperHost.getHyperHostName()); - } - - takeVmFromOtherHyperHost(hyperHost, vmInternalCSName); - - if (getVmPowerState(vmMo) != PowerState.PowerOff) - vmMo.safePowerOff(_shutdownWaitMs); - - diskInfoBuilder = vmMo.getDiskInfoBuilder(); - hasSnapshot = vmMo.hasSnapshot(); - if (!hasSnapshot) - vmMo.tearDownDevices(new Class[]{VirtualDisk.class, VirtualEthernetCard.class}); - else - vmMo.tearDownDevices(new Class[]{VirtualEthernetCard.class}); - - if (systemVm) { - // System volumes doesn't require more than 1 SCSI controller as there is no requirement for data volumes. - ensureScsiDiskControllers(vmMo, systemVmScsiControllerType.toString(), numScsiControllerForSystemVm, firstScsiControllerBusNum); - } else { - ensureDiskControllers(vmMo, controllerInfo); - } - } else { - // If a VM with the same name is found in a different cluster in the DC, unregister the old VM and configure a new VM (cold-migration). - VirtualMachineMO existingVmInDc = dcMo.findVm(vmInternalCSName); - if (existingVmInDc != null) { - s_logger.debug("Found VM: " + vmInternalCSName + " on a host in a different cluster. Unregistering the exisitng VM."); - existingVmName = existingVmInDc.getName(); - existingVmFileInfo = existingVmInDc.getFileInfo(); - existingVmFileLayout = existingVmInDc.getFileLayout(); - existingDatastores = existingVmInDc.getAllDatastores(); - existingVmInDc.unregisterVm(); - } - Pair rootDiskDataStoreDetails = null; - for (DiskTO vol : disks) { - if (vol.getType() == Volume.Type.ROOT) { - Map details = vol.getDetails(); - boolean managed = false; - - if (details != null) { - managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); - } - - if (managed) { - String datastoreName = VmwareResource.getDatastoreName(details.get(DiskTO.IQN)); - - rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName); - } else { - DataStoreTO primaryStore = vol.getData().getDataStore(); - - rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid()); - } - } - } - - assert (vmSpec.getMinSpeed() != null) && (rootDiskDataStoreDetails != null); - - boolean vmFolderExists = rootDiskDataStoreDetails.second().folderExists(String.format("[%s]", rootDiskDataStoreDetails.second().getName()), vmNameOnVcenter); - String vmxFileFullPath = dsRootVolumeIsOn.searchFileInSubFolders(vmNameOnVcenter + ".vmx", false, VmwareManager.s_vmwareSearchExcludeFolder.value()); - if (vmFolderExists && vmxFileFullPath != null) { // VM can be registered only if .vmx is present. - registerVm(vmNameOnVcenter, dsRootVolumeIsOn); - vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); - if (vmMo != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found registered vm " + vmInternalCSName + " at host " + hyperHost.getHyperHostName()); - } - } - tearDownVm(vmMo); - } else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), getReservedCpuMHZ(vmSpec), - vmSpec.getLimitCpuUse(), (int) (vmSpec.getMaxRam() / ResourceType.bytesToMiB), getReservedMemoryMb(vmSpec), guestOsId, rootDiskDataStoreDetails.first(), false, - controllerInfo, systemVm)) { - throw new Exception("Failed to create VM. 
vmName: " + vmInternalCSName); - } - } - - vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); - if (vmMo == null) { - throw new Exception("Failed to find the newly create or relocated VM. vmName: " + vmInternalCSName); - } - } - - int totalChangeDevices = disks.length + nics.length; - - DiskTO volIso = null; - if (vmSpec.getType() != VirtualMachine.Type.User) { - // system VM needs a patch ISO - totalChangeDevices++; - } else { - volIso = getIsoDiskTO(disks); - if (volIso == null) - totalChangeDevices++; - } - - VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - - VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), getReservedCpuMHZ(vmSpec), (int) (vmSpec.getMaxRam() / (1024 * 1024)), - getReservedMemoryMb(vmSpec), guestOsId, vmSpec.getLimitCpuUse()); - - // Check for multi-cores per socket settings - int numCoresPerSocket = 1; - String coresPerSocket = vmSpec.getDetails().get(VmDetailConstants.CPU_CORE_PER_SOCKET); - if (coresPerSocket != null) { - String apiVersion = HypervisorHostHelper.getVcenterApiVersion(vmMo.getContext()); - // Property 'numCoresPerSocket' is supported since vSphere API 5.0 - if (apiVersion.compareTo("5.0") >= 0) { - numCoresPerSocket = NumbersUtil.parseInt(coresPerSocket, 1); - vmConfigSpec.setNumCoresPerSocket(numCoresPerSocket); - } - } - - // Check for hotadd settings - vmConfigSpec.setMemoryHotAddEnabled(vmMo.isMemoryHotAddSupported(guestOsId)); - - String hostApiVersion = ((HostMO) hyperHost).getHostAboutInfo().getApiVersion(); - if (numCoresPerSocket > 1 && hostApiVersion.compareTo("5.0") < 0) { - s_logger.warn("Dynamic scaling of CPU is not supported for Virtual Machines with multi-core vCPUs in case of ESXi hosts 4.1 and prior. Hence CpuHotAdd will not be" - + " enabled for Virtual Machine: " + vmInternalCSName); - vmConfigSpec.setCpuHotAddEnabled(false); - } else { - vmConfigSpec.setCpuHotAddEnabled(vmMo.isCpuHotAddSupported(guestOsId)); - } - - configNestedHVSupport(vmMo, vmSpec, vmConfigSpec); - - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices]; - int i = 0; - int ideUnitNumber = 0; - int scsiUnitNumber = 0; - int ideControllerKey = vmMo.getIDEDeviceControllerKey(); - int scsiControllerKey = vmMo.getScsiDeviceControllerKeyNoException(); - int controllerKey; - - // - // Setup ISO device - // - - // prepare systemvm patch ISO - if (vmSpec.getType() != VirtualMachine.Type.User) { - // attach ISO (for patching of system VM) - Pair secStoreUrlAndId = mgr.getSecondaryStorageStoreUrlAndId(Long.parseLong(_dcId)); - String secStoreUrl = secStoreUrlAndId.first(); - Long secStoreId = secStoreUrlAndId.second(); - if (secStoreUrl == null) { - String msg = "secondary storage for dc " + _dcId + " is not ready yet?"; - throw new Exception(msg); - } - mgr.prepareSecondaryStorageStore(secStoreUrl, secStoreId); - - ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl); - if (morSecDs == null) { - String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl; - throw new Exception(msg); - } - DatastoreMO secDsMo = new DatastoreMO(hyperHost.getContext(), morSecDs); - - deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, - String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), true, true, ideUnitNumber++, i + 1); - deviceConfigSpecArray[i].setDevice(isoInfo.first()); - if (isoInfo.second()) { - if 
(s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); - } - i++; - } else { - // Note: we will always plug a CDROM device - if (volIso != null) { - for (DiskTO vol : disks) { - if (vol.getType() == Volume.Type.ISO) { - - TemplateObjectTO iso = (TemplateObjectTO) vol.getData(); - - if (iso.getPath() != null && !iso.getPath().isEmpty()) { - DataStoreTO imageStore = iso.getDataStore(); - if (!(imageStore instanceof NfsTO)) { - s_logger.debug("unsupported protocol"); - throw new Exception("unsupported protocol"); - } - NfsTO nfsImageStore = (NfsTO) imageStore; - String isoPath = nfsImageStore.getUrl() + File.separator + iso.getPath(); - Pair isoDatastoreInfo = getIsoDatastoreInfo(hyperHost, isoPath); - assert (isoDatastoreInfo != null); - assert (isoDatastoreInfo.second() != null); - - deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - Pair isoInfo = - VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, i + 1); - deviceConfigSpecArray[i].setDevice(isoInfo.first()); - if (isoInfo.second()) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); - } - } - i++; - } - } - } else { - deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, ideUnitNumber++, i + 1); - deviceConfigSpecArray[i].setDevice(isoInfo.first()); - if (isoInfo.second()) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); - } - i++; - } - } - - - // - // Setup ROOT/DATA disk devices - // - DiskTO[] sortedDisks = sortVolumesByDeviceId(disks); - for (DiskTO vol : sortedDisks) { - if (vol.getType() == Volume.Type.ISO) - continue; - - VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context); - controllerKey = getDiskController(matchingExistingDisk, vol, vmSpec, ideControllerKey, scsiControllerKey); - String diskController = getDiskController(vmMo, matchingExistingDisk, vol, new Pair(rootDiskController, dataDiskController)); - - if (DiskControllerType.getType(diskController) == DiskControllerType.osdefault) { - diskController = vmMo.getRecommendedDiskController(null); - } - if (DiskControllerType.getType(diskController) == DiskControllerType.ide) { - controllerKey = vmMo.getIDEControllerKey(ideUnitNumber); - if (vol.getType() == Volume.Type.DATADISK) { - // Could be result of flip due to user configured setting or "osdefault" for data disks - // Ensure 
maximum of 2 data volumes over IDE controller, 3 includeing root volume - if (vmMo.getNumberOfVirtualDisks() > 3) { - throw new CloudRuntimeException("Found more than 3 virtual disks attached to this VM [" + vmMo.getVmName() + "]. Unable to implement the disks over " - + diskController + " controller, as maximum number of devices supported over IDE controller is 4 includeing CDROM device."); - } - } - } else { - if (VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) { - scsiUnitNumber++; - } - - controllerKey = vmMo.getScsiDiskControllerKeyNoException(diskController, scsiUnitNumber); - if (controllerKey == -1) { - // This may happen for ROOT legacy VMs which doesn't have recommended disk controller when global configuration parameter 'vmware.root.disk.controller' is set to "osdefault" - // Retrieve existing controller and use. - Ternary vmScsiControllerInfo = vmMo.getScsiControllerInfo(); - DiskControllerType existingControllerType = vmScsiControllerInfo.third(); - controllerKey = vmMo.getScsiDiskControllerKeyNoException(existingControllerType.toString(), scsiUnitNumber); - } - } - if (!hasSnapshot) { - deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - - VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); - DataStoreTO primaryStore = volumeTO.getDataStore(); - Map details = vol.getDetails(); - boolean managed = false; - String iScsiName = null; - - if (details != null) { - managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); - iScsiName = details.get(DiskTO.IQN); - } - - // if the storage is managed, iScsiName should not be null - String datastoreName = managed ? VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid(); - Pair volumeDsDetails = dataStoresDetails.get(datastoreName); - - assert (volumeDsDetails != null); - - String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, vol, matchingExistingDisk, dataStoresDetails); - - int deviceNumber = -1; - if (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) { - deviceNumber = ideUnitNumber % VmwareHelper.MAX_ALLOWED_DEVICES_IDE_CONTROLLER; - ideUnitNumber++; - } else { - deviceNumber = scsiUnitNumber % VmwareHelper.MAX_ALLOWED_DEVICES_SCSI_CONTROLLER; - scsiUnitNumber++; - } - - VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, diskChain, volumeDsDetails.first(), deviceNumber, i + 1); - - if (vol.getType() == Volume.Type.ROOT) - rootDiskTO = vol; - deviceConfigSpecArray[i].setDevice(device); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare volume at new device " + _gson.toJson(device)); - - i++; - } else { - if (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) - ideUnitNumber++; - else - scsiUnitNumber++; - } - } - - // - // Setup USB devices - // - if (guestOsId.startsWith("darwin")) { //Mac OS - VirtualDevice[] devices = vmMo.getMatchedDevices(new Class[]{VirtualUSBController.class}); - if (devices.length == 0) { - s_logger.debug("No USB Controller device on VM Start. Add USB Controller device for Mac OS VM " + vmInternalCSName); - - //For Mac OS X systems, the EHCI+UHCI controller is enabled by default and is required for USB mouse and keyboard access. 
- VirtualDevice usbControllerDevice = VmwareHelper.prepareUSBControllerDevice(); - deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - deviceConfigSpecArray[i].setDevice(usbControllerDevice); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare USB controller at new device " + _gson.toJson(deviceConfigSpecArray[i])); - - i++; - } else { - s_logger.debug("USB Controller device exists on VM Start for Mac OS VM " + vmInternalCSName); - } - } - - // - // Setup NIC devices - // - VirtualDevice nic; - int nicMask = 0; - int nicCount = 0; - - if (vmSpec.getType() == VirtualMachine.Type.DomainRouter) { - int extraPublicNics = mgr.getRouterExtraPublicNics(); - if (extraPublicNics > 0 && vmSpec.getDetails().containsKey("PeerRouterInstanceName")) { - //Set identical MAC address for RvR on extra public interfaces - String peerRouterInstanceName = vmSpec.getDetails().get("PeerRouterInstanceName"); - - VirtualMachineMO peerVmMo = hyperHost.findVmOnHyperHost(peerRouterInstanceName); - if (peerVmMo == null) { - peerVmMo = hyperHost.findVmOnPeerHyperHost(peerRouterInstanceName); - } - - if (peerVmMo != null) { - String oldMacSequence = generateMacSequence(nics); - - for (int nicIndex = nics.length - extraPublicNics; nicIndex < nics.length; nicIndex++) { - VirtualDevice nicDevice = peerVmMo.getNicDeviceByIndex(nics[nicIndex].getDeviceId()); - if (nicDevice != null) { - String mac = ((VirtualEthernetCard) nicDevice).getMacAddress(); - if (mac != null) { - s_logger.info("Use same MAC as previous RvR, the MAC is " + mac + " for extra NIC with device id: " + nics[nicIndex].getDeviceId()); - nics[nicIndex].setMac(mac); - } - } - } - - if (!StringUtils.isBlank(vmSpec.getBootArgs())) { - String newMacSequence = generateMacSequence(nics); - vmSpec.setBootArgs(replaceNicsMacSequenceInBootArgs(oldMacSequence, newMacSequence, vmSpec)); - } - } - } - } - - VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.valueOf(vmSpec.getDetails().get(VmDetailConstants.NIC_ADAPTER)); - if (s_logger.isDebugEnabled()) - s_logger.debug("VM " + vmInternalCSName + " will be started with NIC device type: " + nicDeviceType); - - NiciraNvpApiVersion.logNiciraApiVersion(); - - Map nicUuidToDvSwitchUuid = new HashMap(); - for (NicTO nicTo : sortNicsByDeviceId(nics)) { - s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo)); - - boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus")); - VirtualMachine.Type vmType = cmd.getVirtualMachine().getType(); - Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType); - if ((nicTo.getBroadcastType() != BroadcastDomainType.Lswitch) - || (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))) { - if (VmwareHelper.isDvPortGroup(networkInfo.first())) { - String dvSwitchUuid; - ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); - DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor); - ManagedObjectReference dvsMor = dataCenterMo.getDvSwitchMor(networkInfo.first()); - dvSwitchUuid = dataCenterMo.getDvSwitchUuid(dvsMor); - s_logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid); - nic = VmwareHelper.prepareDvNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), dvSwitchUuid, - nicTo.getMac(), i + 1, true, true); - if (nicTo.getUuid() != null) 
{ - nicUuidToDvSwitchUuid.put(nicTo.getUuid(), dvSwitchUuid); - } - } else { - s_logger.info("Preparing NIC device on network " + networkInfo.second()); - nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), - nicTo.getMac(), i + 1, true, true); - } - } else { - //if NSX API VERSION >= 4.2, connect to br-int (nsx.network), do not create portgroup else previous behaviour - nic = VmwareHelper.prepareNicOpaque(vmMo, nicDeviceType, networkInfo.second(), - nicTo.getMac(), i + 1, true, true); - } - - deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - deviceConfigSpecArray[i].setDevice(nic); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare NIC at new device " + _gson.toJson(deviceConfigSpecArray[i])); - - // this is really a hacking for DomR, upon DomR startup, we will reset all the NIC allocation after eth3 - if (nicCount < 3) - nicMask |= (1 << nicCount); - - i++; - nicCount++; - } - - for (int j = 0; j < i; j++) - vmConfigSpec.getDeviceChange().add(deviceConfigSpecArray[j]); - - // - // Setup VM options - // - - // pass boot arguments through machine.id & perform customized options to VMX - ArrayList extraOptions = new ArrayList(); - configBasicExtraOption(extraOptions, vmSpec); - configNvpExtraOption(extraOptions, vmSpec, nicUuidToDvSwitchUuid); - configCustomExtraOption(extraOptions, vmSpec); - - // config for NCC - VirtualMachine.Type vmType = cmd.getVirtualMachine().getType(); - if (vmType.equals(VirtualMachine.Type.NetScalerVm)) { - NicTO mgmtNic = vmSpec.getNics()[0]; - OptionValue option = new OptionValue(); - option.setKey("machine.id"); - option.setValue("ip=" + mgmtNic.getIp() + "&netmask=" + mgmtNic.getNetmask() + "&gateway=" + mgmtNic.getGateway()); - extraOptions.add(option); - } - - // config VNC - String keyboardLayout = null; - if (vmSpec.getDetails() != null) - keyboardLayout = vmSpec.getDetails().get(VmDetailConstants.KEYBOARD); - vmConfigSpec.getExtraConfig() - .addAll(Arrays.asList(configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout))); - - // config video card - configureVideoCard(vmMo, vmSpec, vmConfigSpec); - - // Set OVF properties (if available) - Pair> ovfPropsMap = vmSpec.getOvfProperties(); - VmConfigInfo templateVappConfig = null; - List ovfProperties = null; - if (ovfPropsMap != null) { - String vmTemplate = ovfPropsMap.first(); - s_logger.info("Find VM template " + vmTemplate); - VirtualMachineMO vmTemplateMO = dcMo.findVm(vmTemplate); - templateVappConfig = vmTemplateMO.getConfigInfo().getVAppConfig(); - ovfProperties = ovfPropsMap.second(); - // Set OVF properties (if available) - if (CollectionUtils.isNotEmpty(ovfProperties)) { - s_logger.info("Copying OVF properties from template and setting them to the values the user provided"); - copyVAppConfigsFromTemplate(templateVappConfig, ovfProperties, vmConfigSpec); - } - } - - setBootOptions(vmSpec, bootMode, vmConfigSpec); - - // - // Configure VM - // - if (!vmMo.configureVm(vmConfigSpec)) { - throw new Exception("Failed to configure VM before start. 
vmName: " + vmInternalCSName); - } - - if (vmSpec.getType() == VirtualMachine.Type.DomainRouter) { - hyperHost.setRestartPriorityForVM(vmMo, DasVmPriority.HIGH.value()); - } - - // Resizing root disk only when explicit requested by user - final Map vmDetails = cmd.getVirtualMachine().getDetails(); - if (rootDiskTO != null && !hasSnapshot && (vmDetails != null && vmDetails.containsKey(ApiConstants.ROOT_DISK_SIZE))) { - resizeRootDiskOnVMStart(vmMo, rootDiskTO, hyperHost, context); - } - - // - // Post Configuration - // - - vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, String.valueOf(nicMask)); - postNvpConfigBeforeStart(vmMo, vmSpec); - - Map> iqnToData = new HashMap<>(); - - postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, ideControllerKey, scsiControllerKey, iqnToData, hyperHost, context); - - // - // Power-on VM - // - if (!vmMo.powerOn()) { - throw new Exception("Failed to start VM. vmName: " + vmInternalCSName + " with hostname " + vmNameOnVcenter); - } - - StartAnswer startAnswer = new StartAnswer(cmd); - - startAnswer.setIqnToData(iqnToData); - - // Since VM was successfully powered-on, if there was an existing VM in a different cluster that was unregistered, delete all the files associated with it. - if (existingVmName != null && existingVmFileLayout != null) { - List vmDatastoreNames = new ArrayList(); - for (DatastoreMO vmDatastore : vmMo.getAllDatastores()) { - vmDatastoreNames.add(vmDatastore.getName()); - } - // Don't delete files that are in a datastore that is being used by the new VM as well (zone-wide datastore). - List skipDatastores = new ArrayList(); - for (DatastoreMO existingDatastore : existingDatastores) { - if (vmDatastoreNames.contains(existingDatastore.getName())) { - skipDatastores.add(existingDatastore.getName()); - } - } - deleteUnregisteredVmFiles(existingVmFileLayout, dcMo, true, skipDatastores); - } - - return startAnswer; - } catch (Throwable e) { - if (e instanceof RemoteException) { - s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); - invalidateServiceContext(); - } - - String msg = "StartCommand failed due to " + VmwareHelper.getExceptionMessage(e); - s_logger.warn(msg, e); - StartAnswer startAnswer = new StartAnswer(cmd, msg); - if (vmAlreadyExistsInVcenter) { - startAnswer.setContextParam("stopRetry", "true"); - } - - // Since VM start failed, if there was an existing VM in a different cluster that was unregistered, register it back. 
- if (existingVmName != null && existingVmFileInfo != null) { - s_logger.debug("Since VM start failed, registering back an existing VM: " + existingVmName + " that was unregistered"); - try { - DatastoreFile fileInDatastore = new DatastoreFile(existingVmFileInfo.getVmPathName()); - DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName())); - registerVm(existingVmName, existingVmDsMo); - } catch (Exception ex) { - String message = "Failed to register an existing VM: " + existingVmName + " due to " + VmwareHelper.getExceptionMessage(ex); - s_logger.warn(message, ex); - } - } - - return startAnswer; - } finally { - } - } - - private void setBootOptions(VirtualMachineTO vmSpec, String bootMode, VirtualMachineConfigSpec vmConfigSpec) { - VirtualMachineBootOptions bootOptions = null; - if (StringUtils.isNotBlank(bootMode) && !bootMode.equalsIgnoreCase("bios")) { - vmConfigSpec.setFirmware("efi"); - if (vmSpec.getDetails().containsKey(ApiConstants.BootType.UEFI.toString()) && "secure".equalsIgnoreCase(vmSpec.getDetails().get(ApiConstants.BootType.UEFI.toString()))) { - if (bootOptions == null) { - bootOptions = new VirtualMachineBootOptions(); - } - bootOptions.setEfiSecureBootEnabled(true); - } - } - if (vmSpec.isEnterHardwareSetup()) { - if (bootOptions == null) { - bootOptions = new VirtualMachineBootOptions(); - } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("configuring VM '%s' to enter hardware setup",vmSpec.getName())); - } - bootOptions.setEnterBIOSSetup(vmSpec.isEnterHardwareSetup()); - } - if (bootOptions != null) { - vmConfigSpec.setBootOptions(bootOptions); - } - } - /** * Set the ovf section spec from existing vApp configuration */ @@ -2433,12 +1630,22 @@ protected List copyVAppConfigOvfSectionFromOVF(VmConfigInfo private Map> getOVFMap(List props) { Map> map = new HashMap<>(); for (OVFPropertyTO prop : props) { - Pair pair = new Pair<>(prop.getValue(), prop.isPassword()); + String value = getPropertyValue(prop); + Pair pair = new Pair<>(value, prop.isPassword()); map.put(prop.getKey(), pair); } return map; } + private String getPropertyValue(OVFPropertyTO prop) { + String type = prop.getType(); + String value = prop.getValue(); + if ("boolean".equalsIgnoreCase(type)) { + value = Boolean.parseBoolean(value) ? 
"True" : "False"; + } + return value; + } + /** * Set the properties section from existing vApp configuration and values set on ovfProperties */ @@ -2495,7 +1702,7 @@ protected void copyVAppConfigsFromTemplate(VmConfigInfo vAppConfig, vmConfig.setVAppConfig(vmConfigSpec); } - private String appendFileType(String path, String fileType) { + static String appendFileType(String path, String fileType) { if (path.toLowerCase().endsWith(fileType.toLowerCase())) { return path; } @@ -2503,68 +1710,6 @@ private String appendFileType(String path, String fileType) { return path + fileType; } - private void resizeRootDiskOnVMStart(VirtualMachineMO vmMo, DiskTO rootDiskTO, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { - final Pair vdisk = getVirtualDiskInfo(vmMo, appendFileType(rootDiskTO.getPath(), VMDK_EXTENSION)); - assert (vdisk != null); - - Long reqSize = 0L; - final VolumeObjectTO volumeTO = ((VolumeObjectTO) rootDiskTO.getData()); - if (volumeTO != null) { - reqSize = volumeTO.getSize() / 1024; - } - final VirtualDisk disk = vdisk.first(); - if (reqSize > disk.getCapacityInKB()) { - final VirtualMachineDiskInfo diskInfo = getMatchingExistingDisk(vmMo.getDiskInfoBuilder(), rootDiskTO, hyperHost, context); - assert (diskInfo != null); - final String[] diskChain = diskInfo.getDiskChain(); - - if (diskChain != null && diskChain.length > 1) { - s_logger.warn("Disk chain length for the VM is greater than one, this is not supported"); - throw new CloudRuntimeException("Unsupported VM disk chain length: " + diskChain.length); - } - - boolean resizingSupported = false; - String deviceBusName = diskInfo.getDiskDeviceBusName(); - if (deviceBusName != null && (deviceBusName.toLowerCase().contains("scsi") || deviceBusName.toLowerCase().contains("lsi"))) { - resizingSupported = true; - } - if (!resizingSupported) { - s_logger.warn("Resizing of root disk is only support for scsi device/bus, the provide VM's disk device bus name is " + diskInfo.getDiskDeviceBusName()); - throw new CloudRuntimeException("Unsupported VM root disk device bus: " + diskInfo.getDiskDeviceBusName()); - } - - disk.setCapacityInKB(reqSize); - VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); - deviceConfigSpec.setDevice(disk); - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT); - vmConfigSpec.getDeviceChange().add(deviceConfigSpec); - if (!vmMo.configureVm(vmConfigSpec)) { - throw new Exception("Failed to configure VM for given root disk size. vmName: " + vmMo.getName()); - } - } - } - - - /** - * Generate the mac sequence from the nics. - */ - protected String generateMacSequence(NicTO[] nics) { - if (nics.length == 0) { - return ""; - } - - StringBuffer sbMacSequence = new StringBuffer(); - for (NicTO nicTo : sortNicsByDeviceId(nics)) { - sbMacSequence.append(nicTo.getMac()).append("|"); - } - if (!sbMacSequence.toString().isEmpty()) { - sbMacSequence.deleteCharAt(sbMacSequence.length() - 1); //Remove extra '|' char appended at the end - } - - return sbMacSequence.toString(); - } - /** * Update boot args with the new nic mac addresses. 
*/ @@ -2644,113 +1789,21 @@ protected void configureSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCa arrayVideoCardConfigSpecs.setDevice(videoCard); arrayVideoCardConfigSpecs.setOperation(VirtualDeviceConfigSpecOperation.EDIT); - vmConfigSpec.getDeviceChange().add(arrayVideoCardConfigSpecs); - } - - private void tearDownVm(VirtualMachineMO vmMo) throws Exception { - - if (vmMo == null) - return; - - boolean hasSnapshot = false; - hasSnapshot = vmMo.hasSnapshot(); - if (!hasSnapshot) - vmMo.tearDownDevices(new Class[]{VirtualDisk.class, VirtualEthernetCard.class}); - else - vmMo.tearDownDevices(new Class[]{VirtualEthernetCard.class}); - vmMo.ensureScsiDeviceController(); - } - - int getReservedMemoryMb(VirtualMachineTO vmSpec) { - if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveMemory.key()).equalsIgnoreCase("true")) { - return (int) (vmSpec.getMinRam() / ResourceType.bytesToMiB); - } - return 0; - } - - int getReservedCpuMHZ(VirtualMachineTO vmSpec) { - if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveCpu.key()).equalsIgnoreCase("true")) { - return vmSpec.getMinSpeed() * vmSpec.getCpus(); - } - return 0; - } - - // return the finalized disk chain for startup, from top to bottom - private String[] syncDiskChain(DatacenterMO dcMo, VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO vol, VirtualMachineDiskInfo diskInfo, - HashMap> dataStoresDetails) throws Exception { - - VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); - DataStoreTO primaryStore = volumeTO.getDataStore(); - Map details = vol.getDetails(); - boolean isManaged = false; - String iScsiName = null; - - if (details != null) { - isManaged = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); - iScsiName = details.get(DiskTO.IQN); - } - - // if the storage is managed, iScsiName should not be null - String datastoreName = isManaged ? VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid(); - Pair volumeDsDetails = dataStoresDetails.get(datastoreName); - - if (volumeDsDetails == null) { - throw new Exception("Primary datastore " + primaryStore.getUuid() + " is not mounted on host."); - } - - DatastoreMO dsMo = volumeDsDetails.second(); - - // we will honor vCenter's meta if it exists - if (diskInfo != null) { - // to deal with run-time upgrade to maintain the new datastore folder structure - String disks[] = diskInfo.getDiskChain(); - for (int i = 0; i < disks.length; i++) { - DatastoreFile file = new DatastoreFile(disks[i]); - if (!isManaged && file.getDir() != null && file.getDir().isEmpty()) { - s_logger.info("Perform run-time datastore folder upgrade. 
sync " + disks[i] + " to VM folder"); - disks[i] = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, file.getFileBaseName(), VmwareManager.s_vmwareSearchExcludeFolder.value()); - } - } - return disks; - } - - final String datastoreDiskPath; - - if (isManaged) { - String vmdkPath = new DatastoreFile(volumeTO.getPath()).getFileBaseName(); - - if (volumeTO.getVolumeType() == Volume.Type.ROOT) { - if (vmdkPath == null) { - vmdkPath = volumeTO.getName(); - } - - datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, vmdkPath); - } else { - if (vmdkPath == null) { - vmdkPath = dsMo.getName(); - } - - datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + VMDK_EXTENSION); - } - } else { - datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, volumeTO.getPath(), VmwareManager.s_vmwareSearchExcludeFolder.value()); - } - - if (!dsMo.fileExists(datastoreDiskPath)) { - s_logger.warn("Volume " + volumeTO.getId() + " does not seem to exist on datastore, out of sync? path: " + datastoreDiskPath); - } + vmConfigSpec.getDeviceChange().add(arrayVideoCardConfigSpecs); + } - return new String[]{datastoreDiskPath}; + int getReservedMemoryMb(VirtualMachineTO vmSpec) { + if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveMemory.key()).equalsIgnoreCase("true")) { + return (int) (vmSpec.getMinRam() / ResourceType.bytesToMiB); + } + return 0; } - // Pair - private Pair composeVmNames(VirtualMachineTO vmSpec) { - String vmInternalCSName = vmSpec.getName(); - String vmNameOnVcenter = vmSpec.getName(); - if (_instanceNameFlag && vmSpec.getHostName() != null) { - vmNameOnVcenter = vmSpec.getHostName(); + int getReservedCpuMHZ(VirtualMachineTO vmSpec) { + if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveCpu.key()).equalsIgnoreCase("true")) { + return vmSpec.getMinSpeed() * vmSpec.getCpus(); } - return new Pair(vmInternalCSName, vmNameOnVcenter); + return 0; } protected void configNestedHVSupport(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, VirtualMachineConfigSpec vmConfigSpec) throws Exception { @@ -2778,357 +1831,6 @@ protected void configNestedHVSupport(VirtualMachineMO vmMo, VirtualMachineTO vmS } } - private static void configBasicExtraOption(List extraOptions, VirtualMachineTO vmSpec) { - OptionValue newVal = new OptionValue(); - newVal.setKey("machine.id"); - newVal.setValue(vmSpec.getBootArgs()); - extraOptions.add(newVal); - - newVal = new OptionValue(); - newVal.setKey("devices.hotplug"); - newVal.setValue("true"); - extraOptions.add(newVal); - } - - private static void configNvpExtraOption(List extraOptions, VirtualMachineTO vmSpec, Map nicUuidToDvSwitchUuid) { - /** - * Extra Config : nvp.vm-uuid = uuid - * - Required for Nicira NVP integration - */ - OptionValue newVal = new OptionValue(); - newVal.setKey("nvp.vm-uuid"); - newVal.setValue(vmSpec.getUuid()); - extraOptions.add(newVal); - - /** - * Extra Config : nvp.iface-id. = uuid - * - Required for Nicira NVP integration - */ - int nicNum = 0; - for (NicTO nicTo : sortNicsByDeviceId(vmSpec.getNics())) { - if (nicTo.getUuid() != null) { - newVal = new OptionValue(); - newVal.setKey("nvp.iface-id." 
+ nicNum); - newVal.setValue(nicTo.getUuid()); - extraOptions.add(newVal); - } - nicNum++; - } - } - - private static void configCustomExtraOption(List extraOptions, VirtualMachineTO vmSpec) { - // we no longer to validation anymore - for (Map.Entry entry : vmSpec.getDetails().entrySet()) { - if (entry.getKey().equalsIgnoreCase(VmDetailConstants.BOOT_MODE)) { - continue; - } - OptionValue newVal = new OptionValue(); - newVal.setKey(entry.getKey()); - newVal.setValue(entry.getValue()); - extraOptions.add(newVal); - } - } - - private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec) throws Exception { - /** - * We need to configure the port on the DV switch after the host is - * connected. So make this happen between the configure and start of - * the VM - */ - int nicIndex = 0; - for (NicTO nicTo : sortNicsByDeviceId(vmSpec.getNics())) { - if (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch) { - // We need to create a port with a unique vlan and pass the key to the nic device - s_logger.trace("Nic " + nicTo.toString() + " is connected to an NVP logicalswitch"); - VirtualDevice nicVirtualDevice = vmMo.getNicDeviceByIndex(nicIndex); - if (nicVirtualDevice == null) { - throw new Exception("Failed to find a VirtualDevice for nic " + nicIndex); //FIXME Generic exceptions are bad - } - VirtualDeviceBackingInfo backing = nicVirtualDevice.getBacking(); - if (backing instanceof VirtualEthernetCardDistributedVirtualPortBackingInfo) { - // This NIC is connected to a Distributed Virtual Switch - VirtualEthernetCardDistributedVirtualPortBackingInfo portInfo = (VirtualEthernetCardDistributedVirtualPortBackingInfo) backing; - DistributedVirtualSwitchPortConnection port = portInfo.getPort(); - String portKey = port.getPortKey(); - String portGroupKey = port.getPortgroupKey(); - String dvSwitchUuid = port.getSwitchUuid(); - - s_logger.debug("NIC " + nicTo.toString() + " is connected to dvSwitch " + dvSwitchUuid + " pg " + portGroupKey + " port " + portKey); - - ManagedObjectReference dvSwitchManager = vmMo.getContext().getVimClient().getServiceContent().getDvSwitchManager(); - ManagedObjectReference dvSwitch = vmMo.getContext().getVimClient().getService().queryDvsByUuid(dvSwitchManager, dvSwitchUuid); - - // Get all ports - DistributedVirtualSwitchPortCriteria criteria = new DistributedVirtualSwitchPortCriteria(); - criteria.setInside(true); - criteria.getPortgroupKey().add(portGroupKey); - List dvPorts = vmMo.getContext().getVimClient().getService().fetchDVPorts(dvSwitch, criteria); - - DistributedVirtualPort vmDvPort = null; - List usedVlans = new ArrayList(); - for (DistributedVirtualPort dvPort : dvPorts) { - // Find the port for this NIC by portkey - if (portKey.equals(dvPort.getKey())) { - vmDvPort = dvPort; - } - VMwareDVSPortSetting settings = (VMwareDVSPortSetting) dvPort.getConfig().getSetting(); - VmwareDistributedVirtualSwitchVlanIdSpec vlanId = (VmwareDistributedVirtualSwitchVlanIdSpec) settings.getVlan(); - s_logger.trace("Found port " + dvPort.getKey() + " with vlan " + vlanId.getVlanId()); - if (vlanId.getVlanId() > 0 && vlanId.getVlanId() < 4095) { - usedVlans.add(vlanId.getVlanId()); - } - } - - if (vmDvPort == null) { - throw new Exception("Empty port list from dvSwitch for nic " + nicTo.toString()); - } - - DVPortConfigInfo dvPortConfigInfo = vmDvPort.getConfig(); - VMwareDVSPortSetting settings = (VMwareDVSPortSetting) dvPortConfigInfo.getSetting(); - - VmwareDistributedVirtualSwitchVlanIdSpec vlanId = (VmwareDistributedVirtualSwitchVlanIdSpec) 
settings.getVlan(); - BoolPolicy blocked = settings.getBlocked(); - if (blocked.isValue() == Boolean.TRUE) { - s_logger.trace("Port is blocked, set a vlanid and unblock"); - DVPortConfigSpec dvPortConfigSpec = new DVPortConfigSpec(); - VMwareDVSPortSetting edittedSettings = new VMwareDVSPortSetting(); - // Unblock - blocked.setValue(Boolean.FALSE); - blocked.setInherited(Boolean.FALSE); - edittedSettings.setBlocked(blocked); - // Set vlan - int i; - for (i = 1; i < 4095; i++) { - if (!usedVlans.contains(i)) - break; - } - vlanId.setVlanId(i); // FIXME should be a determined - // based on usage - vlanId.setInherited(false); - edittedSettings.setVlan(vlanId); - - dvPortConfigSpec.setSetting(edittedSettings); - dvPortConfigSpec.setOperation("edit"); - dvPortConfigSpec.setKey(portKey); - List dvPortConfigSpecs = new ArrayList(); - dvPortConfigSpecs.add(dvPortConfigSpec); - ManagedObjectReference task = vmMo.getContext().getVimClient().getService().reconfigureDVPortTask(dvSwitch, dvPortConfigSpecs); - if (!vmMo.getContext().getVimClient().waitForTask(task)) { - throw new Exception("Failed to configure the dvSwitch port for nic " + nicTo.toString()); - } - s_logger.debug("NIC " + nicTo.toString() + " connected to vlan " + i); - } else { - s_logger.trace("Port already configured and set to vlan " + vlanId.getVlanId()); - } - } else if (backing instanceof VirtualEthernetCardNetworkBackingInfo) { - // This NIC is connected to a Virtual Switch - // Nothing to do - } else if (backing instanceof VirtualEthernetCardOpaqueNetworkBackingInfo) { - //if NSX API VERSION >= 4.2, connect to br-int (nsx.network), do not create portgroup else previous behaviour - //OK, connected to OpaqueNetwork - } else { - s_logger.error("nic device backing is of type " + backing.getClass().getName()); - throw new Exception("Incompatible backing for a VirtualDevice for nic " + nicIndex); //FIXME Generic exceptions are bad - } - } - nicIndex++; - } - } - - private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol, VmwareHypervisorHost hyperHost, VmwareContext context) - throws Exception { - if (diskInfoBuilder != null) { - VolumeObjectTO volume = (VolumeObjectTO) vol.getData(); - - String dsName = null; - String diskBackingFileBaseName = null; - - Map details = vol.getDetails(); - boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED)); - - if (isManaged) { - String iScsiName = details.get(DiskTO.IQN); - - // if the storage is managed, iScsiName should not be null - dsName = VmwareResource.getDatastoreName(iScsiName); - - diskBackingFileBaseName = new DatastoreFile(volume.getPath()).getFileBaseName(); - } else { - ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, volume.getDataStore().getUuid()); - DatastoreMO dsMo = new DatastoreMO(context, morDs); - - dsName = dsMo.getName(); - - diskBackingFileBaseName = volume.getPath(); - } - - VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName); - if (diskInfo != null) { - s_logger.info("Found existing disk info from volume path: " + volume.getPath()); - return diskInfo; - } else { - String chainInfo = volume.getChainInfo(); - if (chainInfo != null) { - VirtualMachineDiskInfo infoInChain = _gson.fromJson(chainInfo, VirtualMachineDiskInfo.class); - if (infoInChain != null) { - String[] disks = infoInChain.getDiskChain(); - if (disks.length > 0) { - for (String diskPath : disks) { - 
DatastoreFile file = new DatastoreFile(diskPath); - diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName); - if (diskInfo != null) { - s_logger.info("Found existing disk from chain info: " + diskPath); - return diskInfo; - } - } - } - - if (diskInfo == null) { - diskInfo = diskInfoBuilder.getDiskInfoByDeviceBusName(infoInChain.getDiskDeviceBusName()); - if (diskInfo != null) { - s_logger.info("Found existing disk from from chain device bus information: " + infoInChain.getDiskDeviceBusName()); - return diskInfo; - } - } - } - } - } - } - - return null; - } - - private int getDiskController(VirtualMachineDiskInfo matchingExistingDisk, DiskTO vol, VirtualMachineTO vmSpec, int ideControllerKey, int scsiControllerKey) { - - int controllerKey; - if (matchingExistingDisk != null) { - s_logger.info("Chose disk controller based on existing information: " + matchingExistingDisk.getDiskDeviceBusName()); - if (matchingExistingDisk.getDiskDeviceBusName().startsWith("ide")) - return ideControllerKey; - else - return scsiControllerKey; - } - - if (vol.getType() == Volume.Type.ROOT) { - Map vmDetails = vmSpec.getDetails(); - if (vmDetails != null && vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER) != null) { - if (vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER).equalsIgnoreCase("scsi")) { - s_logger.info("Chose disk controller for vol " + vol.getType() + " -> scsi, based on root disk controller settings: " - + vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER)); - controllerKey = scsiControllerKey; - } else { - s_logger.info("Chose disk controller for vol " + vol.getType() + " -> ide, based on root disk controller settings: " - + vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER)); - controllerKey = ideControllerKey; - } - } else { - s_logger.info("Chose disk controller for vol " + vol.getType() + " -> scsi. due to null root disk controller setting"); - controllerKey = scsiControllerKey; - } - - } else { - // DATA volume always use SCSI device - s_logger.info("Chose disk controller for vol " + vol.getType() + " -> scsi"); - controllerKey = scsiControllerKey; - } - - return controllerKey; - } - - private String getDiskController(VirtualMachineMO vmMo, VirtualMachineDiskInfo matchingExistingDisk, DiskTO vol, Pair controllerInfo) throws Exception { - int controllerKey; - DiskControllerType controllerType = DiskControllerType.none; - if (matchingExistingDisk != null) { - String currentBusName = matchingExistingDisk.getDiskDeviceBusName(); - if (currentBusName != null) { - s_logger.info("Chose disk controller based on existing information: " + currentBusName); - if (currentBusName.startsWith("ide")) { - controllerType = DiskControllerType.ide; - } else if (currentBusName.startsWith("scsi")) { - controllerType = DiskControllerType.scsi; - } - } - if (controllerType == DiskControllerType.scsi || controllerType == DiskControllerType.none) { - Ternary vmScsiControllerInfo = vmMo.getScsiControllerInfo(); - controllerType = vmScsiControllerInfo.third(); - } - return controllerType.toString(); - } - - if (vol.getType() == Volume.Type.ROOT) { - s_logger.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.first() - + ", based on root disk controller settings at global configuration setting."); - return controllerInfo.first(); - } else { - s_logger.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.second() - + ", based on default data disk controller setting i.e. 
Operating system recommended."); // Need to bring in global configuration setting & template level setting. - return controllerInfo.second(); - } - } - - private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey, - int scsiControllerKey, Map> iqnToData, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { - VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); - - for (DiskTO vol : sortedDisks) { - if (vol.getType() == Volume.Type.ISO) - continue; - - VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); - - VirtualMachineDiskInfo diskInfo = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context); - assert (diskInfo != null); - - String[] diskChain = diskInfo.getDiskChain(); - assert (diskChain.length > 0); - - Map details = vol.getDetails(); - boolean managed = false; - - if (details != null) { - managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); - } - - DatastoreFile file = new DatastoreFile(diskChain[0]); - - if (managed) { - DatastoreFile originalFile = new DatastoreFile(volumeTO.getPath()); - - if (!file.getFileBaseName().equalsIgnoreCase(originalFile.getFileBaseName())) { - if (s_logger.isInfoEnabled()) - s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + diskChain[0]); - } - } else { - if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) { - if (s_logger.isInfoEnabled()) - s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); - } - } - - VolumeObjectTO volInSpec = getVolumeInSpec(vmSpec, volumeTO); - - if (volInSpec != null) { - if (managed) { - Map data = new HashMap<>(); - - String datastoreVolumePath = diskChain[0]; - - data.put(StartAnswer.PATH, datastoreVolumePath); - data.put(StartAnswer.IMAGE_FORMAT, Storage.ImageFormat.OVA.toString()); - - iqnToData.put(details.get(DiskTO.IQN), data); - - vol.setPath(datastoreVolumePath); - volumeTO.setPath(datastoreVolumePath); - volInSpec.setPath(datastoreVolumePath); - } else { - volInSpec.setPath(file.getFileBaseName()); - } - volInSpec.setChainInfo(_gson.toJson(diskInfo)); - } - } - } - private void checkAndDeleteDatastoreFile(String filePath, List skipDatastores, DatastoreMO dsMo, DatacenterMO dcMo) throws Exception { if (dsMo != null && dcMo != null && (skipDatastores == null || !skipDatastores.contains(dsMo.getName()))) { s_logger.debug("Deleting file: " + filePath); @@ -3136,7 +1838,7 @@ private void checkAndDeleteDatastoreFile(String filePath, List skipDatas } } - private void deleteUnregisteredVmFiles(VirtualMachineFileLayoutEx vmFileLayout, DatacenterMO dcMo, boolean deleteDisks, List skipDatastores) throws Exception { + void deleteUnregisteredVmFiles(VirtualMachineFileLayoutEx vmFileLayout, DatacenterMO dcMo, boolean deleteDisks, List skipDatastores) throws Exception { s_logger.debug("Deleting files associated with an existing VM that was unregistered"); DatastoreFile vmFolder = null; try { @@ -3170,64 +1872,6 @@ else if (file.getType().equals("config")) } } - private static VolumeObjectTO getVolumeInSpec(VirtualMachineTO vmSpec, VolumeObjectTO srcVol) { - for (DiskTO disk : vmSpec.getDisks()) { - if (disk.getData() instanceof VolumeObjectTO) { - VolumeObjectTO vol = (VolumeObjectTO) disk.getData(); - if (vol.getId() == srcVol.getId()) - return vol; - } - } - - return null; - } - - private static NicTO[] 
sortNicsByDeviceId(NicTO[] nics) { - - List listForSort = new ArrayList(); - for (NicTO nic : nics) { - listForSort.add(nic); - } - Collections.sort(listForSort, new Comparator() { - - @Override - public int compare(NicTO arg0, NicTO arg1) { - if (arg0.getDeviceId() < arg1.getDeviceId()) { - return -1; - } else if (arg0.getDeviceId() == arg1.getDeviceId()) { - return 0; - } - - return 1; - } - }); - - return listForSort.toArray(new NicTO[0]); - } - - private static DiskTO[] sortVolumesByDeviceId(DiskTO[] volumes) { - - List listForSort = new ArrayList(); - for (DiskTO vol : volumes) { - listForSort.add(vol); - } - Collections.sort(listForSort, new Comparator() { - - @Override - public int compare(DiskTO arg0, DiskTO arg1) { - if (arg0.getDiskSeq() < arg1.getDiskSeq()) { - return -1; - } else if (arg0.getDiskSeq().equals(arg1.getDiskSeq())) { - return 0; - } - - return 1; - } - }); - - return listForSort.toArray(new DiskTO[0]); - } - /** * Only call this for managed storage. * Ex. "[-iqn.2010-01.com.solidfire:4nhe.vol-1.27-0] i-2-18-VM/ROOT-18.vmdk" should return "i-2-18-VM/ROOT-18" @@ -3247,9 +1891,7 @@ public String getVmdkPath(String path) { path = path.substring(startIndex + search.length()); - final String search2 = VMDK_EXTENSION; - - int endIndex = path.indexOf(search2); + int endIndex = path.indexOf(VMDK_EXTENSION); if (endIndex == -1) { return null; @@ -3258,113 +1900,6 @@ public String getVmdkPath(String path) { return path.substring(0, endIndex).trim(); } - private HashMap> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context, - DiskTO[] disks, Command cmd) throws Exception { - HashMap> mapIdToMors = new HashMap<>(); - - assert (hyperHost != null) && (context != null); - - for (DiskTO vol : disks) { - if (vol.getType() != Volume.Type.ISO) { - VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); - DataStoreTO primaryStore = volumeTO.getDataStore(); - String poolUuid = primaryStore.getUuid(); - - if (mapIdToMors.get(poolUuid) == null) { - boolean isManaged = false; - Map details = vol.getDetails(); - - if (details != null) { - isManaged = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); - } - - if (isManaged) { - String iScsiName = details.get(DiskTO.IQN); // details should not be null for managed storage (it may or may not be null for non-managed storage) - String datastoreName = VmwareResource.getDatastoreName(iScsiName); - ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, datastoreName); - - // if the datastore is not present, we need to discover the iSCSI device that will support it, - // create the datastore, and create a VMDK file in the datastore - if (morDatastore == null) { - final String vmdkPath = getVmdkPath(volumeTO.getPath()); - - morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName, - details.get(DiskTO.STORAGE_HOST), Integer.parseInt(details.get(DiskTO.STORAGE_PORT)), - vmdkPath, - details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), - details.get(DiskTO.CHAP_TARGET_USERNAME), details.get(DiskTO.CHAP_TARGET_SECRET), - Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd); - - DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore); - - final String datastoreVolumePath; - - if (vmdkPath != null) { - datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + VMDK_EXTENSION); - } else { - datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + VMDK_EXTENSION); - } - - 
volumeTO.setPath(datastoreVolumePath); - vol.setPath(datastoreVolumePath); - } - - mapIdToMors.put(datastoreName, new Pair<>(morDatastore, new DatastoreMO(context, morDatastore))); - } else { - ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolUuid); - - if (morDatastore == null) { - String msg = "Failed to get the mounted datastore for the volume's pool " + poolUuid; - - s_logger.error(msg); - - throw new Exception(msg); - } - - mapIdToMors.put(poolUuid, new Pair<>(morDatastore, new DatastoreMO(context, morDatastore))); - } - } - } - } - - return mapIdToMors; - } - - private DatastoreMO getDatastoreThatRootDiskIsOn(HashMap> dataStoresDetails, DiskTO disks[]) { - Pair rootDiskDataStoreDetails = null; - - for (DiskTO vol : disks) { - if (vol.getType() == Volume.Type.ROOT) { - Map details = vol.getDetails(); - boolean managed = false; - - if (details != null) { - managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); - } - - if (managed) { - String datastoreName = VmwareResource.getDatastoreName(details.get(DiskTO.IQN)); - - rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName); - - break; - } else { - DataStoreTO primaryStore = vol.getData().getDataStore(); - - rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid()); - - break; - } - } - } - - if (rootDiskDataStoreDetails != null) { - return rootDiskDataStoreDetails.second(); - } - - return null; - } - private String getPvlanInfo(NicTO nicTo) { if (nicTo.getBroadcastType() == BroadcastDomainType.Pvlan) { return NetUtils.getIsolatedPvlanFromUri(nicTo.getBroadcastUri()); @@ -3405,7 +1940,7 @@ private String getVlanInfo(NicTO nicTo, String defaultVlan) { return defaultVlan; } - private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) + Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) throws Exception { Ternary switchDetails = getTargetSwitch(nicTo); @@ -3506,58 +2041,9 @@ private String getNetworkNamePrefix(NicTO nicTo) throws Exception { } } - private VirtualMachineMO takeVmFromOtherHyperHost(VmwareHypervisorHost hyperHost, String vmName) throws Exception { - - VirtualMachineMO vmMo = hyperHost.findVmOnPeerHyperHost(vmName); - if (vmMo != null) { - ManagedObjectReference morTargetPhysicalHost = hyperHost.findMigrationTarget(vmMo); - if (morTargetPhysicalHost == null) { - String msg = "VM " + vmName + " is on other host and we have no resource available to migrate and start it here"; - s_logger.error(msg); - throw new Exception(msg); - } - - if (!vmMo.relocate(morTargetPhysicalHost)) { - String msg = "VM " + vmName + " is on other host and we failed to relocate it here"; - s_logger.error(msg); - throw new Exception(msg); - } - - return vmMo; - } - return null; - } - - // isoUrl sample content : - // nfs://192.168.10.231/export/home/kelven/vmware-test/secondary/template/tmpl/2/200//200-2-80f7ee58-6eff-3a2d-bcb0-59663edf6d26.iso - private Pair getIsoDatastoreInfo(VmwareHypervisorHost hyperHost, String isoUrl) throws Exception { - - assert (isoUrl != null); - int isoFileNameStartPos = isoUrl.lastIndexOf("/"); - if (isoFileNameStartPos < 0) { - throw new Exception("Invalid ISO path info"); - } - - String isoFileName = isoUrl.substring(isoFileNameStartPos); - - int templateRootPos = isoUrl.indexOf("template/tmpl"); - templateRootPos = (templateRootPos < 0 ? 
isoUrl.indexOf(ConfigDrive.CONFIGDRIVEDIR) : templateRootPos); - if (templateRootPos < 0) { - throw new Exception("Invalid ISO path info"); - } - - String storeUrl = isoUrl.substring(0, templateRootPos - 1); - String isoPath = isoUrl.substring(templateRootPos, isoFileNameStartPos); - - ManagedObjectReference morDs = prepareSecondaryDatastoreOnHost(storeUrl); - DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs); - - return new Pair(String.format("[%s] %s%s", dsMo.getName(), isoPath, isoFileName), morDs); - } - protected Answer execute(ReadyCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource ReadyCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource ReadyCommand: " + gson.toJson(cmd)); } try { @@ -3576,7 +2062,7 @@ protected Answer execute(ReadyCommand cmd) { protected Answer execute(GetHostStatsCommand cmd) { if (s_logger.isTraceEnabled()) { - s_logger.trace("Executing resource GetHostStatsCommand: " + _gson.toJson(cmd)); + s_logger.trace("Executing resource GetHostStatsCommand: " + gson.toJson(cmd)); } VmwareContext context = getServiceContext(); @@ -3601,7 +2087,7 @@ protected Answer execute(GetHostStatsCommand cmd) { } if (s_logger.isTraceEnabled()) { - s_logger.trace("GetHostStats Answer: " + _gson.toJson(answer)); + s_logger.trace("GetHostStats Answer: " + gson.toJson(answer)); } return answer; @@ -3609,7 +2095,7 @@ protected Answer execute(GetHostStatsCommand cmd) { protected Answer execute(GetVmStatsCommand cmd) { if (s_logger.isTraceEnabled()) { - s_logger.trace("Executing resource GetVmStatsCommand: " + _gson.toJson(cmd)); + s_logger.trace("Executing resource GetVmStatsCommand: " + gson.toJson(cmd)); } HashMap vmStatsMap = null; @@ -3644,7 +2130,7 @@ protected Answer execute(GetVmStatsCommand cmd) { Answer answer = new GetVmStatsAnswer(cmd, vmStatsMap); if (s_logger.isTraceEnabled()) { - s_logger.trace("Report GetVmStatsAnswer: " + _gson.toJson(answer)); + s_logger.trace("Report GetVmStatsAnswer: " + gson.toJson(answer)); } return answer; } @@ -3784,7 +2270,7 @@ protected GetVolumeStatsAnswer execute(GetVolumeStatsCommand cmd) { for (String chainInfo : cmd.getVolumeUuids()) { if (chainInfo != null) { - VirtualMachineDiskInfo infoInChain = _gson.fromJson(chainInfo, VirtualMachineDiskInfo.class); + VirtualMachineDiskInfo infoInChain = gson.fromJson(chainInfo, VirtualMachineDiskInfo.class); if (infoInChain != null) { String[] disks = infoInChain.getDiskChain(); if (disks.length > 0) { @@ -3819,7 +2305,7 @@ protected GetVolumeStatsAnswer execute(GetVolumeStatsCommand cmd) { protected Answer execute(CheckHealthCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource CheckHealthCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource CheckHealthCommand: " + gson.toJson(cmd)); } try { @@ -3840,7 +2326,7 @@ protected Answer execute(CheckHealthCommand cmd) { protected Answer execute(StopCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource StopCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource StopCommand: " + gson.toJson(cmd)); } // In the stop command, we're passed in the name of the VM as seen by cloudstack, @@ -3872,7 +2358,7 @@ protected Answer execute(StopCommand cmd) { if (cmd.isForceStop()) { success = vmMo.powerOff(); } else { - success = vmMo.safePowerOff(_shutdownWaitMs); + success = vmMo.safePowerOff(shutdownWaitMs); } if (!success) { msg = "Have problem in powering off VM " + cmd.getVmName() + ", let the process continue"; @@ -3906,7 
+2392,7 @@ protected Answer execute(StopCommand cmd) { protected Answer execute(RebootRouterCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource RebootRouterCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource RebootRouterCommand: " + gson.toJson(cmd)); } RebootAnswer answer = (RebootAnswer) execute((RebootCommand) cmd); @@ -3925,7 +2411,7 @@ protected Answer execute(RebootRouterCommand cmd) { protected Answer execute(RebootCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource RebootCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource RebootCommand: " + gson.toJson(cmd)); } boolean toolsInstallerMounted = false; @@ -4015,7 +2501,7 @@ private boolean canSetEnableSetupConfig(VirtualMachineMO vmMo, VirtualMachineTO protected Answer execute(CheckVirtualMachineCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource CheckVirtualMachineCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource CheckVirtualMachineCommand: " + gson.toJson(cmd)); } final String vmName = cmd.getVmName(); @@ -4048,7 +2534,7 @@ protected Answer execute(CheckVirtualMachineCommand cmd) { protected Answer execute(PrepareForMigrationCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource PrepareForMigrationCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource PrepareForMigrationCommand: " + gson.toJson(cmd)); } VirtualMachineTO vm = cmd.getVirtualMachine(); @@ -4081,12 +2567,12 @@ protected Answer execute(PrepareForMigrationCommand cmd) { prepareNetworkFromNicInfo(new HostMO(getServiceContext(), _morHyperHost), nic, false, cmd.getVirtualMachine().getType()); } - List> secStoreUrlAndIdList = mgr.getSecondaryStorageStoresUrlAndIdList(Long.parseLong(_dcId)); + List> secStoreUrlAndIdList = mgr.getSecondaryStorageStoresUrlAndIdList(Long.parseLong(dcId)); for (Pair secStoreUrlAndId : secStoreUrlAndIdList) { String secStoreUrl = secStoreUrlAndId.first(); Long secStoreId = secStoreUrlAndId.second(); if (secStoreUrl == null) { - String msg = String.format("Secondary storage for dc %s is not ready yet?", _dcId); + String msg = String.format("Secondary storage for dc %s is not ready yet?", dcId); throw new Exception(msg); } @@ -4117,7 +2603,7 @@ protected Answer execute(MigrateVmToPoolCommand cmd) { if (s_logger.isInfoEnabled()) { s_logger.info(String.format("excuting MigrateVmToPoolCommand %s -> %s", cmd.getVmName(), cmd.getDestinationPool())); if (s_logger.isDebugEnabled()) { - s_logger.debug("MigrateVmToPoolCommand: " + _gson.toJson(cmd)); + s_logger.debug("MigrateVmToPoolCommand: " + gson.toJson(cmd)); } } @@ -4198,7 +2684,7 @@ Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, Command cmd, M VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolUuid); newVol.setId(volumeDeviceKey.get(disk.getKey())); newVol.setPath(newPath); - newVol.setChainInfo(_gson.toJson(diskInfo)); + newVol.setChainInfo(gson.toJson(diskInfo)); volumeToList.add(newVol); } return new MigrateVmToPoolAnswer((MigrateVmToPoolCommand) cmd, volumeToList); @@ -4275,7 +2761,7 @@ private VirtualMachineMO getVirtualMachineMO(String vmName, VmwareHypervisorHost protected Answer execute(MigrateCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource MigrateCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource MigrateCommand: " + gson.toJson(cmd)); } final String vmName = cmd.getVmName(); @@ -4318,7 +2804,7 
@@ protected Answer execute(MigrateCommand cmd) { protected Answer execute(MigrateWithStorageCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource MigrateWithStorageCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource MigrateWithStorageCommand: " + gson.toJson(cmd)); } VirtualMachineTO vmTo = cmd.getVirtualMachine(); @@ -4382,7 +2868,8 @@ protected Answer execute(MigrateWithStorageCommand cmd) { s_logger.debug("Preparing spec for volume : " + volume.getName()); morDsAtTarget = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(tgtHyperHost, filerTo.getUuid()); - morDsAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, filerTo.getUuid()); + morDsAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, volume.getPoolUuid()); + if (morDsAtTarget == null) { String msg = "Unable to find the target datastore: " + filerTo.getUuid() + " on target host: " + tgtHyperHost.getHyperHostName() + " to execute MigrateWithStorageCommand"; @@ -4410,9 +2897,8 @@ protected Answer execute(MigrateWithStorageCommand cmd) { s_logger.debug("Mounted datastore " + tgtDsHost + ":/" + tgtDsPath + " on " + _hostName); } } - // If datastore is VMFS and target datastore is not mounted or accessible to source host then fail migration. - if (filerTo.getType().equals(StoragePoolType.VMFS)) { + if (filerTo.getType().equals(StoragePoolType.VMFS) || filerTo.getType().equals(StoragePoolType.PreSetup)) { if (morDsAtSource == null) { s_logger.warn( "If host version is below 5.1, then target VMFS datastore(s) need to manually mounted on source host for a successful live storage migration."); @@ -4431,6 +2917,7 @@ protected Answer execute(MigrateWithStorageCommand cmd) { if (volume.getType() == Volume.Type.ROOT) { relocateSpec.setDatastore(morTgtDatastore); } + diskLocator = new VirtualMachineRelocateSpecDiskLocator(); diskLocator.setDatastore(morDsAtSource); Pair diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volume.getPath(), VMDK_EXTENSION)); @@ -4455,8 +2942,9 @@ protected Answer execute(MigrateWithStorageCommand cmd) { diskLocators.add(diskLocator); } } - - relocateSpec.getDisk().addAll(diskLocators); + if (srcHyperHost.getHyperHostCluster().equals(tgtHyperHost.getHyperHostCluster())) { + relocateSpec.getDisk().addAll(diskLocators); + } // Prepare network at target before migration NicTO[] nics = vmTo.getNics(); @@ -4466,12 +2954,12 @@ protected Answer execute(MigrateWithStorageCommand cmd) { } // Ensure all secondary storage mounted on target host - List> secStoreUrlAndIdList = mgr.getSecondaryStorageStoresUrlAndIdList(Long.parseLong(_dcId)); + List> secStoreUrlAndIdList = mgr.getSecondaryStorageStoresUrlAndIdList(Long.parseLong(dcId)); for (Pair secStoreUrlAndId : secStoreUrlAndIdList) { String secStoreUrl = secStoreUrlAndId.first(); Long secStoreId = secStoreUrlAndId.second(); if (secStoreUrl == null) { - String msg = String.format("Secondary storage for dc %s is not ready yet?", _dcId); + String msg = String.format("Secondary storage for dc %s is not ready yet?", dcId); throw new Exception(msg); } @@ -4535,7 +3023,7 @@ protected Answer execute(MigrateWithStorageCommand cmd) { VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolName); newVol.setId(volumeId); newVol.setPath(newPath); - newVol.setChainInfo(_gson.toJson(diskInfo)); + newVol.setChainInfo(gson.toJson(diskInfo)); volumeToList.add(newVol); break; } @@ -4707,7 +3195,7 @@ private Answer 
execute(MigrateVolumeCommand cmd) { StorageFilerTO poolTo = cmd.getPool(); if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource MigrateVolumeCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource MigrateVolumeCommand: " + gson.toJson(cmd)); } String vmName = cmd.getAttachedVmName(); @@ -4805,7 +3293,7 @@ private Answer execute(MigrateVolumeCommand cmd) { } } VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); - String chainInfo = _gson.toJson(diskInfoBuilder.getDiskInfoByBackingFileBaseName(volumePath, poolTo.getUuid().replace("-", ""))); + String chainInfo = gson.toJson(diskInfoBuilder.getDiskInfoByBackingFileBaseName(volumePath, poolTo.getUuid().replace("-", ""))); MigrateVolumeAnswer answer = new MigrateVolumeAnswer(cmd, true, null, volumePath); answer.setVolumeChainInfo(chainInfo); return answer; @@ -4816,7 +3304,7 @@ private Answer execute(MigrateVolumeCommand cmd) { } } - private Pair getVirtualDiskInfo(VirtualMachineMO vmMo, String srcDiskName) throws Exception { + Pair getVirtualDiskInfo(VirtualMachineMO vmMo, String srcDiskName) throws Exception { Pair deviceInfo = vmMo.getDiskDevice(srcDiskName); if (deviceInfo == null) { throw new Exception("No such disk device: " + srcDiskName); @@ -4850,6 +3338,9 @@ protected Answer execute(CreateStoragePoolCommand cmd) { _storageProcessor.prepareManagedDatastore(context, getHyperHost(context), cmd.getDetails().get(CreateStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(CreateStoragePoolCommand.IQN), cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_PORT))); + + // TODO we might want to integrate content library soon, +// like: contentLibraryService.createContentLibrary(context, cmd.getDetails().get(CreateStoragePoolCommand.DATASTORE_NAME)); } catch (Exception ex) { return new Answer(cmd, false, "Issue creating datastore"); } @@ -4892,34 +3383,78 @@ protected Answer execute(ModifyTargetsCommand cmd) { protected Answer execute(ModifyStoragePoolCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource ModifyStoragePoolCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource ModifyStoragePoolCommand: " + gson.toJson(cmd)); } try { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); StorageFilerTO pool = cmd.getPool(); - if (pool.getType() != StoragePoolType.NetworkFilesystem && pool.getType() != StoragePoolType.VMFS) { + if (pool.getType() != StoragePoolType.NetworkFilesystem && pool.getType() != StoragePoolType.VMFS && pool.getType() != StoragePoolType.PreSetup && pool.getType() != StoragePoolType.DatastoreCluster) { throw new Exception("Unsupported storage pool type " + pool.getType()); } ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid()); if (morDatastore == null) { - morDatastore = hyperHost.mountDatastore(pool.getType() == StoragePoolType.VMFS, pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", "")); + morDatastore = hyperHost.mountDatastore((pool.getType() == StoragePoolType.VMFS || pool.getType() == StoragePoolType.PreSetup), pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", "")); } assert (morDatastore != null); - DatastoreSummary summary = new DatastoreMO(getServiceContext(), morDatastore).getSummary(); + DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore); +// in the (hopefully near) future we will
integrate contentLibraryService.createContentLibrary(getServiceContext(), dsMo.getName()); + + long capacity = 0; + long available = 0; + List childDatastoresModifyStoragePoolAnswers = new ArrayList<>(); + if (pool.getType() == StoragePoolType.DatastoreCluster) { + StoragepodMO datastoreClusterMo = new StoragepodMO(getServiceContext(), morDatastore); + StoragePodSummary dsClusterSummary = datastoreClusterMo.getDatastoreClusterSummary(); + capacity = dsClusterSummary.getCapacity(); + available = dsClusterSummary.getFreeSpace(); + + List childDatastoreMors = datastoreClusterMo.getDatastoresInDatastoreCluster(); + for (ManagedObjectReference childDsMor : childDatastoreMors) { + DatastoreMO childDsMo = new DatastoreMO(getServiceContext(), childDsMor); + + Map tInfo = new HashMap<>(); + DatastoreSummary summary = childDsMo.getDatastoreSummary(); + ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd, summary.getCapacity(), summary.getFreeSpace(), tInfo); + StoragePoolInfo poolInfo = answer.getPoolInfo(); + poolInfo.setName(summary.getName()); + String datastoreClusterPath = pool.getPath(); + int pathstartPosition = datastoreClusterPath.lastIndexOf('/'); + String datacenterName = datastoreClusterPath.substring(0, pathstartPosition+1); + String childPath = datacenterName + summary.getName(); + poolInfo.setHostPath(childPath); + String uuid = UUID.nameUUIDFromBytes(((pool.getHost() + childPath)).getBytes()).toString(); + poolInfo.setUuid(uuid); + poolInfo.setLocalPath(cmd.LOCAL_PATH_PREFIX + File.separator + uuid); + + answer.setPoolInfo(poolInfo); + answer.setPoolType(summary.getType()); + answer.setLocalDatastoreName(morDatastore.getValue()); + + childDsMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, uuid); + HypervisorHostHelper.createBaseFolderInDatastore(childDsMo, hyperHost); + + childDatastoresModifyStoragePoolAnswers.add(answer); + } + } else { + HypervisorHostHelper.createBaseFolderInDatastore(dsMo, hyperHost); - long capacity = summary.getCapacity(); - long available = summary.getFreeSpace(); + DatastoreSummary summary = dsMo.getDatastoreSummary(); + capacity = summary.getCapacity(); + available = summary.getFreeSpace(); + } Map tInfo = new HashMap<>(); ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd, capacity, available, tInfo); + answer.setDatastoreClusterChildren(childDatastoresModifyStoragePoolAnswers); - if (cmd.getAdd() && pool.getType() == StoragePoolType.VMFS) { + if (cmd.getAdd() && (pool.getType() == StoragePoolType.VMFS || pool.getType() == StoragePoolType.PreSetup) && pool.getType() != StoragePoolType.DatastoreCluster) { + answer.setPoolType(dsMo.getDatastoreType()); answer.setLocalDatastoreName(morDatastore.getValue()); } @@ -4952,11 +3487,13 @@ private void handleTargets(boolean add, ModifyTargetsCommand.TargetTypeToRemove protected Answer execute(DeleteStoragePoolCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource DeleteStoragePoolCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource DeleteStoragePoolCommand: " + gson.toJson(cmd)); } try { + VmwareContext context = getServiceContext(); if (cmd.getRemoveDatastore()) { +// not yet needed, as the content library is not yet used: contentLibraryService.deleteContentLibrary(context, cmd.getDetails().get(DeleteStoragePoolCommand.DATASTORE_NAME)); _storageProcessor.handleDatastoreAndVmdkDetach(cmd, cmd.getDetails().get(DeleteStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(DeleteStoragePoolCommand.IQN),
cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT))); @@ -4969,6 +3506,14 @@ protected Answer execute(DeleteStoragePoolCommand cmd) { // VmwareHypervisorHost hyperHost = this.getHyperHost(getServiceContext()); // hyperHost.unmountDatastore(pool.getUuid()); + VmwareHypervisorHost hyperHost = getHyperHost(context); + StorageFilerTO pool = cmd.getPool(); + ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid()); + if (morDatastore != null) { + DatastoreMO dsMo = new DatastoreMO(context, morDatastore); +// this is not created yet contentLibraryService.deleteContentLibrary(context, dsMo.getName()); + } + return new Answer(cmd, true, "success"); } } catch (Throwable e) { @@ -4995,7 +3540,7 @@ public static String createDatastoreNameFromIqn(String iqn) { protected AttachIsoAnswer execute(AttachIsoCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource AttachIsoCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource AttachIsoCommand: " + gson.toJson(cmd)); } try { @@ -5114,7 +3659,7 @@ private static String getSecondaryDatastoreUUID(String storeUrl) { protected Answer execute(ValidateSnapshotCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource ValidateSnapshotCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource ValidateSnapshotCommand: " + gson.toJson(cmd)); } // the command is no longer available @@ -5127,7 +3672,7 @@ protected Answer execute(ValidateSnapshotCommand cmd) { protected Answer execute(ManageSnapshotCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource ManageSnapshotCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource ManageSnapshotCommand: " + gson.toJson(cmd)); } long snapshotId = cmd.getSnapshotId(); @@ -5159,7 +3704,7 @@ protected Answer execute(ManageSnapshotCommand cmd) { protected Answer execute(BackupSnapshotCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource BackupSnapshotCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource BackupSnapshotCommand: " + gson.toJson(cmd)); } try { @@ -5216,7 +3761,7 @@ protected Answer execute(RevertToVMSnapshotCommand cmd) { protected Answer execute(CreateVolumeFromSnapshotCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource CreateVolumeFromSnapshotCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource CreateVolumeFromSnapshotCommand: " + gson.toJson(cmd)); } String details = null; @@ -5242,7 +3787,7 @@ protected Answer execute(CreateVolumeFromSnapshotCommand cmd) { protected Answer execute(CreatePrivateTemplateFromVolumeCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource CreatePrivateTemplateFromVolumeCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource CreatePrivateTemplateFromVolumeCommand: " + gson.toJson(cmd)); } try { @@ -5269,7 +3814,7 @@ protected Answer execute(final UpgradeSnapshotCommand cmd) { protected Answer execute(CreatePrivateTemplateFromSnapshotCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource CreatePrivateTemplateFromSnapshotCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource CreatePrivateTemplateFromSnapshotCommand: " + gson.toJson(cmd)); } try { @@ -5290,7 +3835,7 @@ protected Answer execute(CreatePrivateTemplateFromSnapshotCommand cmd) { protected Answer 
execute(GetStorageStatsCommand cmd) { if (s_logger.isTraceEnabled()) { - s_logger.trace("Executing resource GetStorageStatsCommand: " + _gson.toJson(cmd)); + s_logger.trace("Executing resource GetStorageStatsCommand: " + gson.toJson(cmd)); } try { @@ -5299,12 +3844,20 @@ protected Answer execute(GetStorageStatsCommand cmd) { ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getStorageId()); if (morDs != null) { - DatastoreMO datastoreMo = new DatastoreMO(context, morDs); - DatastoreSummary summary = datastoreMo.getSummary(); - assert (summary != null); + long capacity = 0; + long free = 0; + if (cmd.getPooltype() == StoragePoolType.DatastoreCluster) { + StoragepodMO datastoreClusterMo = new StoragepodMO(getServiceContext(), morDs); + StoragePodSummary summary = datastoreClusterMo.getDatastoreClusterSummary(); + capacity = summary.getCapacity(); + free = summary.getFreeSpace(); + } else { + DatastoreMO datastoreMo = new DatastoreMO(context, morDs); + DatastoreSummary summary = datastoreMo.getDatastoreSummary(); + capacity = summary.getCapacity(); + free = summary.getFreeSpace(); + } - long capacity = summary.getCapacity(); - long free = summary.getFreeSpace(); long used = capacity - free; if (s_logger.isDebugEnabled()) { @@ -5312,7 +3865,7 @@ protected Answer execute(GetStorageStatsCommand cmd) { + ", capacity: " + capacity + ", free: " + free + ", used: " + used); } - if (summary.getCapacity() <= 0) { + if (capacity <= 0) { s_logger.warn("Something is wrong with vSphere NFS datastore, rebooting ESX(ESXi) host should help"); } @@ -5339,7 +3892,7 @@ protected Answer execute(GetStorageStatsCommand cmd) { protected Answer execute(GetVncPortCommand cmd) { if (s_logger.isTraceEnabled()) { - s_logger.trace("Executing resource GetVncPortCommand: " + _gson.toJson(cmd)); + s_logger.trace("Executing resource GetVncPortCommand: " + gson.toJson(cmd)); } try { @@ -5381,7 +3934,7 @@ protected Answer execute(GetVncPortCommand cmd) { protected Answer execute(SetupCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource SetupCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource SetupCommand: " + gson.toJson(cmd)); } return new SetupAnswer(cmd, false); @@ -5389,7 +3942,7 @@ protected Answer execute(SetupCommand cmd) { protected Answer execute(MaintainCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource MaintainCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource MaintainCommand: " + gson.toJson(cmd)); } return new MaintainAnswer(cmd, "Put host in maintaince"); @@ -5397,7 +3950,7 @@ protected Answer execute(MaintainCommand cmd) { protected Answer execute(PingTestCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource PingTestCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource PingTestCommand: " + gson.toJson(cmd)); } String controlIp = cmd.getRouterIp(); @@ -5441,7 +3994,7 @@ protected Answer execute(PingTestCommand cmd) { protected Answer execute(CheckOnHostCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource CheckOnHostCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource CheckOnHostCommand: " + gson.toJson(cmd)); } return new CheckOnHostAnswer(cmd, null, "Not Implmeneted"); @@ -5458,7 +4011,7 @@ protected Answer execute(ModifySshKeysCommand cmd) { protected Answer execute(GetVmIpAddressCommand cmd) { if (s_logger.isTraceEnabled()) { - s_logger.trace("Executing resource command 
GetVmIpAddressCommand: " + _gson.toJson(cmd)); + s_logger.trace("Executing resource command GetVmIpAddressCommand: " + gson.toJson(cmd)); } String details = "Unable to find IP Address of VM. "; @@ -5504,7 +4057,7 @@ protected Answer execute(GetVmIpAddressCommand cmd) { answer = new Answer(cmd, result, details); if (s_logger.isTraceEnabled()) { - s_logger.trace("Returning GetVmIpAddressAnswer: " + _gson.toJson(answer)); + s_logger.trace("Returning GetVmIpAddressAnswer: " + gson.toJson(answer)); } return answer; } @@ -5512,7 +4065,7 @@ protected Answer execute(GetVmIpAddressCommand cmd) { @Override public PrimaryStorageDownloadAnswer execute(PrimaryStorageDownloadCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource PrimaryStorageDownloadCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource PrimaryStorageDownloadCommand: " + gson.toJson(cmd)); } try { @@ -5541,7 +4094,7 @@ protected Answer execute(PvlanSetupCommand cmd) { protected Answer execute(UnregisterVMCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource UnregisterVMCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource UnregisterVMCommand: " + gson.toJson(cmd)); } VmwareContext context = getServiceContext(); @@ -5591,7 +4144,7 @@ protected Answer execute(UnregisterVMCommand cmd) { * @return */ protected Answer execute(UnregisterNicCommand cmd) { - s_logger.info("Executing resource UnregisterNicCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource UnregisterNicCommand: " + gson.toJson(cmd)); if (_guestTrafficInfo == null) { return new Answer(cmd, false, "No Guest Traffic Info found, unable to determine where to clean up"); @@ -5650,7 +4203,7 @@ public void cleanupNetwork(HostMO hostMo, NetworkDetails netDetails) { @Override public CopyVolumeAnswer execute(CopyVolumeCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource CopyVolumeCommand: " + _gson.toJson(cmd)); + s_logger.info("Executing resource CopyVolumeCommand: " + gson.toJson(cmd)); } try { @@ -5834,7 +4387,7 @@ private List initializeLocalStorage() { dsMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, poolUuid); } - DatastoreSummary dsSummary = dsMo.getSummary(); + DatastoreSummary dsSummary = dsMo.getDatastoreSummary(); String address = hostMo.getHostName(); StoragePoolInfo pInfo = new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.VMFS, dsSummary.getCapacity(), dsSummary.getFreeSpace()); @@ -5843,11 +4396,11 @@ private List initializeLocalStorage() { cmd.setPoolInfo(pInfo); cmd.setGuid(poolUuid); // give storage host the same UUID as the local storage pool itself cmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL); - cmd.setDataCenter(_dcId); + cmd.setDataCenter(dcId); cmd.setPod(_pod); cmd.setCluster(_cluster); - s_logger.info("Add local storage startup command: " + _gson.toJson(cmd)); + s_logger.info("Add local storage startup command: " + gson.toJson(cmd)); storageCmds.add(cmd); } @@ -5891,7 +4444,7 @@ protected void fillHostInfo(StartupRoutingCommand cmd) { cmd.setHostDetails(details); cmd.setName(_url); cmd.setGuid(_guid); - cmd.setDataCenter(_dcId); + cmd.setDataCenter(dcId); cmd.setIqn(getIqn()); cmd.setPod(_pod); cmd.setCluster(_cluster); @@ -5929,7 +4482,7 @@ private void fillHostHardwareInfo(VmwareContext serviceContext, StartupRoutingCo VmwareHypervisorHostResourceSummary summary = hyperHost.getHyperHostResourceSummary(); if (s_logger.isInfoEnabled()) { - s_logger.info("Startup report on 
host hardware info. " + _gson.toJson(summary)); + s_logger.info("Startup report on host hardware info. " + gson.toJson(summary)); } cmd.setCaps("hvm"); @@ -5954,7 +4507,7 @@ private void fillHostNetworkInfo(VmwareContext serviceContext, StartupRoutingCom } if (s_logger.isInfoEnabled()) { - s_logger.info("Startup report on host network info. " + _gson.toJson(summary)); + s_logger.info("Startup report on host network info. " + gson.toJson(summary)); } cmd.setPrivateIpAddress(summary.getHostIp()); @@ -6061,35 +4614,6 @@ protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervi } } - private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArchitecture, String guestOs, String cloudGuestOs) { - if (cpuArchitecture == null) { - s_logger.warn("CPU arch is not set, default to i386. guest os: " + guestOs); - cpuArchitecture = "i386"; - } - - if (cloudGuestOs == null) { - s_logger.warn("Guest OS mapping name is not set for guest os: " + guestOs); - } - - VirtualMachineGuestOsIdentifier identifier = null; - try { - if (cloudGuestOs != null) { - identifier = VirtualMachineGuestOsIdentifier.fromValue(cloudGuestOs); - s_logger.debug("Using mapping name : " + identifier.toString()); - } - } catch (IllegalArgumentException e) { - s_logger.warn("Unable to find Guest OS Identifier in VMware for mapping name: " + cloudGuestOs + ". Continuing with defaults."); - } - if (identifier != null) { - return identifier; - } - - if (cpuArchitecture.equalsIgnoreCase("x86_64")) { - return VirtualMachineGuestOsIdentifier.OTHER_GUEST_64; - } - return VirtualMachineGuestOsIdentifier.OTHER_GUEST; - } - private HashMap getHostVmStateReport() throws Exception { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); @@ -6527,7 +5051,7 @@ public boolean configure(String name, Map params) throws Configu _url = (String) params.get("url"); _username = (String) params.get("username"); _password = (String) params.get("password"); - _dcId = (String) params.get("zone"); + dcId = (String) params.get("zone"); _pod = (String) params.get("pod"); _cluster = (String) params.get("cluster"); @@ -6550,6 +5074,8 @@ public boolean configure(String name, Map params) throws Configu CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(context, context.getServiceContent().getCustomFieldsManager()); cfmMo.ensureCustomFieldDef("Datastore", CustomFieldConstants.CLOUD_UUID); + cfmMo.ensureCustomFieldDef("StoragePod", CustomFieldConstants.CLOUD_UUID); + if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch || _guestTrafficInfo != null && _guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) { cfmMo.ensureCustomFieldDef("DistributedVirtualPortgroup", CustomFieldConstants.CLOUD_GC_DVP); @@ -6566,7 +5092,7 @@ public boolean configure(String name, Map params) throws Configu if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch || _publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) { - _privateNetworkVSwitchName = mgr.getPrivateVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware); + _privateNetworkVSwitchName = mgr.getPrivateVSwitchName(Long.parseLong(dcId), HypervisorType.VMware); _vsmCredentials = mgr.getNexusVSMCredentialsByClusterId(Long.parseLong(_cluster)); } @@ -6603,16 +5129,16 @@ else if (value != null && value.equalsIgnoreCase("ide")) boolObj = (Boolean) params.get("vm.instancename.flag"); if (boolObj != null 
&& boolObj.booleanValue()) { - _instanceNameFlag = true; + instanceNameFlag = true; } else { - _instanceNameFlag = false; + instanceNameFlag = false; } value = (String) params.get("scripts.timeout"); int timeout = NumbersUtil.parseInt(value, 1440) * 1000; storageNfsVersion = NfsSecondaryStorageResource.retrieveNfsVersionFromParams(params); - _storageProcessor = new VmwareStorageProcessor((VmwareHostService) this, _fullCloneFlag, (VmwareStorageMount) mgr, timeout, this, _shutdownWaitMs, null, + _storageProcessor = new VmwareStorageProcessor((VmwareHostService) this, _fullCloneFlag, (VmwareStorageMount) mgr, timeout, this, shutdownWaitMs, null, storageNfsVersion); storageHandler = new VmwareStorageSubsystemCommandHandler(_storageProcessor, storageNfsVersion); @@ -6769,7 +5295,7 @@ public void setRunLevel(int level) { @Override public Answer execute(DestroyCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource DestroyCommand to evict template from storage pool: " + _gson.toJson(cmd)); + s_logger.info("Executing resource DestroyCommand to evict template from storage pool: " + gson.toJson(cmd)); } try { @@ -7124,7 +5650,7 @@ private UnmanagedInstanceTO getUnmanagedInstance(VmwareHypervisorHost hyperHost, private Answer execute(GetUnmanagedInstancesCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource GetUnmanagedInstancesCommand " + _gson.toJson(cmd)); + s_logger.info("Executing resource GetUnmanagedInstancesCommand " + gson.toJson(cmd)); } VmwareContext context = getServiceContext(); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java index 6e19ba67bf50..44d2285ac36c 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java @@ -41,10 +41,9 @@ public static VmwareContext create(String vCenterAddress, String vCenterUserName assert (vCenterUserName != null); assert (vCenterPassword != null); - String serviceUrl = "https://" + vCenterAddress + "/sdk/vimService"; VmwareClient vimClient = new VmwareClient(vCenterAddress + "-" + s_seq++); vimClient.setVcenterSessionTimeout(s_vCenterSessionTimeout); - vimClient.connect(serviceUrl, vCenterUserName, vCenterPassword); + vimClient.connect(vCenterAddress, vCenterUserName, vCenterPassword); VmwareContext context = new VmwareContext(vimClient, vCenterAddress); assert (context != null); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageLayoutHelper.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageLayoutHelper.java index 9b2acbc5179e..164082c2a73c 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageLayoutHelper.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageLayoutHelper.java @@ -16,13 +16,19 @@ // under the License. 
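Editor's note: the VmwareStorageLayoutHelper hunks that follow replace the hard-coded "-delta.vmdk"/"-flat.vmdk" companion pair with a configurable, comma-separated list of linked-clone extensions (the new vsphere.linked.clone.extensions key, default "delta.vmdk,sesparse.vmdk"). A minimal, self-contained sketch of that file-name expansion, outside the ConfigKey plumbing (class and method names here are illustrative, not part of the patch):

import java.util.ArrayList;
import java.util.List;

public class LinkedCloneExtensionsSketch {
    // Default mirrors the new vsphere.linked.clone.extensions setting
    static final String DEFAULT_EXTENSIONS = "delta.vmdk,sesparse.vmdk";

    // Expands a base VMDK name into the descriptor plus its companion files:
    // the configured linked-clone extensions when linked, or -flat.vmdk otherwise.
    static List<String> companionFileNames(String vmdkName, boolean linkedVmdk, String extensionsCsv) {
        List<String> names = new ArrayList<>();
        names.add(vmdkName + ".vmdk");
        if (linkedVmdk) {
            for (String ext : extensionsCsv.trim().split("\\s*,\\s*")) {
                names.add(String.format("%s-%s", vmdkName, ext));
            }
        } else {
            names.add(vmdkName + "-flat.vmdk");
        }
        return names;
    }

    public static void main(String[] args) {
        // Prints [ROOT-12.vmdk, ROOT-12-delta.vmdk, ROOT-12-sesparse.vmdk]
        System.out.println(companionFileNames("ROOT-12", true, DEFAULT_EXTENSIONS));
    }
}

The same expansion is applied wherever the old code handled exactly two files (move, sync, delete), which is why those loops now start at index 1 and run over the configured extension list.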
package com.cloud.storage.resource; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; import org.apache.log4j.Logger; import com.cloud.hypervisor.vmware.mo.DatacenterMO; import com.cloud.hypervisor.vmware.mo.DatastoreFile; import com.cloud.hypervisor.vmware.mo.DatastoreMO; +import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; + import com.cloud.utils.Pair; /** @@ -30,32 +36,93 @@ * To provide helper methods to handle storage layout in one place * */ -public class VmwareStorageLayoutHelper { +public class VmwareStorageLayoutHelper implements Configurable { private static final Logger s_logger = Logger.getLogger(VmwareStorageLayoutHelper.class); + static final ConfigKey VsphereLinkedCloneExtensions = new ConfigKey("Hidden", String.class, + "vsphere.linked.clone.extensions", "delta.vmdk,sesparse.vmdk", + "Comma separated list of linked clone disk formats allowed to handle storage in VMware", true); + + public static String[] getVmdkFilePairDatastorePath(DatastoreMO dsMo, String vmName, String vmdkName, VmwareStorageLayoutType layoutType, boolean linkedVmdk) throws Exception { - String[] filePair = new String[2]; + int i = 0; + String[] vSphereLinkedCloneExtensions = VsphereLinkedCloneExtensions.value().trim().split("\\s*,\\s*"); + String[] fileNames; + if (linkedVmdk) + fileNames = new String[vSphereLinkedCloneExtensions.length + 1]; + else + fileNames = new String[2]; + switch (layoutType) { case VMWARE: assert (vmName != null && !vmName.isEmpty()); - filePair[0] = getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkName + ".vmdk"); + fileNames[i] = getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkName + ".vmdk"); - if (linkedVmdk) - filePair[1] = getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkName + "-delta.vmdk"); + if (linkedVmdk) { + for (int j=0 ; j < vSphereLinkedCloneExtensions.length; j++) { + fileNames[++i] = getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, String.format("%s-%s",vmdkName, vSphereLinkedCloneExtensions[j])); + } + } else - filePair[1] = getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkName + "-flat.vmdk"); - return filePair; + fileNames[i+1] = getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkName + "-flat.vmdk"); + return fileNames; case CLOUDSTACK_LEGACY: - filePair[0] = getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkName + ".vmdk"); + fileNames[i] = getDatastorePathBaseFolderFromVmdkFileName(dsMo, vmdkName + ".vmdk"); - if (linkedVmdk) - filePair[1] = getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkName + "-delta.vmdk"); - else - filePair[1] = getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkName + "-flat.vmdk"); - return filePair; + if (linkedVmdk) { + for (int j=0 ; j < vSphereLinkedCloneExtensions.length; j++) { + fileNames[++i] = getDatastorePathBaseFolderFromVmdkFileName(dsMo, String.format("%s-%s",vmdkName, vSphereLinkedCloneExtensions[j])); + } + } else + fileNames[i+1] = getDatastorePathBaseFolderFromVmdkFileName(dsMo, vmdkName + "-flat.vmdk"); + return fileNames; + + default: + assert (false); + break; + } + + assert (false); + return null; + } + + public static String[] getVmdkFilePairManagedDatastorePath(DatastoreMO dsMo, String vmName, String vmdkName, VmwareStorageLayoutType layoutType, boolean linkedVmdk) + throws Exception { + + int i = 0; + String[] vSphereLinkedCloneExtensions = VsphereLinkedCloneExtensions.value().trim().split("\\s*,\\s*"); + String[] 
fileNames; + if (linkedVmdk) + fileNames = new String[vSphereLinkedCloneExtensions.length + 1]; + else + fileNames = new String[2]; + + switch (layoutType) { + case VMWARE: + assert (vmName != null && !vmName.isEmpty()); + fileNames[i] = getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkName + ".vmdk"); + + if (linkedVmdk) { + for (int j=0 ; j < vSphereLinkedCloneExtensions.length; j++) { + fileNames[++i] = getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, String.format("%s-%s",vmdkName, vSphereLinkedCloneExtensions[j])); + } + } else + fileNames[i+1] = getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkName + "-flat.vmdk"); + return fileNames; + + case CLOUDSTACK_LEGACY: + fileNames[i] = getDeprecatedLegacyDatastorePathFromVmdkFileName(dsMo, vmdkName + ".vmdk"); + + if (linkedVmdk) { + for (int j=0 ; j < vSphereLinkedCloneExtensions.length; j++) { + fileNames[++i] = getDeprecatedLegacyDatastorePathFromVmdkFileName(dsMo, String.format("%s-%s",vmdkName, vSphereLinkedCloneExtensions[j])); + } + } else + fileNames[i+1] = getDeprecatedLegacyDatastorePathFromVmdkFileName(dsMo, vmdkName + "-flat.vmdk"); + return fileNames; default: assert (false); @@ -121,16 +188,20 @@ public static String syncVolumeToVmDefaultFolder(DatacenterMO dcMo, String vmNam syncVolumeToRootFolder(dcMo, ds, vmdkName, vmName, excludeFolders); } - if (ds.fileExists(vmdkFullCloneModeLegacyPair[1])) { - s_logger.info("sync " + vmdkFullCloneModeLegacyPair[1] + "->" + vmdkFullCloneModePair[1]); + for (int i=1; i" + vmdkFullCloneModePair[i]); - ds.moveDatastoreFile(vmdkFullCloneModeLegacyPair[1], dcMo.getMor(), ds.getMor(), vmdkFullCloneModePair[1], dcMo.getMor(), true); + ds.moveDatastoreFile(vmdkFullCloneModeLegacyPair[i], dcMo.getMor(), ds.getMor(), vmdkFullCloneModePair[i], dcMo.getMor(), true); + } } - if (ds.fileExists(vmdkLinkedCloneModeLegacyPair[1])) { - s_logger.info("sync " + vmdkLinkedCloneModeLegacyPair[1] + "->" + vmdkLinkedCloneModePair[1]); + for (int i=1; i" + vmdkLinkedCloneModePair[i]); - ds.moveDatastoreFile(vmdkLinkedCloneModeLegacyPair[1], dcMo.getMor(), ds.getMor(), vmdkLinkedCloneModePair[1], dcMo.getMor(), true); + ds.moveDatastoreFile(vmdkLinkedCloneModeLegacyPair[i], dcMo.getMor(), ds.getMor(), vmdkLinkedCloneModePair[i], dcMo.getMor(), true); + } } if (ds.fileExists(vmdkLinkedCloneModeLegacyPair[0])) { @@ -157,24 +228,22 @@ public static void syncVolumeToRootFolder(DatacenterMO dcMo, DatastoreMO ds, Str } DatastoreFile srcDsFile = new DatastoreFile(fileDsFullPath); - String companionFilePath = srcDsFile.getCompanionPath(vmdkName + "-flat.vmdk"); - if (ds.fileExists(companionFilePath)) { - String targetPath = getLegacyDatastorePathFromVmdkFileName(ds, vmdkName + "-flat.vmdk"); - - s_logger.info("Fixup folder-synchronization. 
move " + companionFilePath + " -> " + targetPath); - ds.moveDatastoreFile(companionFilePath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true); - } - companionFilePath = srcDsFile.getCompanionPath(vmdkName + "-delta.vmdk"); - if (ds.fileExists(companionFilePath)) { - String targetPath = getLegacyDatastorePathFromVmdkFileName(ds, vmdkName + "-delta.vmdk"); + List vSphereFileExtensions = new ArrayList<>(Arrays.asList(VsphereLinkedCloneExtensions.value().trim().split("\\s*,\\s*"))); + // add flat file format to the above list + vSphereFileExtensions.add("flat.vmdk"); + for (String linkedCloneExtension : vSphereFileExtensions) { + String companionFilePath = srcDsFile.getCompanionPath(String.format("%s-%s",vmdkName, linkedCloneExtension)); + if (ds.fileExists(companionFilePath)) { + String targetPath = getDatastorePathBaseFolderFromVmdkFileName(ds, String.format("%s-%s",vmdkName, linkedCloneExtension)); - s_logger.info("Fixup folder-synchronization. move " + companionFilePath + " -> " + targetPath); - ds.moveDatastoreFile(companionFilePath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true); + s_logger.info("Fixup folder-synchronization. move " + companionFilePath + " -> " + targetPath); + ds.moveDatastoreFile(companionFilePath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true); + } } // move the identity VMDK file the last - String targetPath = getLegacyDatastorePathFromVmdkFileName(ds, vmdkName + ".vmdk"); + String targetPath = getDatastorePathBaseFolderFromVmdkFileName(ds, vmdkName + ".vmdk"); s_logger.info("Fixup folder-synchronization. move " + fileDsFullPath + " -> " + targetPath); ds.moveDatastoreFile(fileDsFullPath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true); @@ -199,18 +268,16 @@ public static void moveVolumeToRootFolder(DatacenterMO dcMo, List detach s_logger.info("Move " + file.getPath() + " -> " + targetFile.getPath()); dsMo.moveDatastoreFile(file.getPath(), dcMo.getMor(), dsMo.getMor(), targetFile.getPath(), dcMo.getMor(), true); - String pairSrcFilePath = file.getCompanionPath(file.getFileBaseName() + "-flat.vmdk"); - String pairTargetFilePath = targetFile.getCompanionPath(file.getFileBaseName() + "-flat.vmdk"); - if (dsMo.fileExists(pairSrcFilePath)) { - s_logger.info("Move " + pairSrcFilePath + " -> " + pairTargetFilePath); - dsMo.moveDatastoreFile(pairSrcFilePath, dcMo.getMor(), dsMo.getMor(), pairTargetFilePath, dcMo.getMor(), true); - } - - pairSrcFilePath = file.getCompanionPath(file.getFileBaseName() + "-delta.vmdk"); - pairTargetFilePath = targetFile.getCompanionPath(file.getFileBaseName() + "-delta.vmdk"); - if (dsMo.fileExists(pairSrcFilePath)) { - s_logger.info("Move " + pairSrcFilePath + " -> " + pairTargetFilePath); - dsMo.moveDatastoreFile(pairSrcFilePath, dcMo.getMor(), dsMo.getMor(), pairTargetFilePath, dcMo.getMor(), true); + List vSphereFileExtensions = new ArrayList<>(Arrays.asList(VsphereLinkedCloneExtensions.value().trim().split("\\s*,\\s*"))); + // add flat file format to the above list + vSphereFileExtensions.add("flat.vmdk"); + for (String linkedCloneExtension : vSphereFileExtensions) { + String pairSrcFilePath = file.getCompanionPath(String.format("%s-%s", file.getFileBaseName(), linkedCloneExtension)); + String pairTargetFilePath = targetFile.getCompanionPath(String.format("%s-%s", file.getFileBaseName(), linkedCloneExtension)); + if (dsMo.fileExists(pairSrcFilePath)) { + s_logger.info("Move " + pairSrcFilePath + " -> " + pairTargetFilePath); + dsMo.moveDatastoreFile(pairSrcFilePath, dcMo.getMor(), 
dsMo.getMor(), pairTargetFilePath, dcMo.getMor(), true); + } } } } else { @@ -287,32 +354,49 @@ public static void deleteVolumeVmdkFiles(DatastoreMO dsMo, String volumeName, Da s_logger.warn("Unable to locate VMDK file: " + fileName); } - fileName = volumeName + "-flat.vmdk"; - fileFullPath = getLegacyDatastorePathFromVmdkFileName(dsMo, fileName); - if (!dsMo.fileExists(fileFullPath)) - fileFullPath = dsMo.searchFileInSubFolders(fileName, false, excludeFolders); - if (fileFullPath != null) { - dsMo.deleteFile(fileFullPath, dcMo.getMor(), true, excludeFolders); - } else { - s_logger.warn("Unable to locate VMDK file: " + fileName); + List vSphereFileExtensions = new ArrayList<>(Arrays.asList(VsphereLinkedCloneExtensions.value().trim().split("\\s*,\\s*"))); + vSphereFileExtensions.add("flat.vmdk"); + for (String linkedCloneExtension : vSphereFileExtensions) { + fileFullPath = getLegacyDatastorePathFromVmdkFileName(dsMo, String.format("%s-%s", volumeName, linkedCloneExtension)); + if (!dsMo.fileExists(fileFullPath)) + fileFullPath = dsMo.searchFileInSubFolders(String.format("%s-%s", volumeName, linkedCloneExtension), false, excludeFolders); + if (fileFullPath != null) { + dsMo.deleteFile(fileFullPath, dcMo.getMor(), true, excludeFolders); + } else { + s_logger.warn("Unable to locate VMDK file: " + String.format("%s-%s", volumeName, linkedCloneExtension)); + } } + } - fileName = volumeName + "-delta.vmdk"; - fileFullPath = getLegacyDatastorePathFromVmdkFileName(dsMo, fileName); - if (!dsMo.fileExists(fileFullPath)) - fileFullPath = dsMo.searchFileInSubFolders(fileName, false, excludeFolders); - if (fileFullPath != null) { - dsMo.deleteFile(fileFullPath, dcMo.getMor(), true, excludeFolders); - } else { - s_logger.warn("Unable to locate VMDK file: " + fileName); + //This method call is for the volumes which actually exists + public static String getLegacyDatastorePathFromVmdkFileName(DatastoreMO dsMo, String vmdkFileName) throws Exception { + String vmdkDatastorePath = String.format("[%s] %s/%s", dsMo.getName(), HypervisorHostHelper.VSPHERE_DATASTORE_BASE_FOLDER, vmdkFileName); + if (!dsMo.fileExists(vmdkDatastorePath)) { + vmdkDatastorePath = getDeprecatedLegacyDatastorePathFromVmdkFileName(dsMo, vmdkFileName); } + return vmdkDatastorePath; } - public static String getLegacyDatastorePathFromVmdkFileName(DatastoreMO dsMo, String vmdkFileName) throws Exception { + //This method call is for the volumes to be created or can also be for volumes already exists + public static String getDatastorePathBaseFolderFromVmdkFileName(DatastoreMO dsMo, String vmdkFileName) throws Exception { + return String.format("[%s] %s/%s", dsMo.getName(), HypervisorHostHelper.VSPHERE_DATASTORE_BASE_FOLDER, vmdkFileName); + } + + public static String getDeprecatedLegacyDatastorePathFromVmdkFileName(DatastoreMO dsMo, String vmdkFileName) throws Exception { return String.format("[%s] %s", dsMo.getName(), vmdkFileName); } public static String getVmwareDatastorePathFromVmdkFileName(DatastoreMO dsMo, String vmName, String vmdkFileName) throws Exception { return String.format("[%s] %s/%s", dsMo.getName(), vmName, vmdkFileName); } + + @Override + public String getConfigComponentName() { + return VmwareStorageLayoutHelper.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] {VsphereLinkedCloneExtensions}; + } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java 
b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java index 796db94f0ed5..fad4600193a7 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -20,7 +20,6 @@ import java.io.File; import java.io.FileOutputStream; import java.io.OutputStreamWriter; -import java.io.UnsupportedEncodingException; import java.net.URI; import java.nio.charset.Charset; import java.rmi.RemoteException; @@ -37,6 +36,7 @@ import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; +import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.CreateObjectAnswer; @@ -64,6 +64,9 @@ import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NfsTO; +import com.cloud.hypervisor.vmware.VmwareResourceException; +import com.cloud.hypervisor.vmware.manager.ContentLibraryService; +import com.cloud.hypervisor.vmware.manager.ContentLibraryServiceImpl; import com.cloud.hypervisor.vmware.manager.VmwareHostService; import com.cloud.hypervisor.vmware.manager.VmwareManager; import com.cloud.hypervisor.vmware.manager.VmwareStorageMount; @@ -79,6 +82,7 @@ import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import com.cloud.hypervisor.vmware.mo.NetworkDetails; import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; +import com.cloud.hypervisor.vmware.mo.VirtualStorageObjectManagerMO; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; import com.cloud.hypervisor.vmware.resource.VmwareResource; import com.cloud.hypervisor.vmware.util.VmwareContext; @@ -98,7 +102,9 @@ import com.cloud.vm.VmDetailConstants; import com.google.common.base.Strings; import com.google.gson.Gson; +import com.vmware.vim25.BaseConfigInfoDiskFileBackingInfo; import com.vmware.vim25.DatastoreHostMount; +import com.vmware.vim25.DatastoreSummary; import com.vmware.vim25.HostHostBusAdapter; import com.vmware.vim25.HostInternetScsiHba; import com.vmware.vim25.HostInternetScsiHbaAuthenticationProperties; @@ -116,11 +122,13 @@ import com.vmware.vim25.HostUnresolvedVmfsVolume; import com.vmware.vim25.InvalidStateFaultMsg; import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.VStorageObject; import com.vmware.vim25.VirtualDeviceBackingInfo; import com.vmware.vim25.VirtualDeviceConfigSpec; import com.vmware.vim25.VirtualDeviceConfigSpecOperation; import com.vmware.vim25.VirtualDisk; import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo; +import com.vmware.vim25.VirtualDiskType; import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VmConfigInfo; import com.vmware.vim25.VmfsDatastoreExpandSpec; @@ -146,6 +154,7 @@ public String getName() { private static final int DEFAULT_NFS_PORT = 2049; private static final int SECONDS_TO_WAIT_FOR_DATASTORE = 120; + private final ContentLibraryService contentLibraryService = new ContentLibraryServiceImpl(); private final VmwareHostService hostService; private boolean _fullCloneFlag; private final VmwareStorageMount mountService; @@ -477,11 +486,25 @@ private String getOVFFilePath(String srcOVAFileName) { return null; } - 
private Pair copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, - String templatePathAtSecondaryStorage, String templateName, String templateUuid, - boolean createSnapshot, Integer nfsVersion) throws Exception { - s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + - templatePathAtSecondaryStorage + ", templateName: " + templateName); + private String getOVFFile(String srcOVAFileName) { + File file = new File(srcOVAFileName); + + String[] files = _storage.listFiles(file.getParent()); + if (files != null) { + for (String fileName : files) { + if (fileName.toLowerCase().endsWith(".ovf")) { + File ovfFile = new File(fileName); + return ovfFile.getName(); + } + } + } + return null; + } + + private Pair copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, String templatePathAtSecondaryStorage, String templateName, String templateUuid, + boolean createSnapshot, Integer nfsVersion, boolean deployAsIs) throws Exception { + s_logger.info(String.format("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: %s, templatePathAtSecondaryStorage: %s, templateName: %s, deployAsIs: %s", + secondaryStorageUrl, templatePathAtSecondaryStorage, templateName, deployAsIs)); String secondaryMountPoint = mountService.getMountPoint(secondaryStorageUrl, nfsVersion); s_logger.info("Secondary storage mount point: " + secondaryMountPoint); @@ -489,7 +512,7 @@ private Pair copyTemplateFromSecondaryToPrimary(VmwareHy String srcOVAFileName = VmwareStorageLayoutHelper.getTemplateOnSecStorageFilePath(secondaryMountPoint, templatePathAtSecondaryStorage, templateName, ImageFormat.OVA.getFileExtension()); - + // FR37 consider extension: ova or ovf? String srcFileName = getOVFFilePath(srcOVAFileName); if (srcFileName == null) { Script command = new Script("tar", 0, s_logger); @@ -512,11 +535,14 @@ private Pair copyTemplateFromSecondaryToPrimary(VmwareHy throw new Exception(msg); } - String vmName = templateUuid; - hyperHost.importVmFromOVF(srcFileName, vmName, datastoreMo, "thin"); - - VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); + VirtualMachineMO vmMo; VmConfigInfo vAppConfig; + + if (s_logger.isTraceEnabled()) { + s_logger.trace(String.format("deploying new style == %b", deployAsIs)); + } + hyperHost.importVmFromOVF(srcFileName, templateUuid, datastoreMo, "thin", !deployAsIs); + vmMo = hyperHost.findVmOnHyperHost(templateUuid); if (vmMo == null) { String msg = "Failed to import OVA template. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + @@ -558,9 +584,27 @@ private Pair copyTemplateFromSecondaryToPrimary(VmwareHy return new Pair<>(vmMo, virtualSize); } + private void deployTemplateToContentLibrary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, String templatePathAtSecondaryStorage, + String templateUuid, String srcOVAFileName, String srcFileName) throws Exception { + String storeName = getSecondaryDatastoreUUID(secondaryStorageUrl); + ManagedObjectReference morSecDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, storeName); + if (morSecDatastore == null) { + morSecDatastore = prepareSecondaryDatastoreOnHost(secondaryStorageUrl); + } + DatastoreMO secDsMo = new DatastoreMO(datastoreMo.getContext(), morSecDatastore); + DatastoreSummary secDatastoresummary = secDsMo.getDatastoreSummary(); + + String ovfFile = getOVFFile(srcOVAFileName); + boolean importResult = contentLibraryService.importOvf(datastoreMo.getContext(), secDatastoresummary.getUrl() + templatePathAtSecondaryStorage, ovfFile, datastoreMo.getName(), templateUuid); + if (!importResult) { + s_logger.warn("Failed to import ovf into the content library: " + srcFileName); + } + } + @Override public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { DataTO srcData = cmd.getSrcTO(); + // FR37 TODO find where TO is created and make sure deployAsIs is set correctly TemplateObjectTO template = (TemplateObjectTO)srcData; DataStoreTO srcStore = srcData.getDataStore(); @@ -571,7 +615,6 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { NfsTO nfsImageStore = (NfsTO)srcStore; DataTO destData = cmd.getDestTO(); DataStoreTO destStore = destData.getDataStore(); - DataStoreTO primaryStore = destStore; String secondaryStorageUrl = nfsImageStore.getUrl(); @@ -628,7 +671,7 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { DatastoreMO dsMo = null; try { - String storageUuid = managed ? managedStoragePoolName : primaryStore.getUuid(); + String storageUuid = managed ? 
managedStoragePoolName : destStore.getUuid(); String templateUuidName = deriveTemplateUuidOnHost(hyperHost, storageUuid, templateInfo.second()); DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter()); VirtualMachineMO templateMo = VmwareHelper.pickOneVmOnRunningHost(dcMo.findVmByNameAndLabel(templateUuidName), true); @@ -655,25 +698,27 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { if (managed) { vmInfo = copyTemplateFromSecondaryToPrimary(hyperHost, dsMo, secondaryStorageUrl, templateInfo.first(), templateInfo.second(), - managedStoragePoolRootVolumeName, false, _nfsVersion); + managedStoragePoolRootVolumeName, false, _nfsVersion, template.isDeployAsIs()); VirtualMachineMO vmMo = vmInfo.first(); vmMo.unregisterVm(); - String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, managedStoragePoolRootVolumeName, + String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairManagedDatastorePath(dsMo, managedStoragePoolRootVolumeName, managedStoragePoolRootVolumeName, VmwareStorageLayoutType.VMWARE, false); - String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, + String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairManagedDatastorePath(dsMo, null, managedStoragePoolRootVolumeName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, false); dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); - dsMo.moveDatastoreFile(vmwareLayoutFilePair[1], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[1], dcMo.getMor(), true); + for (int i=1; i template.getSize() ? true : _fullCloneFlag; - } - if (!_fullCloneFlag) { - createVMLinkedClone(vmTemplate, dcMo, vmdkName, morDatastore, morPool); + ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool(); + vmMo = hyperHost.findVmOnHyperHost(template.getPath()); + createLinkedOrFullClone(template, volume, dcMo, vmMo, morDatastore, dsMo, cloneVMName, morPool); + // At this point vmMo points to the cloned VM + // TODO: should we check if vmMo has no vmdks i.e. a template with iso only? 
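Editor's note: both clone branches above now funnel through a single helper that chooses between a linked and a full clone. The rule, visible in the removed inline code and in the new createLinkedOrFullClone below, is: force a full clone when the requested volume is larger than its template, otherwise honour the configured flag. A compact illustration of just that decision, with hypothetical names:

public final class CloneDecisionSketch {
    private CloneDecisionSketch() { }

    // Mirrors the decision in the patch: a volume larger than its template
    // cannot be a linked clone; otherwise the configured flag wins.
    static boolean useFullClone(Long templateSize, long volumeSize, boolean configuredFullCloneFlag) {
        if (templateSize != null && volumeSize > templateSize) {
            return true;
        }
        return configuredFullCloneFlag;
    }

    public static void main(String[] args) {
        System.out.println(useFullClone(10L << 30, 20L << 30, false)); // true: volume outgrew the template
        System.out.println(useFullClone(10L << 30, 10L << 30, false)); // false: linked clone still allowed
    }
}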
+ vmMo = dcMo.findVm(cloneVMName); + vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); } else { - createVMFullClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool); - } - - vmMo = new ClusterMO(context, morCluster).findVmOnHyperHost(vmdkName); - assert (vmMo != null); - - vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); - s_logger.info("Move volume out of volume-wrapper VM " + vmdkFileBaseName); - String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.VMWARE, !_fullCloneFlag); - String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, !_fullCloneFlag); - - dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); - dsMo.moveDatastoreFile(vmwareLayoutFilePair[1], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[1], dcMo.getMor(), true); - - s_logger.info("detach disks from volume-wrapper VM " + vmdkName); - vmMo.detachAllDisks(); - - s_logger.info("destroy volume-wrapper VM " + vmdkName); - vmMo.destroy(); - - String srcFile = dsMo.getDatastorePath(vmdkName, true); - - dsMo.deleteFile(srcFile, dcMo.getMor(), true, searchExcludedFolders); + VirtualMachineMO vmTemplate = VmwareHelper.pickOneVmOnRunningHost(dcMo.findVmByNameAndLabel(templatePath), true); + if (vmTemplate == null) { + String msg = String.format("Template host in vSphere is not in connected state, %s for %s", REQUEST_TEMPLATE_RELOAD, templatePath); + s_logger.warn(msg); + return new CopyCmdAnswer(msg); + } - if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmdkName)) { - dsMo.deleteFolder(srcFile, dcMo.getMor()); + vmdkFileBaseName = cloneAndGetVmdkName(template, volume, searchExcludedFolders, context, hyperHost, dcMo, morDatastore, dsMo, cloneVMName, vmTemplate); } } - // restoreVM - move the new ROOT disk into corresponding VM folder - VirtualMachineMO restoreVmMo = dcMo.findVm(volume.getVmName()); - if (restoreVmMo != null) { - String vmNameInVcenter = restoreVmMo.getName(); // VM folder name in datastore will be VM's name in vCenter. 
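Editor's note: the path helpers used throughout this refactor all build vSphere datastore paths of the form "[datastoreName] folder/file.vmdk"; the new base-folder variant pins the folder to the CloudStack base folder, while the legacy variant falls back to the datastore root. A small sketch of the construction, assuming an illustrative base-folder name (the real constant is HypervisorHostHelper.VSPHERE_DATASTORE_BASE_FOLDER; its value is an assumption here):

public final class DatastorePathSketch {
    private DatastorePathSketch() { }

    // Assumption for illustration only; the patch reads HypervisorHostHelper.VSPHERE_DATASTORE_BASE_FOLDER.
    static final String BASE_FOLDER = "fcd";

    // Path inside a VM folder: "[ds] vmName/disk.vmdk"
    static String vmFolderPath(String datastoreName, String vmName, String vmdkFileName) {
        return String.format("[%s] %s/%s", datastoreName, vmName, vmdkFileName);
    }

    // Path inside the CloudStack base folder: "[ds] <baseFolder>/disk.vmdk"
    static String baseFolderPath(String datastoreName, String vmdkFileName) {
        return String.format("[%s] %s/%s", datastoreName, BASE_FOLDER, vmdkFileName);
    }

    // Deprecated legacy layout: file directly under the datastore root.
    static String datastoreRootPath(String datastoreName, String vmdkFileName) {
        return String.format("[%s] %s", datastoreName, vmdkFileName);
    }

    public static void main(String[] args) {
        System.out.println(vmFolderPath("DS1", "i-2-10-VM", "ROOT-10.vmdk")); // [DS1] i-2-10-VM/ROOT-10.vmdk
        System.out.println(baseFolderPath("DS1", "ROOT-10.vmdk"));            // [DS1] fcd/ROOT-10.vmdk (folder name assumed)
        System.out.println(datastoreRootPath("DS1", "ROOT-10.vmdk"));         // [DS1] ROOT-10.vmdk
    }
}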
- if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmNameInVcenter)) { - VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmNameInVcenter, dsMo, vmdkFileBaseName, searchExcludedFolders); - } + if (!template.isDeployAsIs()) { // will have to be reconsiled in case of deployAsIs + restoreVmMo(volume, searchExcludedFolders, dcMo, dsMo, vmdkFileBaseName); } VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setPath(vmdkFileBaseName); if (template.getSize() != null){ newVol.setSize(template.getSize()); - } - else { + } else { newVol.setSize(volume.getSize()); } return new CopyCmdAnswer(newVol); @@ -891,6 +915,67 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) { } } + private void createLinkedOrFullClone(TemplateObjectTO template, VolumeObjectTO volume, DatacenterMO dcMo, VirtualMachineMO vmMo, ManagedObjectReference morDatastore, + DatastoreMO dsMo, String cloneName, ManagedObjectReference morPool) throws Exception { + if (template.getSize() != null) { + _fullCloneFlag = volume.getSize() > template.getSize() || _fullCloneFlag; + } + if (!_fullCloneFlag) { + createVMLinkedClone(vmMo, dcMo, cloneName, morDatastore, morPool); + } else { + createVMFullClone(vmMo, dcMo, dsMo, cloneName, morDatastore, morPool); + } + } + + private void restoreVmMo(VolumeObjectTO volume, String searchExcludedFolders, DatacenterMO dcMo, DatastoreMO dsMo, String vmdkFileBaseName) throws Exception { + // restoreVM - move the new ROOT disk into corresponding VM folder + // FR37 TODO is this needed? + VirtualMachineMO restoreVmMo = dcMo.findVm(volume.getVmName()); + if (restoreVmMo != null) { + String vmNameInVcenter = restoreVmMo.getName(); // VM folder name in datastore will be VM's name in vCenter. + if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmNameInVcenter)) { + VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmNameInVcenter, dsMo, vmdkFileBaseName, searchExcludedFolders); + } + } + } + + private String cloneAndGetVmdkName(TemplateObjectTO template, VolumeObjectTO volume, String searchExcludedFolders, VmwareContext context, VmwareHypervisorHost hyperHost, + DatacenterMO dcMo, ManagedObjectReference morDatastore, DatastoreMO dsMo, String cloneName, VirtualMachineMO vmTemplate) throws Exception { + VirtualMachineMO vmMo; + String vmdkFileBaseName; + ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool(); + ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); + createLinkedOrFullClone(template, volume, dcMo, vmTemplate, morDatastore, dsMo, cloneName, morPool); + + vmMo = new ClusterMO(context, morCluster).findVmOnHyperHost(cloneName); + assert (vmMo != null); + + vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); + s_logger.info("Move volume out of volume-wrapper VM " + vmdkFileBaseName); + String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, cloneName, vmdkFileBaseName, VmwareStorageLayoutType.VMWARE, !_fullCloneFlag); + String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, cloneName, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, !_fullCloneFlag); + + dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); + for (int i=1; i copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, String srcVolumePath, DatastoreMO dsMo, String secStorageUrl, long wait, Integer nfsVersion) throws Exception { String volumeFolder; @@ -1063,7 +1148,7 @@ 
private void postCreatePrivateTemplate(String installFullPath, long templateId, // TODO a bit ugly here BufferedWriter out = null; try { - out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"),"UTF-8")); + out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"), Charset.defaultCharset())); out.write("filename=" + templateName + ".ova"); out.newLine(); out.write("description="); @@ -1242,7 +1327,7 @@ private void writeMetaOvaForTemplate(String installFullPath, String ovfFilename, // TODO a bit ugly here BufferedWriter out = null; try { - out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"),"UTF-8")); + out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"), Charset.defaultCharset())); out.write("ova.filename=" + templateName + ".ova"); out.newLine(); out.write("version=1.0"); @@ -2089,11 +2174,7 @@ private boolean expandVirtualDisk(VirtualMachineMO vmMo, String datastoreVolumeP private static String getSecondaryDatastoreUUID(String storeUrl) { String uuid = null; - try{ - uuid=UUID.nameUUIDFromBytes(storeUrl.getBytes("UTF-8")).toString(); - }catch(UnsupportedEncodingException e){ - s_logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error." ); - } + uuid=UUID.nameUUIDFromBytes(storeUrl.getBytes(Charset.defaultCharset())).toString(); return uuid; } @@ -2227,37 +2308,50 @@ public Answer createVolume(CreateObjectCommand cmd) { String volumeUuid = UUID.randomUUID().toString().replace("-", ""); String volumeDatastorePath = dsMo.getDatastorePath(volumeUuid + ".vmdk"); - String dummyVmName = hostService.getWorkerName(context, cmd, 0); - try { - s_logger.info("Create worker VM " + dummyVmName); - vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName); - if (vmMo == null) { - throw new Exception("Unable to create a dummy VM for volume creation"); - } - synchronized (this) { - try { - vmMo.createDisk(volumeDatastorePath, (int)(volume.getSize() / (1024L * 1024L)), morDatastore, vmMo.getScsiDeviceControllerKey()); - vmMo.detachDisk(volumeDatastorePath, false); + VirtualStorageObjectManagerMO vStorageObjectManagerMO = new VirtualStorageObjectManagerMO(context); + VStorageObject virtualDisk = vStorageObjectManagerMO.createDisk(morDatastore, VirtualDiskType.THIN, volume.getSize(), volumeDatastorePath, volumeUuid); + VolumeObjectTO newVol = new VolumeObjectTO(); + DatastoreFile file = new DatastoreFile(((BaseConfigInfoDiskFileBackingInfo)virtualDisk.getConfig().getBacking()).getFilePath()); + newVol.setPath(file.getFileBaseName()); + newVol.setSize(volume.getSize()); + return new CreateObjectAnswer(newVol); + + /* + * // This is old code which uses workervm to create disks + * String dummyVmName = hostService.getWorkerName(context, cmd, 0); + try { + s_logger.info("Create worker VM " + dummyVmName); + vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName); + if (vmMo == null) { + throw new Exception("Unable to create a dummy VM for volume creation"); } - catch (Exception e) { - s_logger.error("Deleting file " + volumeDatastorePath + " due to error: " + e.getMessage()); - VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, volumeUuid, dcMo, VmwareManager.s_vmwareSearchExcludeFolder.value()); - throw new CloudRuntimeException("Unable to create volume due to: " + e.getMessage()); + + 
synchronized (this) { + try { + vmMo.createDisk(volumeDatastorePath, (int)(volume.getSize() / (1024L * 1024L)), morDatastore, vmMo.getScsiDeviceControllerKey()); + vmMo.detachDisk(volumeDatastorePath, false); + } + catch (Exception e) { + s_logger.error("Deleting file " + volumeDatastorePath + " due to error: " + e.getMessage()); + VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, volumeUuid, dcMo, VmwareManager.s_vmwareSearchExcludeFolder.value()); + throw new CloudRuntimeException("Unable to create volume due to: " + e.getMessage()); + } } - } - VolumeObjectTO newVol = new VolumeObjectTO(); - newVol.setPath(volumeUuid); - newVol.setSize(volume.getSize()); - return new CreateObjectAnswer(newVol); - } finally { - s_logger.info("Destroy dummy VM after volume creation"); - if (vmMo != null) { - vmMo.detachAllDisks(); - vmMo.destroy(); + VolumeObjectTO newVol = new VolumeObjectTO(); + newVol.setPath(volumeUuid); + newVol.setSize(volume.getSize()); + return new CreateObjectAnswer(newVol); + } finally { + s_logger.info("Destroy dummy VM after volume creation"); + if (vmMo != null) { + vmMo.detachAllDisks(); + vmMo.destroy(); + } } - } + + * */ } catch (Throwable e) { if (e instanceof RemoteException) { s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); @@ -2919,7 +3013,7 @@ private void createVmdk(Command cmd, DatastoreMO dsMo, String vmdkDatastorePath, throw new Exception("Unable to create a dummy VM for volume creation"); } - Long volumeSizeToUse = volumeSize < dsMo.getSummary().getFreeSpace() ? volumeSize : dsMo.getSummary().getFreeSpace(); + Long volumeSizeToUse = volumeSize < dsMo.getDatastoreSummary().getFreeSpace() ? volumeSize : dsMo.getDatastoreSummary().getFreeSpace(); vmMo.createDisk(vmdkDatastorePath, getMBsFromBytes(volumeSizeToUse), dsMo.getMor(), vmMo.getScsiDeviceControllerKey()); vmMo.detachDisk(vmdkDatastorePath, false); @@ -3429,7 +3523,7 @@ private Long restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, Datasto VirtualMachineMO clonedVm = null; try { - hyperHost.importVmFromOVF(srcOVFFileName, newVolumeName, primaryDsMo, "thin"); + hyperHost.importVmFromOVF(srcOVFFileName, newVolumeName, primaryDsMo, "thin", true); clonedVm = hyperHost.findVmOnHyperHost(newVolumeName); if (clonedVm == null) { throw new Exception("Unable to create container VM for volume creation"); @@ -3525,13 +3619,7 @@ public Answer forgetObject(ForgetObjectCmd cmd) { private static String deriveTemplateUuidOnHost(VmwareHypervisorHost hyperHost, String storeIdentifier, String templateName) { String templateUuid; - try { - templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes("UTF-8")).toString(); - } catch(UnsupportedEncodingException e){ - s_logger.warn("unexpected encoding error, using default Charset: " + e.getLocalizedMessage()); - templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes(Charset.defaultCharset())) - .toString(); - } + templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes(Charset.defaultCharset())).toString(); templateUuid = templateUuid.replaceAll("-", ""); return templateUuid; } @@ -3555,8 +3643,70 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) return null; } + @Override + public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) { + String 
primaryStorageNameLabel = cmd.getStoragePool().getUuid(); + String storagePolicyId = cmd.getStoragePolicyId(); + VmwareContext context = hostService.getServiceContext(cmd); + try { + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); + ManagedObjectReference morPrimaryDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStorageNameLabel); + if (morPrimaryDs == null) { + String msg = "Unable to find datastore: " + primaryStorageNameLabel; + s_logger.error(msg); + throw new Exception(msg); + } + + DatastoreMO primaryDsMo = new DatastoreMO(hyperHost.getContext(), morPrimaryDs); + boolean isDatastoreStoragePolicyComplaint = primaryDsMo.isDatastoreStoragePolicyComplaint(storagePolicyId); + + String failedMessage = String.format("DataStore %s is not complaince with storage policy id %s", primaryStorageNameLabel, storagePolicyId); + if (!isDatastoreStoragePolicyComplaint) + return new Answer(cmd, isDatastoreStoragePolicyComplaint, failedMessage); + else + return new Answer(cmd, isDatastoreStoragePolicyComplaint, null); + } catch (Throwable e) { + if (e instanceof RemoteException) { + hostService.invalidateServiceContext(context); + } + String details = String.format("Exception while checking if datastore %s is storage policy %s complaince : %s", primaryStorageNameLabel, storagePolicyId, VmwareHelper.getExceptionMessage(e)); + s_logger.error(details, e); + return new Answer(cmd, false, details); + } + } + @Override public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { return null; } + + /** + * Return the cloned VM from the template + */ + public VirtualMachineMO cloneVMFromTemplate(String templateName, String cloneName, String templatePrimaryStoreUuid) { + try { + VmwareContext context = hostService.getServiceContext(null); + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, null); + DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter()); + VirtualMachineMO templateMo = dcMo.findVm(templateName); + if (templateMo == null) { + throw new VmwareResourceException(String.format("Unable to find template %s in vSphere", templateName)); + } + ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, templatePrimaryStoreUuid); + DatastoreMO dsMo = new DatastoreMO(context, morDatastore); + ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool(); + if (morDatastore == null) { + throw new VmwareResourceException("Unable to find datastore in vSphere"); + } + if (!_fullCloneFlag) { + createVMLinkedClone(templateMo, dcMo, cloneName, morDatastore, morPool); + } else { + createVMFullClone(templateMo, dcMo, dsMo, cloneName, morDatastore, morPool); + } + return dcMo.findVm(cloneName); + } catch (Throwable e) { + s_logger.error(String.format("Error cloning VM from template in primary storage: %s", e.getMessage()), e); + throw new VmwareResourceException(String.format("Unable to find template %s in vSphere", templateName)); + } + } } diff --git a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/api/command/admin/zone/ImportVsphereStoragePoliciesCmd.java b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/api/command/admin/zone/ImportVsphereStoragePoliciesCmd.java new file mode 100644 index 000000000000..ea5bacfb1f42 --- /dev/null +++ b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/api/command/admin/zone/ImportVsphereStoragePoliciesCmd.java @@ -0,0 +1,111 @@ +// Licensed to the Apache Software 
Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.zone; + +import com.cloud.dc.DataCenter; +import com.cloud.dc.VsphereStoragePolicy; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.hypervisor.vmware.VmwareDatacenterService; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.VsphereStoragePoliciesResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.List; + +@APICommand(name = ImportVsphereStoragePoliciesCmd.APINAME, description = "Import vSphere storage policies", + responseObject = VsphereStoragePoliciesResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class ImportVsphereStoragePoliciesCmd extends BaseCmd { + + public static final Logger LOGGER = Logger.getLogger(ImportVsphereStoragePoliciesCmd.class.getName()); + + public static final String APINAME = "importVsphereStoragePolicies"; + + @Inject + public VmwareDatacenterService _vmwareDatacenterService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, + description = "ID of the zone") + private Long zoneId; + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + final DataCenter dataCenter = _resourceService.getZone(getZoneId()); + if (dataCenter == null) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unable to find zone by ID: " + getZoneId()); + } + + List storagePolicies = _vmwareDatacenterService.importVsphereStoragePolicies(this); + final 
ListResponse responseList = new ListResponse<>(); + final List storagePoliciesResponseList = new ArrayList<>(); + for (VsphereStoragePolicy storagePolicy : storagePolicies) { + final VsphereStoragePoliciesResponse storagePoliciesResponse = new VsphereStoragePoliciesResponse(); + storagePoliciesResponse.setZoneId(dataCenter.getUuid()); + storagePoliciesResponse.setId(storagePolicy.getUuid()); + storagePoliciesResponse.setName(storagePolicy.getName()); + storagePoliciesResponse.setPolicyId(storagePolicy.getPolicyId()); + storagePoliciesResponse.setDescription(storagePolicy.getDescription()); + storagePoliciesResponse.setObjectName("StoragePolicy"); + + storagePoliciesResponseList.add(storagePoliciesResponse); + } + responseList.setResponses(storagePoliciesResponseList); + responseList.setResponseName(getCommandName()); + setResponseObject(responseList); + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + public Long getZoneId() { + return zoneId; + } + +} diff --git a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/api/command/admin/zone/ListVsphereStoragePoliciesCmd.java b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/api/command/admin/zone/ListVsphereStoragePoliciesCmd.java new file mode 100644 index 000000000000..90e8e8805485 --- /dev/null +++ b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/api/command/admin/zone/ListVsphereStoragePoliciesCmd.java @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
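Editor's note: ImportVsphereStoragePoliciesCmd above and ListVsphereStoragePoliciesCmd below build their responses with an identical per-policy mapping loop. If the duplication ever becomes a concern, that loop could be lifted into a small helper along these lines; this is only a sketch against the setters already exercised by both commands, not an existing CloudStack utility:

import com.cloud.dc.VsphereStoragePolicy;
import org.apache.cloudstack.api.response.VsphereStoragePoliciesResponse;

// Hypothetical helper; not part of the patch.
final class StoragePolicyResponseMapper {
    private StoragePolicyResponseMapper() { }

    // Maps one policy entity to its API response, exactly as both commands do inline.
    static VsphereStoragePoliciesResponse toResponse(VsphereStoragePolicy policy, String zoneUuid) {
        VsphereStoragePoliciesResponse response = new VsphereStoragePoliciesResponse();
        response.setZoneId(zoneUuid);
        response.setId(policy.getUuid());
        response.setName(policy.getName());
        response.setPolicyId(policy.getPolicyId());
        response.setDescription(policy.getDescription());
        response.setObjectName("StoragePolicy");
        return response;
    }
}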
+ +package org.apache.cloudstack.api.command.admin.zone; + +import com.cloud.dc.DataCenter; +import com.cloud.dc.VsphereStoragePolicy; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.hypervisor.vmware.VmwareDatacenterService; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.VsphereStoragePoliciesResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.List; + +@APICommand(name = ListVsphereStoragePoliciesCmd.APINAME, description = "List vSphere storage policies", + responseObject = VsphereStoragePoliciesResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class ListVsphereStoragePoliciesCmd extends BaseCmd { + + public static final Logger LOGGER = Logger.getLogger(ListVsphereStoragePoliciesCmd.class.getName()); + + public static final String APINAME = "listVsphereStoragePolicies"; + + @Inject + public VmwareDatacenterService _vmwareDatacenterService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, + description = "ID of the zone") + private Long zoneId; + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + final DataCenter dataCenter = _resourceService.getZone(getZoneId()); + if (dataCenter == null) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unable to find zone by ID: " + getZoneId()); + } + + List storagePolicies = _vmwareDatacenterService.listVsphereStoragePolicies(this); + final ListResponse responseList = new ListResponse<>(); + final List storagePoliciesResponseList = new ArrayList<>(); + for (VsphereStoragePolicy storagePolicy : storagePolicies) { + final VsphereStoragePoliciesResponse storagePoliciesResponse = new VsphereStoragePoliciesResponse(); + storagePoliciesResponse.setZoneId(dataCenter.getUuid()); + storagePoliciesResponse.setId(storagePolicy.getUuid()); + storagePoliciesResponse.setName(storagePolicy.getName()); + storagePoliciesResponse.setPolicyId(storagePolicy.getPolicyId()); + storagePoliciesResponse.setDescription(storagePolicy.getDescription()); + storagePoliciesResponse.setObjectName("StoragePolicy"); + + storagePoliciesResponseList.add(storagePoliciesResponse); + } + responseList.setResponses(storagePoliciesResponseList); + 
responseList.setResponseName(getCommandName()); + setResponseObject(responseList); + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + public Long getZoneId() { + return zoneId; + } +} diff --git a/plugins/hypervisors/vmware/src/main/resources/META-INF/cloudstack/core/spring-vmware-core-context.xml b/plugins/hypervisors/vmware/src/main/resources/META-INF/cloudstack/core/spring-vmware-core-context.xml index 3af2d1ac31fe..49ff8d199a65 100644 --- a/plugins/hypervisors/vmware/src/main/resources/META-INF/cloudstack/core/spring-vmware-core-context.xml +++ b/plugins/hypervisors/vmware/src/main/resources/META-INF/cloudstack/core/spring-vmware-core-context.xml @@ -31,13 +31,14 @@ class="com.cloud.hypervisor.vmware.manager.VmwareManagerImpl" /> + - \ No newline at end of file diff --git a/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/VmwareDatacenterApiUnitTest.java b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/VmwareDatacenterApiUnitTest.java index eb041396459b..1249f6ce0047 100644 --- a/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/VmwareDatacenterApiUnitTest.java +++ b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/VmwareDatacenterApiUnitTest.java @@ -31,6 +31,7 @@ import com.cloud.dc.dao.ClusterVSMMapDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; +import com.cloud.dc.dao.VsphereStoragePolicyDao; import com.cloud.event.dao.EventDao; import com.cloud.exception.DiscoveryException; import com.cloud.exception.InvalidParameterValueException; @@ -486,6 +487,11 @@ public TemplateManager templateManager() { return Mockito.mock(TemplateManager.class); } + @Bean + public VsphereStoragePolicyDao vsphereStoragePolicyDao() { + return Mockito.mock(VsphereStoragePolicyDao.class); + } + public static class Library implements TypeFilter { @Override diff --git a/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImplTest.java b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImplTest.java index 8aa92f7d9c4d..80677e9d3ba3 100644 --- a/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImplTest.java +++ b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImplTest.java @@ -105,6 +105,7 @@ public void updateVmwareDatacenterNormalUpdate() { Mockito.lenient().doReturn(hostDetails).when(hostDetailsDao).findDetails(Mockito.anyLong()); Mockito.doReturn("some-old-guid").when(hostDetails).get("guid"); Mockito.doReturn(hostDetails).when(hostDetailsDao).findDetails(Mockito.anyLong()); + Mockito.doReturn(null).when(vmwareManager).importVsphereStoragePoliciesInternal(Mockito.anyLong(), Mockito.anyLong()); final VmwareDatacenter vmwareDatacenter = vmwareManager.updateVmwareDatacenter(updateVmwareDcCmd); diff --git a/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/resource/StartCommandExecutorTest.java b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/resource/StartCommandExecutorTest.java new file mode 100644 index 000000000000..dc2474ae7907 --- /dev/null +++ b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/resource/StartCommandExecutorTest.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) 
under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.vmware.resource; + +import com.cloud.agent.api.to.NicTO; +import org.junit.Test; +import org.mockito.InjectMocks; +import org.mockito.Spy; + +import static org.junit.Assert.assertEquals; + +public class StartCommandExecutorTest { + + @Spy + @InjectMocks + VmwareResource resource = new VmwareResource(); + + @Spy + @InjectMocks + StartCommandExecutor starter = new StartCommandExecutor(resource); + + @Test + public void generateMacSequence() { + final NicTO nicTo1 = new NicTO(); + nicTo1.setMac("01:23:45:67:89:AB"); + nicTo1.setDeviceId(1); + + final NicTO nicTo2 = new NicTO(); + nicTo2.setMac("02:00:65:b5:00:03"); + nicTo2.setDeviceId(0); + + //final NicTO [] nicTOs = {nicTO1, nicTO2, nicTO3}; + //final NicTO[] nics = new NicTO[]{nic}; + final NicTO[] nics = new NicTO[] {nicTo1, nicTo2}; + + String macSequence = starter.generateMacSequence(nics); + assertEquals(macSequence, "02:00:65:b5:00:03|01:23:45:67:89:AB"); + } +} diff --git a/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java index 7cebaf119106..a7a84c6eb0c7 100644 --- a/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java +++ b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java @@ -55,7 +55,6 @@ import com.cloud.agent.api.ScaleVmCommand; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.NfsTO; -import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -214,24 +213,6 @@ public void testScaleVMF1() throws Exception { verify(_resource).execute(cmd); } - @Test - public void testGenerateMacSequence() { - final NicTO nicTo1 = new NicTO(); - nicTo1.setMac("01:23:45:67:89:AB"); - nicTo1.setDeviceId(1); - - final NicTO nicTo2 = new NicTO(); - nicTo2.setMac("02:00:65:b5:00:03"); - nicTo2.setDeviceId(0); - - //final NicTO [] nicTOs = {nicTO1, nicTO2, nicTO3}; - //final NicTO[] nics = new NicTO[]{nic}; - final NicTO[] nics = new NicTO[] {nicTo1, nicTo2}; - - String macSequence = _resource.generateMacSequence(nics); - assertEquals(macSequence, "02:00:65:b5:00:03|01:23:45:67:89:AB"); - } - @Test public void testReplaceNicsMacSequenceInBootArgs() { String bootArgs = "nic_macs=02:00:65:b5:00:03|7C02:00:4f:1b:00:15|7C1e:00:54:00:00:0f|7C02:00:35:fa:00:11|7C02:00:47:40:00:12"; diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java index e4c07d4ba79e..a7da201167c9 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java @@ -34,6 +34,7 @@ import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; +import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.CreateObjectAnswer; @@ -214,6 +215,12 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { return null; } + @Override + public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) { + s_logger.info("'CheckDataStoreStoragePolicyComplainceCommand' is not applicable for XenServerStorageProcessor"); + return new Answer(cmd, false, "Not applicable for XenServerStorageProcessor"); + } + @Override public AttachAnswer attachIso(final AttachCommand cmd) { final DiskTO disk = cmd.getDisk(); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java index a2c8b708bf3a..12b70e85d054 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java @@ -27,6 +27,7 @@ import java.util.Set; import java.util.UUID; +import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; @@ -910,6 +911,12 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { return new CopyCmdAnswer(details); } + @Override + public Answer CheckDataStoreStoragePolicyComplaince(CheckDataStoreStoragePolicyComplainceCommand cmd) { + s_logger.info("'CheckDataStoreStoragePolicyComplainceCommand' is not applicable for XenServerStorageProcessor"); + return new Answer(cmd, false, "Not applicable for XenServerStorageProcessor"); + } + @Override public Answer copyVolumeFromPrimaryToSecondary(final CopyCommand cmd) { final Connection conn = hypervisorResource.getConnection(); diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index a500fdb63543..a4877fbc5efb 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -18,31 +18,6 @@ */ package
org.apache.cloudstack.storage.datastore.lifecycle; -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URLDecoder; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import javax.inject.Inject; - -import org.apache.log4j.Logger; - -import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; - import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateStoragePoolCommand; @@ -77,6 +52,29 @@ import com.cloud.vm.dao.SecondaryStorageVmDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.commons.lang.StringUtils; +import org.apache.log4j.Logger; + +import javax.inject.Inject; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { private static final Logger s_logger = Logger.getLogger(CloudStackPrimaryDataStoreLifeCycleImpl.class); @@ -133,6 +131,7 @@ public DataStore initialize(Map dsInfos) { Long zoneId = (Long)dsInfos.get("zoneId"); String url = (String)dsInfos.get("url"); String providerName = (String)dsInfos.get("providerName"); + String hypervisorType = (String)dsInfos.get("hypervisorType"); if (clusterId != null && podId == null) { throw new InvalidParameterValueException("Cluster id requires pod id"); } @@ -254,6 +253,11 @@ public DataStore initialize(Map dsInfos) { parameters.setHost(storageHost); parameters.setPort(0); parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("DatastoreCluster")) { + parameters.setType(StoragePoolType.DatastoreCluster); + parameters.setHost(storageHost); + parameters.setPort(0); + 
parameters.setPath(hostPath); } else if (scheme.equalsIgnoreCase("iscsi")) { String[] tokens = hostPath.split("/"); int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1); @@ -327,7 +331,7 @@ public DataStore initialize(Map dsInfos) { uuid = (String)existingUuid; } else if (scheme.equalsIgnoreCase("sharedmountpoint") || scheme.equalsIgnoreCase("clvm")) { uuid = UUID.randomUUID().toString(); - } else if (scheme.equalsIgnoreCase("PreSetup")) { + } else if (scheme.equalsIgnoreCase("PreSetup") && !(StringUtils.isNotBlank(hypervisorType) && HypervisorType.getType(hypervisorType).equals(HypervisorType.VMware))) { uuid = hostPath.replace("/", ""); } else { uuid = UUID.nameUUIDFromBytes((storageHost + hostPath).getBytes()).toString(); @@ -358,7 +362,7 @@ protected boolean createStoragePool(long hostId, StoragePool pool) { if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.Filesystem && pool.getPoolType() != StoragePoolType.IscsiLUN && pool.getPoolType() != StoragePoolType.Iscsi && pool.getPoolType() != StoragePoolType.VMFS && - pool.getPoolType() != StoragePoolType.SharedMountPoint && pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.OCFS2 && + pool.getPoolType() != StoragePoolType.SharedMountPoint && pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.DatastoreCluster && pool.getPoolType() != StoragePoolType.OCFS2 && pool.getPoolType() != StoragePoolType.RBD && pool.getPoolType() != StoragePoolType.CLVM && pool.getPoolType() != StoragePoolType.SMB && pool.getPoolType() != StoragePoolType.Gluster) { s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); diff --git a/pom.xml b/pom.xml index 8760aab3b4fc..bf2b8f4f937d 100644 --- a/pom.xml +++ b/pom.xml @@ -166,6 +166,8 @@ 8.5.47 1.0.0-build222 6.7 + 2.15.0 + 3.3.0 0.5.0 6.2.0-3.1 3.1.3 diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 2407a4fa6498..760ceda9d7cb 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -31,6 +31,11 @@ import javax.inject.Inject; +import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.storage.ImageStore; +import com.cloud.storage.VMTemplateDetailVO; +import com.cloud.storage.dao.VMTemplateDetailsDao; +import com.google.gson.Gson; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -123,7 +128,6 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.agent.api.storage.OVFProperty; import com.cloud.api.query.dao.AccountJoinDao; import com.cloud.api.query.dao.AffinityGroupJoinDao; import com.cloud.api.query.dao.AsyncJobJoinDao; @@ -211,11 +215,9 @@ import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.StoragePoolTagVO; -import com.cloud.storage.TemplateOVFPropertyVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.dao.StoragePoolTagsDao; -import com.cloud.storage.dao.TemplateOVFPropertiesDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; @@ -399,8 +401,7 @@ public class QueryManagerImpl extends 
MutualExclusiveIdsManagerBase implements Q @Inject ManagementServerHostDao managementServerHostDao; - @Inject - TemplateOVFPropertiesDao templateOVFPropertiesDao; + @Inject VMTemplateDetailsDao vmTemplateDetailsDao; @Inject public VpcVirtualNetworkApplianceService routerService; @@ -2309,6 +2310,7 @@ private Pair, Integer> searchForStoragePoolsInternal(Lis sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); sb.and("hostAddress", sb.entity().getHostAddress(), SearchCriteria.Op.EQ); sb.and("scope", sb.entity().getScope(), SearchCriteria.Op.EQ); + sb.and("parent", sb.entity().getParent(), Op.EQ); SearchCriteria sc = sb.create(); @@ -2346,6 +2348,7 @@ private Pair, Integer> searchForStoragePoolsInternal(Lis if (scopeType != null) { sc.setParameters("scope", scopeType.toString()); } + sc.setParameters("parent", 0); // search Pool details by ids Pair, Integer> uniquePoolPair = _poolJoinDao.searchAndCount(sc, searchFilter); @@ -3978,18 +3981,18 @@ public ListResponse listTemplateOVFProperties(ListT ListResponse response = new ListResponse<>(); List result = new ArrayList<>(); Long templateId = cmd.getTemplateId(); - List ovfProperties = templateOVFPropertiesDao.listByTemplateId(templateId); - for (OVFProperty property : ovfProperties) { - TemplateOVFPropertyResponse propertyResponse = new TemplateOVFPropertyResponse(); - propertyResponse.setKey(property.getKey()); - propertyResponse.setType(property.getType()); - propertyResponse.setValue(property.getValue()); - propertyResponse.setQualifiers(property.getQualifiers()); - propertyResponse.setUserConfigurable(property.isUserConfigurable()); - propertyResponse.setLabel(property.getLabel()); - propertyResponse.setDescription(property.getDescription()); - propertyResponse.setPassword(property.isPassword()); - propertyResponse.setObjectName("ovfproperty"); + + SearchCriteria ssc = vmTemplateDetailsDao.createSearchCriteria(); + ssc.addAnd("resourceId", Op.EQ, templateId); + ssc.addAnd("name", SearchCriteria.Op.LIKE, ImageStore.ACS_PROPERTY_PREFIX + "%"); + + List ovfProperties = vmTemplateDetailsDao.search(ssc, null); + + Gson gson = new Gson(); + for (VMTemplateDetailVO property : ovfProperties) { + OVFPropertyTO ovfPropertyTO = gson.fromJson(property.getValue(),OVFPropertyTO.class); + + TemplateOVFPropertyResponse propertyResponse = _templateJoinDao.createTemplateOVFPropertyResponse(ovfPropertyTO); result.add(propertyResponse); } response.setResponses(result); diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java index b8b312bd267b..a248d71e72c6 100644 --- a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java @@ -32,6 +32,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -52,6 +54,9 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase spSearch; private final SearchBuilder spIdSearch; @@ -94,6 +99,10 @@ public StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO pool) { 
poolResponse.setHypervisor(pool.getHypervisor().toString()); } + StoragePoolDetailVO poolType = storagePoolDetailsDao.findDetail(pool.getId(), "pool_type"); + if (poolType != null) { + poolResponse.setType(poolType.getValue()); + } long allocatedSize = pool.getUsedCapacity() + pool.getReservedCapacity(); poolResponse.setDiskSizeTotal(pool.getCapacityBytes()); poolResponse.setDiskSizeAllocated(allocatedSize); diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDao.java index c9d7eba48b2f..aff90dac9eab 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDao.java @@ -18,7 +18,9 @@ import java.util.List; +import com.cloud.agent.api.storage.OVFPropertyTO; import org.apache.cloudstack.api.ResponseObject.ResponseView; +import org.apache.cloudstack.api.response.TemplateOVFPropertyResponse; import org.apache.cloudstack.api.response.TemplateResponse; import com.cloud.api.query.vo.TemplateJoinVO; @@ -49,4 +51,19 @@ public interface TemplateJoinDao extends GenericDao { Pair, Integer> searchIncludingRemovedAndCount(final SearchCriteria sc, final Filter filter); List findByDistinctIds(Long... ids); + + default TemplateOVFPropertyResponse createTemplateOVFPropertyResponse(OVFPropertyTO ovfPropertyTO) { + TemplateOVFPropertyResponse propertyResponse = new TemplateOVFPropertyResponse(); + + propertyResponse.setKey(ovfPropertyTO.getKey()); + propertyResponse.setType(ovfPropertyTO.getType()); + propertyResponse.setValue(ovfPropertyTO.getValue()); + propertyResponse.setQualifiers(ovfPropertyTO.getQualifiers()); + propertyResponse.setUserConfigurable(ovfPropertyTO.isUserConfigurable()); + propertyResponse.setLabel(ovfPropertyTO.getLabel()); + propertyResponse.setDescription(ovfPropertyTO.getDescription()); + propertyResponse.setPassword(ovfPropertyTO.isPassword()); + propertyResponse.setObjectName("ovfproperty"); + return propertyResponse; + } } diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 27380ffaa936..a9bf0682d42d 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -25,6 +25,10 @@ import javax.inject.Inject; +import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.storage.ImageStore; +import com.google.gson.Gson; +import com.google.gson.JsonSyntaxException; import org.apache.cloudstack.utils.security.DigestHelper; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -222,6 +226,7 @@ public TemplateResponse newTemplateResponse(ResponseView view, TemplateJoinVO te } templateResponse.setDirectDownload(template.isDirectDownload()); + templateResponse.setDeployAsIs(template.isDeployAsIs()); templateResponse.setRequiresHvm(template.isRequiresHvm()); //set template children disks @@ -290,15 +295,22 @@ public TemplateResponse newUpdateResponse(TemplateJoinVO result) { @Override public TemplateResponse setTemplateResponse(ResponseView view, TemplateResponse templateResponse, TemplateJoinVO template) { + Gson gson = new Gson(); // update details map - if (template.getDetailName() != null) { - Map details = templateResponse.getDetails(); - if (details == null) { - details = new HashMap<>(); + String key = template.getDetailName(); + if (key != null) { + // FR37 TODO check properties 
and network prerequisites and if details is one of those fill those instead of detail + if (key.startsWith(ImageStore.ACS_PROPERTY_PREFIX)) { + try { + OVFPropertyTO property = gson.fromJson(template.getDetailValue(), OVFPropertyTO.class); + templateResponse.addProperty(createTemplateOVFPropertyResponse(property)); + } catch (JsonSyntaxException e) { + s_logger.warn(String.format("found an unexpected property for template '%s'; %s: %s", + template.getUuid(), template.getDetailName(), template.getDetailValue())); + } } - details.put(template.getDetailName(), template.getDetailValue()); - templateResponse.setDetails(details); + templateResponse.addDetail(key, template.getDetailValue()); } // update tag information diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 80c433ad71ca..8e489f89eca0 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -322,8 +322,8 @@ public UserVmResponse newUserVmResponse(ResponseView view, String objectName, Us if (vmDetails != null) { Map resourceDetails = new HashMap(); for (UserVmDetailVO userVmDetailVO : vmDetails) { - if (!userVmDetailVO.getName().startsWith(ApiConstants.OVF_PROPERTIES) || - (UserVmManager.DisplayVMOVFProperties.value() && userVmDetailVO.getName().startsWith(ApiConstants.OVF_PROPERTIES))) { + if (!userVmDetailVO.getName().startsWith(ApiConstants.PROPERTIES) || + (UserVmManager.DisplayVMOVFProperties.value() && userVmDetailVO.getName().startsWith(ApiConstants.PROPERTIES))) { resourceDetails.put(userVmDetailVO.getName(), userVmDetailVO.getValue()); } if ((ApiConstants.BootType.UEFI.toString()).equalsIgnoreCase(userVmDetailVO.getName())) { diff --git a/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java index 565e290bd704..1831aaafac97 100644 --- a/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java @@ -139,6 +139,9 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I @Column(name = "storage_provider_name") private String storageProviderName; + @Column(name = "parent") + private Long parent; + /** * @return the scope */ @@ -263,4 +266,8 @@ public int getJobStatus() { public String getStorageProviderName() { return storageProviderName; } + + public Long getParent() { + return parent; + } } diff --git a/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java index 25e3b0b5ff50..91bb76336ccc 100644 --- a/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java @@ -231,6 +231,9 @@ public class TemplateJoinVO extends BaseViewWithTagInformationVO implements Cont @Column(name = "direct_download") private boolean directDownload; + @Column(name = "deploy_as_is") + private boolean deployAsIs; + public TemplateJoinVO() { } @@ -490,6 +493,10 @@ public boolean isDirectDownload() { return directDownload; } + public boolean isDeployAsIs() { + return deployAsIs; + } + public Object getParentTemplateId() { return parentTemplateId; } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 
7e9c9d39c2b1..54e6cf64a83c 100755 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -39,6 +39,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.dao.VsphereStoragePolicyDao; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; @@ -386,6 +387,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati IndirectAgentLB _indirectAgentLB; @Inject private VMTemplateZoneDao templateZoneDao; + @Inject + VsphereStoragePolicyDao vsphereStoragePolicyDao; + // FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao? @Inject @@ -2820,7 +2824,7 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List Long bytesWriteRate, Long bytesWriteRateMax, Long bytesWriteRateMaxLength, Long iopsReadRate, Long iopsReadRateMax, Long iopsReadRateMaxLength, Long iopsWriteRate, Long iopsWriteRateMax, Long iopsWriteRateMaxLength, - final Integer hypervisorSnapshotReserve, String cacheMode) { + final Integer hypervisorSnapshotReserve, String cacheMode, final Long storagePolicyID) { long diskSize = 0;// special case for custom disk offerings if (numGibibytes != null && numGibibytes <= 0) { throw new InvalidParameterValueException("Please specify a disk size of at least 1 Gb."); @@ -2948,6 +2952,9 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List detailsVO.add(new DiskOfferingDetailVO(offering.getId(), ApiConstants.ZONE_ID, String.valueOf(zoneId), false)); } } + if (storagePolicyID != null) { + detailsVO.add(new DiskOfferingDetailVO(offering.getId(), ApiConstants.STORAGE_POLICY, String.valueOf(storagePolicyID), false)); + } if (!detailsVO.isEmpty()) { diskOfferingDetailsDao.saveDetails(detailsVO); } @@ -2969,6 +2976,7 @@ public DiskOffering createDiskOffering(final CreateDiskOfferingCmd cmd) { final String tags = cmd.getTags(); final List domainIds = cmd.getDomainIds(); final List zoneIds = cmd.getZoneIds(); + final Long storagePolicyId = cmd.getStoragePolicy(); // check if valid domain if (CollectionUtils.isNotEmpty(domainIds)) { @@ -3008,6 +3016,12 @@ public DiskOffering createDiskOffering(final CreateDiskOfferingCmd cmd) { } } + if (storagePolicyId != null) { + if (vsphereStoragePolicyDao.findById(storagePolicyId) == null) { + throw new InvalidParameterValueException("Please specify a valid vSphere storage policy id"); + } + } + final Boolean isCustomizedIops = cmd.isCustomizedIops(); final Long minIops = cmd.getMinIops(); final Long maxIops = cmd.getMaxIops(); @@ -3038,7 +3052,7 @@ public DiskOffering createDiskOffering(final CreateDiskOfferingCmd cmd) { localStorageRequired, isDisplayOfferingEnabled, isCustomizedIops, minIops, maxIops, bytesReadRate, bytesReadRateMax, bytesReadRateMaxLength, bytesWriteRate, bytesWriteRateMax, bytesWriteRateMaxLength, iopsReadRate, iopsReadRateMax, iopsReadRateMaxLength, iopsWriteRate, iopsWriteRateMax, iopsWriteRateMaxLength, - hypervisorSnapshotReserve, cacheMode); + hypervisorSnapshotReserve, cacheMode, storagePolicyId); } /** @@ -4007,7 +4021,7 @@ public Vlan createVlanAndPublicIpRange(final long zoneId, final long networkId, // Check if the vlan is being used if (!bypassVlanOverlapCheck && _zoneDao.findVnet(zoneId, physicalNetworkId, BroadcastDomainType.getValue(BroadcastDomainType.fromString(vlanId))).size() > 0) { - throw new 
InvalidParameterValueException("The VLAN tag " + vlanId + " is already being used for dynamic vlan allocation for the guest network in zone " + throw new InvalidParameterValueException("The VLAN tag to create; " + vlanId + " is already being used for dynamic vlan allocation for the guest network in zone " + zone.getName()); } diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 26b36157dcdd..b628b3a9c3c8 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -31,6 +31,7 @@ import javax.naming.ConfigurationException; import com.cloud.utils.StringUtils; +import com.cloud.exception.StorageUnavailableException; import com.cloud.utils.db.Filter; import com.cloud.utils.fsm.StateMachine2; @@ -1268,6 +1269,18 @@ public int compare(Volume v1, Volume v2) { requestVolumes = new ArrayList(); requestVolumes.add(vol); + if (potentialHost.getHypervisorType() == HypervisorType.VMware) { + try { + boolean isStoragePoolStoragepolicyComplaince = _storageMgr.isStoragePoolComplaintWithStoragePolicy(requestVolumes, potentialSPool); + if (!isStoragePoolStoragepolicyComplaince) { + continue; + } + } catch (StorageUnavailableException e) { + s_logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", potentialSPool.getUuid(), e.getMessage())); + continue; + } + } + if (!_storageMgr.storagePoolHasEnoughIops(requestVolumes, potentialSPool) || !_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool, potentialHost.getClusterId())) continue; diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index 2ae35fc08ad7..e241d0b71de6 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -215,7 +215,7 @@ protected VirtualMachineTO toVirtualMachineTO(VirtualMachineProfile vmProfile) { to.setNics(nics); to.setDisks(vmProfile.getDisks().toArray(new DiskTO[vmProfile.getDisks().size()])); - + // FR37 if this is a new VM to be deployed as is from a template we need to pass the mary storage somehow, now only for actual disks is a primary storage passed if (vmProfile.getTemplate().getBits() == 32) { to.setArch("i686"); } else { diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index 1d5b5821b467..3f5ba9b54f21 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -222,8 +222,9 @@ public void persistDefaultValues() throws InternalErrorException { s_logger.debug("Configuration server excluded plaintext authenticator"); // Save default service offerings - createServiceOffering(User.UID_SYSTEM, "Small Instance", 1, 512, 500, "Small Instance", ProvisioningType.THIN, false, false, null); - createServiceOffering(User.UID_SYSTEM, "Medium Instance", 1, 1024, 1000, "Medium Instance", ProvisioningType.THIN, false, false, null); + createServiceOffering(User.UID_SYSTEM, "Small Instance", 1, 512, 500, "Small Instance", ProvisioningType.THIN, false, false, null, true); + createServiceOffering(User.UID_SYSTEM, "Medium Instance", 1, 1024, 1000, "Medium Instance", 
ProvisioningType.THIN, false, false, null, true); + createServiceOffering(User.UID_SYSTEM, "Custom Deploy-as-is Instance", null, null, null, "Custom Deploy-as-is Instance", ProvisioningType.THIN, false, false, null, false); // Save default disk offerings createDefaultDiskOffering("Small", "Small Disk, 5 GB", ProvisioningType.THIN, 5, null, false, false); createDefaultDiskOffering("Medium", "Medium Disk, 20 GB", ProvisioningType.THIN, 20, null, false, false); @@ -930,12 +931,13 @@ private DiskOfferingVO createDefaultDiskOffering(String name, String description return newDiskOffering; } - private ServiceOfferingVO createServiceOffering(long userId, String name, int cpu, int ramSize, int speed, String displayText, - ProvisioningType provisioningType, boolean localStorageRequired, boolean offerHA, String tags) { + private ServiceOfferingVO createServiceOffering(long userId, String name, Integer cpu, Integer ramSize, Integer speed, String displayText, + ProvisioningType provisioningType, boolean localStorageRequired, boolean offerHA, String tags, boolean display) { tags = cleanupTags(tags); ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, null, null, offerHA, displayText, provisioningType, localStorageRequired, false, tags, false, null, false); offering.setUniqueName("Cloud.Com-" + name); + offering.setDisplayOffering(display); // leaving the above reference to cloud.com in as it is an identifyer and has no real world relevance offering = _serviceOfferingDao.persistSystemServiceOffering(offering); return offering; diff --git a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java index a2f97e841278..fefea6cf8773 100755 --- a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java @@ -408,11 +408,12 @@ public void doInTransactionWithoutResult(TransactionStatus status) { VMTemplateVO templateUpdate = _templateDao.createForUpdate(); templateUpdate.setSize(answer.getVirtualSize()); _templateDao.update(tmpTemplate.getId(), templateUpdate); - // For multi-disk OVA, check and create data disk templates + + // For multi-disk OVA, check and create data disk templates or root disks as details if (tmpTemplate.getFormat().equals(Storage.ImageFormat.OVA)) { final DataStore store = dataStoreManager.getDataStore(templateDataStore.getDataStoreId(), templateDataStore.getDataStoreRole()); final TemplateInfo templateInfo = templateFactory.getTemplate(tmpTemplate.getId(), store); - if (!templateService.createOvaDataDiskTemplates(templateInfo)) { + if (!templateService.createOvaDataDiskTemplates(templateInfo, template.isDeployAsIs())) { tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 79343ab4725f..f5c7f5c435a1 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -39,6 +39,11 @@ import javax.inject.Inject; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.dc.VsphereStoragePolicyVO; +import com.cloud.dc.dao.VsphereStoragePolicyDao; +import com.cloud.utils.StringUtils; 
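// Illustrative sketch (not part of this patch): the DatastoreCluster support added to StorageManagerImpl
// below repeatedly fans an operation out over the child pools returned by
// _storagePoolDao.listChildStoragePoolsInDatastoreCluster(...). A hypothetical helper for that pattern
// (the method name and the use of java.util.function.Consumer are assumptions) could look like:
// private void forEachChildDatastore(StoragePoolVO parent, java.util.function.Consumer<StoragePoolVO> action) {
//     if (parent.getPoolType() == StoragePoolType.DatastoreCluster) {
//         for (StoragePoolVO child : _storagePoolDao.listChildStoragePoolsInDatastoreCluster(parent.getId())) {
//             action.accept(child);
//         }
//     }
// }
// For example, forEachChildDatastore(sPool, child -> deleteDataStoreInternal(child, forced)); would mirror
// the delete path further down in this file.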
+import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; @@ -81,6 +86,8 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.management.ManagementServerHost; +import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; +import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao; @@ -175,7 +182,6 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; -import com.cloud.utils.StringUtils; import com.cloud.utils.UriUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; @@ -296,6 +302,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C SnapshotService _snapshotService; @Inject StoragePoolTagsDao _storagePoolTagsDao; + @Inject + DiskOfferingDetailsDao _diskOfferingDetailsDao; + @Inject + VsphereStoragePolicyDao _vsphereStoragePolicyDao; protected List _discoverers; @@ -602,6 +612,7 @@ public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws Con params.put("zoneId", host.getDataCenterId()); params.put("clusterId", host.getClusterId()); params.put("podId", host.getPodId()); + params.put("hypervisorType", host.getHypervisorType()); params.put("url", pInfo.getPoolType().toString() + "://" + pInfo.getHost() + "/" + pInfo.getHostPath()); params.put("name", name); params.put("localStorage", true); @@ -687,6 +698,9 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource && hypervisorType != HypervisorType.Any) { throw new InvalidParameterValueException("zone wide storage pool is not supported for hypervisor type " + hypervisor); } + } else { + ClusterVO clusterVO = _clusterDao.findById(clusterId); + hypervisorType = clusterVO.getHypervisorType(); } Map details = extractApiParamAsMap(cmd.getDetails()); @@ -704,6 +718,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource params.put("zoneId", zone.getId()); params.put("clusterId", clusterId); params.put("podId", podId); + params.put("hypervisorType", hypervisorType.toString()); params.put("url", cmd.getUrl()); params.put("tags", cmd.getTags()); params.put("name", cmd.getStoragePoolName()); @@ -797,6 +812,12 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I if (s_logger.isDebugEnabled()) { s_logger.debug("Updating Storage Pool Tags to :" + storagePoolTags); } + if (pool.getPoolType() == StoragePoolType.DatastoreCluster) { + List childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); + for (StoragePoolVO childPool : childStoragePools) { + _storagePoolTagsDao.persist(childPool.getId(), storagePoolTags); + } + } _storagePoolTagsDao.persist(pool.getId(), storagePoolTags); } @@ -893,10 +914,23 @@ public boolean deletePool(DeletePoolCmd cmd) { s_logger.warn("Unable to delete storage id: " + id + " due to it is not in Maintenance state"); throw new InvalidParameterValueException("Unable to delete 
storage due to it is not in Maintenance state, id: " + id); } - Pair vlms = _volsDao.getCountAndTotalByPool(id); + + if (sPool.getPoolType() == StoragePoolType.DatastoreCluster) { + // FR41 yet to handle on failure of deletion of any of the child storage pool + List childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(sPool.getId()); + for (StoragePoolVO childPool : childStoragePools) { + deleteDataStoreInternal(childPool, forced); + } + } + return deleteDataStoreInternal(sPool, forced); + + } + + private boolean deleteDataStoreInternal(StoragePoolVO sPool, boolean forced) { + Pair vlms = _volsDao.getCountAndTotalByPool(sPool.getId()); if (forced) { if (vlms.first() > 0) { - Pair nonDstrdVlms = _volsDao.getNonDestroyedCountAndTotalByPool(id); + Pair nonDstrdVlms = _volsDao.getNonDestroyedCountAndTotalByPool(sPool.getId()); if (nonDstrdVlms.first() > 0) { throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated " + "non-destroyed vols for this pool"); } @@ -932,7 +966,7 @@ public boolean deletePool(DeletePoolCmd cmd) { } _storagePoolDao.releaseFromLockTable(lock.getId()); - s_logger.trace("Released lock for storage pool " + id); + s_logger.trace("Released lock for storage pool " + sPool.getId()); DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(sPool.getStorageProviderName()); DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); @@ -1458,6 +1492,26 @@ public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance(Long primaryStor DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); DataStore store = _dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary); + + if (primaryStorage.getPoolType() == StoragePoolType.DatastoreCluster) { + // Before preparing the datastorecluster to maintenance mode, the storagepools in the datastore cluster needs to put in maintenance + List childDatastores = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(primaryStorageId); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (StoragePoolVO childDatastore : childDatastores) { + // set the pool state to prepare for maintenance, so that VMs will not migrate to the storagepools in the same cluster + childDatastore.setStatus(StoragePoolStatus.PrepareForMaintenance); + _storagePoolDao.update(childDatastore.getId(), childDatastore); + } + } + }); + for (StoragePoolVO childDatastore : childDatastores) { + //FR41 need to handle when one of the primary stores is unable to put in maintenance mode + DataStore childStore = _dataStoreMgr.getDataStore(childDatastore.getId(), DataStoreRole.Primary); + lifeCycle.maintain(childStore); + } + } lifeCycle.maintain(store); return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary); @@ -1485,6 +1539,14 @@ public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(CancelPrimaryStor DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); DataStore store = _dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary); + if (primaryStorage.getPoolType() == StoragePoolType.DatastoreCluster) { + //FR41 need to handle when one of the primary stores is unable to 
cancel the maintenance mode + List<StoragePoolVO> childDatastores = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(primaryStorageId); + for (StoragePoolVO childDatastore : childDatastores) { + DataStore childStore = _dataStoreMgr.getDataStore(childDatastore.getId(), DataStoreRole.Primary); + lifeCycle.cancelMaintain(childStore); + } + } lifeCycle.cancelMaintain(store); return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary); @@ -1859,6 +1921,48 @@ public boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long current } } + @Override + public boolean isStoragePoolComplaintWithStoragePolicy(List<Volume> volumes, StoragePool pool) throws StorageUnavailableException { + if (volumes == null || volumes.isEmpty()) { + return false; + } + List<Pair<Volume, Answer>> answers = new ArrayList<Pair<Volume, Answer>>(); + + for (Volume volume : volumes) { + String storagePolicyId = _diskOfferingDetailsDao.getDetail(volume.getDiskOfferingId(), ApiConstants.STORAGE_POLICY); + if (org.apache.commons.lang.StringUtils.isNotEmpty(storagePolicyId)) { + VsphereStoragePolicyVO storagePolicyVO = _vsphereStoragePolicyDao.findById(Long.parseLong(storagePolicyId)); + List<Long> hostIds = getUpHostsInPool(pool.getId()); + Collections.shuffle(hostIds); + + if (hostIds == null || hostIds.isEmpty()) { + throw new StorageUnavailableException("Unable to send command to the pool " + pool.getName() + " because there are no enabled hosts up in this cluster", pool.getId()); + } + try { + StorageFilerTO storageFilerTO = new StorageFilerTO(pool); + CheckDataStoreStoragePolicyComplainceCommand cmd = new CheckDataStoreStoragePolicyComplainceCommand(storagePolicyVO.getPolicyId(), storageFilerTO); + long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(hostIds.get(0), cmd); + Answer answer = _agentMgr.send(targetHostId, cmd); + answers.add(new Pair<>(volume, answer)); + } catch (AgentUnavailableException e) { + s_logger.debug("Unable to send storage pool command to " + pool + " via " + hostIds.get(0), e); + throw new StorageUnavailableException("Unable to send command to the pool ", pool.getId()); + } catch (OperationTimedoutException e) { + s_logger.debug("Failed to process storage pool command to " + pool + " via " + hostIds.get(0), e); + throw new StorageUnavailableException("Failed to process storage command to the pool ", pool.getId()); + } + } + } + // check the cumulative result for all volumes + for (Pair<Volume, Answer> answer : answers) { + if (!answer.second().getResult()) { + s_logger.debug(String.format("Storage pool %s is not compliant with the storage policy for volume %s", pool.getName(), answer.first().getName())); + return false; + } + } + return true; + } + private boolean checkPoolforSpace(StoragePool pool, long allocatedSizeWithTemplate, long totalAskingSize) { // allocated space includes templates StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId()); @@ -2529,6 +2633,15 @@ public DiskTO getDiskWithThrottling(final DataTO volTO, final Volume.Type volume return disk; } + @Override + public boolean isStoragePoolDatastoreClusterParent(StoragePool pool) { + List<StoragePoolVO> childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); + if (childStoragePools != null && !childStoragePools.isEmpty()) { + return true; + } + return false; + } + private void setVolumeObjectTOThrottling(VolumeObjectTO volumeTO, final ServiceOffering offering, final DiskOffering diskOffering) { volumeTO.setBytesReadRate(getDiskBytesReadRate(offering, diskOffering)); volumeTO.setBytesWriteRate(getDiskBytesWriteRate(offering,
diskOffering)); diff --git a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java index 4ffd7d80b0df..15a72590fc45 100644 --- a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java +++ b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java @@ -117,9 +117,11 @@ public boolean maintain(DataStore store) { spes = primaryDataStoreDao.listBy(pool.getDataCenterId(), pool.getPodId(), pool.getClusterId(), ScopeType.CLUSTER); } for (StoragePoolVO sp : spes) { - if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) { - throw new CloudRuntimeException("Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + sp.getId() + - " is already in PrepareForMaintenance mode "); + if (sp.getParent() != pool.getParent()) { // If Datastore cluster is tried to prepare for maintenance then child storage pools are also kept in PrepareForMaintenance mode + if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) { + throw new CloudRuntimeException("Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + sp.getId() + + " is already in PrepareForMaintenance mode "); + } } } StoragePool storagePool = (StoragePool)store; diff --git a/server/src/main/java/com/cloud/storage/TemplateProfile.java b/server/src/main/java/com/cloud/storage/TemplateProfile.java index 304b652a589f..9eaafd1a9a68 100644 --- a/server/src/main/java/com/cloud/storage/TemplateProfile.java +++ b/server/src/main/java/com/cloud/storage/TemplateProfile.java @@ -52,6 +52,7 @@ public class TemplateProfile { Boolean isDynamicallyScalable; TemplateType templateType; Boolean directDownload; + Boolean deployAsIs; Long size; public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, @@ -91,11 +92,10 @@ public TemplateProfile(Long userId, VMTemplateVO template, Long zoneId) { else this.zoneIdList = null; } - public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, - Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, List zoneId, - - HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, String templateTag, Map details, - Boolean sshKeyEnabled, Long imageStoreId, Boolean isDynamicallyScalable, TemplateType templateType, Boolean directDownload) { + public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, Boolean isPublic, + Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, List zoneId, HypervisorType hypervisorType, String accountName, Long domainId, + Long accountId, String chksum, Boolean bootable, String templateTag, Map details, Boolean sshKeyEnabled, Boolean isDynamicallyScalable, TemplateType templateType, + Boolean directDownload, Boolean deployAsIs) { this(templateId, userId, name, @@ -122,6 +122,7 @@ public TemplateProfile(Long templateId, Long userId, String name, String display this.isDynamicallyScalable = isDynamicallyScalable; this.templateType = templateType; this.directDownload = directDownload; + this.deployAsIs = deployAsIs; } public Long getTemplateId() { @@ -168,18 +169,10 @@ public Boolean isPasswordEnabled() { return passwordEnabled; } - public 
void setPasswordEnabled(Boolean enabled) { - this.passwordEnabled = enabled; - } - public Boolean isRequiresHVM() { return requiresHvm; } - public void setRequiresHVM(Boolean hvm) { - this.requiresHvm = hvm; - } - public String getUrl() { return url; } @@ -224,10 +217,6 @@ public Long getGuestOsId() { return guestOsId; } - public void setGuestOsId(Long id) { - this.guestOsId = id; - } - public List getZoneIdList() { return zoneIdList; } @@ -260,10 +249,6 @@ public String getCheckSum() { return chksum; } - public void setCheckSum(String chksum) { - this.chksum = chksum; - } - public Boolean isBootable() { return this.bootable; } @@ -284,10 +269,6 @@ public String getTemplateTag() { return templateTag; } - public void setTemplateTag(String templateTag) { - this.templateTag = templateTag; - } - public Map getDetails() { return this.details; } @@ -296,10 +277,6 @@ public void setDetails(Map details) { this.details = details; } - public void setSshKeyEnabled(Boolean enabled) { - this.sshKeyEnbaled = enabled; - } - public Boolean isSshKeyEnabled() { return this.sshKeyEnbaled; } @@ -308,10 +285,6 @@ public Boolean IsDynamicallyScalable() { return this.isDynamicallyScalable; } - public void setScalabe(Boolean isDynamicallyScalabe) { - this.isDynamicallyScalable = isDynamicallyScalabe; - } - public TemplateType getTemplateType() { return templateType; } @@ -331,4 +304,8 @@ public Long getSize() { public void setSize(Long size) { this.size = size; } + + public boolean isDeployAsIs() { + return deployAsIs == null ? false : deployAsIs; + } } diff --git a/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java b/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java index 0d42b760b6d9..be8319c9e570 100644 --- a/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java +++ b/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java @@ -46,4 +46,5 @@ public interface UploadParams { boolean isDynamicallyScalable(); boolean isRoutingType(); boolean isDirectDownload(); + boolean isDeployAsIs(); } diff --git a/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java b/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java index 67b04f7b4800..e5bc1a3c906d 100644 --- a/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java +++ b/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java @@ -214,6 +214,11 @@ public boolean isDirectDownload() { return false; } + @Override + public boolean isDeployAsIs() { + return false; + } + void setIso(boolean iso) { isIso = iso; } diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index 85c4a77774e8..c42a6b507cc7 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -178,6 +178,9 @@ public TemplateProfile prepare(GetUploadParamsForIsoCmd cmd) throws ResourceAllo @Override public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException { + if(s_logger.isTraceEnabled()) { + s_logger.trace(String.format("registering template for %s",cmd.getUrl())); + } TemplateProfile profile = super.prepare(cmd); String url = profile.getUrl(); UriUtils.validateUrl(cmd.getFormat(), url); diff --git a/server/src/main/java/com/cloud/template/TemplateAdapter.java b/server/src/main/java/com/cloud/template/TemplateAdapter.java index 
c048ceaf1fc2..f4b33c8dddd0 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapter.java @@ -72,13 +72,12 @@ public String getName() { boolean delete(TemplateProfile profile); - TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, - Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String accountName, - Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload) throws ResourceAllocationException; - - TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, - Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String chksum, - Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, - TemplateType templateType, boolean directDownload) throws ResourceAllocationException; + TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, + Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String accountName, Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload, + boolean deployAsIs) throws ResourceAllocationException; + + TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, + Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, + TemplateType templateType, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException; } diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java index 0e88c147f512..e7d8da21cd34 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java @@ -26,6 +26,7 @@ import com.cloud.storage.upload.params.IsoUploadParams; import com.cloud.storage.upload.params.TemplateUploadParams; import com.cloud.storage.upload.params.UploadParams; +import org.apache.cloudstack.api.command.admin.template.RegisterTemplateCmdByAdmin; import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd; import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd; import org.apache.commons.collections.CollectionUtils; @@ -128,17 +129,17 @@ public boolean stop() { @Override public TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, - Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String accountName, - Long domainId, String chksum, Boolean bootable, Map details, boolean 
directDownload) throws ResourceAllocationException { - return prepare(isIso, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, format, guestOSId, zoneId, - hypervisorType, chksum, bootable, null, null, details, false, null, false, TemplateType.USER, directDownload); + Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String accountName, + Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException { + return prepare(isIso, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, format, guestOSId, zoneId, hypervisorType, + chksum, bootable, null, null, details, false, null, false, TemplateType.USER, directDownload, deployAsIs); } @Override public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, - Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneIdList, HypervisorType hypervisorType, String chksum, - Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, - TemplateType templateType, boolean directDownload) throws ResourceAllocationException { + Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneIdList, HypervisorType hypervisorType, String chksum, + Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, + TemplateType templateType, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException { //Long accountId = null; // parameters verification @@ -257,7 +258,7 @@ public TemplateProfile prepare(boolean isIso, long userId, String name, String d CallContext.current().setEventDetails("Id: " + id + " name: " + name); return new TemplateProfile(id, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, imgfmt, guestOSId, zoneIdList, hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), templateOwner.getAccountId(), chksum, bootable, templateTag, details, - sshkeyEnabled, null, isDynamicallyScalable, templateType, directDownload); + sshkeyEnabled, isDynamicallyScalable, templateType, directDownload, deployAsIs); } @@ -282,10 +283,15 @@ public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocatio throw new InvalidParameterValueException("Hypervisor Type: " + cmd.getHypervisor() + " is invalid. 
Supported Hypervisor types are " + EnumUtils.listValues(HypervisorType.values()).replace("None, ", "")); } - + // for OVA, deploy-as-is is the default and only a root admin can override it + boolean deployAsIs = true; + if (cmd instanceof RegisterTemplateCmdByAdmin) { + deployAsIs = ((RegisterTemplateCmdByAdmin)cmd).isDeployAsIs(); + } return prepare(false, CallContext.current().getCallingUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneId, hypervisorType, cmd.getChecksum(), true, - cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(), isRouting ? TemplateType.ROUTING : TemplateType.USER, cmd.isDirectDownload()); + cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(), isRouting ? TemplateType.ROUTING : TemplateType.USER, + cmd.isDirectDownload(), deployAsIs); } @@ -316,7 +322,7 @@ private TemplateProfile prepareUploadParamsInternal(UploadParams params) throws params.isExtractable(), params.getFormat(), params.getGuestOSId(), zoneList, params.getHypervisorType(), params.getChecksum(), params.isBootable(), params.getTemplateTag(), owner, params.getDetails(), params.isSshKeyEnabled(), params.getImageStoreUuid(), - params.isDynamicallyScalable(), params.isRoutingType() ? TemplateType.ROUTING : TemplateType.USER, params.isDirectDownload()); + params.isDynamicallyScalable(), params.isRoutingType() ? TemplateType.ROUTING : TemplateType.USER, params.isDirectDownload(), false); } @Override @@ -358,7 +364,7 @@ public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationExce return prepare(true, CallContext.current().getCallingUserId(), cmd.getIsoName(), cmd.getDisplayText(), 64, cmd.isPasswordEnabled(), true, cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), ImageFormat.ISO.toString(), cmd.getOsTypeId(), zoneList, HypervisorType.None, cmd.getChecksum(), cmd.isBootable(), null, - owner, null, false, cmd.getImageStoreUuid(), cmd.isDynamicallyScalable(), TemplateType.USER, cmd.isDirectDownload()); + owner, null, false, cmd.getImageStoreUuid(), cmd.isDynamicallyScalable(), TemplateType.USER, cmd.isDirectDownload(), false); } protected VMTemplateVO persistTemplate(TemplateProfile profile, VirtualMachineTemplate.State initialState) { @@ -367,7 +373,7 @@ protected VMTemplateVO persistTemplate(TemplateProfile profile, VirtualMachineTe new VMTemplateVO(profile.getTemplateId(), profile.getName(), profile.getFormat(), profile.isPublic(), profile.isFeatured(), profile.isExtractable(), profile.getTemplateType(), profile.getUrl(), profile.isRequiresHVM(), profile.getBits(), profile.getAccountId(), profile.getCheckSum(), profile.getDisplayText(), profile.isPasswordEnabled(), profile.getGuestOsId(), profile.isBootable(), profile.getHypervisorType(), - profile.getTemplateTag(), profile.getDetails(), profile.isSshKeyEnabled(), profile.IsDynamicallyScalable(), profile.isDirectDownload()); + profile.getTemplateTag(), profile.getDetails(), profile.isSshKeyEnabled(), profile.IsDynamicallyScalable(), profile.isDirectDownload(), profile.isDeployAsIs()); template.setState(initialState); if (profile.isDirectDownload()) { diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 749f272bf361..199628643740 100755 --- 
a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -32,6 +32,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.agent.api.to.DatadiskTO; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd; @@ -205,7 +206,7 @@ import com.google.gson.GsonBuilder; public class TemplateManagerImpl extends ManagerBase implements TemplateManager, TemplateApiService, Configurable { - private final static Logger s_logger = Logger.getLogger(TemplateManagerImpl.class); + private final static Logger LOGGER = Logger.getLogger(TemplateManagerImpl.class); @Inject private VMTemplateDao _tmpltDao; @@ -334,11 +335,15 @@ public VirtualMachineTemplate registerTemplate(RegisterTemplateCmd cmd) throws U } if (cmd.isRoutingType() != null) { if (!_accountService.isRootAdmin(account.getId())) { + // FR37 then why is it not in RegisterTemplateCmdByAdmin at least? throw new PermissionDeniedException("Parameter isrouting can only be specified by a Root Admin, permission denied"); } } TemplateAdapter adapter = getAdapter(HypervisorType.getType(cmd.getHypervisor())); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Template adapter: " + adapter); + } TemplateProfile profile = adapter.prepare(cmd); VMTemplateVO template = adapter.create(profile); @@ -484,7 +489,7 @@ public VirtualMachineTemplate prepareTemplate(long templateId, long zoneId, Long if (pool.getStatus() == StoragePoolStatus.Up && pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(vmTemplate, pool); } else { - s_logger.warn("Skip loading template " + vmTemplate.getId() + " into primary storage " + pool.getId() + " as either the pool zone " + LOGGER.warn("Skip loading template " + vmTemplate.getId() + " into primary storage " + pool.getId() + " as either the pool zone " + pool.getDataCenterId() + " is different from the requested zone " + zoneId + " or the pool is currently not available."); } } @@ -588,7 +593,7 @@ public void prepareIsoForVmProfile(VirtualMachineProfile profile, DeployDestinat template = prepareIso(vm.getIsoId(), vm.getDataCenterId(), dest.getHost().getId(), poolId); if (template == null){ - s_logger.error("Failed to prepare ISO on secondary or cache storage"); + LOGGER.error("Failed to prepare ISO on secondary or cache storage"); throw new CloudRuntimeException("Failed to prepare ISO on secondary or cache storage"); } if (template.isBootable()) { @@ -615,22 +620,22 @@ public void prepareIsoForVmProfile(VirtualMachineProfile profile, DeployDestinat } private void prepareTemplateInOneStoragePool(final VMTemplateVO template, final StoragePoolVO pool) { - s_logger.info("Schedule to preload template " + template.getId() + " into primary storage " + pool.getId()); + LOGGER.info("Schedule to preload template " + template.getId() + " into primary storage " + pool.getId()); _preloadExecutor.execute(new ManagedContextRunnable() { @Override protected void runInContext() { try { reallyRun(); } catch (Throwable e) { - s_logger.warn("Unexpected exception ", e); + LOGGER.warn("Unexpected exception ", e); } } private void reallyRun() { - s_logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); + LOGGER.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); StoragePool pol = 
(StoragePool)_dataStoreMgr.getPrimaryDataStore(pool.getId()); prepareTemplateForCreate(template, pol); - s_logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); + LOGGER.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); } }); } @@ -641,7 +646,7 @@ public void prepareTemplateInAllStoragePools(final VMTemplateVO template, long z if (pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(template, pool); } else { - s_logger.info("Skip loading template " + template.getId() + " into primary storage " + pool.getId() + " as pool zone " + pool.getDataCenterId() + + LOGGER.info("Skip loading template " + template.getId() + " into primary storage " + pool.getId() + " as pool zone " + pool.getDataCenterId() + " is different from the requested zone " + zoneId); } } @@ -663,8 +668,8 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor _tmpltPoolDao.update(templateStoragePoolRef.getId(), templateStoragePoolRef); if (templateStoragePoolRef.getDownloadState() == Status.DOWNLOADED) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template " + templateId + " has already been downloaded to pool " + poolId); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Template " + templateId + " has already been downloaded to pool " + poolId); } return templateStoragePoolRef; @@ -673,7 +678,7 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor templateStoreRef = _tmplStoreDao.findByTemplateZoneDownloadStatus(templateId, pool.getDataCenterId(), VMTemplateStorageResourceAssoc.Status.DOWNLOADED); if (templateStoreRef == null) { - s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); + LOGGER.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; } @@ -683,8 +688,8 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor } if (templateStoragePoolRef == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Downloading template " + templateId + " to pool " + poolId); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Downloading template " + templateId + " to pool " + poolId); } DataStore srcSecStore = _dataStoreMgr.getDataStore(templateStoreRef.getDataStoreId(), DataStoreRole.Image); TemplateInfo srcTemplate = _tmplFactory.getTemplate(templateId, srcSecStore); @@ -693,13 +698,13 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor try { TemplateApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("prepare template failed:" + result.getResult()); + LOGGER.debug("prepare template failed:" + result.getResult()); return null; } return _tmpltPoolDao.findByPoolTemplate(poolId, templateId); } catch (Exception ex) { - s_logger.debug("failed to copy template from image store:" + srcSecStore.getName() + " to primary storage"); + LOGGER.debug("failed to copy template from image store:" + srcSecStore.getName() + " to primary storage"); } } @@ -713,7 +718,7 @@ public String getChecksum(DataStore store, String templatePath, String algorithm Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + LOGGER.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -732,7 +737,7 @@ public boolean resetTemplateDownloadStateOnPool(long 
templateStoragePoolRefId) { VMTemplateStoragePoolVO templateStoragePoolRef = _tmpltPoolDao.acquireInLockTable(templateStoragePoolRefId, 1200); if (templateStoragePoolRef == null) { - s_logger.warn("resetTemplateDownloadStateOnPool failed - unable to lock TemplateStorgePoolRef " + templateStoragePoolRefId); + LOGGER.warn("resetTemplateDownloadStateOnPool failed - unable to lock TemplateStorgePoolRef " + templateStoragePoolRefId); return false; } @@ -790,7 +795,7 @@ public boolean copy(long userId, VMTemplateVO template, DataStore srcSecStore, D try { TemplateApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("copy template failed for image store " + dstSecStore.getName() + ":" + result.getResult()); + LOGGER.debug("copy template failed for image store " + dstSecStore.getName() + ":" + result.getResult()); continue; // try next image store } @@ -805,26 +810,26 @@ public boolean copy(long userId, VMTemplateVO template, DataStore srcSecStore, D List dataDiskTemplates = _tmpltDao.listByParentTemplatetId(template.getId()); if (dataDiskTemplates != null && !dataDiskTemplates.isEmpty()) { for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) { - s_logger.debug("Copying " + dataDiskTemplates.size() + " for source template " + template.getId() + ". Copy all Datadisk templates to destination datastore " + dstSecStore.getName()); + LOGGER.debug("Copying " + dataDiskTemplates.size() + " for source template " + template.getId() + ". Copy all Datadisk templates to destination datastore " + dstSecStore.getName()); TemplateInfo srcDataDiskTemplate = _tmplFactory.getTemplate(dataDiskTemplate.getId(), srcSecStore); AsyncCallFuture dataDiskCopyFuture = _tmpltSvr.copyTemplate(srcDataDiskTemplate, dstSecStore); try { TemplateApiResult dataDiskCopyResult = dataDiskCopyFuture.get(); if (dataDiskCopyResult.isFailed()) { - s_logger.error("Copy of datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + LOGGER.error("Copy of datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + " failed with error: " + dataDiskCopyResult.getResult() + " , will try copying the next one"); continue; // Continue to copy next Datadisk template } _tmpltDao.addTemplateToZone(dataDiskTemplate, dstZoneId); _resourceLimitMgr.incrementResourceCount(dataDiskTemplate.getAccountId(), ResourceType.secondary_storage, dataDiskTemplate.getSize()); } catch (Exception ex) { - s_logger.error("Failed to copy datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + LOGGER.error("Failed to copy datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + " , will try copying the next one"); } } } } catch (Exception ex) { - s_logger.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one"); + LOGGER.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one"); } } return true; @@ -879,7 +884,7 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUn boolean success = false; if (template.getHypervisorType() == HypervisorType.BareMetal) { if (template.isCrossZones()) { - s_logger.debug("Template " + templateId + " is cross-zone, don't need to copy"); + LOGGER.debug("Template " + templateId + " is cross-zone, don't need to copy"); return template; } for (Long destZoneId: destZoneIds) { @@ -904,13 +909,13 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) 
throws StorageUn if (template.isCrossZones()) { // sync template from cache store to region store if it is not there, for cases where we are going to migrate existing NFS to S3. _tmpltSvr.syncTemplateToRegionStore(templateId, srcSecStore); - s_logger.debug("Template " + templateId + " is cross-zone, don't need to copy"); + LOGGER.debug("Template " + templateId + " is cross-zone, don't need to copy"); return template; } for (Long destZoneId : destZoneIds) { DataStore dstSecStore = getImageStore(destZoneId, templateId); if (dstSecStore != null) { - s_logger.debug("There is template " + templateId + " in secondary storage " + dstSecStore.getName() + + LOGGER.debug("There is template " + templateId + " in secondary storage " + dstSecStore.getName() + " in zone " + destZoneId + " , don't need to copy"); continue; } @@ -929,7 +934,7 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUn if ((destZoneIds != null) && (destZoneIds.size() > failedZones.size())){ if (!failedZones.isEmpty()) { - s_logger.debug("There were failures when copying template to zones: " + + LOGGER.debug("There were failures when copying template to zones: " + StringUtils.listToCsvTags(failedZones)); } return template; @@ -952,7 +957,7 @@ private boolean addTemplateToZone(VMTemplateVO template, long dstZoneId, long so _tmpltDao.addTemplateToZone(template, dstZoneId); return true; } catch (Exception ex) { - s_logger.debug("failed to copy template from Zone: " + sourceZone.getUuid() + " to Zone: " + dstZone.getUuid()); + LOGGER.debug("failed to copy template from Zone: " + sourceZone.getUuid() + " to Zone: " + dstZone.getUuid()); } return false; } @@ -1003,7 +1008,7 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolVO.getId()); if (templatePoolRef == null) { - s_logger.debug("Can't aquire the lock for template pool ref: " + templatePoolVO.getId()); + LOGGER.debug("Can't aquire the lock for template pool ref: " + templatePoolVO.getId()); return; } @@ -1012,8 +1017,8 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) TemplateInfo template = _tmplFactory.getTemplate(templatePoolRef.getTemplateId(), pool); try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Evicting " + templatePoolVO); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Evicting " + templatePoolVO); } if (pool.isManaged()) { @@ -1022,11 +1027,11 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) TemplateApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("Failed to delete template " + template.getId() + " from storage pool " + pool.getId()); + LOGGER.debug("Failed to delete template " + template.getId() + " from storage pool " + pool.getId()); } else { // Remove the templatePoolVO. if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - s_logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); + LOGGER.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); } } } else { @@ -1036,14 +1041,14 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) if (answer != null && answer.getResult()) { // Remove the templatePoolVO. 
if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - s_logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); + LOGGER.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); } } else { - s_logger.info("Will retry evict template " + template.getName() + " from storage pool " + pool.getName()); + LOGGER.info("Will retry evict template " + template.getName() + " from storage pool " + pool.getName()); } } } catch (StorageUnavailableException | InterruptedException | ExecutionException e) { - s_logger.info("Storage is unavailable currently. Will retry evicte template " + template.getName() + " from storage pool " + pool.getName()); + LOGGER.info("Storage is unavailable currently. Will retry evicte template " + template.getName() + " from storage pool " + pool.getName()); } finally { _tmpltPoolDao.releaseFromLockTable(templatePoolRef.getId()); } @@ -1086,14 +1091,14 @@ public boolean templateIsDeleteable(VMTemplateHostVO templateHostRef) { List nonExpungedVms = _vmInstanceDao.listNonExpungedByZoneAndTemplate(zoneId, templateId); if (!nonExpungedVms.isEmpty()) { - s_logger.debug("Template " + template.getName() + " in zone " + zone.getName() + + LOGGER.debug("Template " + template.getName() + " in zone " + zone.getName() + " is not deleteable because there are non-expunged VMs deployed from this template."); return false; } List userVmUsingIso = _userVmDao.listByIsoId(templateId); // check if there is any VM using this ISO. if (!userVmUsingIso.isEmpty()) { - s_logger.debug("ISO " + template.getName() + " in zone " + zone.getName() + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); + LOGGER.debug("ISO " + template.getName() + " in zone " + zone.getName() + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); return false; } // Check if there are any snapshots for the template in the template @@ -1102,7 +1107,7 @@ public boolean templateIsDeleteable(VMTemplateHostVO templateHostRef) { for (VolumeVO volume : volumes) { List snapshots = _snapshotDao.listByVolumeIdVersion(volume.getId(), "2.1"); if (!snapshots.isEmpty()) { - s_logger.debug("Template " + template.getName() + " in zone " + zone.getName() + + LOGGER.debug("Template " + template.getName() + " in zone " + zone.getName() + " is not deleteable because there are 2.1 snapshots using this template."); return false; } @@ -1120,7 +1125,7 @@ public boolean templateIsDeleteable(long templateId) { // always be copied to // primary storage before deploying VM. 
if (!userVmUsingIso.isEmpty()) { - s_logger.debug("ISO " + templateId + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); + LOGGER.debug("ISO " + templateId + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); return false; } @@ -1231,7 +1236,7 @@ public TemplateInfo prepareIso(long isoId, long dcId, Long hostId, Long poolId) } if (tmplt == null || tmplt.getFormat() != ImageFormat.ISO) { - s_logger.warn("ISO: " + isoId + " does not exist in vm_template table"); + LOGGER.warn("ISO: " + isoId + " does not exist in vm_template table"); return null; } @@ -1240,7 +1245,7 @@ public TemplateInfo prepareIso(long isoId, long dcId, Long hostId, Long poolId) Scope destScope = new ZoneScope(dcId); TemplateInfo cacheData = (TemplateInfo)cacheMgr.createCacheObject(tmplt, destScope); if (cacheData == null) { - s_logger.error("Failed in copy iso from S3 to cache storage"); + LOGGER.error("Failed in copy iso from S3 to cache storage"); return null; } return cacheData; @@ -1261,14 +1266,14 @@ private boolean attachISOToVM(long vmId, long isoId, boolean attach) { // prepare ISO ready to mount on hypervisor resource level TemplateInfo tmplt = prepareIso(isoId, vm.getDataCenterId(), vm.getHostId(), null); if (tmplt == null) { - s_logger.error("Failed to prepare ISO ready to mount on hypervisor resource level"); + LOGGER.error("Failed to prepare ISO ready to mount on hypervisor resource level"); throw new CloudRuntimeException("Failed to prepare ISO ready to mount on hypervisor resource level"); } String vmName = vm.getInstanceName(); HostVO host = _hostDao.findById(vm.getHostId()); if (host == null) { - s_logger.warn("Host: " + vm.getHostId() + " does not exist"); + LOGGER.warn("Host: " + vm.getHostId() + " does not exist"); return false; } @@ -1320,7 +1325,7 @@ public boolean deleteTemplate(DeleteTemplateCmd cmd) { } if(!cmd.isForced() && CollectionUtils.isNotEmpty(vmInstanceVOList)) { final String message = String.format("Unable to delete template with id: %1$s because VM instances: [%2$s] are using it.", templateId, Joiner.on(",").join(vmInstanceVOList)); - s_logger.warn(message); + LOGGER.warn(message); throw new InvalidParameterValueException(message); } @@ -1475,7 +1480,7 @@ public boolean updateTemplateOrIsoPermissions(BaseUpdateTemplateOrIsoPermissions // If the template is removed throw an error. 
if (template.getRemoved() != null) { - s_logger.error("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); + LOGGER.error("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); throw new InvalidParameterValueException("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); } @@ -1683,7 +1688,7 @@ public VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd command) t if (result.isFailed()) { privateTemplate = null; - s_logger.debug("Failed to create template" + result.getResult()); + LOGGER.debug("Failed to create template" + result.getResult()); throw new CloudRuntimeException("Failed to create template" + result.getResult()); } @@ -1703,10 +1708,10 @@ public VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd command) t privateTemplate.getSourceTemplateId(), srcTmpltStore.getPhysicalSize(), privateTemplate.getSize()); _usageEventDao.persist(usageEvent); } catch (InterruptedException e) { - s_logger.debug("Failed to create template", e); + LOGGER.debug("Failed to create template", e); throw new CloudRuntimeException("Failed to create template", e); } catch (ExecutionException e) { - s_logger.debug("Failed to create template", e); + LOGGER.debug("Failed to create template", e); throw new CloudRuntimeException("Failed to create template", e); } @@ -1814,8 +1819,8 @@ public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account t // created if (!_volumeMgr.volumeInactive(volume)) { String msg = "Unable to create private template for volume: " + volume.getName() + "; volume is attached to a non-stopped VM, please stop the VM first"; - if (s_logger.isInfoEnabled()) { - s_logger.info(msg); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(msg); } throw new CloudRuntimeException(msg); } @@ -1879,17 +1884,17 @@ public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account t } String templateTag = cmd.getTemplateTag(); if (templateTag != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding template tag: " + templateTag); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Adding template tag: " + templateTag); } } privateTemplate = new VMTemplateVO(nextTemplateId, name, ImageFormat.RAW, isPublic, featured, isExtractable, TemplateType.USER, null, requiresHvmValue, bitsValue, templateOwner.getId(), null, description, - passwordEnabledValue, guestOS.getId(), true, hyperType, templateTag, cmd.getDetails(), sshKeyEnabledValue, isDynamicScalingEnabled, false); + passwordEnabledValue, guestOS.getId(), true, hyperType, templateTag, cmd.getDetails(), sshKeyEnabledValue, isDynamicScalingEnabled, false, false); if (sourceTemplateId != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("This template is getting created from other template, setting source template Id to: " + sourceTemplateId); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("This template is getting created from other template, setting source template Id to: " + sourceTemplateId); } } @@ -2211,4 +2216,15 @@ public List getTemplateAdapters() { public void setTemplateAdapters(List adapters) { _adapters = adapters; } + + @Override + public List getTemplateDisksOnImageStore(Long templateId, DataStoreRole role) { + TemplateInfo templateObject = _tmplFactory.getTemplate(templateId, role); + if (templateObject == null) { + String msg = String.format("Could not find template %s downloaded on store with role %s", templateId, role.toString()); + LOGGER.error(msg); + throw 
new CloudRuntimeException(msg); + } + return _tmpltSvr.getTemplateDatadisksOnImageStore(templateObject); + } } diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index ce9a41a8470c..6f8f7265a112 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -47,8 +47,8 @@ import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; -import com.cloud.exception.UnsupportedServiceException; -import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.ImageStore; +import com.cloud.storage.VMTemplateDetailVO; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.affinity.AffinityGroupService; @@ -135,6 +135,7 @@ import com.cloud.agent.api.VmNetworkStatsEntry; import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.VolumeStatsEntry; +import com.cloud.agent.api.storage.OVFPropertyTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; @@ -188,6 +189,7 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.StorageUnavailableException; +import com.cloud.exception.UnsupportedServiceException; import com.cloud.exception.VirtualMachineMigrationException; import com.cloud.gpu.GPU; import com.cloud.ha.HighAvailabilityManager; @@ -195,6 +197,7 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorCapabilitiesVO; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; @@ -257,7 +260,6 @@ import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.TemplateOVFPropertyVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; @@ -268,7 +270,7 @@ import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.TemplateOVFPropertiesDao; +import com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VolumeDao; @@ -349,6 +351,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Inject private VMTemplateDao _templateDao; @Inject + private VMTemplateDetailsDao templateDetailsDao; + @Inject private VMTemplateZoneDao _templateZoneDao; @Inject private TemplateDataStoreDao _templateStoreDao; @@ -500,8 +504,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private DpdkHelper dpdkHelper; @Inject private ResourceTagDao resourceTagDao; - @Inject - private TemplateOVFPropertiesDao templateOVFPropertiesDao; private ScheduledExecutorService _executor = null; private ScheduledExecutorService _vmIpFetchExecutor = null; @@ -941,7 +943,7 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup) UserVmVO vm = _vmDao.findById(vmId); if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("reboot %s with enterSetup 
set to %s", vm.getInstanceName(), Boolean.toString(enterSetup))); + s_logger.trace(String.format("reboot %s with enterSetup set to %s", vm.getInstanceName(), enterSetup)); } if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging || vm.getRemoved() != null) { @@ -981,7 +983,7 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup) } Map params = null; if (enterSetup) { - params = new HashMap(); + params = new HashMap<>(); params.put(VirtualMachineProfile.Param.BootIntoSetup, Boolean.TRUE); if (s_logger.isTraceEnabled()) { s_logger.trace(String.format("Adding %s to paramlist", VirtualMachineProfile.Param.BootIntoSetup)); @@ -2486,10 +2488,12 @@ public UserVm updateVirtualMachine(UpdateVMCmd cmd) throws ResourceUnavailableEx } } for (String detailName : details.keySet()) { - if (detailName.startsWith(ApiConstants.OVF_PROPERTIES)) { - String ovfPropKey = detailName.replace(ApiConstants.OVF_PROPERTIES + "-", ""); - TemplateOVFPropertyVO ovfPropertyVO = templateOVFPropertiesDao.findByTemplateAndKey(vmInstance.getTemplateId(), ovfPropKey); - if (ovfPropertyVO != null && ovfPropertyVO.isPassword()) { + if (s_logger.isTraceEnabled()) { + s_logger.trace(String.format("looking for vm detail '%s'", detailName)); + } + if (detailName.startsWith(ApiConstants.ACS_PROPERTY)) { + OVFPropertyTO propertyTO = templateDetailsDao.findPropertyByTemplateAndKey(vmInstance.getTemplateId(),detailName); + if (propertyTO != null && propertyTO.isPassword()) { details.put(detailName, DBEncryptionUtil.encrypt(details.get(detailName))); } } @@ -3292,86 +3296,18 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv _accountMgr.checkAccess(owner, _diskOfferingDao.findById(diskOfferingId), zone); List vpcSupportedHTypes = _vpcMgr.getSupportedVpcHypervisors(); + if (networkIdList == null || networkIdList.isEmpty()) { - NetworkVO defaultNetwork = null; - - // if no network is passed in - // Check if default virtual network offering has - // Availability=Required. If it's true, search for corresponding - // network - // * if network is found, use it. 
If more than 1 virtual network is - // found, throw an error - // * if network is not found, create a new one and use it - - List requiredOfferings = _networkOfferingDao.listByAvailability(Availability.Required, false); - if (requiredOfferings.size() < 1) { - throw new InvalidParameterValueException("Unable to find network offering with availability=" + Availability.Required - + " to automatically create the network as a part of vm creation"); - } - - if (requiredOfferings.get(0).getState() == NetworkOffering.State.Enabled) { - // get Virtual networks - List virtualNetworks = _networkModel.listNetworksForAccount(owner.getId(), zone.getId(), Network.GuestType.Isolated); - if (virtualNetworks == null) { - throw new InvalidParameterValueException("No (virtual) networks are found for account " + owner); - } - if (virtualNetworks.isEmpty()) { - long physicalNetworkId = _networkModel.findPhysicalNetworkId(zone.getId(), requiredOfferings.get(0).getTags(), requiredOfferings.get(0).getTrafficType()); - // Validate physical network - PhysicalNetwork physicalNetwork = _physicalNetworkDao.findById(physicalNetworkId); - if (physicalNetwork == null) { - throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " - + requiredOfferings.get(0).getTags()); - } - s_logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); - Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", - null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true, null, null, - null); - if (newNetwork != null) { - defaultNetwork = _networkDao.findById(newNetwork.getId()); - } - } else if (virtualNetworks.size() > 1) { - throw new InvalidParameterValueException("More than 1 default Isolated networks are found for account " + owner + "; please specify networkIds"); - } else { - defaultNetwork = _networkDao.findById(virtualNetworks.get(0).getId()); - } - } else { - throw new InvalidParameterValueException("Required network offering id=" + requiredOfferings.get(0).getId() + " is not in " + NetworkOffering.State.Enabled); - } - if (defaultNetwork != null) { - networkList.add(defaultNetwork); - } + addDefaultNetworkToNetworkList(zone, owner, networkList); } else { for (Long networkId : networkIdList) { - NetworkVO network = _networkDao.findById(networkId); - if (network == null) { - throw new InvalidParameterValueException("Unable to find network by id " + networkIdList.get(0).longValue()); - } - if (network.getVpcId() != null) { - // Only ISOs, XenServer, KVM, and VmWare template types are - // supported for vpc networks - if (template.getFormat() != ImageFormat.ISO && !vpcSupportedHTypes.contains(template.getHypervisorType())) { - throw new InvalidParameterValueException("Can't create vm from template with hypervisor " + template.getHypervisorType() + " in vpc network " + network); - } else if (template.getFormat() == ImageFormat.ISO && !vpcSupportedHTypes.contains(hypervisor)) { - // Only XenServer, KVM, and VMware hypervisors are supported - // for vpc networks - throw new InvalidParameterValueException("Can't create vm of hypervisor type " + hypervisor + " in vpc network"); - - } - } - - _networkModel.checkNetworkPermissions(owner, network); - - // don't allow to use system networks - NetworkOffering 
networkOffering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); - if (networkOffering.isSystemOnly()) { - throw new InvalidParameterValueException("Network id=" + networkId + " is system only and can't be used for vm deployment"); - } + NetworkVO network = getNetworkToAddToNetworkList(template, owner, hypervisor, vpcSupportedHTypes, networkId); networkList.add(network); } } + verifyExtraDhcpOptionsNetwork(dhcpOptionsMap, networkList); return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, null, group, httpmethod, userData, @@ -3379,6 +3315,93 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv dataDiskTemplateToDiskOfferingMap, userVmOVFPropertiesMap); } + private NetworkVO getNetworkToAddToNetworkList(VirtualMachineTemplate template, Account owner, HypervisorType hypervisor, + List vpcSupportedHTypes, Long networkId) { + NetworkVO network = _networkDao.findById(networkId); + if (network == null) { + throw new InvalidParameterValueException("Unable to find network by id " + networkId); + } + if (network.getVpcId() != null) { + // Only ISOs, XenServer, KVM, and VmWare template types are + // supported for vpc networks + if (template.getFormat() != ImageFormat.ISO && !vpcSupportedHTypes.contains(template.getHypervisorType())) { + throw new InvalidParameterValueException("Can't create vm from template with hypervisor " + template.getHypervisorType() + " in vpc network " + network); + } else if (template.getFormat() == ImageFormat.ISO && !vpcSupportedHTypes.contains(hypervisor)) { + // Only XenServer, KVM, and VMware hypervisors are supported + // for vpc networks + throw new InvalidParameterValueException("Can't create vm of hypervisor type " + hypervisor + " in vpc network"); + } + } + + _networkModel.checkNetworkPermissions(owner, network); + + // don't allow to use system networks + NetworkOffering networkOffering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); + if (networkOffering.isSystemOnly()) { + throw new InvalidParameterValueException("Network id=" + networkId + " is system only and can't be used for vm deployment"); + } + return network; + } + + private void addDefaultNetworkToNetworkList(DataCenter zone, Account owner, List networkList) throws InsufficientCapacityException, ResourceAllocationException { + NetworkVO defaultNetwork = null; + + // if no network is passed in + // Check if default virtual network offering has + // Availability=Required. If it's true, search for corresponding + // network + // * if network is found, use it. 
If more than 1 virtual network is + // found, throw an error + // * if network is not found, create a new one and use it + + List requiredOfferings = _networkOfferingDao.listByAvailability(Availability.Required, false); + if (requiredOfferings.size() < 1) { + throw new InvalidParameterValueException("Unable to find network offering with availability=" + Availability.Required + + " to automatically create the network as a part of vm creation"); + } + + if (requiredOfferings.get(0).getState() == NetworkOffering.State.Enabled) { + // get Virtual networks + List virtualNetworks = _networkModel.listNetworksForAccount(owner.getId(), zone.getId(), Network.GuestType.Isolated); + if (virtualNetworks == null) { + throw new InvalidParameterValueException("No (virtual) networks are found for account " + owner); + } + if (virtualNetworks.isEmpty()) { + defaultNetwork = createDefaultNetworkForAccount(zone, owner, requiredOfferings); + } else if (virtualNetworks.size() > 1) { + throw new InvalidParameterValueException("More than 1 default Isolated networks are found for account " + owner + "; please specify networkIds"); + } else { + defaultNetwork = _networkDao.findById(virtualNetworks.get(0).getId()); + } + } else { + throw new InvalidParameterValueException("Required network offering id=" + requiredOfferings.get(0).getId() + " is not in " + NetworkOffering.State.Enabled); + } + + if (defaultNetwork != null) { + networkList.add(defaultNetwork); + } + } + + private NetworkVO createDefaultNetworkForAccount(DataCenter zone, Account owner, List requiredOfferings) + throws InsufficientCapacityException, ResourceAllocationException { + NetworkVO defaultNetwork = null; + long physicalNetworkId = _networkModel.findPhysicalNetworkId(zone.getId(), requiredOfferings.get(0).getTags(), requiredOfferings.get(0).getTrafficType()); + // Validate physical network + PhysicalNetwork physicalNetwork = _physicalNetworkDao.findById(physicalNetworkId); + if (physicalNetwork == null) { + throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + + requiredOfferings.get(0).getTags()); + } + s_logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); + Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", + null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true, null, null, + null); + if (newNetwork != null) { + defaultNetwork = _networkDao.findById(newNetwork.getId()); + } + return defaultNetwork; + } + private void verifyExtraDhcpOptionsNetwork(Map> dhcpOptionsMap, List networkList) throws InvalidParameterValueException { if (dhcpOptionsMap != null) { for (String networkUuid : dhcpOptionsMap.keySet()) { @@ -3469,41 +3492,8 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe // check if account/domain is with in resource limits to create a new vm boolean isIso = Storage.ImageFormat.ISO == template.getFormat(); long size = 0; - // custom root disk size, resizes base template to larger size - if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { - // only KVM, XenServer and VMware supports rootdisksize override - if (!(hypervisorType == HypervisorType.KVM || hypervisorType == HypervisorType.XenServer || hypervisorType == HypervisorType.VMware || 
hypervisorType == HypervisorType.Simulator)) { - throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override"); - } + size = getTotalSizeOnDeploy(diskOfferingId, diskSize, customParameters, template, hypervisorType); - Long rootDiskSize = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1); - if (rootDiskSize <= 0) { - throw new InvalidParameterValueException("Root disk size should be a positive number."); - } - size = rootDiskSize * GiB_TO_BYTES; - } else { - // For baremetal, size can be null - Long templateSize = _templateDao.findById(template.getId()).getSize(); - if (templateSize != null) { - size = templateSize; - } - } - if (diskOfferingId != null) { - DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId); - if (diskOffering != null && diskOffering.isCustomized()) { - if (diskSize == null) { - throw new InvalidParameterValueException("This disk offering requires a custom size specified"); - } - Long customDiskOfferingMaxSize = VolumeOrchestrationService.CustomDiskOfferingMaxSize.value(); - Long customDiskOfferingMinSize = VolumeOrchestrationService.CustomDiskOfferingMinSize.value(); - if ((diskSize < customDiskOfferingMinSize) || (diskSize > customDiskOfferingMaxSize)) { - throw new InvalidParameterValueException("VM Creation failed. Volume size: " + diskSize + "GB is out of allowed range. Max: " + customDiskOfferingMaxSize - + " Min:" + customDiskOfferingMinSize); - } - size += diskSize * GiB_TO_BYTES; - } - size += _diskOfferingDao.findById(diskOfferingId).getDiskSize(); - } if (! VirtualMachineManager.ResoureCountRunningVMsonly.value()) { resourceLimitCheck(owner, isDisplayVm, new Long(offering.getCpu()), new Long(offering.getRamSize())); } @@ -3802,6 +3792,52 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe return vm; } + /** + * custom root disk size, resizes base template to larger size + */ + private long getTotalSizeOnDeploy(Long diskOfferingId, Long diskSize, Map customParameters, VMTemplateVO template, HypervisorType hypervisorType) { + long size = 0; + if (template.isDeployAsIs()) { + // just get the size from the template + return template.getSize(); + } + if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { + // only KVM, XenServer and VMware supports rootdisksize override + if (!(hypervisorType == HypervisorType.KVM || hypervisorType == HypervisorType.XenServer || hypervisorType == HypervisorType.VMware || hypervisorType == HypervisorType.Simulator)) { + throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override"); + } + + Long rootDiskSize = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1); + if (rootDiskSize <= 0) { + throw new InvalidParameterValueException("Root disk size should be a positive number."); + } + size = rootDiskSize * GiB_TO_BYTES; + } else { + // For baremetal, size can be null + Long templateSize = _templateDao.findById(template.getId()).getSize(); + if (templateSize != null) { + size = templateSize; + } + } + if (diskOfferingId != null) { + DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId); + if (diskOffering != null && diskOffering.isCustomized()) { + if (diskSize == null) { + throw new InvalidParameterValueException("This disk offering requires a custom size specified"); + } + Long customDiskOfferingMaxSize = VolumeOrchestrationService.CustomDiskOfferingMaxSize.value(); + Long 
customDiskOfferingMinSize = VolumeOrchestrationService.CustomDiskOfferingMinSize.value(); + if ((diskSize < customDiskOfferingMinSize) || (diskSize > customDiskOfferingMaxSize)) { + throw new InvalidParameterValueException("VM Creation failed. Volume size: " + diskSize + "GB is out of allowed range. Max: " + customDiskOfferingMaxSize + + " Min:" + customDiskOfferingMinSize); + } + size += diskSize * GiB_TO_BYTES; + } + size += _diskOfferingDao.findById(diskOfferingId).getDiskSize(); + } + return size; + } + private void checkIfHostNameUniqueInNtwkDomain(String hostName, List networkList) { // Check that hostName is unique in the network domain Map> ntwkDomains = new HashMap>(); @@ -3944,27 +3980,11 @@ public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCap } vm.setDetail(VmDetailConstants.DEPLOY_VM, "true"); - if (MapUtils.isNotEmpty(userVmOVFPropertiesMap)) { - for (String key : userVmOVFPropertiesMap.keySet()) { - String detailKey = ApiConstants.OVF_PROPERTIES + "-" + key; - String value = userVmOVFPropertiesMap.get(key); - - // Sanitize boolean values to expected format and encrypt passwords - if (StringUtils.isNotBlank(value)) { - if (value.equalsIgnoreCase("True")) { - value = "True"; - } else if (value.equalsIgnoreCase("False")) { - value = "False"; - } else { - TemplateOVFPropertyVO ovfPropertyVO = templateOVFPropertiesDao.findByTemplateAndKey(vm.getTemplateId(), key); - if (ovfPropertyVO.isPassword()) { - value = DBEncryptionUtil.encrypt(value); - } - } - } - vm.setDetail(detailKey, value); - } - } + copyDiskDetailsToVm(vm, template); + + setPropertiesOnVM(vm, userVmOVFPropertiesMap); + + copyNetworkRequirementsToVm(vm, template); _vmDao.saveDetails(vm); if (!isImport) { @@ -4009,6 +4029,56 @@ public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCap }); } + private void copyNetworkRequirementsToVm(UserVmVO vm, VirtualMachineTemplate template) { + if (template.isDeployAsIs()) { // FR37 this should always be true when we are done + List details = templateDetailsDao.listDetailsByTemplateId(template.getId(), ImageStore.REQUIRED_NETWORK_PREFIX); + for (VMTemplateDetailVO detail : details) { + vm.setDetail(detail.getName(), detail.getValue()); + } + } + } + + private void copyDiskDetailsToVm(UserVmVO vm, VirtualMachineTemplate template) { + if (template.isDeployAsIs()) { // FR37 this should always be true when we are done + List details = templateDetailsDao.listDetailsByTemplateId(template.getId(), ImageStore.DISK_DEFINITION_PREFIX); + for (VMTemplateDetailVO detail : details) { + vm.setDetail(detail.getName(), detail.getValue()); + } + } + } + + /** + * Take the given properties and set them on the VM. + * Consider whether we should be complete and make sure all known default values are copied as well, + * i.e. iterate over the template details and copy any that are not defined yet. 
+ */ + private void setPropertiesOnVM(UserVmVO vm, Map userVmOVFPropertiesMap) { + if (MapUtils.isNotEmpty(userVmOVFPropertiesMap)) { + for (String key : userVmOVFPropertiesMap.keySet()) { + String detailKey = ApiConstants.ACS_PROPERTY + "-" + key; + String value = userVmOVFPropertiesMap.get(key); + + // Sanitize boolean values to expected format and encrypt passwords + if (StringUtils.isNotBlank(value)) { + if (value.equalsIgnoreCase("True")) { + value = "True"; + } else if (value.equalsIgnoreCase("False")) { + value = "False"; + } else { + OVFPropertyTO propertyTO = templateDetailsDao.findPropertyByTemplateAndKey(vm.getTemplateId(), key); + if (propertyTO != null && propertyTO.isPassword()) { + value = DBEncryptionUtil.encrypt(value); + } + } + } + if (s_logger.isTraceEnabled()) { + s_logger.trace(String.format("setting property '%s' as '%s' with value '%s'", key, detailKey, value)); + } + vm.setDetail(detailKey, value); + } + } + } + private UserVmVO commitUserVm(final DataCenter zone, final VirtualMachineTemplate template, final String hostName, final String displayName, final Account owner, final Long diskOfferingId, final Long diskSize, final String userData, final Account caller, final Boolean isDisplayVm, final String keyboard, final long accountId, final long userId, final ServiceOfferingVO offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap networkNicMap, @@ -5078,7 +5148,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE //Verify that all objects exist before passing them to the service Account owner = _accountService.getActiveAccountById(cmd.getEntityOwnerId()); - verifyDetails(cmd.getDetails()); + verifyIopsFromDetails(cmd.getDetails()); Long zoneId = cmd.getZoneId(); @@ -5142,7 +5212,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE Boolean displayVm = cmd.isDisplayVm(); String keyboard = cmd.getKeyboard(); Map dataDiskTemplateToDiskOfferingMap = cmd.getDataDiskTemplateToDiskOfferingMap(); - Map userVmOVFProperties = cmd.getVmOVFProperties(); + Map userVmProperties = cmd.getVmProperties(); if (zone.getNetworkType() == NetworkType.Basic) { if (cmd.getNetworkIds() != null) { throw new InvalidParameterValueException("Can't specify network Ids in Basic zone"); @@ -5150,14 +5220,14 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE vm = createBasicSecurityGroupVirtualMachine(zone, serviceOffering, template, getSecurityGroupIdList(cmd), owner, name, displayName, diskOfferingId, size , group , cmd.getHypervisor(), cmd.getHttpMethod(), userData , sshKeyPairName , cmd.getIpToNetworkMap(), addrs, displayVm , keyboard , cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), - dataDiskTemplateToDiskOfferingMap, userVmOVFProperties); + dataDiskTemplateToDiskOfferingMap, userVmProperties); } } else { if (zone.isSecurityGroupEnabled()) { vm = createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, cmd.getNetworkIds(), getSecurityGroupIdList(cmd), owner, name, displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), - dataDiskTemplateToDiskOfferingMap, userVmOVFProperties); + dataDiskTemplateToDiskOfferingMap, userVmProperties); } else { if (cmd.getSecurityGroupIdList() != null && 
!cmd.getSecurityGroupIdList().isEmpty()) { @@ -5165,13 +5235,13 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE } vm = createAdvancedVirtualMachine(zone, serviceOffering, template, cmd.getNetworkIds(), owner, name, displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(), - cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties); + cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmProperties); } } // check if this templateId has a child ISO List child_templates = _templateDao.listByParentTemplatetId(templateId); for (VMTemplateVO tmpl: child_templates){ - if (tmpl.getFormat() == Storage.ImageFormat.ISO){ + if (tmpl.getFormat() == Storage.ImageFormat.ISO){ // FR37 why only ISO? s_logger.info("MDOV trying to attach disk to the VM " + tmpl.getId() + " vmid=" + vm.getId()); _tmplService.attachIso(tmpl.getId(), vm.getId()); } @@ -5437,7 +5507,7 @@ protected List getSecurityGroupIdList(SecurityGroupAction cmd) { // this is an opportunity to verify that parameters that came in via the Details Map are OK // for example, minIops and maxIops should either both be specified or neither be specified and, // if specified, minIops should be <= maxIops - private void verifyDetails(Map details) { + private void verifyIopsFromDetails(Map details) { if (details != null) { String minIops = details.get("minIops"); String maxIops = details.get("maxIops"); @@ -7258,4 +7328,4 @@ private void checkUnmanagingVMVolumes(UserVmVO vm, List volumes) { } } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index 26f8675f2261..0d614349235b 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -491,7 +491,7 @@ private StoragePool getStoragePool(final UnmanagedInstanceTO.Disk disk, final Da final String dsPath = disk.getDatastorePath(); final String dsType = disk.getDatastoreType(); final String dsName = disk.getDatastoreName(); - if (dsType.equals("VMFS")) { + if (dsType.equals(Storage.StoragePoolType.VMFS.toString())) { List pools = primaryDataStoreDao.listPoolsByCluster(cluster.getId()); pools.addAll(primaryDataStoreDao.listByDataCenterId(zone.getId())); for (StoragePool pool : pools) { diff --git a/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java b/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java new file mode 100644 index 000000000000..a90cc0d0fd0f --- /dev/null +++ b/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.api.query; + +import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.api.query.dao.TemplateJoinDao; +import com.cloud.storage.ImageStore; +import com.cloud.storage.VMTemplateDetailVO; +import com.cloud.storage.dao.VMTemplateDetailsDao; +import com.cloud.utils.db.SearchCriteria; +import com.google.gson.Gson; +import org.apache.cloudstack.api.command.user.template.ListTemplateOVFProperties; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.TemplateOVFPropertyResponse; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.powermock.modules.junit4.PowerMockRunner; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.powermock.api.mockito.PowerMockito.when; + +@RunWith(PowerMockRunner.class) +public class QueryManagerImplTest { + public static final String TYPE = "type"; + + @Mock SearchCriteria sc; + @Mock VMTemplateDetailsDao vmTemplateDetailsDao; + @Mock TemplateJoinDao _templateJoinDao; + @Mock ListTemplateOVFProperties cmd; + + @InjectMocks + private QueryManagerImpl mgr = new QueryManagerImpl(); + + @Before + public void setup() { + } + + @Test public void listTemplateOVFProperties() { + when(vmTemplateDetailsDao.createSearchCriteria()).thenReturn(sc); + when(cmd.getTemplateId()).thenReturn(1L); + VMTemplateDetailVO detailsVO = createDetailVO("naam", TYPE, "value", "", "concise label", "very elaborate description"); + List list = createDetails(detailsVO); + when(vmTemplateDetailsDao.search(sc,null)).thenReturn(list); + when(_templateJoinDao.createTemplateOVFPropertyResponse(any())).thenReturn(new TemplateOVFPropertyResponse()); + + ListResponse result = mgr.listTemplateOVFProperties(cmd); + assertEquals("expecting 1 object returned",result.getCount().longValue(), 1l); + } + + List createDetails(VMTemplateDetailVO ... vos) { + List list = new ArrayList<>(); + for (VMTemplateDetailVO vo : vos) { + list.add(vo); + } + return list; + } + + private VMTemplateDetailVO createDetailVO(String name, String type, String value, String qualifiers, String label, String description) { + VMTemplateDetailVO vo = new VMTemplateDetailVO(); + vo.setName(ImageStore.ACS_PROPERTY_PREFIX + name); + OVFPropertyTO propertyTO = new OVFPropertyTO(name, type, value, qualifiers, true, label, description, false); + Gson gson = new Gson(); + vo.setValue(gson.toJson(propertyTO)); + return vo; + } +} \ No newline at end of file diff --git a/server/src/test/java/com/cloud/api/query/dao/TemplateJoinDaoImplTest.java b/server/src/test/java/com/cloud/api/query/dao/TemplateJoinDaoImplTest.java index a6b33edd1aa9..2bf43467c82e 100755 --- a/server/src/test/java/com/cloud/api/query/dao/TemplateJoinDaoImplTest.java +++ b/server/src/test/java/com/cloud/api/query/dao/TemplateJoinDaoImplTest.java @@ -16,9 +16,11 @@ // under the License. 
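A minimal, self-contained sketch (not part of the patch) of the Gson round-trip that the QueryManagerImplTest above relies on: an OVFPropertyTO serialized into a template detail value and read back. The OVFPropertyTO constructor arguments mirror the ones used in the test; the helper class and method names here are hypothetical.

```java
import com.cloud.agent.api.storage.OVFPropertyTO;
import com.google.gson.Gson;

public class OvfPropertyDetailRoundTrip {

    private static final Gson GSON = new Gson();

    // Store an OVF property the way the test does: as JSON in the template detail value.
    static String toDetailValue(OVFPropertyTO property) {
        return GSON.toJson(property);
    }

    // Recover the OVFPropertyTO from a previously stored detail value.
    static OVFPropertyTO fromDetailValue(String detailValue) {
        return GSON.fromJson(detailValue, OVFPropertyTO.class);
    }

    public static void main(String[] args) {
        // Constructor order copied from the test: key, type, value, qualifiers,
        // userConfigurable, label, description, password.
        OVFPropertyTO property = new OVFPropertyTO("naam", "type", "value", "", true,
                "concise label", "very elaborate description", false);
        String detailValue = toDetailValue(property);
        System.out.println(detailValue);  // JSON form kept in the template details row
        fromDetailValue(detailValue);     // parses back without loss
    }
}
```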
package com.cloud.api.query.dao; +import com.cloud.agent.api.storage.OVFPropertyTO; import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.Storage; import com.cloud.user.Account; +import org.apache.cloudstack.api.response.TemplateOVFPropertyResponse; import org.apache.cloudstack.api.response.TemplateResponse; import org.junit.Assert; import org.junit.Before; @@ -35,6 +37,8 @@ import java.util.Date; import java.util.Map; +import static org.junit.Assert.assertEquals; + @RunWith(PowerMockRunner.class) @PrepareForTest(ApiDBUtils.class) public class TemplateJoinDaoImplTest extends GenericDaoBaseWithTagInformationBaseTest { @@ -116,4 +120,12 @@ private void populateTemplateJoinVO() { ReflectionTestUtils.setField(template, "detailValue", detailValue); } + public static final String TYPE = "type"; + @Test + public void createTemplateOVFPropertyResponse() { + OVFPropertyTO ovfPropertyTO = new OVFPropertyTO("naam", TYPE, "value", "", true, "concise label", "very elaborate description", false); + + TemplateOVFPropertyResponse response = _templateJoinDaoImpl.createTemplateOVFPropertyResponse(ovfPropertyTO); + assertEquals(String.format("type should not be '%s' but '%s'",response.getType(), TYPE), TYPE, response.getType()); + } } \ No newline at end of file diff --git a/server/src/test/resources/createNetworkOffering.xml b/server/src/test/resources/createNetworkOffering.xml index 8dee0e8a54ee..55343ef835d4 100644 --- a/server/src/test/resources/createNetworkOffering.xml +++ b/server/src/test/resources/createNetworkOffering.xml @@ -60,4 +60,5 @@ + diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index ab98a812580f..d67d666121c9 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -53,6 +53,7 @@ import javax.naming.ConfigurationException; +import com.cloud.resource.ServerResource; import org.apache.cloudstack.framework.security.keystore.KeystoreManager; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; @@ -192,6 +193,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S private static final String POST_UPLOAD_KEY_LOCATION = "/etc/cloudstack/agent/ms-psk"; private static final Map updatableConfigData = Maps.newHashMap(); + static { updatableConfigData.put(PUBLIC_KEYS_FILE, METATDATA_DIR); @@ -445,7 +447,7 @@ public Answer execute(GetDatadisksCommand cmd) { Script command = new Script("cp", _timeout, s_logger); command.add(ovfFilePath); - command.add(ovfFilePath + ".orig"); + command.add(ovfFilePath + ServerResource.ORIGINAL_FILE_EXTENSION); String result = command.execute(); if (result != null) { String msg = "Unable to rename original OVF, error msg: " + result; @@ -455,7 +457,7 @@ public Answer execute(GetDatadisksCommand cmd) { s_logger.debug("Reading OVF " + ovfFilePath + " to retrive the number of disks present in OVA"); OVFHelper ovfHelper = new OVFHelper(); - List disks = ovfHelper.getOVFVolumeInfo(ovfFilePath); + List disks = ovfHelper.getOVFVolumeInfoFromFile(ovfFilePath); return new GetDatadisksAnswer(disks); } catch (Exception e) { String msg = "Get Datadisk Template Count failed 
due to " + e.getMessage(); @@ -513,7 +515,7 @@ public Answer execute(CreateDatadiskTemplateCommand cmd) { throw new Exception(msg); } command = new Script("cp", _timeout, s_logger); - command.add(ovfFilePath + ".orig"); + command.add(ovfFilePath + ServerResource.ORIGINAL_FILE_EXTENSION); command.add(newTmplDirAbsolute); result = command.execute(); if (result != null) { @@ -527,7 +529,7 @@ public Answer execute(CreateDatadiskTemplateCommand cmd) { // Create OVF for the disk String newOvfFilePath = newTmplDirAbsolute + File.separator + ovfFilePath.substring(ovfFilePath.lastIndexOf(File.separator) + 1); OVFHelper ovfHelper = new OVFHelper(); - ovfHelper.rewriteOVFFile(ovfFilePath + ".orig", newOvfFilePath, diskName); + ovfHelper.rewriteOVFFileForSingleDisk(ovfFilePath + ServerResource.ORIGINAL_FILE_EXTENSION, newOvfFilePath, diskName); postCreatePrivateTemplate(newTmplDirAbsolute, templateId, templateUniqueName, physicalSize, virtualSize); writeMetaOvaForTemplate(newTmplDirAbsolute, ovfFilePath.substring(ovfFilePath.lastIndexOf(File.separator) + 1), diskName, templateUniqueName, physicalSize); diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java index 4149cd174d13..03930ba60095 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java @@ -38,6 +38,8 @@ import javax.naming.ConfigurationException; import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.agent.api.storage.OVFVirtualHardwareSectionTO; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.storage.template.Processor; import com.cloud.storage.template.S3TemplateDownloader; import com.cloud.storage.template.TemplateDownloader; @@ -55,6 +57,7 @@ import com.cloud.storage.template.TARProcessor; import com.cloud.storage.template.VhdProcessor; import com.cloud.storage.template.TemplateConstants; +import org.apache.cloudstack.api.net.NetworkPrerequisiteTO; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.cloudstack.storage.command.DownloadProgressCommand; @@ -126,6 +129,9 @@ private static class DownloadJob { private final long id; private final ResourceType resourceType; private List ovfProperties; + private List networks; + private List disks; + private OVFVirtualHardwareSectionTO hardwareSection; public DownloadJob(TemplateDownloader td, String jobId, long id, String tmpltName, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, String installPathPrefix, ResourceType resourceType) { @@ -228,6 +234,30 @@ public List getOvfProperties() { public void setOvfProperties(List ovfProperties) { this.ovfProperties = ovfProperties; } + + public List getNetworks() { + return networks; + } + + public void setNetworks(List networks) { + this.networks = networks; + } + + public List getDisks() { + return disks; + } + + public void setDisks(List disks) { + this.disks = disks; + } + + public void setVirtualHardwareSection(OVFVirtualHardwareSectionTO section) { + this.hardwareSection = section; + } + + public OVFVirtualHardwareSectionTO getVirtualHardwareSection() { + return this.hardwareSection; + } } public static final Logger LOGGER = 
Logger.getLogger(DownloadManagerImpl.class); @@ -507,7 +537,7 @@ private String postProcessAfterDownloadComplete(DownloadJob dnld, String resourc while (en.hasNext()) { Processor processor = en.next(); - FormatInfo info = null; + FormatInfo info; try { info = processor.process(resourcePath, null, templateName, this._processTimeout); } catch (InternalErrorException e) { @@ -524,6 +554,13 @@ private String postProcessAfterDownloadComplete(DownloadJob dnld, String resourc if (CollectionUtils.isNotEmpty(info.ovfProperties)) { dnld.setOvfProperties(info.ovfProperties); } + if (CollectionUtils.isNotEmpty(info.networks)) { + dnld.setNetworks(info.networks); + } + if (CollectionUtils.isNotEmpty(info.disks)) { + dnld.setDisks(info.disks); + } + dnld.setVirtualHardwareSection(info.hardwareSection); break; } } @@ -827,6 +864,13 @@ private DownloadAnswer handleDownloadProgressCmd(SecondaryStorageResource resour if (CollectionUtils.isNotEmpty(dj.getOvfProperties())) { answer.setOvfProperties(dj.getOvfProperties()); } + if (CollectionUtils.isNotEmpty(dj.getNetworks())) { + answer.setNetworkRequirements(dj.getNetworks()); + } + if (CollectionUtils.isNotEmpty(dj.getDisks())) { + answer.setDisks(dj.getDisks()); + } + answer.setOvfHardwareSection(dj.getVirtualHardwareSection()); jobs.remove(jobId); return answer; default: diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index ef98b1358987..d26ce228e832 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -195,8 +195,10 @@ 'KubernetesSupportedVersion': 'Kubernetes Service', 'KubernetesCluster': 'Kubernetes Service', 'UnmanagedInstance': 'Virtual Machine', - 'Rolling': 'Rolling Maintenance' - } + 'Rolling': 'Rolling Maintenance', + 'importVsphereStoragePolicies' : 'vSphere storage policies', + 'listVsphereStoragePolicies' : 'vSphere storage policies' +} categories = {} diff --git a/ui/l10n/en.js b/ui/l10n/en.js index 8e25c54b04cd..c81c2814e8b6 100644 --- a/ui/l10n/en.js +++ b/ui/l10n/en.js @@ -673,6 +673,7 @@ var dictionary = { "label.deleting.failed":"Deleting Failed", "label.deleting.processing":"Deleting....", "label.deny":"Deny", +"label.deploy.as.is":"Deploy As-Is", "label.deployment.planner":"Deployment planner", "label.description":"Description", "label.destination.physical.network.id":"Destination physical network ID", diff --git a/ui/scripts/docs.js b/ui/scripts/docs.js index 7f29f2b3ac2d..19a7ad0441b3 100755 --- a/ui/scripts/docs.js +++ b/ui/scripts/docs.js @@ -711,7 +711,7 @@ cloudStack.docs = { externalLink: '' }, helpPrimaryStorageProtocol: { - desc: 'For XenServer, choose NFS, iSCSI, or PreSetup. For KVM, choose NFS, SharedMountPoint, RDB, CLVM or Gluster. For vSphere, choose VMFS (iSCSI or FiberChannel) or NFS. For Hyper-V, choose SMB/CIFS. For LXC, choose NFS or SharedMountPoint. For OVM, choose NFS or ocfs2.', + desc: 'For XenServer, choose NFS, iSCSI, or PreSetup. For KVM, choose NFS, SharedMountPoint, RDB, CLVM or Gluster. For vSphere, choose PreSetup (VMFS or iSCSI or FiberChannel or vSAN or vVols) or NFS. For Hyper-V, choose SMB/CIFS. For LXC, choose NFS or SharedMountPoint. 
For OVM, choose NFS or ocfs2.', externalLink: '' }, helpPrimaryStorageServer: { @@ -1243,6 +1243,10 @@ cloudStack.docs = { desc: 'The Management Server will download the file from the specified URL, such as http://my.web.server/filename.vhd.gz', externalLink: '' }, + helpRegisterTemplateDeployAsIs: { + desc: 'Vmware Only: Deploy with specifications from OVF instead of orchestrated specs', + externalLink: '' + }, helpRegisterTemplateDirectDownload: { desc: 'KVM Only: Secondary Storage is bypassed and template/ISO is downloaded to Primary Storage on deployment', externalLink: '' diff --git a/ui/scripts/instanceWizard.js b/ui/scripts/instanceWizard.js index f06cd9046d48..70871f292fa4 100644 --- a/ui/scripts/instanceWizard.js +++ b/ui/scripts/instanceWizard.js @@ -1054,8 +1054,8 @@ } }); for (var k = 0; k < deployOvfProperties.length; k++) { - deployVmData["ovfproperties[" + k + "].key"] = deployOvfProperties[k].key; - deployVmData["ovfproperties[" + k + "].value"] = deployOvfProperties[k].value; + deployVmData["properties[" + k + "].key"] = deployOvfProperties[k].key; + deployVmData["properties[" + k + "].value"] = deployOvfProperties[k].value; } } diff --git a/ui/scripts/system.js b/ui/scripts/system.js index 29f428a4f02b..7f0ff95f996c 100755 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -18728,8 +18728,8 @@ description: "nfs" }); items.push({ - id: "vmfs", - description: "vmfs" + id: "presetup", + description: "presetup" }); items.push({ id: "custom", @@ -18878,7 +18878,7 @@ $form.find('.form-item[rel=rbdsecret]').hide(); $form.find('.form-item[rel=glustervolume]').hide(); - } else if (protocol == "PreSetup") { + } else if (protocol == "PreSetup" && selectedClusterObj.hypervisortype != "VMware") { $form.find('.form-item[rel=server]').hide(); $form.find('.form-item[rel=server]').find(".value").find("input").val("localhost"); @@ -18976,7 +18976,7 @@ $form.find('.form-item[rel=rbdsecret]').hide(); $form.find('.form-item[rel=glustervolume]').hide(); - } else if (protocol == "vmfs") { + } else if (protocol == "presetup" && selectedClusterObj.hypervisortype == "VMware") { $form.find('.form-item[rel=server]').css('display', 'inline-block'); $form.find('.form-item[rel=server]').find(".value").find("input").val(""); @@ -19401,7 +19401,7 @@ array1.push("&details[0].user=" + args.data.smbUsername); array1.push("&details[1].password=" + encodeURIComponent(args.data.smbPassword)); array1.push("&details[2].domain=" + args.data.smbDomain); - } else if (args.data.protocol == "PreSetup") { + } else if (args.data.protocol == "PreSetup" && selectedClusterObj.hypervisortype != "VMware") { var path = args.data.path; if (path.substring(0, 1) != "/") path = "/" + path; @@ -19427,12 +19427,12 @@ var rbdid = args.data.rbdid; var rbdsecret = args.data.rbdsecret; url = rbdURL(rbdmonitor, rbdpool, rbdid, rbdsecret); - } else if (args.data.protocol == "vmfs") { + } else if (args.data.protocol == "presetup" && selectedClusterObj.hypervisortype == "VMware") { var path = args.data.vCenterDataCenter; if (path.substring(0, 1) != "/") path = "/" + path; path += "/" + args.data.vCenterDataStore; - url = vmfsURL("dummy", path); + url = presetupURL("dummy", path); } else if (args.data.protocol == "gluster") { var glustervolume = args.data.glustervolume; diff --git a/ui/scripts/templates.js b/ui/scripts/templates.js index 1516286f01a9..d8bcfec01e15 100644 --- a/ui/scripts/templates.js +++ b/ui/scripts/templates.js @@ -248,6 +248,7 @@ $form.find('.form-item[rel=rootDiskControllerType]').css('display', 'inline-block'); 
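The instance wizard change above renames the request parameter prefix from ovfproperties[i] to properties[i] when posting OVF property values to deployVirtualMachine. A hedged, server-agnostic sketch of how such indexed map parameters can be flattened; the helper name, the plain Map representation, and the sample property keys are illustrative only, not the actual UI or API code.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class DeployPropertiesParamBuilder {

    // Flatten a key->value map into the indexed form the wizard posts,
    // e.g. properties[0].key=<name>, properties[0].value=<value>
    static Map<String, String> toIndexedParams(Map<String, String> properties) {
        Map<String, String> params = new LinkedHashMap<>();
        int i = 0;
        for (Map.Entry<String, String> entry : properties.entrySet()) {
            params.put("properties[" + i + "].key", entry.getKey());
            params.put("properties[" + i + "].value", entry.getValue());
            i++;
        }
        return params;
    }

    public static void main(String[] args) {
        Map<String, String> props = new LinkedHashMap<>();
        props.put("va-ssl-passphrase", "changeme"); // hypothetical OVF property key
        toIndexedParams(props).forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```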
$form.find('.form-item[rel=nicAdapterType]').css('display', 'inline-block'); $form.find('.form-item[rel=keyboardType]').css('display', 'inline-block'); + $form.find('.form-item[rel=deployAsIs]').css('display', 'inline-block'); $form.find('.form-item[rel=xenserverToolsVersion61plus]').hide(); $form.find('.form-item[rel=rootDiskControllerTypeKVM]').hide(); $form.find('.form-item[rel=directdownload]').hide(); @@ -258,6 +259,7 @@ $form.find('.form-item[rel=keyboardType]').hide(); $form.find('.form-item[rel=rootDiskControllerTypeKVM]').hide(); $form.find('.form-item[rel=directdownload]').hide(); + $form.find('.form-item[rel=deployAsIs]').hide(); $form.find('.form-item[rel=requireshvm]').css('display', 'inline-block'); if (isAdmin()) { @@ -268,6 +270,7 @@ $form.find('.form-item[rel=nicAdapterType]').hide(); $form.find('.form-item[rel=keyboardType]').hide(); $form.find('.form-item[rel=xenserverToolsVersion61plus]').hide(); + $form.find('.form-item[rel=deployAsIs]').hide(); $form.find('.form-item[rel=rootDiskControllerTypeKVM]').css('display', 'inline-block'); $('#label_root_disk_controller').prop('selectedIndex', 2); $form.find('.form-item[rel=requireshvm]').css('display', 'inline-block'); @@ -281,6 +284,7 @@ $form.find('.form-item[rel=xenserverToolsVersion61plus]').hide(); $form.find('.form-item[rel=rootDiskControllerTypeKVM]').hide(); $form.find('.form-item[rel=directdownload]').hide(); + $form.find('.form-item[rel=deployAsIs]').hide(); $form.find('.form-item[rel=requireshvm]').css('display', 'inline-block'); } }); @@ -463,6 +467,13 @@ }); } }, + deployAsIs : { + label: 'label.deploy.as.is', + docID: 'helpRegisterTemplateDeployAsIs', + isBoolean: true, + dependsOn: 'hypervisor', + isHidden: true + }, // fields for hypervisor == "VMware" (ends here) format: { @@ -683,6 +694,11 @@ 'details[0].keyboard': args.data.keyboardType }); } + if (args.$form.find('.form-item[rel=deployAsIs]').css("display") != "none" && args.data.deployAsIs != "") { + $.extend(data, { + deployAsIs: (args.data.deployAsIs == "on") ? 
"true" : "false" + }); + } // for hypervisor == VMware (ends here) $.ajax({ @@ -1919,6 +1935,11 @@ isBoolean: true, converter: cloudStack.converters.toBooleanText }, + deployAsIs: { + label: 'label.deploy.as.is', + isBoolean: true, + converter: cloudStack.converters.toBooleanText + }, isextractable: { label: 'label.extractable.lower', isBoolean: true, @@ -2851,6 +2872,11 @@ docID: 'helpRegisterISOFeatured', isBoolean: true, isHidden: true + }, + deployAsIs : { + label: 'label.deploy.as.is', + docID: 'helpRegisterTemplateDeployAsIs', + isBoolean: true } } }, @@ -2864,7 +2890,8 @@ zoneid: args.data.zone, isextractable: (args.data.isExtractable == "on"), bootable: (args.data.isBootable == "on"), - directdownload: (args.data.directdownload == "on") + directdownload: (args.data.directdownload == "on"), + deployAsIs: (args.data.deployAsIs == "on") }; if (args.$form.find('.form-item[rel=osTypeId]').css("display") != "none") { @@ -2897,15 +2924,6 @@ data: items[0] }); - /* - if(items.length > 1) { - for(var i=1; i') - .attr({maxlength : length, type: fieldType}) + .attr({pattern : '.{' + minLen + ',' + maxLen + '}'}) + .attr({type: fieldType}) .addClass('name').val(_s(this[fields.value])) } } else { diff --git a/ui/scripts/zoneWizard.js b/ui/scripts/zoneWizard.js index f4d0e5db5214..b7e5d019ce5d 100755 --- a/ui/scripts/zoneWizard.js +++ b/ui/scripts/zoneWizard.js @@ -1434,8 +1434,8 @@ description: "nfs" }); items.push({ - id: "vmfs", - description: "vmfs" + id: "presetup", + description: "presetup" }); args.response.success({ data: items @@ -1576,7 +1576,7 @@ $form.find('[rel=rbdsecret]').hide(); $form.find('[rel=glustervolume]').hide(); - } else if (protocol == "PreSetup") { + } else if (protocol == "PreSetup" && selectedClusterObj.hypervisortype != "VMware") { $form.find('[rel=server]').hide(); $form.find('[rel=server]').find(".value").find("input").val("localhost"); @@ -1649,7 +1649,7 @@ $form.find('[rel=rbdsecret]').hide(); $form.find('[rel=glustervolume]').hide(); - } else if (protocol == "vmfs") { + } else if (protocol == "presetup" && selectedClusterObj.hypervisortype == "VMware") { $form.find('[rel=server]').css('display', 'block'); $form.find('[rel=server]').find(".value").find("input").val(""); @@ -4529,7 +4529,7 @@ array1.push("&details[0].user=" + args.data.primaryStorage.smbUsername); array1.push("&details[1].password=" + encodeURIComponent(args.data.primaryStorage.smbPassword)); array1.push("&details[2].domain=" + args.data.primaryStorage.smbDomain); - } else if (args.data.primaryStorage.protocol == "PreSetup") { + } else if (args.data.primaryStorage.protocol == "PreSetup" && selectedClusterObj.hypervisortype != "VMware") { var path = args.data.primaryStorage.path; if (path.substring(0, 1) != "/") path = "/" + path; @@ -4555,12 +4555,12 @@ var rbdid = args.data.primaryStorage.rbdid; var rbdsecret = args.data.primaryStorage.rbdsecret; url = rbdURL(rbdmonitor, rbdpool, rbdid, rbdsecret); - } else if (args.data.primaryStorage.protocol == "vmfs") { + } else if (args.data.primaryStorage.protocol == "presetup" && selectedClusterObj.hypervisortype == "VMware") { var path = args.data.primaryStorage.vCenterDataCenter; if (path.substring(0, 1) != "/") path = "/" + path; path += "/" + args.data.primaryStorage.vCenterDataStore; - url = vmfsURL("dummy", path); + url = presetupURL("dummy", path); } else { var iqn = args.data.primaryStorage.iqn; if (iqn.substring(0, 1) != "/") diff --git a/vmware-base/pom.xml b/vmware-base/pom.xml index c84580f90b4b..f245d392a2e3 100644 --- a/vmware-base/pom.xml 
+++ b/vmware-base/pom.xml @@ -57,6 +57,24 @@ ${cs.vmware.api.version} compile + + com.vmware.vapi + vapi-runtime + ${vapi.version} + compile + + + com.vmware.vapi + vapi-authentication + ${vapi.version} + compile + + + com.vmware.vsphereautomation.client + vsphereautomation-client-sdk + ${vsphereautomationsdk.version} + compile + org.apache.axis axis @@ -75,5 +93,11 @@ ${cs.jaxws.version} pom + + com.cloud.com.vmware + vmware-pbm + ${cs.vmware.api.version} + compile + diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/VmwareResourceException.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/VmwareResourceException.java new file mode 100644 index 000000000000..caea13fd6907 --- /dev/null +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/VmwareResourceException.java @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.hypervisor.vmware; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class VmwareResourceException extends CloudRuntimeException { + public VmwareResourceException(String message) { + super(message); + } + + public VmwareResourceException(String message, Throwable th) { + super(message, th); + } +} diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/ClusterMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/ClusterMO.java index b8afdc84cfde..1048abb3460c 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/ClusterMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/ClusterMO.java @@ -385,7 +385,7 @@ public boolean createVm(VirtualMachineConfigSpec vmSpec) throws Exception { } @Override - public void importVmFromOVF(String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption) throws Exception { + public void importVmFromOVF(String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption, boolean stripNeworks) throws Exception { if (s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - importVmFromOVF(). target MOR: " + _mor.getValue() + ", ovfFilePath: " + ovfFilePath + ", vmName: " + vmName + ", datastore: " + dsMo.getMor().getValue() + ", diskOption: " + diskOption); @@ -396,7 +396,7 @@ public void importVmFromOVF(String ovfFilePath, String vmName, DatastoreMO dsMo, if (s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - importVmFromOVF(). 
resource pool: " + morRp.getValue()); - HypervisorHostHelper.importVmFromOVF(this, ovfFilePath, vmName, dsMo, diskOption, morRp, null); + HypervisorHostHelper.importVmFromOVF(this, ovfFilePath, vmName, dsMo, diskOption, morRp, null, stripNeworks); if (s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - importVmFromOVF() done"); diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatacenterMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatacenterMO.java index b0b91fb7d5b2..af89757d521f 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatacenterMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatacenterMO.java @@ -186,6 +186,20 @@ public ManagedObjectReference findDatastore(String name) throws Exception { return null; } + public ManagedObjectReference listDatastore(String name) throws Exception { + assert (name != null); + + List ocs = getDatastorePropertiesOnDatacenter(new String[] {"name"}); + if (ocs != null) { + for (ObjectContent oc : ocs) { + if (oc.getPropSet().get(0).getVal().toString().equals(name)) { + return oc.getObj(); + } + } + } + return null; + } + public ManagedObjectReference findHost(String name) throws Exception { List ocs = getHostPropertiesOnDatacenterHostFolder(new String[] {"name"}); diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatastoreMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatastoreMO.java index fa0c380eb062..68f5cbda954d 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatastoreMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatastoreMO.java @@ -16,11 +16,10 @@ // under the License. package com.cloud.hypervisor.vmware.mo; -import java.util.ArrayList; -import java.util.List; - -import org.apache.log4j.Logger; - +import com.cloud.exception.CloudException; +import com.cloud.hypervisor.vmware.util.VmwareContext; +import com.cloud.utils.Pair; +import com.vmware.pbm.PbmProfile; import com.vmware.vim25.DatastoreHostMount; import com.vmware.vim25.DatastoreSummary; import com.vmware.vim25.FileInfo; @@ -35,10 +34,10 @@ import com.vmware.vim25.PropertySpec; import com.vmware.vim25.SelectionSpec; import com.vmware.vim25.TraversalSpec; +import org.apache.log4j.Logger; -import com.cloud.exception.CloudException; -import com.cloud.hypervisor.vmware.util.VmwareContext; -import com.cloud.utils.Pair; +import java.util.ArrayList; +import java.util.List; public class DatastoreMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(DatastoreMO.class); @@ -62,7 +61,7 @@ public String getName() throws Exception { return _name; } - public DatastoreSummary getSummary() throws Exception { + public DatastoreSummary getDatastoreSummary() throws Exception { return (DatastoreSummary)_context.getVimClient().getDynamicProperty(_mor, "summary"); } @@ -258,6 +257,12 @@ public boolean moveDatastoreFile(String srcFilePath, ManagedObjectReference morS if (!DatastoreFile.isFullDatastorePath(destFullPath)) destFullPath = String.format("[%s] %s", destDsName, destFilePath); + DatastoreMO srcDsMo = new DatastoreMO(_context, morDestDs); + if (!srcDsMo.fileExists(srcFullPath)) { + s_logger.error(String.format("Cannot move file to destination datastore due to file %s does not exists", srcFullPath)); + return false; + } + ManagedObjectReference morTask = _context.getService().moveDatastoreFileTask(morFileManager, srcFullPath, morSrcDc, destFullPath, morDestDc, forceOverwrite); boolean result = 
_context.getVimClient().waitForTask(morTask); @@ -265,52 +270,11 @@ public boolean moveDatastoreFile(String srcFilePath, ManagedObjectReference morS _context.waitForTaskProgressDone(morTask); return true; } else { - s_logger.error("VMware moveDatgastoreFile_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); + s_logger.error("VMware moveDatastoreFile_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } return false; } - public String[] getVmdkFileChain(String rootVmdkDatastoreFullPath) throws Exception { - Pair dcPair = getOwnerDatacenter(); - - List files = new ArrayList<>(); - files.add(rootVmdkDatastoreFullPath); - - String currentVmdkFullPath = rootVmdkDatastoreFullPath; - while (true) { - String url = getContext().composeDatastoreBrowseUrl(dcPair.second(), currentVmdkFullPath); - byte[] content = getContext().getResourceContent(url); - if (content == null || content.length == 0) - break; - - VmdkFileDescriptor descriptor = new VmdkFileDescriptor(); - descriptor.parse(content); - - String parentFileName = descriptor.getParentFileName(); - if (parentFileName == null) - break; - - if (parentFileName.startsWith("/")) { - // when parent file is not at the same directory as it is, assume it is at parent directory - // this is only valid in Apache CloudStack primary storage deployment - DatastoreFile dsFile = new DatastoreFile(currentVmdkFullPath); - String dir = dsFile.getDir(); - if (dir != null && dir.lastIndexOf('/') > 0) - dir = dir.substring(0, dir.lastIndexOf('/')); - else - dir = ""; - - currentVmdkFullPath = new DatastoreFile(dsFile.getDatastoreName(), dir, parentFileName.substring(parentFileName.lastIndexOf('/') + 1)).getPath(); - files.add(currentVmdkFullPath); - } else { - currentVmdkFullPath = DatastoreFile.getCompanionDatastorePath(currentVmdkFullPath, parentFileName); - files.add(currentVmdkFullPath); - } - } - - return files.toArray(new String[0]); - } - @Deprecated public String[] listDirContent(String path) throws Exception { String fullPath = path; @@ -462,4 +426,19 @@ public boolean isAccessibleToHost(String hostValue) throws Exception { } return isAccessible; } + + public boolean isDatastoreStoragePolicyComplaint(String storagePolicyId) throws Exception { + PbmProfileManagerMO profMgrMo = new PbmProfileManagerMO(_context); + PbmProfile profile = profMgrMo.getStorageProfile(storagePolicyId); + + PbmPlacementSolverMO placementSolverMo = new PbmPlacementSolverMO(_context); + boolean isDatastoreCompatible = placementSolverMo.isDatastoreCompatibleWithStorageProfile(_mor, profile); + + return isDatastoreCompatible; + } + + public String getDatastoreType() throws Exception { + DatastoreSummary summary = _context.getVimClient().getDynamicProperty(getMor(), "summary"); + return summary.getType(); + } } diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java index f38f610e145d..30798e31e194 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java @@ -16,11 +16,7 @@ // under the License. 
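A hedged usage sketch for the new DatastoreMO#isDatastoreStoragePolicyComplaint helper added above: given a list of candidate datastores, keep only those whose placement is compatible with a storage policy. The caller shape, variable names, and exception handling are illustrative assumptions; the DatastoreMO constructor and the compliance method are the ones introduced in this patch.

```java
import java.util.ArrayList;
import java.util.List;

import com.cloud.hypervisor.vmware.mo.DatastoreMO;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.vmware.vim25.ManagedObjectReference;

public class PolicyCompliantDatastoreFilter {

    // Keep only the datastores that satisfy the given vSphere storage policy.
    static List<ManagedObjectReference> filterByPolicy(VmwareContext context,
            List<ManagedObjectReference> datastores, String storagePolicyId) throws Exception {
        List<ManagedObjectReference> compliant = new ArrayList<>();
        for (ManagedObjectReference morDs : datastores) {
            DatastoreMO dsMo = new DatastoreMO(context, morDs);
            // Delegates to PbmProfileManagerMO / PbmPlacementSolverMO as wired up in this patch.
            if (dsMo.isDatastoreStoragePolicyComplaint(storagePolicyId)) {
                compliant.add(morDs);
            }
        }
        return compliant;
    }
}
```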
package com.cloud.hypervisor.vmware.mo; -import java.net.URI; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - +import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.CustomFieldStringValue; import com.vmware.vim25.DatastoreInfo; import com.vmware.vim25.DynamicProperty; @@ -35,12 +31,18 @@ import com.vmware.vim25.ObjectSpec; import com.vmware.vim25.PropertyFilterSpec; import com.vmware.vim25.PropertySpec; +import com.vmware.vim25.RetrieveOptions; +import com.vmware.vim25.RetrieveResult; +import com.vmware.vim25.SelectionSpec; import com.vmware.vim25.TraversalSpec; import com.vmware.vim25.VmfsDatastoreCreateSpec; import com.vmware.vim25.VmfsDatastoreExpandSpec; import com.vmware.vim25.VmfsDatastoreOption; -import com.cloud.hypervisor.vmware.util.VmwareContext; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; public class HostDatastoreSystemMO extends BaseMO { @@ -53,6 +55,14 @@ public HostDatastoreSystemMO(VmwareContext context, String morType, String morVa } public ManagedObjectReference findDatastore(String name) throws Exception { + ManagedObjectReference morDatastore = findSpecificDatastore(name); + if (morDatastore == null) { + morDatastore = findDatastoreCluster(name); + } + return morDatastore; + } + + public ManagedObjectReference findSpecificDatastore(String name) throws Exception { // added Apache CloudStack specific name convention, we will use custom field "cloud.uuid" as datastore name as well CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, _context.getServiceContent().getCustomFieldsManager()); int key = cfmMo.getCustomFieldKey("Datastore", CustomFieldConstants.CLOUD_UUID); @@ -79,6 +89,33 @@ public ManagedObjectReference findDatastore(String name) throws Exception { return null; } + public ManagedObjectReference findDatastoreCluster(String name) throws Exception { + // added Apache CloudStack specific name convention, we will use custom field "cloud.uuid" as datastore name as well + CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, _context.getServiceContent().getCustomFieldsManager()); + int key = cfmMo.getCustomFieldKey("StoragePod", CustomFieldConstants.CLOUD_UUID); + assert (key != 0); + + List ocs = getDatastoreClusterPropertiesOnHostDatastoreSystem(new String[] {"name", String.format("value[%d]", key)}); + if (ocs != null) { + for (ObjectContent oc : ocs) { + if (oc.getPropSet().get(0).getVal().equals(name)) + return oc.getObj(); + + if (oc.getPropSet().size() > 1) { + DynamicProperty prop = oc.getPropSet().get(1); + if (prop != null && prop.getVal() != null) { + if (prop.getVal() instanceof CustomFieldStringValue) { + String val = ((CustomFieldStringValue)prop.getVal()).getValue(); + if (val.equalsIgnoreCase(name)) + return oc.getObj(); + } + } + } + } + } + return null; + } + public List queryUnresolvedVmfsVolumes() throws Exception { return _context.getService().queryUnresolvedVmfsVolumes(_mor); } @@ -251,4 +288,90 @@ public List getDatastorePropertiesOnHostDatastoreSystem(String[] return _context.getService().retrieveProperties(_context.getPropertyCollector(), pfSpecArr); } + + public List getDatastoreClusterPropertiesOnHostDatastoreSystem(String[] propertyPaths) throws Exception { + ManagedObjectReference retVal = null; + // Create Property Spec + PropertySpec propertySpec = new PropertySpec(); + propertySpec.setAll(Boolean.FALSE); + propertySpec.setType("StoragePod"); + 
propertySpec.getPathSet().addAll(Arrays.asList(propertyPaths)); + + // Now create Object Spec + ObjectSpec objectSpec = new ObjectSpec(); + objectSpec.setObj(getContext().getRootFolder()); + objectSpec.setSkip(Boolean.TRUE); + objectSpec.getSelectSet().addAll( + Arrays.asList(getStorageTraversalSpec())); + + // Create PropertyFilterSpec using the PropertySpec and ObjectPec + // created above. + PropertyFilterSpec propertyFilterSpec = new PropertyFilterSpec(); + propertyFilterSpec.getPropSet().add(propertySpec); + propertyFilterSpec.getObjectSet().add(objectSpec); + + List listpfs = new ArrayList(); + listpfs.add(propertyFilterSpec); + return retrievePropertiesAllObjects(listpfs); + } + + private SelectionSpec[] getStorageTraversalSpec() { + // create a traversal spec that start from root folder + + SelectionSpec ssFolders = new SelectionSpec(); + ssFolders.setName("visitFolders"); + + TraversalSpec datacenterSpec = new TraversalSpec(); + datacenterSpec.setName("dcTodf"); + datacenterSpec.setType("Datacenter"); + datacenterSpec.setPath("datastoreFolder"); + datacenterSpec.setSkip(Boolean.FALSE); + datacenterSpec.getSelectSet().add(ssFolders); + + TraversalSpec visitFolder = new TraversalSpec(); + visitFolder.setType("Folder"); + visitFolder.setName("visitFolders"); + visitFolder.setPath("childEntity"); + visitFolder.setSkip(Boolean.FALSE); + + List ssSpecList = new ArrayList(); + ssSpecList.add(datacenterSpec); + ssSpecList.add(ssFolders); + + visitFolder.getSelectSet().addAll(ssSpecList); + return (new SelectionSpec[]{visitFolder}); + } + + private List retrievePropertiesAllObjects( + List listpfs) throws Exception { + + RetrieveOptions propObjectRetrieveOpts = new RetrieveOptions(); + + List listobjcontent = new ArrayList(); + + RetrieveResult rslts = + getContext().getService().retrievePropertiesEx(getContext().getServiceContent().getPropertyCollector(), listpfs, + propObjectRetrieveOpts); + if (rslts != null && rslts.getObjects() != null + && !rslts.getObjects().isEmpty()) { + listobjcontent.addAll(rslts.getObjects()); + } + String token = null; + if (rslts != null && rslts.getToken() != null) { + token = rslts.getToken(); + } + while (token != null && !token.isEmpty()) { + rslts = + getContext().getService().continueRetrievePropertiesEx(getContext().getServiceContent().getPropertyCollector(), token); + token = null; + if (rslts != null) { + token = rslts.getToken(); + if (rslts.getObjects() != null && !rslts.getObjects().isEmpty()) { + listobjcontent.addAll(rslts.getObjects()); + } + } + } + return listobjcontent; + } + } diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java index 7877db980f46..2b20a92adbc4 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java @@ -16,15 +16,9 @@ // under the License. 
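With the fallback above, findDatastore() can now resolve either a plain Datastore or a datastore cluster (StoragePod). A hedged sketch of how a caller can branch on the returned MOR type, mirroring the "StoragePod" check used in mountDatastore further down; the helper and its return format are hypothetical.

```java
import com.cloud.hypervisor.vmware.mo.HostDatastoreSystemMO;
import com.vmware.vim25.ManagedObjectReference;

public class DatastoreLookupExample {

    // Resolve a pool name/UUID and report whether it maps to a plain datastore
    // or to a datastore cluster (StoragePod), which findDatastore() now falls back to.
    static String describe(HostDatastoreSystemMO hostDatastoreSystemMo, String poolName) throws Exception {
        ManagedObjectReference mor = hostDatastoreSystemMo.findDatastore(poolName);
        if (mor == null) {
            return "not found: " + poolName;
        }
        return "StoragePod".equals(mor.getType())
                ? "datastore cluster: " + mor.getValue()
                : "datastore: " + mor.getValue();
    }
}
```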
package com.cloud.hypervisor.vmware.mo; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.regex.Pattern; - -import org.apache.log4j.Logger; - +import com.cloud.hypervisor.vmware.util.VmwareContext; +import com.cloud.hypervisor.vmware.util.VmwareHelper; +import com.cloud.utils.Pair; import com.google.gson.Gson; import com.vmware.vim25.AboutInfo; import com.vmware.vim25.AlreadyExistsFaultMsg; @@ -63,9 +57,14 @@ import com.vmware.vim25.TraversalSpec; import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualNicManagerNetConfig; -import com.cloud.hypervisor.vmware.util.VmwareContext; -import com.cloud.hypervisor.vmware.util.VmwareHelper; -import com.cloud.utils.Pair; +import org.apache.log4j.Logger; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Pattern; public class HostMO extends BaseMO implements VmwareHypervisorHost { private static final Logger s_logger = Logger.getLogger(HostMO.class); @@ -766,19 +765,19 @@ public void importVmFromOVF(String ovfFilePath, String vmName, String datastoreN if (dsMo == null) throw new Exception("Invalid datastore name: " + datastoreName); - importVmFromOVF(ovfFilePath, vmName, dsMo, diskOption); + importVmFromOVF(ovfFilePath, vmName, dsMo, diskOption, true); if (s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - importVmFromOVF() done"); } @Override - public void importVmFromOVF(String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption) throws Exception { + public void importVmFromOVF(String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption, boolean stripNeworks) throws Exception { ManagedObjectReference morRp = getHyperHostOwnerResourcePool(); assert (morRp != null); - HypervisorHostHelper.importVmFromOVF(this, ovfFilePath, vmName, dsMo, diskOption, morRp, _mor); + HypervisorHostHelper.importVmFromOVF(this, ovfFilePath, vmName, dsMo, diskOption, morRp, _mor, stripNeworks); } @Override @@ -839,6 +838,7 @@ public ManagedObjectReference mountDatastore(boolean vmfsDatastore, String poolH s_logger.trace("vCenter API trace - mountDatastore(). target MOR: " + _mor.getValue() + ", vmfs: " + vmfsDatastore + ", poolHost: " + poolHostAddress + ", poolHostPort: " + poolHostPort + ", poolPath: " + poolPath + ", poolUuid: " + poolUuid); + DatastoreMO dsMo = null; HostDatastoreSystemMO hostDatastoreSystemMo = getHostDatastoreSystemMO(); ManagedObjectReference morDatastore = hostDatastoreSystemMo.findDatastore(poolUuid); if (morDatastore == null) { @@ -865,22 +865,30 @@ public ManagedObjectReference mountDatastore(boolean vmfsDatastore, String poolH s_logger.trace("vCenter API trace - mountDatastore() done(failed)"); throw new Exception(msg); } + dsMo = new DatastoreMO(_context, morDatastore); } else { morDatastore = _context.getDatastoreMorByPath(poolPath); if (morDatastore == null) { - String msg = "Unable to create VMFS datastore. host: " + poolHostAddress + ", port: " + poolHostPort + ", path: " + poolPath + ", uuid: " + poolUuid; - s_logger.error(msg); + morDatastore = findDatastore(_context.getDatastoreNameFromPath(poolPath)); + if (morDatastore == null) { + String msg = "Unable to create VMFS datastore. 
host: " + poolHostAddress + ", port: " + poolHostPort + ", path: " + poolPath + ", uuid: " + poolUuid; + s_logger.error(msg); - if (s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - mountDatastore() done(failed)"); - throw new Exception(msg); + if (s_logger.isTraceEnabled()) + s_logger.trace("vCenter API trace - mountDatastore() done(failed)"); + throw new Exception(msg); + } } - DatastoreMO dsMo = new DatastoreMO(_context, morDatastore); + dsMo = new DatastoreMO(_context, morDatastore); dsMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, poolUuid); } } + if (dsMo != null && !"StoragePod".equals(morDatastore.getType())) { + HypervisorHostHelper.createBaseFolderInDatastore(dsMo, this); + } + if (s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - mountDatastore() done(successfully)"); diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java index 6e4980c1e91c..a8ebf6b2729b 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java @@ -119,6 +119,13 @@ public static long calcTotalBytes(OvfCreateImportSpecResult ovfImportResult) { return totalBytes; } + /** + * should be called {code}String readFileContents(String filePath){code}, does nothing special, like checking if this is indeed adhering to ovf format. + * + * @param ovfFilePath + * @return + * @throws IOException + */ public static String readOvfContent(String ovfFilePath) throws IOException { StringBuffer strContent = new StringBuffer(); BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(ovfFilePath),"UTF-8")); diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index d9604ac01e21..618700804029 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -37,6 +37,18 @@ import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; +import com.cloud.hypervisor.vmware.VmwareResourceException; +import com.vmware.vim25.ConcurrentAccessFaultMsg; +import com.vmware.vim25.DuplicateNameFaultMsg; +import com.vmware.vim25.FileFaultFaultMsg; +import com.vmware.vim25.InsufficientResourcesFaultFaultMsg; +import com.vmware.vim25.InvalidDatastoreFaultMsg; +import com.vmware.vim25.InvalidNameFaultMsg; +import com.vmware.vim25.InvalidStateFaultMsg; +import com.vmware.vim25.OutOfBoundsFaultMsg; +import com.vmware.vim25.RuntimeFaultFaultMsg; +import com.vmware.vim25.TaskInProgressFaultMsg; +import com.vmware.vim25.VmConfigFaultFaultMsg; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.StringUtils; @@ -54,6 +66,7 @@ import com.cloud.hypervisor.vmware.util.VmwareHelper; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.offering.NetworkOffering; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.ActionDelegate; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; @@ -138,6 +151,7 @@ public class HypervisorHostHelper { private static final String UNTAGGED_VLAN_NAME = "untagged"; private static final String VMDK_PACK_DIR = 
"ova"; private static final String OVA_OPTION_KEY_BOOTDISK = "cloud.ova.bootdisk"; + public static final String VSPHERE_DATASTORE_BASE_FOLDER = "fcd"; public static VirtualMachineMO findVmFromObjectContent(VmwareContext context, ObjectContent[] ocs, String name, String instanceNameCustomField) { @@ -468,21 +482,6 @@ public static void updatePortProfile(VmwareContext context, String ethPortProfil } } - /** - * @param ethPortProfileName - * @param namePrefix - * @param hostMo - * @param vlanId - * @param networkRateMbps - * @param networkRateMulticastMbps - * @param timeOutMs - * @param vSwitchType - * @param numPorts - * @param details - * @return - * @throws Exception - */ - public static Pair prepareNetwork(String physicalNetwork, String namePrefix, HostMO hostMo, String vlanId, String secondaryvlanId, Integer networkRateMbps, Integer networkRateMulticastMbps, long timeOutMs, VirtualSwitchType vSwitchType, int numPorts, String gateway, boolean configureVServiceInNexus, BroadcastDomainType broadcastDomainType, Map vsmCredentials, Map details) throws Exception { @@ -1704,6 +1703,11 @@ public static String resolveHostNameInUrl(DatacenterMO dcMo, String url) { return url; } + /** + * removes the NetworkSection element from the {ovfString} if it is an ovf xml file + * @param ovfString input string + * @return like the input string but if xml elements by name {NetworkSection} removed + */ public static String removeOVFNetwork(final String ovfString) { if (ovfString == null || ovfString.isEmpty()) { return ovfString; @@ -1733,38 +1737,50 @@ public static String removeOVFNetwork(final String ovfString) { transformer.transform(domSource, result); return writer.toString(); } catch (SAXException | IOException | ParserConfigurationException | TransformerException e) { +// FR37 TODO this warn() should really be an error and the exception should be thrown??? s_logger.warn("Unexpected exception caught while removing network elements from OVF:", e); } return ovfString; } + /** + * deploys a new VM from a ovf spec. 
It ignores network, defaults locale to 'US' + * @param stripNetworks true if we are not deploying an AVO as is + * @throws Exception shoud be a VmwareResourceException + */ public static void importVmFromOVF(VmwareHypervisorHost host, String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption, ManagedObjectReference morRp, - ManagedObjectReference morHost) throws Exception { + ManagedObjectReference morHost, boolean stripNetworks) throws VmwareResourceException, IOException { assert (morRp != null); - OvfCreateImportSpecParams importSpecParams = new OvfCreateImportSpecParams(); - importSpecParams.setHostSystem(morHost); - importSpecParams.setLocale("US"); - importSpecParams.setEntityName(vmName); - importSpecParams.setDeploymentOption(""); - importSpecParams.setDiskProvisioning(diskOption); // diskOption: thin, thick, etc + OvfCreateImportSpecParams importSpecParams = createOvfCreateImportSpecParamsObject(vmName, diskOption, morHost); + + String ovfDescriptor = readTheOvfDescriptorAsString(ovfFilePath, stripNetworks); - String ovfDescriptor = removeOVFNetwork(HttpNfcLeaseMO.readOvfContent(ovfFilePath)); VmwareContext context = host.getContext(); - OvfCreateImportSpecResult ovfImportResult = - context.getService().createImportSpec(context.getServiceContent().getOvfManager(), ovfDescriptor, morRp, dsMo.getMor(), importSpecParams); + OvfCreateImportSpecResult ovfImportResult = null; + try { + ovfImportResult = context.getService().createImportSpec(context.getServiceContent().getOvfManager(), ovfDescriptor, morRp, dsMo.getMor(), importSpecParams); + } catch (ConcurrentAccessFaultMsg + | FileFaultFaultMsg + | InvalidDatastoreFaultMsg + | InvalidStateFaultMsg + | RuntimeFaultFaultMsg + | TaskInProgressFaultMsg + | VmConfigFaultFaultMsg error) { + throw new VmwareResourceException("ImportSpec creation failed", error); + } if (ovfImportResult == null) { String msg = "createImportSpec() failed. ovfFilePath: " + ovfFilePath + ", vmName: " + vmName + ", diskOption: " + diskOption; s_logger.error(msg); - throw new Exception(msg); + throw new VmwareResourceException(msg); } if(!ovfImportResult.getError().isEmpty()) { for (LocalizedMethodFault fault : ovfImportResult.getError()) { s_logger.error("createImportSpec error: " + fault.getLocalizedMessage()); } - throw new CloudException("Failed to create an import spec from " + ovfFilePath + ". Check log for details."); + throw new VmwareResourceException("Failed to create an import spec from " + ovfFilePath + ". 
Check log for details."); } if (!ovfImportResult.getWarning().isEmpty()) { @@ -1773,22 +1789,55 @@ public static void importVmFromOVF(VmwareHypervisorHost host, String ovfFilePath } } - DatacenterMO dcMo = new DatacenterMO(context, host.getHyperHostDatacenter()); - ManagedObjectReference morLease = context.getService().importVApp(morRp, ovfImportResult.getImportSpec(), dcMo.getVmFolder(), morHost); + DatacenterMO dcMo = null; + try { + dcMo = new DatacenterMO(context, host.getHyperHostDatacenter()); + } catch (Exception e) { + throw new VmwareResourceException(String.format("no datacenter for host '%s' available in context", context.getServerAddress()), e); + } + ManagedObjectReference folderMO = null; + try { + folderMO = dcMo.getVmFolder(); + } catch (Exception e) { + throw new VmwareResourceException("no management handle for VmFolder", e); + } + ManagedObjectReference morLease = null; + try { + morLease = context.getService().importVApp(morRp, ovfImportResult.getImportSpec(), folderMO, morHost); + } catch (DuplicateNameFaultMsg + | FileFaultFaultMsg + | InsufficientResourcesFaultFaultMsg + | InvalidDatastoreFaultMsg + | InvalidNameFaultMsg + | OutOfBoundsFaultMsg + | RuntimeFaultFaultMsg + | VmConfigFaultFaultMsg fault) { + throw new VmwareResourceException("import vApp failed",fault); + } if (morLease == null) { String msg = "importVApp() failed. ovfFilePath: " + ovfFilePath + ", vmName: " + vmName + ", diskOption: " + diskOption; s_logger.error(msg); - throw new Exception(msg); + throw new VmwareResourceException(msg); } boolean importSuccess = true; final HttpNfcLeaseMO leaseMo = new HttpNfcLeaseMO(context, morLease); - HttpNfcLeaseState state = leaseMo.waitState(new HttpNfcLeaseState[] {HttpNfcLeaseState.READY, HttpNfcLeaseState.ERROR}); + HttpNfcLeaseState state = null; + try { + state = leaseMo.waitState(new HttpNfcLeaseState[] {HttpNfcLeaseState.READY, HttpNfcLeaseState.ERROR}); + } catch (Exception e) { + throw new VmwareResourceException("exception while waiting for leaseMO", e); + } try { if (state == HttpNfcLeaseState.READY) { final long totalBytes = HttpNfcLeaseMO.calcTotalBytes(ovfImportResult); File ovfFile = new File(ovfFilePath); - HttpNfcLeaseInfo httpNfcLeaseInfo = leaseMo.getLeaseInfo(); + HttpNfcLeaseInfo httpNfcLeaseInfo = null; + try { + httpNfcLeaseInfo = leaseMo.getLeaseInfo(); + } catch (Exception e) { + throw new VmwareResourceException("error waiting for lease info", e); + } List deviceUrls = httpNfcLeaseInfo.getDeviceUrl(); long bytesAlreadyWritten = 0; @@ -1818,35 +1867,66 @@ public void action(Long param) { String erroMsg = "File upload task failed to complete due to: " + e.getMessage(); s_logger.error(erroMsg); importSuccess = false; // Set flag to cleanup the stale template left due to failed import operation, if any - throw new Exception(erroMsg, e); + throw new VmwareResourceException(erroMsg, e); } catch (Throwable th) { String errorMsg = "throwable caught during file upload task: " + th.getMessage(); s_logger.error(errorMsg); importSuccess = false; // Set flag to cleanup the stale template left due to failed import operation, if any - throw new Exception(errorMsg, th); + throw new VmwareResourceException(errorMsg, th); } finally { progressReporter.close(); } if (bytesAlreadyWritten == totalBytes) { - leaseMo.updateLeaseProgress(100); + try { + leaseMo.updateLeaseProgress(100); + } catch (Exception e) { + throw new VmwareResourceException("error while waiting for lease update", e); + } } } else if (state == HttpNfcLeaseState.ERROR) { - 
LocalizedMethodFault error = leaseMo.getLeaseError(); + LocalizedMethodFault error = null; + try { + error = leaseMo.getLeaseError(); + } catch (Exception e) { + throw new VmwareResourceException("error getting lease error", e); + } MethodFault fault = error.getFault(); String erroMsg = "Object creation on vCenter failed due to: Exception: " + fault.getClass().getName() + ", message: " + error.getLocalizedMessage(); s_logger.error(erroMsg); - throw new Exception(erroMsg); + throw new VmwareResourceException(erroMsg); } } finally { - if (!importSuccess) { - s_logger.error("Aborting the lease on " + vmName + " after import operation failed."); - leaseMo.abortLease(); - } else { - leaseMo.completeLease(); + try { + if (!importSuccess) { + s_logger.error("Aborting the lease on " + vmName + " after import operation failed."); + leaseMo.abortLease(); + } else { + leaseMo.completeLease(); + } + } catch (Exception e) { + throw new VmwareResourceException("error completing lease", e); } } } + private static OvfCreateImportSpecParams createOvfCreateImportSpecParamsObject(String vmName, String diskOption, ManagedObjectReference morHost) { + OvfCreateImportSpecParams importSpecParams = new OvfCreateImportSpecParams(); + importSpecParams.setHostSystem(morHost); + importSpecParams.setLocale("US"); + importSpecParams.setEntityName(vmName); + importSpecParams.setDeploymentOption(""); + importSpecParams.setDiskProvisioning(diskOption); // diskOption: thin, thick, etc + return importSpecParams; + } + + private static String readTheOvfDescriptorAsString(String ovfFilePath, boolean stripNetworks) throws IOException { + String ovfDescriptor = HttpNfcLeaseMO.readOvfContent(ovfFilePath); + if (stripNetworks) { + ovfDescriptor = removeOVFNetwork(ovfDescriptor); + } + return ovfDescriptor; + } + public static List> readOVF(VmwareHypervisorHost host, String ovfFilePath, DatastoreMO dsMo) throws Exception { List> ovfVolumeInfos = new ArrayList>(); List files = new ArrayList(); @@ -2077,4 +2157,26 @@ public static boolean isIdeController(String controller) { return DiskControllerType.getType(controller) == DiskControllerType.ide; } + public static void createBaseFolder(DatastoreMO dsMo, VmwareHypervisorHost hyperHost, StoragePoolType poolType) throws Exception { + if (poolType != null && poolType == StoragePoolType.DatastoreCluster) { + StoragepodMO storagepodMO = new StoragepodMO(hyperHost.getContext(), dsMo.getMor()); + List datastoresInCluster = storagepodMO.getDatastoresInDatastoreCluster(); + for (ManagedObjectReference datastore : datastoresInCluster) { + DatastoreMO childDsMo = new DatastoreMO(hyperHost.getContext(), datastore); + createBaseFolderInDatastore(childDsMo, hyperHost); + } + } else { + createBaseFolderInDatastore(dsMo, hyperHost); + } + } + + public static void createBaseFolderInDatastore(DatastoreMO dsMo, VmwareHypervisorHost hyperHost) throws Exception { + String dsPath = String.format("[%s]", dsMo.getName()); + String folderPath = String.format("[%s] %s", dsMo.getName(), VSPHERE_DATASTORE_BASE_FOLDER); + + if (!dsMo.folderExists(dsPath, VSPHERE_DATASTORE_BASE_FOLDER)) { + s_logger.info(String.format("vSphere datastore base folder: %s does not exist, now creating on datastore: %s", VSPHERE_DATASTORE_BASE_FOLDER, dsMo.getName())); + dsMo.makeDirectory(folderPath, hyperHost.getHyperHostDatacenter()); + } + } } diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/PbmPlacementSolverMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/PbmPlacementSolverMO.java new file 
mode 100644 index 000000000000..3eb909fc31bd --- /dev/null +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/PbmPlacementSolverMO.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.hypervisor.vmware.mo; + +import com.cloud.hypervisor.vmware.util.VmwareContext; +import com.vmware.pbm.PbmPlacementCompatibilityResult; +import com.vmware.pbm.PbmPlacementHub; +import com.vmware.pbm.PbmProfile; +import com.vmware.pbm.PbmProfileId; +import com.vmware.vim25.ManagedObjectReference; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Logger; + +import java.util.ArrayList; +import java.util.List; + +public class PbmPlacementSolverMO extends BaseMO { + + private static final Logger LOGGER = Logger.getLogger(PbmPlacementSolverMO.class); + + public PbmPlacementSolverMO (VmwareContext context) { + super(context, context.getPbmServiceContent().getPlacementSolver()); + } + + public PbmPlacementSolverMO(VmwareContext context, ManagedObjectReference morPlacementSolver) { + super(context, morPlacementSolver); + } + + public PbmPlacementSolverMO(VmwareContext context, String morType, String morValue) { + super(context, morType, morValue); + } + + public boolean isDatastoreCompatibleWithStorageProfile(ManagedObjectReference dsMor, PbmProfile profile) throws Exception { + boolean isDatastoreCompatibleWithStorageProfile = false; + + PbmPlacementHub placementHub = new PbmPlacementHub(); + placementHub.setHubId(dsMor.getValue()); + placementHub.setHubType(dsMor.getType()); + + List placementHubList = new ArrayList(); + placementHubList.add(placementHub); + PbmProfileId profileId = profile.getProfileId(); + List placementCompatibilityResultList = _context.getPbmService().pbmCheckCompatibility(_mor, placementHubList, profileId); + if (CollectionUtils.isNotEmpty(placementCompatibilityResultList)) { + for (PbmPlacementCompatibilityResult placementResult : placementCompatibilityResultList) { + // Check for error and warning + if (CollectionUtils.isEmpty(placementResult.getError()) && CollectionUtils.isEmpty(placementResult.getWarning())) { + isDatastoreCompatibleWithStorageProfile = true; + } + } + } + return isDatastoreCompatibleWithStorageProfile; + } +} diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/PbmProfileManagerMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/PbmProfileManagerMO.java new file mode 100644 index 000000000000..a4142ecde8ac --- /dev/null +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/PbmProfileManagerMO.java @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.hypervisor.vmware.mo; + +import com.cloud.hypervisor.vmware.util.VmwareContext; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.vmware.pbm.PbmCapabilityProfile; +import com.vmware.pbm.PbmProfile; +import com.vmware.pbm.PbmProfileCategoryEnum; +import com.vmware.pbm.PbmProfileId; +import com.vmware.pbm.PbmProfileResourceType; +import com.vmware.pbm.PbmProfileResourceTypeEnum; +import com.vmware.vim25.ManagedObjectReference; + +import org.apache.log4j.Logger; + +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +public class PbmProfileManagerMO extends BaseMO { + + private static final Logger LOGGER = Logger.getLogger(PbmProfileManagerMO.class); + + public PbmProfileManagerMO (VmwareContext context) { + super(context, context.getPbmServiceContent().getProfileManager()); + } + + public PbmProfileManagerMO (VmwareContext context, ManagedObjectReference morProfileMgr) { + super(context, morProfileMgr); + } + + public PbmProfileManagerMO (VmwareContext context, String morType, String morValue) { + super(context, morType, morValue); + } + + public List getStorageProfileIds() throws Exception { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Querying vCenter " + _context.getServerAddress() + " for profiles"); + } + List profileIds = _context.getPbmService().pbmQueryProfile(_mor, getStorageResourceType(), null); + return profileIds; + } + + public List getStorageProfiles() throws Exception { + List profileIds = getStorageProfileIds(); + List profiles = _context.getPbmService().pbmRetrieveContent(_mor, profileIds); + + List requirementCategoryProfiles = profiles.stream() + .filter(x -> ((PbmCapabilityProfile)x).getProfileCategory().equals(PbmProfileCategoryEnum.REQUIREMENT.toString())) + .collect(Collectors.toList()); + return requirementCategoryProfiles; + } + + public PbmProfile getStorageProfile(String storageProfileId) throws Exception { + List profileIds = getStorageProfileIds(); + + PbmProfileId profileId = profileIds.stream() + .filter(x -> x.getUniqueId().equals(storageProfileId)) + .findFirst().orElse(null); + + if (profileId == null) { + String errMsg = String.format("Storage profile with id %s not found", storageProfileId); + LOGGER.debug(errMsg); + throw new CloudRuntimeException(errMsg); + } + + List profile = _context.getPbmService().pbmRetrieveContent(_mor, Collections.singletonList(profileId)); + return profile.get(0); + } + + private PbmProfileResourceType getStorageResourceType() { + PbmProfileResourceType resourceType = new PbmProfileResourceType(); + resourceType.setResourceType(PbmProfileResourceTypeEnum.STORAGE.value()); + return resourceType; + } +} + + diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/StoragepodMO.java 
b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/StoragepodMO.java new file mode 100644 index 000000000000..afa3a0221267 --- /dev/null +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/StoragepodMO.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.vmware.mo; + +import com.cloud.hypervisor.vmware.util.VmwareContext; +import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.StoragePodSummary; +import org.apache.log4j.Logger; + +import java.util.List; + +public class StoragepodMO extends BaseMO { + + private static final Logger LOGGER = Logger.getLogger(StoragepodMO.class); + + public StoragepodMO(VmwareContext context, ManagedObjectReference mor) { + super(context, mor); + } + + public StoragepodMO(VmwareContext context, String morType, String morValue) { + super(context, morType, morValue); + } + + public StoragePodSummary getDatastoreClusterSummary() throws Exception { + return (StoragePodSummary)_context.getVimClient().getDynamicProperty(_mor, "summary"); + } + + public List getDatastoresInDatastoreCluster() throws Exception { + List datastoresInCluster = _context.getVimClient().getDynamicProperty(_mor, "childEntity"); + return datastoresInCluster; + } + +} diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/TaskMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/TaskMO.java index 65c6a6bd3626..9cf9d9554c42 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/TaskMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/TaskMO.java @@ -77,4 +77,8 @@ public static String getTaskFailureInfo(VmwareContext context, ManagedObjectRefe return sb.toString(); } + + public static TaskInfo getTaskInfo(VmwareContext context, ManagedObjectReference morTask) throws Exception { + return (TaskInfo)context.getVimClient().getDynamicProperty(morTask, "info"); + } } diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index 394505627a7a..c3f48d85c3d1 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -36,6 +36,7 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Logger; +import org.apache.commons.lang.StringUtils; import com.google.gson.Gson; import com.vmware.vim25.ArrayOfManagedObjectReference; @@ -60,6 +61,8 @@ import com.vmware.vim25.PropertyFilterSpec; import com.vmware.vim25.PropertySpec; import com.vmware.vim25.TraversalSpec; +import com.vmware.vim25.VStorageObject; +import com.vmware.vim25.VStorageObjectConfigInfo; import 
com.vmware.vim25.VirtualBusLogicController; import com.vmware.vim25.VirtualCdrom; import com.vmware.vim25.VirtualCdromIsoBackingInfo; @@ -1187,7 +1190,18 @@ public void createDisk(String vmdkDatastorePath, VirtualDiskType diskType, Virtu s_logger.trace("vCenter API trace - createDisk() done(successfully)"); } - public void updateVmdkAdapter(String vmdkFileName, String newAdapterType) throws Exception { + public void updateVmdkAdapter(String vmdkFileName, String diskController) throws Exception { + + DiskControllerType diskControllerType = DiskControllerType.getType(diskController); + VmdkAdapterType vmdkAdapterType = VmdkAdapterType.getAdapterType(diskControllerType); + if (vmdkAdapterType == VmdkAdapterType.none) { + String message = "Failed to attach disk due to invalid vmdk adapter type for vmdk file [" + + vmdkFileName + "] with controller : " + diskControllerType; + s_logger.debug(message); + throw new Exception(message); + } + + String newAdapterType = vmdkAdapterType.toString(); Pair vmdkInfo = getVmdkFileInfo(vmdkFileName); VmdkFileDescriptor vmdkFileDescriptor = vmdkInfo.first(); boolean isVmfsSparseFile = vmdkFileDescriptor.isVmfsSparseFile(); @@ -1232,6 +1246,10 @@ public void updateAdapterTypeIfRequired(String vmdkFileName) throws Exception { } } + public void attachDisk(String[] vmdkDatastorePathChain, ManagedObjectReference morDs) throws Exception { + attachDisk(vmdkDatastorePathChain, morDs, null); + } + public void attachDisk(String[] vmdkDatastorePathChain, ManagedObjectReference morDs, String diskController) throws Exception { if(s_logger.isTraceEnabled()) @@ -1254,24 +1272,20 @@ public void attachDisk(String[] vmdkDatastorePathChain, ManagedObjectReference m controllerKey = getIDEControllerKey(ideDeviceCount); unitNumber = getFreeUnitNumberOnIDEController(controllerKey); } else { - controllerKey = getScsiDiskControllerKey(diskController); + if (StringUtils.isNotBlank(diskController)) { + controllerKey = getScsiDiskControllerKey(diskController); + } else { + controllerKey = getScsiDeviceControllerKey(); + } unitNumber = -1; } + synchronized (_mor.getValue().intern()) { VirtualDevice newDisk = VmwareHelper.prepareDiskDevice(this, null, controllerKey, vmdkDatastorePathChain, morDs, unitNumber, 1); - controllerKey = newDisk.getControllerKey(); - unitNumber = newDisk.getUnitNumber(); - VirtualDiskFlatVer2BackingInfo backingInfo = (VirtualDiskFlatVer2BackingInfo)newDisk.getBacking(); - String vmdkFileName = backingInfo.getFileName(); - DiskControllerType diskControllerType = DiskControllerType.getType(diskController); - VmdkAdapterType vmdkAdapterType = VmdkAdapterType.getAdapterType(diskControllerType); - if (vmdkAdapterType == VmdkAdapterType.none) { - String message = "Failed to attach disk due to invalid vmdk adapter type for vmdk file [" + - vmdkFileName + "] with controller : " + diskControllerType; - s_logger.debug(message); - throw new Exception(message); + if (StringUtils.isNotBlank(diskController)) { + String vmdkFileName = vmdkDatastorePathChain[0]; + updateVmdkAdapter(vmdkFileName, diskController); } - updateVmdkAdapter(vmdkFileName, vmdkAdapterType.toString()); VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); @@ -1311,69 +1325,6 @@ private int getControllerBusNumber(int controllerKey) throws Exception { } - public void attachDisk(String[] vmdkDatastorePathChain, ManagedObjectReference morDs) throws Exception { - - if (s_logger.isTraceEnabled()) - 
s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.getValue() + ", vmdkDatastorePath: " + new Gson().toJson(vmdkDatastorePathChain) + - ", datastore: " + morDs.getValue()); - - synchronized (_mor.getValue().intern()) { - VirtualDevice newDisk = VmwareHelper.prepareDiskDevice(this, null, getScsiDeviceControllerKey(), vmdkDatastorePathChain, morDs, -1, 1); - VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); - - deviceConfigSpec.setDevice(newDisk); - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - - reConfigSpec.getDeviceChange().add(deviceConfigSpec); - - ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, reConfigSpec); - boolean result = _context.getVimClient().waitForTask(morTask); - - if (!result) { - if (s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk() done(failed)"); - throw new Exception("Failed to attach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask)); - } - - _context.waitForTaskProgressDone(morTask); - } - - if (s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk() done(successfully)"); - } - - public void attachDisk(Pair[] vmdkDatastorePathChain, int controllerKey) throws Exception { - - if (s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.getValue() + ", vmdkDatastorePath: " + new Gson().toJson(vmdkDatastorePathChain)); - - synchronized (_mor.getValue().intern()) { - VirtualDevice newDisk = VmwareHelper.prepareDiskDevice(this, controllerKey, vmdkDatastorePathChain, -1, 1); - VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); - - deviceConfigSpec.setDevice(newDisk); - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - - reConfigSpec.getDeviceChange().add(deviceConfigSpec); - - ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, reConfigSpec); - boolean result = _context.getVimClient().waitForTask(morTask); - - if (!result) { - if (s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk() done(failed)"); - throw new Exception("Failed to attach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask)); - } - - _context.waitForTaskProgressDone(morTask); - } - - if (s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk() done(successfully)"); - } - // vmdkDatastorePath: [datastore name] vmdkFilePath public List> detachDisk(String vmdkDatastorePath, boolean deleteBackingFile) throws Exception { @@ -2479,7 +2430,7 @@ public Pair getDiskDevice(String vmdkDatastorePath) throws String deviceNumbering = getDeviceBusName(devices, device); s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering); - + registerVirtualDisk((VirtualDisk) device, dsBackingFile); return new Pair<>((VirtualDisk)device, deviceNumbering); } @@ -2550,15 +2501,17 @@ public Pair getDiskDevice(String vmdkDatastorePath, boolean if (matchExactly) { if (backingBaseName.equalsIgnoreCase(srcBaseName)) { String deviceNumbering = getDeviceBusName(devices, device); - s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering); + + registerVirtualDisk((VirtualDisk) device, dsBackingFile); return new Pair((VirtualDisk)device, deviceNumbering); } } else { if 
(backingBaseName.contains(trimmedSrcBaseName)) { String deviceNumbering = getDeviceBusName(devices, device); - s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering); + + registerVirtualDisk((VirtualDisk) device, dsBackingFile); return new Pair((VirtualDisk)device, deviceNumbering); } } @@ -2573,6 +2526,20 @@ public Pair getDiskDevice(String vmdkDatastorePath, boolean return null; } + public void registerVirtualDisk(VirtualDisk device, DatastoreFile dsBackingFile) { + if (((VirtualDisk) device).getVDiskId() == null) { + try { + s_logger.debug("vDiskid does not exist for volume " + dsBackingFile.getFileName() + " registering the disk now"); + VirtualStorageObjectManagerMO vStorageObjectManagerMO = new VirtualStorageObjectManagerMO(getOwnerDatacenter().first().getContext()); + VStorageObject vStorageObject = vStorageObjectManagerMO.registerVirtualDisk(dsBackingFile, null, getOwnerDatacenter().first().getName()); + VStorageObjectConfigInfo diskConfigInfo = vStorageObject.getConfig(); + ((VirtualDisk) device).setVDiskId(diskConfigInfo.getId()); + } catch (Exception e) { + s_logger.warn("Exception while trying to register a disk as first class disk to get the unique identifier, main operation still continues: " + e.getMessage()); + } + } + } + public String getDiskCurrentTopBackingFileInChain(String deviceBusName) throws Exception { List devices = _context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); if (devices != null && devices.size() > 0) { @@ -2622,12 +2589,14 @@ public VirtualMachineDiskInfoBuilder getDiskInfoBuilder() throws Exception { VirtualDeviceBackingInfo backingInfo = ((VirtualDisk)device).getBacking(); if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) { VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo; - + String diskBackingFileName = diskBackingInfo.getFileName(); while (diskBackingInfo != null) { String deviceBusName = getDeviceBusName(devices, device); builder.addDisk(deviceBusName, diskBackingInfo.getFileName()); diskBackingInfo = diskBackingInfo.getParent(); } + DatastoreFile dsBackingFile = new DatastoreFile(diskBackingFileName); + registerVirtualDisk((VirtualDisk) device, dsBackingFile); } } } @@ -2646,6 +2615,8 @@ public List> getAllDiskDatastores() throws VirtualDeviceBackingInfo backingInfo = ((VirtualDisk)device).getBacking(); if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) { VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo; + DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName()); + registerVirtualDisk((VirtualDisk) device, dsBackingFile); disks.add(new Pair(new Integer(device.getKey()), diskBackingInfo.getDatastore())); } } @@ -2754,6 +2725,10 @@ public List getVirtualDisks() throws Exception { for (VirtualDevice device : devices) { if (device instanceof VirtualDisk) { + VirtualDeviceBackingInfo backingInfo = device.getBacking(); + VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo; + DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName()); + registerVirtualDisk((VirtualDisk) device, dsBackingFile); virtualDisks.add((VirtualDisk)device); } } @@ -2788,6 +2763,7 @@ public List detachAllDisksExcept(String vmdkBaseName, String deviceBusNa reConfigSpec.getDeviceChange().add(deviceConfigSpec); } + registerVirtualDisk((VirtualDisk) device, dsBackingFile); } } @@ -2815,6 +2791,23 @@ public VirtualDisk[] 
getAllDiskDevice() throws Exception { if (devices != null && devices.size() > 0) { for (VirtualDevice device : devices) { if (device instanceof VirtualDisk) { + if (((VirtualDisk) device).getVDiskId() == null) { + try { + // Register as first class disk + VirtualDeviceBackingInfo backingInfo = device.getBacking(); + if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) { + VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo) backingInfo; + DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName()); + s_logger.debug("vDiskid does not exist for volume " + diskBackingInfo.getFileName() + " registering the disk now"); + VirtualStorageObjectManagerMO vStorageObjectManagerMO = new VirtualStorageObjectManagerMO(getOwnerDatacenter().first().getContext()); + VStorageObject vStorageObject = vStorageObjectManagerMO.registerVirtualDisk(dsBackingFile, null, getOwnerDatacenter().first().getName()); + VStorageObjectConfigInfo diskConfigInfo = vStorageObject.getConfig(); + ((VirtualDisk) device).setVDiskId(diskConfigInfo.getId()); + } + } catch (Exception e) { + s_logger.warn("Exception while trying to register a disk as first class disk to get the unique identifier, main operation still continues: " + e.getMessage()); + } + } deviceList.add((VirtualDisk)device); } } diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualStorageObjectManagerMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualStorageObjectManagerMO.java new file mode 100644 index 000000000000..d5f4eb3af060 --- /dev/null +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VirtualStorageObjectManagerMO.java @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
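For orientation, a minimal usage sketch of the VirtualStorageObjectManagerMO whose source begins below: it registers an existing VMDK as a first class disk, which is the same flow VirtualMachineMO.registerVirtualDisk() follows in the hunks above. This example is illustrative only and not part of the patch; the example class name, datastore path and datacenter name are placeholders.

import com.cloud.hypervisor.vmware.mo.DatastoreFile;
import com.cloud.hypervisor.vmware.mo.VirtualStorageObjectManagerMO;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.vmware.vim25.ID;
import com.vmware.vim25.VStorageObject;

public class FirstClassDiskRegistrationExample {
    // 'context' is assumed to be an already connected VmwareContext; the datastore path and
    // datacenter name below are placeholders.
    public static ID registerExistingVmdk(VmwareContext context) throws Exception {
        DatastoreFile backingFile = new DatastoreFile("[datastore1] i-2-10-VM/ROOT-10.vmdk");
        VirtualStorageObjectManagerMO vStorageObjectManagerMO = new VirtualStorageObjectManagerMO(context);
        // Passing null for the name keeps the display name vCenter derives from the backing file.
        VStorageObject vStorageObject = vStorageObjectManagerMO.registerVirtualDisk(backingFile, null, "Datacenter1");
        // The returned config carries the first class disk id that the patch stores on the VirtualDisk device.
        return vStorageObject.getConfig().getId();
    }
}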
+package com.cloud.hypervisor.vmware.mo; + +import com.vmware.vim25.ID; +import com.vmware.vim25.TaskInfo; +import com.vmware.vim25.VStorageObject; +import com.vmware.vim25.VirtualDiskType; +import com.vmware.vim25.VslmCreateSpec; +import com.vmware.vim25.VslmCreateSpecDiskFileBackingSpec; +import org.apache.log4j.Logger; + +import com.vmware.vim25.ManagedObjectReference; + +import com.cloud.hypervisor.vmware.util.VmwareContext; + +public class VirtualStorageObjectManagerMO extends BaseMO { + @SuppressWarnings("unused") + private static final Logger LOGGER = Logger.getLogger(VirtualStorageObjectManagerMO.class); + + public VirtualStorageObjectManagerMO(VmwareContext context) { + super(context, context.getServiceContent().getVStorageObjectManager()); + } + + public VirtualStorageObjectManagerMO(VmwareContext context, ManagedObjectReference morDiskMgr) { + super(context, morDiskMgr); + } + + public VirtualStorageObjectManagerMO(VmwareContext context, String morType, String morValue) { + super(context, morType, morValue); + } + + public VStorageObject registerVirtualDisk(DatastoreFile datastoreFile, String name, String dcName) throws Exception { + StringBuilder sb = new StringBuilder(); + //https://10.2.2.254/folder/i-2-4-VM/89e3756d9b7444dc92388eb36ddd026b.vmdk?dcPath=datacenter-21&dsName=c84e4af9b6ac33e887a25d9242650091 + sb.append("https://").append(_context.getServerAddress()).append("/folder/"); + sb.append(datastoreFile.getRelativePath()); + sb.append("?dcPath="); + sb.append(dcName); + sb.append("&dsName="); + sb.append(datastoreFile.getDatastoreName()); + return _context.getService().registerDisk(_mor, sb.toString(), name); + } + + public VStorageObject retrieveVirtualDisk (ID id, ManagedObjectReference morDS) throws Exception { + return _context.getService().retrieveVStorageObject(_mor, id, morDS); + } + + public VStorageObject createDisk(ManagedObjectReference morDS, VirtualDiskType diskType, long currentSizeInBytes, String datastoreFilepath, String filename) throws Exception { + long currentSizeInMB = currentSizeInBytes/(1024*1024); + + VslmCreateSpecDiskFileBackingSpec diskFileBackingSpec = new VslmCreateSpecDiskFileBackingSpec(); + diskFileBackingSpec.setDatastore(morDS); + diskFileBackingSpec.setProvisioningType(diskType.value()); + // path should be just the folder name. For example, instead of '[datastore1] folder1/filename.vmdk' you would just do 'folder1'. + // path is introduced from 6.7. 
In 6.5 disk will be created in the default folder "fcd" + diskFileBackingSpec.setPath(null); + + VslmCreateSpec vslmCreateSpec = new VslmCreateSpec(); + vslmCreateSpec.setBackingSpec(diskFileBackingSpec); + vslmCreateSpec.setCapacityInMB(currentSizeInMB); + vslmCreateSpec.setName(filename); + + ManagedObjectReference morTask = _context.getService().createDiskTask(_mor, vslmCreateSpec); + boolean result = _context.getVimClient().waitForTask(morTask); + + VStorageObject vStorageObject = null; + if (result) { + _context.waitForTaskProgressDone(morTask); + //_context.getService().reconcileDatastoreInventoryTask(_mor, morDS); + TaskInfo taskInfo = TaskMO.getTaskInfo(_context, morTask); + vStorageObject = (VStorageObject)taskInfo.getResult(); + + } else { + LOGGER.error("VMware CreateDisk_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); + } + + return vStorageObject; + } +} diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VmwareHypervisorHost.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VmwareHypervisorHost.java index a9ceb5d806ec..406157836535 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VmwareHypervisorHost.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/VmwareHypervisorHost.java @@ -65,7 +65,7 @@ boolean createBlankVm(String vmName, String vmInternalCSName, int cpuCount, int int memoryReserveMB, String guestOsIdentifier, ManagedObjectReference morDs, boolean snapshotDirToParent, Pair controllerInfo, Boolean systemVm) throws Exception; - void importVmFromOVF(String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption) throws Exception; + void importVmFromOVF(String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption, boolean stripNeworks) throws Exception; ObjectContent[] getVmPropertiesOnHyperHost(String[] propertyPaths) throws Exception; diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/ContentLibraryClient.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/ContentLibraryClient.java new file mode 100644 index 000000000000..fb2eee315f8a --- /dev/null +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/ContentLibraryClient.java @@ -0,0 +1,166 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
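A companion sketch (again illustrative, not part of the patch) for VirtualStorageObjectManagerMO.createDisk(): provisioning a brand new first class disk on a datastore. The datastore MOR, disk size and file name are assumed inputs; as the implementation above notes, the path argument is ignored and the disk lands in the datastore's default folder on vSphere 6.5.

import com.cloud.hypervisor.vmware.mo.VirtualStorageObjectManagerMO;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.vmware.vim25.ManagedObjectReference;
import com.vmware.vim25.VStorageObject;
import com.vmware.vim25.VirtualDiskType;

public class CreateFirstClassDiskExample {
    // dsMor is an assumed, already resolved datastore managed object reference.
    public static VStorageObject createThinDisk(VmwareContext context, ManagedObjectReference dsMor) throws Exception {
        VirtualStorageObjectManagerMO vStorageObjectManagerMO = new VirtualStorageObjectManagerMO(context);
        long sizeInBytes = 10L * 1024 * 1024 * 1024; // 10 GiB; converted to MB inside createDisk()
        // The datastoreFilepath argument is unused by the current implementation (path is forced to null),
        // so null is passed here; "DATA-42" is a placeholder disk name.
        return vStorageObjectManagerMO.createDisk(dsMor, VirtualDiskType.THIN, sizeInBytes, null, "DATA-42");
    }
}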
+package com.cloud.hypervisor.vmware.util; + +import java.util.Map; + +import org.apache.commons.lang.StringUtils; +import org.apache.log4j.Logger; + +import com.vmware.cis.Session; +import com.vmware.content.Library; +import com.vmware.content.LocalLibrary; +import com.vmware.content.library.Item; +import com.vmware.content.library.item.UpdateSession; +import com.vmware.content.library.item.updatesession.File; + +import com.vmware.vapi.bindings.Service; +import com.vmware.vapi.bindings.StubConfiguration; +import com.vmware.vapi.bindings.StubFactory; +import com.vmware.vapi.cis.authn.ProtocolFactory; +import com.vmware.vapi.cis.authn.SecurityContextFactory; +import com.vmware.vapi.core.ApiProvider; +import com.vmware.vapi.core.ExecutionContext.SecurityContext; +import com.vmware.vapi.protocol.HttpConfiguration; +import com.vmware.vapi.protocol.ProtocolConnection; +import com.vmware.vapi.security.SessionSecurityContext; + +import com.vmware.vcenter.Datastore; +import com.vmware.vcenter.ovf.LibraryItem; + +public class ContentLibraryClient implements VmwareClientService { + private static final Logger s_logger = Logger.getLogger(ContentLibraryClient.class); + + public static final String VAPI_PATH = "/api"; + + //Configuration + public static final String HTTP_CONFIG = "httpconfig"; + + private Session sessionService; + private StubFactory stubFactory; + private StubConfiguration sessionStubConfig; + + private Library libraryService; + private LocalLibrary localLibraryService; + private Item itemService; + private LibraryItem libraryItemService; + private Datastore datastoreService; + private UpdateSession updateSessionService; + private File fileService; + + public ContentLibraryClient() { + } + + @Override + public boolean login(String vCenterAddress, String userName, String password, Map configProperties) throws Exception { + if(StringUtils.isBlank(vCenterAddress) || StringUtils.isBlank(userName) || StringUtils.isBlank(password)) { + s_logger.debug("Invalid vCenter credentials"); + return false; + } + + if (configProperties == null || configProperties.isEmpty()) { + s_logger.debug("Login failed, configuration properties required"); + return false; + } + + HttpConfiguration httpConfig = (HttpConfiguration) configProperties.get(HTTP_CONFIG); + if (httpConfig == null) { + s_logger.debug("Login failed, http configuration not found"); + return false; + } + + stubFactory = createApiStubFactory(vCenterAddress, httpConfig); + SecurityContext securityContext = SecurityContextFactory.createUserPassSecurityContext(userName, password.toCharArray()); + + sessionStubConfig = new StubConfiguration(securityContext); + Session session = stubFactory.createStub(Session.class, sessionStubConfig); + + char[] sessionId = session.create(); + SessionSecurityContext sessionSecurityContext = new SessionSecurityContext(sessionId); + sessionStubConfig.setSecurityContext(sessionSecurityContext); + sessionService = stubFactory.createStub(Session.class, sessionStubConfig); + + // Library services + libraryService = getService(Library.class); + localLibraryService = getService(LocalLibrary.class); + + // Library item services + itemService = getService(Item.class); + libraryItemService = getService(LibraryItem.class); + + datastoreService = getService(Datastore.class); + fileService = getService(File.class); + updateSessionService = getService(UpdateSession.class); + + return true; + } + + @Override + public boolean logout() throws Exception { + if (sessionService != null) { + sessionService.delete(); + } + + return 
true; + } + + private StubFactory createApiStubFactory(String server, HttpConfiguration httpConfig) throws Exception { + // Create a https connection with the vapi url + ProtocolFactory pf = new ProtocolFactory(); + String apiUrl = "https://" + server + VAPI_PATH; + + // Get a connection to the vapi url + ProtocolConnection connection = pf.getHttpConnection(apiUrl, null, httpConfig); + + // Initialize the stub factory with the api provider + ApiProvider provider = connection.getApiProvider(); + StubFactory stubFactory = new StubFactory(provider); + return stubFactory; + } + + public Library getLibrary() { + return this.libraryService; + } + + public LocalLibrary getLocalLibrary() { + return this.localLibraryService; + } + + public Item getItem() { + return this.itemService; + } + + public LibraryItem getLibraryItem() { + return this.libraryItemService; + } + + public Datastore getDatastore() { + return this.datastoreService; + } + + public File getFile() { + return this.fileService; + } + + public UpdateSession getUpdateSession() { + return this.updateSessionService; + } + + private T getService(Class serviceClass) { + return (T) stubFactory.createStub(serviceClass, sessionStubConfig); + } +} diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/ContentLibraryHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/ContentLibraryHelper.java new file mode 100644 index 000000000000..d8e598508c05 --- /dev/null +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/ContentLibraryHelper.java @@ -0,0 +1,393 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
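The content library client above expects an HttpConfiguration entry in its configuration map. A minimal, hypothetical login sequence might look as follows; the vCenter address and credentials are placeholders, and the SSL settings mirror VmwareClient.buildSslConfiguration() later in this change.

import java.util.HashMap;
import java.util.Map;

import com.cloud.hypervisor.vmware.util.ContentLibraryClient;
import com.vmware.vapi.protocol.HttpConfiguration;
import com.vmware.vapi.protocol.HttpConfiguration.SslConfiguration;

public class ContentLibraryLoginExample {
    public static ContentLibraryClient connect() throws Exception {
        // SSL configuration matching the permissive setup used elsewhere in this patch.
        SslConfiguration sslConfig = new SslConfiguration.Builder()
                .disableCertificateValidation()
                .disableHostnameVerification()
                .getConfig();
        HttpConfiguration httpConfig = new HttpConfiguration.Builder()
                .setSslConfiguration(sslConfig)
                .getConfig();

        // The client looks up the HTTP configuration under ContentLibraryClient.HTTP_CONFIG.
        Map<String, Object> config = new HashMap<>();
        config.put(ContentLibraryClient.HTTP_CONFIG, httpConfig);

        ContentLibraryClient client = new ContentLibraryClient();
        if (!client.login("vcenter.example.com", "administrator@vsphere.local", "secret", config)) {
            throw new Exception("content library login failed");
        }
        return client;
    }
}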
+package com.cloud.hypervisor.vmware.util; + +import org.apache.commons.lang.StringUtils; +import org.apache.log4j.Logger; + +import java.net.URI; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ExecutionException; + +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.Pair; + +import com.vmware.vim25.ManagedObjectReference; + +import com.vmware.content.library.ItemModel; +import com.vmware.content.LibraryModel; +import com.vmware.content.LibraryTypes; +import com.vmware.content.library.ItemTypes; +import com.vmware.content.library.StorageBacking; +import com.vmware.content.library.item.TransferEndpoint; +import com.vmware.content.library.item.UpdateSessionModel; + +import com.vmware.vapi.std.LocalizableMessage; + +import com.vmware.vcenter.Datastore; +import com.vmware.vcenter.DatastoreTypes; +import com.vmware.vcenter.ovf.OvfError; +import com.vmware.vcenter.ovf.OvfInfo; +import com.vmware.vcenter.ovf.OvfMessage; +import com.vmware.vcenter.ovf.OvfWarning; +import com.vmware.vcenter.ovf.ParseIssue; +import com.vmware.vcenter.ovf.LibraryItemTypes; +import com.vmware.vcenter.ovf.LibraryItemTypes.DeploymentResult; +import com.vmware.vcenter.ovf.LibraryItemTypes.DeploymentTarget; +import com.vmware.vcenter.ovf.LibraryItemTypes.ResourcePoolDeploymentSpec; + +public class ContentLibraryHelper { + private static final Logger LOGGER = Logger.getLogger(ContentLibraryHelper.class); + private static final int DEFAULT_LOCK_TIMEOUT_SECONDS = 5; + private static final String HEADING_ADDITIONAL_INFO = "Additional information :"; + + public static boolean createContentLibrary(VmwareContext context, String datastoreName, String libraryName) throws Exception { + if (StringUtils.isBlank(datastoreName) || StringUtils.isBlank(libraryName)) { + return false; + } + + GlobalLock lock = GlobalLock.getInternLock("ContentLibrary." 
+ datastoreName + "-" + libraryName); + try { + if (lock.lock(DEFAULT_LOCK_TIMEOUT_SECONDS)) { + try { + if (getContentLibraryByName(context, libraryName) != null) { + LOGGER.info("Failed to create, content library with the given name: " + libraryName + " already exists"); + return false; + } + + // Build the storage backing for the library to be created + StorageBacking storageBacking = createStorageBacking(context, datastoreName); + if (storageBacking == null) { + LOGGER.error("Not able to create storage backing for datastore: " + datastoreName); + return false; + } + + // Build the specification for the library to be created + LibraryModel createSpec = new LibraryModel(); + createSpec.setName(libraryName); + createSpec.setDescription("Local content library for datastore " + datastoreName); + createSpec.setType(LibraryModel.LibraryType.LOCAL); + createSpec.setStorageBackings(Collections.singletonList(storageBacking)); + + // Create a content library + String clientToken = UUID.randomUUID().toString(); + String libraryId = context.getVimClient().getContentLibrary().getLocalLibrary().create(clientToken, createSpec); + if (StringUtils.isBlank(libraryId)) { + return false; + } + + LOGGER.info("Content library created: " + libraryName + " on the datastore: " + datastoreName); + return true; + } finally { + lock.unlock(); + } + + } else { + LOGGER.warn("Unable to lock local library to create content library: " + libraryName); + } + } finally { + lock.releaseRef(); + } + + return false; + } + + public static boolean deleteContentLibrary(VmwareContext context, String datastoreName, String libraryName) throws Exception { + if (StringUtils.isBlank(datastoreName) || StringUtils.isBlank(libraryName)) { + return false; + } + + String libraryId = getContentLibraryByName(context, libraryName); + if (libraryId == null) { + LOGGER.warn("Failed to delete, content library with the given name: " + libraryName + " doesn't exists"); + return false; + } + + LibraryModel localLibrary = context.getVimClient().getContentLibrary().getLocalLibrary().get(libraryId); + if (localLibrary == null) { + LOGGER.warn("Failed to delete, library: " + libraryName + " not found"); + return false; + } + + //Get the storage backing on the datastore + StorageBacking dsStorageBacking = createStorageBacking(context, datastoreName); + boolean canDelete = false; + for (Iterator iterator = localLibrary.getStorageBackings().iterator(); iterator.hasNext();) { + StorageBacking storageBacking = (StorageBacking) iterator.next(); + if(dsStorageBacking.equals(storageBacking)) { + canDelete = true; + break; + } + } + + if(!canDelete) { + LOGGER.warn("Can not delete, library: " + libraryName + " not found in datastore: " + datastoreName); + return false; + } + + // Delete the content library + context.getVimClient().getContentLibrary().getLocalLibrary().delete(localLibrary.getId()); + LOGGER.info("Deleted content library : " + libraryName + " on the datastore: " + datastoreName); + return true; + } + + public static boolean importOvfFromDatastore(VmwareContext context, String sourceOvfFileUri, String sourceOvfFileName, String targetLibraryName, String targetOvfName) throws InterruptedException, ExecutionException { + if (StringUtils.isBlank(sourceOvfFileUri) || StringUtils.isBlank(sourceOvfFileName) + || StringUtils.isBlank(targetLibraryName) || StringUtils.isBlank(targetOvfName)) { + return false; + } + + String libraryId = getContentLibraryByName(context, targetLibraryName); + if (libraryId == null) { + LOGGER.error("Failed to import ovf, 
library: " + targetLibraryName + " doesn't exists"); + return false; + } + + String itemId = createOvfItem(context, libraryId, targetOvfName); + + UpdateSessionModel updateSessionModel = new UpdateSessionModel(); + updateSessionModel.setLibraryItemId(itemId); + String sessionId = context.getVimClient().getContentLibrary().getUpdateSession().create(UUID.randomUUID().toString(), updateSessionModel); + + com.vmware.content.library.item.updatesession.FileTypes.AddSpec file = new com.vmware.content.library.item.updatesession.FileTypes.AddSpec(); + file.setName(sourceOvfFileName); + file.setSourceType(com.vmware.content.library.item.updatesession.FileTypes.SourceType.PULL); + + String sourceOvfUri = sourceOvfFileUri + sourceOvfFileName; + LOGGER.debug("Source ovf uri: " + sourceOvfUri + " to be imported to library: " + targetLibraryName + ", with name: " + targetOvfName); + + TransferEndpoint sourceEndPoint = new TransferEndpoint(); + sourceEndPoint.setUri(URI.create(sourceOvfUri)); + file.setSourceEndpoint(sourceEndPoint); + + context.getVimClient().getContentLibrary().getFile().add(sessionId, file); + context.getVimClient().getContentLibrary().getUpdateSession().complete(sessionId); + LOGGER.debug("Ovf: " + sourceOvfUri + " import initiated to library: " + targetLibraryName + ", with session: " + sessionId); + boolean status = waitForUpdateSession(context, sessionId); + LOGGER.debug("Ovf: " + sourceOvfUri + " import completed to library: " + targetLibraryName + ", with session: " + sessionId); + + return status; + } + + private static boolean waitForUpdateSession(VmwareContext context, String sessionId) { + if (StringUtils.isBlank(sessionId)) { + return false; + } + + UpdateSessionModel updateSessionModel = context.getVimClient().getContentLibrary().getUpdateSession().get(sessionId); + UpdateSessionModel.State state = updateSessionModel.getState(); + + while (state == UpdateSessionModel.State.ACTIVE) { + updateSessionModel = context.getVimClient().getContentLibrary().getUpdateSession().get(sessionId); + state = updateSessionModel.getState(); + } + + if (state == UpdateSessionModel.State.DONE) { + LOGGER.debug("Ovf importing completed for sessionId: " + sessionId); + return true; + } else if (state == UpdateSessionModel.State.ERROR) { + LOGGER.error("Ovf importing failed for sessionId: " + sessionId); + return false; + } + + return false; + } + + public static Pair deployOvf(VmwareContext context, String sourcelibraryName, String sourceovfName, String vmName, ManagedObjectReference resourcePoolMor, ManagedObjectReference datastoreMor) throws Exception { + String libraryId = getContentLibraryByName(context, sourcelibraryName); + if (libraryId == null) { + return new Pair(null, "Library not found with the name: " + sourcelibraryName); + } + + String itemId = getContentLibraryItemByName(context, libraryId, sourceovfName); + if (itemId == null) { + return new Pair(null, "Ovf file not found with the name: " + sourceovfName); + } + + DeploymentTarget target = new DeploymentTarget(); + target.setResourcePoolId(resourcePoolMor.getValue()); + + // Create a resource pool deployment spec + ResourcePoolDeploymentSpec spec = createResourcePoolDeploymentSpec(vmName, datastoreMor); + + // Deploy the OVF library item with the spec, on the target + DeploymentResult result = context.getVimClient().getContentLibrary().getLibraryItem().deploy(null, itemId, target, spec); + + displayOperationResult(result.getSucceeded(), result.getError()); + + if (!result.getSucceeded()) { + return new Pair(null, 
result.getError().toString()); + } + + String vmId = result.getResourceId().getId(); + ManagedObjectReference vmMor = new ManagedObjectReference(); + vmMor.setType("VirtualMachine"); + vmMor.setValue(vmId); + return new Pair(vmMor, "success"); + } + + private static ResourcePoolDeploymentSpec createResourcePoolDeploymentSpec(String entityName, ManagedObjectReference datastoreMor) { + ResourcePoolDeploymentSpec spec = new ResourcePoolDeploymentSpec(); + spec.setAcceptAllEULA(true); + spec.setName(entityName); + spec.setAnnotation("CloudStack VM:" + entityName); + + String datastoreId = datastoreMor.getValue(); + if (StringUtils.isNotBlank(datastoreId)) { + spec.setDefaultDatastoreId(datastoreId); + } + return spec; + } + + public static void displayOperationResult(boolean operationSucceeded, LibraryItemTypes.ResultInfo operationResult) { + boolean displayHeader = true; + LibraryItemTypes.ResultInfo info = operationResult; + + if (operationSucceeded) { + LOGGER.info("OVF item deployment succeeded"); + } else { + LOGGER.warn("OVF item deployment failed"); + // print only failure information here + if (info != null) { + List errors = info.getErrors(); + if (!errors.isEmpty() /* to decide if header needs to be printed */ ) { + LOGGER.debug(HEADING_ADDITIONAL_INFO); + displayHeader = false; + + for (OvfError error : errors) { + printOvfMessage(error._convertTo(OvfMessage.class)); + } + } + } + } + + // display information in both the success and failure cases + if (info != null) { + List warnings = info.getWarnings(); + List additionalInfo = info.getInformation(); + + // little bit of pretty print + if (!warnings.isEmpty() || !additionalInfo.isEmpty()) { + LOGGER.debug(HEADING_ADDITIONAL_INFO); + displayHeader = false; // for completeness + } + // display warnings + for (OvfWarning warning : warnings) { + printOvfMessage(warning._convertTo(OvfMessage.class)); + } + // display addition info + for (OvfInfo information : additionalInfo) { + List messages = + information.getMessages(); + for (LocalizableMessage message : messages) { + LOGGER.debug("Information: " + message.getDefaultMessage()); + } + } + } + } + + private static void printOvfMessage(OvfMessage ovfMessage) { + if (ovfMessage.getCategory().equals(OvfMessage.Category.SERVER)) { + List messages = + ovfMessage.getError()._convertTo(com.vmware.vapi.std.errors.Error.class).getMessages(); + for (LocalizableMessage message : messages) { + LOGGER.debug("Server error message: " + message); + } + } else if (ovfMessage.getCategory().equals( + OvfMessage.Category.VALIDATION)) { + for (ParseIssue issue : ovfMessage.getIssues()) { + LOGGER.debug("Issue message: " + issue.getMessage()); + } + } else if (ovfMessage.getCategory().equals(OvfMessage.Category.INPUT)) { + LOGGER.debug("Input validation message: " + ovfMessage.getMessage()); + } + } + + private static String getContentLibraryByName(VmwareContext context, String libraryName) { + LibraryTypes.FindSpec findSpec = new LibraryTypes.FindSpec(); + findSpec.setName(libraryName); + List libraryIds = context.getVimClient().getContentLibrary().getLibrary().find(findSpec); + if (!libraryIds.isEmpty()) { + LOGGER.debug("Found content library with name: " + libraryName); + String libraryId = libraryIds.get(0); + return libraryId; + } + + LOGGER.debug("Couldn't find the content library with name: " + libraryName); + return null; + } + + private static String getContentLibraryItemByName(VmwareContext context, String libraryId, String libraryItemName) { + ItemTypes.FindSpec findSpec = new 
ItemTypes.FindSpec(); + findSpec.setLibraryId(libraryId); + findSpec.setName(libraryItemName); + List itemIds = context.getVimClient().getContentLibrary().getItem().find(findSpec); + if (!itemIds.isEmpty()) { + String itemId = itemIds.get(0); + LOGGER.debug("Found library item : " + libraryItemName); + return itemId; + } + + LOGGER.debug("Couldn't find the content library item with name: " + libraryItemName); + return null; + } + + private static String createOvfItem(VmwareContext context, String libraryId, String itemName) { + return createLibraryItem(context, libraryId, itemName, "ovf"); + } + + private static String createLibraryItem(VmwareContext context, String libraryId, String itemName, String type) { + ItemModel item = new ItemModel(); + item.setName(itemName); + item.setLibraryId(libraryId); + item.setType(type); + return context.getVimClient().getContentLibrary().getItem().create(UUID.randomUUID().toString(), item); + } + + private static StorageBacking createStorageBacking(VmwareContext context, String datastoreName) { + String dsId = getDatastoreId(context, datastoreName); + if (StringUtils.isBlank(dsId)) { + return null; + } + + //Build the storage backing with the datastore Id + StorageBacking storageBacking = new StorageBacking(); + storageBacking.setType(StorageBacking.Type.DATASTORE); + storageBacking.setDatastoreId(dsId); + return storageBacking; + } + + private static String getDatastoreId(VmwareContext context, String datastoreName) { + Datastore datastoreService = context.getVimClient().getContentLibrary().getDatastore(); + Set datastores = Collections.singleton(datastoreName); + List datastoreSummaries = null; + DatastoreTypes.FilterSpec datastoreFilterSpec = null; + + datastoreFilterSpec = new DatastoreTypes.FilterSpec.Builder().setNames(datastores).build(); + datastoreSummaries = datastoreService.list(datastoreFilterSpec); + if (datastoreSummaries == null || datastoreSummaries.isEmpty()) { + LOGGER.debug("Couldn't find the datastore with name: " + datastoreName); + return null; + } + + return datastoreSummaries.get(0).getDatastore(); + } +} diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VcenterSessionHandler.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VcenterSessionHandler.java new file mode 100644 index 000000000000..9efab7b8eceb --- /dev/null +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VcenterSessionHandler.java @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
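Putting the helper above together, a hypothetical end-to-end flow could create a datastore-backed library, pull an OVF into it and deploy it. All names (datastore, library, OVF URI, VM name) and the two managed object references are assumed caller-supplied values, not part of the patch.

import com.cloud.hypervisor.vmware.util.ContentLibraryHelper;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.utils.Pair;
import com.vmware.vim25.ManagedObjectReference;

public class ContentLibraryDeployExample {
    public static ManagedObjectReference deployTemplate(VmwareContext context, ManagedObjectReference poolMor,
            ManagedObjectReference dsMor) throws Exception {
        String datastore = "primary-ds1";      // placeholder datastore name
        String library = "cloudstack-library"; // placeholder library name

        // Create (or reuse) a local content library backed by the datastore.
        ContentLibraryHelper.createContentLibrary(context, datastore, library);

        // Pull an OVF that already sits on the datastore into the library as an item;
        // the helper concatenates the URI and file name, so the URI ends with the folder path.
        ContentLibraryHelper.importOvfFromDatastore(context,
                "https://vcenter.example.com/folder/templates/", "template.ovf", library, "centos-template");

        // Deploy the library item onto the given resource pool and datastore.
        Pair<ManagedObjectReference, String> result =
                ContentLibraryHelper.deployOvf(context, library, "centos-template", "i-2-15-VM", poolMor, dsMor);
        if (result.first() == null) {
            throw new Exception("OVF deployment failed: " + result.second());
        }
        return result.first();
    }
}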
+package com.cloud.hypervisor.vmware.util; + +import java.util.Set; + +import javax.xml.namespace.QName; +import javax.xml.soap.SOAPElement; +import javax.xml.soap.SOAPException; +import javax.xml.soap.SOAPHeader; +import javax.xml.ws.handler.MessageContext; +import javax.xml.ws.handler.soap.SOAPHandler; +import javax.xml.ws.handler.soap.SOAPMessageContext; + +import org.apache.log4j.Logger; +import org.w3c.dom.DOMException; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class VcenterSessionHandler implements SOAPHandler { + public static final Logger s_logger = Logger.getLogger(VcenterSessionHandler.class); + private final String vcSessionCookie; + + public VcenterSessionHandler(String vcSessionCookie) { + this.vcSessionCookie = vcSessionCookie; + } + + @Override + public boolean handleMessage(SOAPMessageContext smc) { + if (isOutgoingMessage(smc)) { + try { + SOAPHeader header = getSOAPHeader(smc); + + SOAPElement vcsessionHeader = header.addChildElement(new javax.xml.namespace.QName("#", + "vcSessionCookie")); + vcsessionHeader.setValue(vcSessionCookie); + + } catch (DOMException e) { + s_logger.debug(e); + throw new CloudRuntimeException(e); + } catch (SOAPException e) { + s_logger.debug(e); + throw new CloudRuntimeException(e); + } + } + return true; + } + + @Override + public void close(MessageContext arg0) { + } + + @Override + public boolean handleFault(SOAPMessageContext arg0) { + return false; + } + + @Override + public Set getHeaders() { + return null; + } + + SOAPHeader getSOAPHeader(SOAPMessageContext smc) throws SOAPException { + return smc.getMessage().getSOAPPart().getEnvelope().getHeader() == null ? smc + .getMessage().getSOAPPart().getEnvelope().addHeader() + : smc.getMessage().getSOAPPart().getEnvelope().getHeader(); + } + + boolean isOutgoingMessage(SOAPMessageContext smc) { + Boolean outboundProperty = (Boolean)smc.get(MessageContext.MESSAGE_OUTBOUND_PROPERTY); + return outboundProperty; + } + +} diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareClient.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareClient.java index 3d80ffdfae74..dca76752005e 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareClient.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareClient.java @@ -17,8 +17,11 @@ package com.cloud.hypervisor.vmware.util; import java.lang.reflect.Method; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.StringTokenizer; @@ -29,9 +32,16 @@ import javax.xml.ws.BindingProvider; import javax.xml.ws.WebServiceException; import javax.xml.ws.handler.MessageContext; +import javax.xml.ws.handler.Handler; +import javax.xml.ws.handler.HandlerResolver; +import javax.xml.ws.handler.PortInfo; + import org.apache.cloudstack.utils.security.SSLUtils; import org.apache.cloudstack.utils.security.SecureSSLSocketFactory; +import com.vmware.pbm.PbmPortType; +import com.vmware.pbm.PbmService; +import com.vmware.pbm.PbmServiceInstanceContent; import org.apache.log4j.Logger; import org.w3c.dom.Element; @@ -62,6 +72,9 @@ import com.vmware.vim25.VimService; import com.vmware.vim25.WaitOptions; +import com.vmware.vapi.protocol.HttpConfiguration; +import com.vmware.vapi.protocol.HttpConfiguration.SslConfiguration; + /** * A wrapper class to handle Vmware vsphere connection and disconnection. 
* @@ -101,6 +114,7 @@ public boolean verify(String urlHostName, SSLSession session) { HttpsURLConnection.setDefaultHostnameVerifier(hv); vimService = new VimService(); + pbmService = new PbmService(); } catch (Exception e) { s_logger.info("[ignored]" + "failed to trust all certificates blindly: ", e); @@ -120,11 +134,21 @@ private static void trustAllHttpsCertificates() throws Exception { } private final ManagedObjectReference svcInstRef = new ManagedObjectReference(); + private final ManagedObjectReference pbmSvcInstRef = new ManagedObjectReference(); + private static VimService vimService; + private static PbmService pbmService; + private PbmServiceInstanceContent pbmServiceContent; + private ContentLibraryClient libraryClient; private VimPortType vimPort; + private PbmPortType pbmPort; + private static final String PBM_SERVICE_INSTANCE_TYPE = "PbmServiceInstance"; + private static final String PBM_SERVICE_INSTANCE_VALUE = "ServiceInstance"; + private String serviceCookie; private final static String SVC_INST_NAME = "ServiceInstance"; private int vCenterSessionTimeout = 1200000; // Timeout in milliseconds + public static final String VIMSERVICE_PATH = "/sdk/vimService"; private boolean isConnected = false; @@ -137,12 +161,13 @@ public VmwareClient(String name) { * @throws Exception * the exception */ - public void connect(String url, String userName, String password) throws Exception { + public void connect(String vCenterAddress, String userName, String password) throws Exception { svcInstRef.setType(SVC_INST_NAME); svcInstRef.setValue(SVC_INST_NAME); vimPort = vimService.getVimPort(); Map ctxt = ((BindingProvider)vimPort).getRequestContext(); + String url = "https://" + vCenterAddress + VIMSERVICE_PATH; ctxt.put(BindingProvider.ENDPOINT_ADDRESS_PROPERTY, url); ctxt.put(BindingProvider.SESSION_MAINTAIN_PROPERTY, true); @@ -176,10 +201,46 @@ public void connect(String url, String userName, String password) throws Excepti cookieValue = tokenizer.nextToken(); String pathData = "$" + tokenizer.nextToken(); serviceCookie = "$Version=\"1\"; " + cookieValue + "; " + pathData; + Map> map = new HashMap>(); + map.put("Cookie", Collections.singletonList(serviceCookie)); + ((BindingProvider)vimPort).getRequestContext().put(MessageContext.HTTP_REQUEST_HEADERS, map); + pbmConnect(url, cookieValue); + + //login to content library + HttpConfiguration httpConfig = new HttpConfiguration.Builder().setSslConfiguration(buildSslConfiguration()).getConfig(); + Map contentLibraryConfig = new HashMap(); + contentLibraryConfig.put(ContentLibraryClient.HTTP_CONFIG, httpConfig); + libraryClient = new ContentLibraryClient(); + libraryClient.login(vCenterAddress, userName, password, contentLibraryConfig); isConnected = true; } + private void pbmConnect(String url, String cookieValue) throws Exception { + URI uri = new URI(url); + String pbmurl = "https://" + uri.getHost() + "/pbm"; + String[] tokens = cookieValue.split("="); + String extractedCookie = tokens[1]; + + HandlerResolver soapHandlerResolver = new HandlerResolver() { + @Override + public List getHandlerChain(PortInfo portInfo) { + VcenterSessionHandler VcSessionHandler = new VcenterSessionHandler(extractedCookie); + List handlerChain = new ArrayList(); + handlerChain.add((Handler)VcSessionHandler); + return handlerChain; + } + }; + pbmService.setHandlerResolver(soapHandlerResolver); + + pbmSvcInstRef.setType(PBM_SERVICE_INSTANCE_TYPE); + pbmSvcInstRef.setValue(PBM_SERVICE_INSTANCE_VALUE); + pbmPort = pbmService.getPbmPort(); + Map pbmCtxt = 
((BindingProvider)pbmPort).getRequestContext(); + pbmCtxt.put(BindingProvider.SESSION_MAINTAIN_PROPERTY, true); + pbmCtxt.put(BindingProvider.ENDPOINT_ADDRESS_PROPERTY, pbmurl); + } + /** * Disconnects the user session. * @@ -188,6 +249,7 @@ public void connect(String url, String userName, String password) throws Excepti public void disconnect() throws Exception { if (isConnected) { vimPort.logout(getServiceContent().getSessionManager()); + libraryClient.logout(); } isConnected = false; } @@ -211,6 +273,36 @@ public ServiceContent getServiceContent() { return null; } + /** + * @return PBM service instance + */ + public PbmPortType getPbmService() { + return pbmPort; + } + + /** + * @return Service instance content + */ + public PbmServiceInstanceContent getPbmServiceContent() { + try { + return pbmPort.pbmRetrieveServiceContent(pbmSvcInstRef); + } catch (com.vmware.pbm.RuntimeFaultFaultMsg e) { + } + return null; + } + + /** + * @return Content library client instance + */ + public ContentLibraryClient getContentLibrary() { + return libraryClient; + } + + private SslConfiguration buildSslConfiguration() throws Exception { + trustAllHttpsCertificates(); + return new SslConfiguration.Builder().disableCertificateValidation().disableHostnameVerification().getConfig(); + } + /** * @return cookie used in service connection */ diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareClientService.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareClientService.java new file mode 100644 index 000000000000..a2ee420afe01 --- /dev/null +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareClientService.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.hypervisor.vmware.util;
+
+import java.util.Map;
+
+public interface VmwareClientService {
+    /**
+     * Logs in to the client service
+     *
+     * @return login result - true or false
+     * @throws Exception
+     */
+    boolean login(String vCenterAddress, String userName, String password, Map<String, Object> configProperties) throws Exception;
+
+    /**
+     * Logs out from the client service
+     *
+     * @return logout result - true or false
+     * @throws Exception
+     */
+    boolean logout() throws Exception;
+}
diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareContext.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareContext.java
index 9b477aef42bc..807289f8bdff 100644
--- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareContext.java
+++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareContext.java
@@ -20,6 +20,8 @@ import com.cloud.hypervisor.vmware.mo.DatastoreFile;
 import com.cloud.utils.ActionDelegate;
 import com.cloud.utils.StringUtils;
+import com.vmware.pbm.PbmPortType;
+import com.vmware.pbm.PbmServiceInstanceContent;
 import com.vmware.vim25.ManagedObjectReference;
 import com.vmware.vim25.ObjectContent;
 import com.vmware.vim25.ObjectSpec;
@@ -148,6 +150,14 @@ public ServiceContent getServiceContent() {
         return _vimClient.getServiceContent();
     }
+    public PbmPortType getPbmService() {
+        return _vimClient.getPbmService();
+    }
+
+    public PbmServiceInstanceContent getPbmServiceContent() {
+        return _vimClient.getPbmServiceContent();
+    }
+
     public ManagedObjectReference getPropertyCollector() {
         return _vimClient.getPropCol();
     }
@@ -311,6 +321,24 @@ public ManagedObjectReference getDatastoreMorByPath(String inventoryPath) throws
         return dcMo.findDatastore(tokens[1]);
     }
+    // path in format of <datacenter name>/<datastore name>
+    public String getDatastoreNameFromPath(String inventoryPath) throws Exception {
+        assert (inventoryPath != null);
+
+        String[] tokens;
+        if (inventoryPath.startsWith("/"))
+            tokens = inventoryPath.substring(1).split("/");
+        else
+            tokens = inventoryPath.split("/");
+
+        if (tokens == null || tokens.length != 2) {
+            s_logger.error("Invalid datastore inventory path.
path: " + inventoryPath); + return null; + } + + return tokens[1]; + } + public void waitForTaskProgressDone(ManagedObjectReference morTask) throws Exception { while (true) { TaskInfo tinfo = (TaskInfo)_vimClient.getDynamicProperty(morTask, "info"); diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java index 181b2ef183f6..08b5672760ae 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java @@ -212,35 +212,49 @@ public static VirtualDevice prepareDvNicDevice(VirtualMachineMO vmMo, ManagedObj } // vmdkDatastorePath: [datastore name] vmdkFilePath - public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, String vmdkDatastorePath, int sizeInMb, ManagedObjectReference morDs, - int deviceNumber, int contextNumber) throws Exception { + public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, VirtualDisk device, int controllerKey, String vmdkDatastorePathChain[], + ManagedObjectReference morDs, int deviceNumber, int contextNumber) throws Exception { - VirtualDisk disk = new VirtualDisk(); + assert (vmdkDatastorePathChain != null); + assert (vmdkDatastorePathChain.length >= 1); - VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); - backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.value()); - backingInfo.setThinProvisioned(true); - backingInfo.setEagerlyScrub(false); - backingInfo.setDatastore(morDs); - backingInfo.setFileName(vmdkDatastorePath); - disk.setBacking(backingInfo); + VirtualDisk disk; + VirtualDiskFlatVer2BackingInfo backingInfo; + if (device != null) { + disk = device; + backingInfo = (VirtualDiskFlatVer2BackingInfo)disk.getBacking(); + } else { + disk = new VirtualDisk(); + backingInfo = new VirtualDiskFlatVer2BackingInfo(); + backingInfo.setDatastore(morDs); + backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.value()); + disk.setBacking(backingInfo); - int ideControllerKey = vmMo.getIDEDeviceControllerKey(); - if (controllerKey < 0) - controllerKey = ideControllerKey; - if (deviceNumber < 0) { - deviceNumber = vmMo.getNextDeviceNumber(controllerKey); + int ideControllerKey = vmMo.getIDEDeviceControllerKey(); + if (controllerKey < 0) + controllerKey = ideControllerKey; + if (deviceNumber < 0) { + deviceNumber = vmMo.getNextDeviceNumber(controllerKey); + } + + disk.setControllerKey(controllerKey); + disk.setKey(-contextNumber); + disk.setUnitNumber(deviceNumber); + + VirtualDeviceConnectInfo connectInfo = new VirtualDeviceConnectInfo(); + connectInfo.setConnected(true); + connectInfo.setStartConnected(true); + disk.setConnectable(connectInfo); } - disk.setControllerKey(controllerKey); - disk.setKey(-contextNumber); - disk.setUnitNumber(deviceNumber); - disk.setCapacityInKB(sizeInMb * 1024); + backingInfo.setFileName(vmdkDatastorePathChain[0]); + if (vmdkDatastorePathChain.length > 1) { + String[] parentDisks = new String[vmdkDatastorePathChain.length - 1]; + for (int i = 0; i < vmdkDatastorePathChain.length - 1; i++) + parentDisks[i] = vmdkDatastorePathChain[i + 1]; - VirtualDeviceConnectInfo connectInfo = new VirtualDeviceConnectInfo(); - connectInfo.setConnected(true); - connectInfo.setStartConnected(true); - disk.setConnectable(connectInfo); + setParentBackingInfo(backingInfo, morDs, parentDisks); + } return disk; } @@ -314,96 +328,6 @@ public static VirtualDevice 
prepareDiskDevice(VirtualMachineMO vmMo, int control
         return disk;
     }
-    // vmdkDatastorePath: [datastore name] vmdkFilePath
-    public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, VirtualDisk device, int controllerKey, String vmdkDatastorePathChain[],
-            ManagedObjectReference morDs, int deviceNumber, int contextNumber) throws Exception {
-
-        assert (vmdkDatastorePathChain != null);
-        assert (vmdkDatastorePathChain.length >= 1);
-
-        VirtualDisk disk;
-        VirtualDiskFlatVer2BackingInfo backingInfo;
-        if (device != null) {
-            disk = device;
-            backingInfo = (VirtualDiskFlatVer2BackingInfo)disk.getBacking();
-        } else {
-            disk = new VirtualDisk();
-            backingInfo = new VirtualDiskFlatVer2BackingInfo();
-            backingInfo.setDatastore(morDs);
-            backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.value());
-            disk.setBacking(backingInfo);
-
-            int ideControllerKey = vmMo.getIDEDeviceControllerKey();
-            if (controllerKey < 0)
-                controllerKey = ideControllerKey;
-            if (deviceNumber < 0) {
-                deviceNumber = vmMo.getNextDeviceNumber(controllerKey);
-            }
-
-            disk.setControllerKey(controllerKey);
-            disk.setKey(-contextNumber);
-            disk.setUnitNumber(deviceNumber);
-
-            VirtualDeviceConnectInfo connectInfo = new VirtualDeviceConnectInfo();
-            connectInfo.setConnected(true);
-            connectInfo.setStartConnected(true);
-            disk.setConnectable(connectInfo);
-        }
-
-        backingInfo.setFileName(vmdkDatastorePathChain[0]);
-        if (vmdkDatastorePathChain.length > 1) {
-            String[] parentDisks = new String[vmdkDatastorePathChain.length - 1];
-            for (int i = 0; i < vmdkDatastorePathChain.length - 1; i++)
-                parentDisks[i] = vmdkDatastorePathChain[i + 1];
-
-            setParentBackingInfo(backingInfo, morDs, parentDisks);
-        }
-
-        return disk;
-    }
-
-    @SuppressWarnings("unchecked")
-    public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, Pair<String, ManagedObjectReference>[] vmdkDatastorePathChain,
-            int deviceNumber, int contextNumber) throws Exception {
-
-        assert (vmdkDatastorePathChain != null);
-        assert (vmdkDatastorePathChain.length >= 1);
-
-        VirtualDisk disk = new VirtualDisk();
-
-        VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo();
-        backingInfo.setDatastore(vmdkDatastorePathChain[0].second());
-        backingInfo.setFileName(vmdkDatastorePathChain[0].first());
-        backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.value());
-        if (vmdkDatastorePathChain.length > 1) {
-            Pair<String, ManagedObjectReference>[] parentDisks = new Pair[vmdkDatastorePathChain.length - 1];
-            for (int i = 0; i < vmdkDatastorePathChain.length - 1; i++)
-                parentDisks[i] = vmdkDatastorePathChain[i + 1];
-
-            setParentBackingInfo(backingInfo, parentDisks);
-        }
-
-        disk.setBacking(backingInfo);
-
-        int ideControllerKey = vmMo.getIDEDeviceControllerKey();
-        if (controllerKey < 0)
-            controllerKey = ideControllerKey;
-        if (deviceNumber < 0) {
-            deviceNumber = vmMo.getNextDeviceNumber(controllerKey);
-        }
-
-        disk.setControllerKey(controllerKey);
-        disk.setKey(-contextNumber);
-        disk.setUnitNumber(deviceNumber);
-
-        VirtualDeviceConnectInfo connectInfo = new VirtualDeviceConnectInfo();
-        connectInfo.setConnected(true);
-        connectInfo.setStartConnected(true);
-        disk.setConnectable(connectInfo);
-
-        return disk;
-    }
-
-
     private static void setParentBackingInfo(VirtualDiskFlatVer2BackingInfo backingInfo, ManagedObjectReference morDs, String[] parentDatastorePathList) {
         VirtualDiskFlatVer2BackingInfo parentBacking = new VirtualDiskFlatVer2BackingInfo();
diff --git a/vmware-base/src/test/java/com/cloud/hypervisor/vmware/mo/TestVmwareContextFactory.java
b/vmware-base/src/test/java/com/cloud/hypervisor/vmware/mo/TestVmwareContextFactory.java index 3d0e73690172..2d2f2a244e12 100644 --- a/vmware-base/src/test/java/com/cloud/hypervisor/vmware/mo/TestVmwareContextFactory.java +++ b/vmware-base/src/test/java/com/cloud/hypervisor/vmware/mo/TestVmwareContextFactory.java @@ -42,15 +42,13 @@ public static VmwareContext create(String vCenterAddress, String vCenterUserName assert (vCenterUserName != null); assert (vCenterPassword != null); - String serviceUrl = "https://" + vCenterAddress + "/sdk/vimService"; - if (s_logger.isDebugEnabled()) - s_logger.debug("initialize VmwareContext. url: " + serviceUrl + ", username: " + vCenterUserName + ", password: " + + s_logger.debug("initialize VmwareContext. vCenter: " + vCenterAddress + ", username: " + vCenterUserName + ", password: " + StringUtils.getMaskedPasswordForDisplay(vCenterPassword)); VmwareClient vimClient = new VmwareClient(vCenterAddress + "-" + s_seq++); vimClient.setVcenterSessionTimeout(1200000); - vimClient.connect(serviceUrl, vCenterUserName, vCenterPassword); + vimClient.connect(vCenterAddress, vCenterUserName, vCenterPassword); VmwareContext context = new VmwareContext(vimClient, vCenterAddress); return context;
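For orientation, a minimal sketch of how the reworked connection flow is driven end to end, mirroring TestVmwareContextFactory above. Illustrative only, not part of the patch; the client name, vCenter address and credentials below are placeholder values, and the calls may throw Exception.

    // connect() now takes the bare vCenter address and builds the vim service URL
    // ("https://" + address + VIMSERVICE_PATH) itself, then attaches the PBM endpoint with the
    // same session cookie and logs in to the Content Library with the same credentials.
    VmwareClient vimClient = new VmwareClient("10.1.1.1-1");        // placeholder client name
    vimClient.setVcenterSessionTimeout(1200000);                    // milliseconds
    vimClient.connect("10.1.1.1", "administrator@vsphere.local", "password");

    VmwareContext context = new VmwareContext(vimClient, "10.1.1.1");
    PbmServiceInstanceContent pbmContent = context.getPbmServiceContent(); // null if the PBM content could not be retrieved

    vimClient.disconnect();                                         // also logs the Content Library client out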