diff --git a/README.md b/README.md index 3d9bc3d4fe36..928b8a92a6b6 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,7 @@ via Github pull requests. * Design [documents](https://cwiki.apache.org/confluence/display/CLOUDSTACK/Design) * API [documentation](https://cloudstack.apache.org/api.html) * How to [contribute](CONTRIBUTING.md) +* Check the [YouTube channel](https://www.youtube.com/ApacheCloudStack) for presentations, interviews, and more ## Getting Involved and Contributing diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 06d8f3f2a1ef..25863276c091 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -266,3 +266,10 @@ iscsi.session.cleanup.enabled=false # Automatically clean up iscsi sessions not attached to any VM. # Should be enabled for users using managed storage for example solidfire. # Should be disabled for users with unmanaged iscsi connections on their hosts + +# This parameter specifies the heartbeat update timeout in ms; The default value is 60000ms (1 min). +# Depending on the use case, this timeout might need increasing/decreasing. +# heartbeat.update.timeout=60000 + +# Enable manually setting CPU's topology on KVM's VM. +# enable.manually.setting.cpu.topology.on.kvm.vm=true diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java new file mode 100644 index 000000000000..d04e98fb704d --- /dev/null +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -0,0 +1,59 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.agent.properties; + +/** + * Class of constant agent's properties available to configure on + * "agent.properties". + *

+ * Not all available agent properties are defined here, but we should work to + * migrate them on demand to this class. + * + * @param <T> type of the default value. + */ +public class AgentProperties{ + + /** + * Heartbeat update timeout.<br>
+ * Data type: int.<br>
+ * Default value: 60000 (ms). + */ + public static final Property<Integer> HEARTBEAT_UPDATE_TIMEOUT = new Property<>("heartbeat.update.timeout", 60000); + + /** + * Enable manually setting CPU's topology on KVM's VM.<br>
+ * Data type: boolean.<br>
+ * Default value: true. + */ + public static final Property<Boolean> ENABLE_MANUALLY_SETTING_CPU_TOPOLOGY_ON_KVM_VM = new Property<>("enable.manually.setting.cpu.topology.on.kvm.vm", true); + + public static class Property<T> { + private final String name; + private final T defaultValue; + + private Property(String name, T value) { + this.name = name; + this.defaultValue = value; + } + + public String getName() { + return name; + } + + public T getDefaultValue() { + return defaultValue; + } + } +} diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentPropertiesFileHandler.java b/agent/src/main/java/com/cloud/agent/properties/AgentPropertiesFileHandler.java new file mode 100644 index 000000000000..6d515f0004f0 --- /dev/null +++ b/agent/src/main/java/com/cloud/agent/properties/AgentPropertiesFileHandler.java @@ -0,0 +1,70 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.cloud.agent.properties; + +import com.cloud.utils.PropertiesUtil; +import java.io.File; +import java.io.IOException; +import org.apache.cloudstack.utils.security.KeyStoreUtils; +import org.apache.commons.beanutils.ConvertUtils; +import org.apache.commons.beanutils.converters.IntegerConverter; +import org.apache.commons.lang3.StringUtils; +import org.apache.log4j.Logger; + +/** + * This class provides a facility to read the agent's properties file and get + * its properties, according to the {@link AgentProperties} properties constants.
+ * + */ +public class AgentPropertiesFileHandler { + + private static final Logger logger = Logger.getLogger(AgentPropertiesFileHandler.class); + + /** + * This method reads the property in the agent.properties file. + * + * @param property the property to retrieve. + * @return The value of the property. If the property is not available, the + * default defined value will be used. + */ + public static <T> T getPropertyValue(AgentProperties.Property<T> property) { + T defaultValue = property.getDefaultValue(); + String name = property.getName(); + + File agentPropertiesFile = PropertiesUtil.findConfigFile(KeyStoreUtils.AGENT_PROPSFILE); + + if (agentPropertiesFile != null) { + try { + String configValue = PropertiesUtil.loadFromFile(agentPropertiesFile).getProperty(name); + if (StringUtils.isNotBlank(configValue)) { + if (defaultValue instanceof Integer) { + ConvertUtils.register(new IntegerConverter(defaultValue), Integer.class); + } + + return (T)ConvertUtils.convert(configValue, defaultValue.getClass()); + } else { + logger.debug(String.format("Property [%s] has empty or null value. Using default value [%s].", name, defaultValue)); + } + } catch (IOException ex) { + logger.debug(String.format("Failed to get property [%s]. Using default value [%s].", name, defaultValue), ex); + } + } else { + logger.debug(String.format("File [%s] was not found, we will use default defined values. Property [%s]: [%s].", KeyStoreUtils.AGENT_PROPSFILE, name, defaultValue)); + } + + return defaultValue; + } + +} diff --git a/agent/src/test/java/com/cloud/agent/properties/AgentPropertiesFileHandlerTest.java b/agent/src/test/java/com/cloud/agent/properties/AgentPropertiesFileHandlerTest.java new file mode 100644 index 000000000000..d91d0e03fb29 --- /dev/null +++ b/agent/src/test/java/com/cloud/agent/properties/AgentPropertiesFileHandlerTest.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package com.cloud.agent.properties; + +import com.cloud.utils.PropertiesUtil; +import java.io.File; +import java.io.IOException; +import java.util.Properties; +import junit.framework.TestCase; +import org.apache.commons.beanutils.ConvertUtils; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PrepareForTest({PropertiesUtil.class, ConvertUtils.class}) +public class AgentPropertiesFileHandlerTest extends TestCase { + + @Mock + AgentProperties.Property agentPropertiesStringMock; + + @Mock + AgentProperties.Property agentPropertiesIntegerMock; + + @Mock + File fileMock; + + @Mock + Properties propertiesMock; + + @Test + public void validateGetPropertyValueFileNotFoundReturnDefaultValue() throws Exception{ + String expectedResult = "default value"; + Mockito.doReturn(expectedResult).when(agentPropertiesStringMock).getDefaultValue(); + + PowerMockito.mockStatic(PropertiesUtil.class); + PowerMockito.doReturn(null).when(PropertiesUtil.class, "findConfigFile", Mockito.anyString()); + + 
String result = AgentPropertiesFileHandler.getPropertyValue(agentPropertiesStringMock); + + Assert.assertEquals(expectedResult, result); + } + + @Test + public void validateGetPropertyValueLoadFromFileThrowsIOExceptionReturnDefaultValue() throws Exception{ + String expectedResult = "default value"; + Mockito.doReturn(expectedResult).when(agentPropertiesStringMock).getDefaultValue(); + + PowerMockito.mockStatic(PropertiesUtil.class); + PowerMockito.doReturn(fileMock).when(PropertiesUtil.class, "findConfigFile", Mockito.anyString()); + PowerMockito.doThrow(new IOException()).when(PropertiesUtil.class, "loadFromFile", Mockito.any()); + + String result = AgentPropertiesFileHandler.getPropertyValue(agentPropertiesStringMock); + + Assert.assertEquals(expectedResult, result); + } + + @Test + public void validateGetPropertyValuePropertyIsEmptyReturnDefaultValue() throws Exception{ + String expectedResult = "default value"; + Mockito.doReturn(expectedResult).when(agentPropertiesStringMock).getDefaultValue(); + Mockito.doReturn("name").when(agentPropertiesStringMock).getName(); + + PowerMockito.mockStatic(PropertiesUtil.class); + PowerMockito.doReturn(fileMock).when(PropertiesUtil.class, "findConfigFile", Mockito.anyString()); + PowerMockito.doReturn(propertiesMock).when(PropertiesUtil.class, "loadFromFile", Mockito.any()); + PowerMockito.doReturn("").when(propertiesMock).getProperty(Mockito.anyString()); + + String result = AgentPropertiesFileHandler.getPropertyValue(agentPropertiesStringMock); + + Assert.assertEquals(expectedResult, result); + } + + @Test + public void validateGetPropertyValuePropertyIsNullReturnDefaultValue() throws Exception{ + String expectedResult = "default value"; + Mockito.doReturn(expectedResult).when(agentPropertiesStringMock).getDefaultValue(); + Mockito.doReturn("name").when(agentPropertiesStringMock).getName(); + + PowerMockito.mockStatic(PropertiesUtil.class); + PowerMockito.doReturn(fileMock).when(PropertiesUtil.class, "findConfigFile", 
Mockito.anyString()); + PowerMockito.doReturn(propertiesMock).when(PropertiesUtil.class, "loadFromFile", Mockito.any()); + PowerMockito.doReturn(null).when(propertiesMock).getProperty(Mockito.anyString()); + + String result = AgentPropertiesFileHandler.getPropertyValue(agentPropertiesStringMock); + + Assert.assertEquals(expectedResult, result); + } + + @Test + public void validateGetPropertyValueValidPropertyReturnPropertyValue() throws Exception{ + String expectedResult = "test"; + Mockito.doReturn("default value").when(agentPropertiesStringMock).getDefaultValue(); + Mockito.doReturn("name").when(agentPropertiesStringMock).getName(); + + PowerMockito.mockStatic(PropertiesUtil.class); + PowerMockito.doReturn(fileMock).when(PropertiesUtil.class, "findConfigFile", Mockito.anyString()); + PowerMockito.doReturn(propertiesMock).when(PropertiesUtil.class, "loadFromFile", Mockito.any()); + Mockito.doReturn(expectedResult).when(propertiesMock).getProperty(Mockito.anyString()); + + String result = AgentPropertiesFileHandler.getPropertyValue(agentPropertiesStringMock); + + Assert.assertEquals(expectedResult, result); + } + + @Test + public void validateGetPropertyValueValidIntegerPropertyReturnPropertyValue() throws Exception{ + Integer expectedResult = 2; + Mockito.doReturn(1).when(agentPropertiesIntegerMock).getDefaultValue(); + Mockito.doReturn("name").when(agentPropertiesIntegerMock).getName(); + + PowerMockito.mockStatic(PropertiesUtil.class); + PowerMockito.doReturn(fileMock).when(PropertiesUtil.class, "findConfigFile", Mockito.anyString()); + PowerMockito.doReturn(propertiesMock).when(PropertiesUtil.class, "loadFromFile", Mockito.any()); + Mockito.doReturn(String.valueOf(expectedResult)).when(propertiesMock).getProperty(Mockito.anyString()); + + Integer result = AgentPropertiesFileHandler.getPropertyValue(agentPropertiesIntegerMock); + + Assert.assertEquals(expectedResult, result); + } +} diff --git a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java 
b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java index c4729383dd4f..8a30b5ef9fee 100644 --- a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java @@ -413,4 +413,9 @@ public DeployAsIsInfoTO getDeployAsIsInfo() { public void setDeployAsIsInfo(DeployAsIsInfoTO deployAsIsInfo) { this.deployAsIsInfo = deployAsIsInfo; } + + @Override + public String toString() { + return String.format("VM {id: \"%s\", name: \"%s\", uuid: \"%s\", type: \"%s\"}", id, name, uuid, type); + } } diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index fe603d1f411c..85e9aa58a71c 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -352,6 +352,10 @@ public class EventTypes { // Host public static final String EVENT_HOST_RECONNECT = "HOST.RECONNECT"; + // Host on Degraded ResourceState + public static final String EVENT_DECLARE_HOST_DEGRADED = "HOST.DECLARE.DEGRADED"; + public static final String EVENT_CANCEL_HOST_DEGRADED = "HOST.CANCEL.DEGRADED"; + // Host Out-of-band management public static final String EVENT_HOST_OUTOFBAND_MANAGEMENT_ENABLE = "HOST.OOBM.ENABLE"; public static final String EVENT_HOST_OUTOFBAND_MANAGEMENT_DISABLE = "HOST.OOBM.DISABLE"; diff --git a/api/src/main/java/com/cloud/network/Network.java b/api/src/main/java/com/cloud/network/Network.java index 37b8f332d0a9..111b85a6d96d 100644 --- a/api/src/main/java/com/cloud/network/Network.java +++ b/api/src/main/java/com/cloud/network/Network.java @@ -19,6 +19,7 @@ import java.io.Serializable; import java.net.URI; import java.util.ArrayList; +import java.util.Date; import java.util.List; import com.cloud.exception.InvalidParameterValueException; @@ -456,4 +457,6 @@ public void setIp6Address(String ip6Address) { String getRouterIp(); String getRouterIpv6(); + + Date getCreated(); } diff --git 
a/api/src/main/java/com/cloud/network/NetworkProfile.java b/api/src/main/java/com/cloud/network/NetworkProfile.java index 08a420a1b20f..f3c178767e85 100644 --- a/api/src/main/java/com/cloud/network/NetworkProfile.java +++ b/api/src/main/java/com/cloud/network/NetworkProfile.java @@ -17,6 +17,7 @@ package com.cloud.network; import java.net.URI; +import java.util.Date; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.Mode; @@ -329,4 +330,9 @@ public String getRouterIpv6() { return null; } + @Override + public Date getCreated() { + return null; + } + } diff --git a/api/src/main/java/com/cloud/network/vpc/Vpc.java b/api/src/main/java/com/cloud/network/vpc/Vpc.java index 9f40562423d8..432c8839ad89 100644 --- a/api/src/main/java/com/cloud/network/vpc/Vpc.java +++ b/api/src/main/java/com/cloud/network/vpc/Vpc.java @@ -20,6 +20,8 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import java.util.Date; + public interface Vpc extends ControlledEntity, Identity, InternalIdentity { public enum State { @@ -91,4 +93,6 @@ public enum State { boolean isRollingRestart(); void setRollingRestart(boolean rollingRestart); + + Date getCreated(); } diff --git a/api/src/main/java/com/cloud/offering/NetworkOffering.java b/api/src/main/java/com/cloud/offering/NetworkOffering.java index 8ae90c574224..f01c58542e3e 100644 --- a/api/src/main/java/com/cloud/offering/NetworkOffering.java +++ b/api/src/main/java/com/cloud/offering/NetworkOffering.java @@ -23,6 +23,8 @@ import com.cloud.network.Network.GuestType; import com.cloud.network.Networks.TrafficType; +import java.util.Date; + /** * Describes network offering * @@ -141,4 +143,6 @@ public enum Detail { boolean isSupportingPublicAccess(); String getServicePackage(); + + Date getCreated(); } diff --git a/api/src/main/java/com/cloud/resource/ResourceService.java b/api/src/main/java/com/cloud/resource/ResourceService.java index 7f04d8919b97..e2b84ba87203 
100644 --- a/api/src/main/java/com/cloud/resource/ResourceService.java +++ b/api/src/main/java/com/cloud/resource/ResourceService.java @@ -24,10 +24,12 @@ import org.apache.cloudstack.api.command.admin.host.AddHostCmd; import org.apache.cloudstack.api.command.admin.host.AddSecondaryStorageCmd; import org.apache.cloudstack.api.command.admin.host.CancelMaintenanceCmd; -import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd; import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; +import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.host.DeclareHostAsDegradedCmd; +import org.apache.cloudstack.api.command.admin.host.CancelHostAsDegradedCmd; import com.cloud.dc.DataCenter; import com.cloud.exception.AgentUnavailableException; @@ -67,6 +69,10 @@ public interface ResourceService { Host maintain(PrepareForMaintenanceCmd cmd); + Host declareHostAsDegraded(DeclareHostAsDegradedCmd cmd) throws NoTransitionException; + + Host cancelHostAsDegraded(CancelHostAsDegradedCmd cmd) throws NoTransitionException; + /** * Deletes a host * @param true if deleted, false otherwise diff --git a/api/src/main/java/com/cloud/resource/ResourceState.java b/api/src/main/java/com/cloud/resource/ResourceState.java index 6e0fa9092307..70738c7921bc 100644 --- a/api/src/main/java/com/cloud/resource/ResourceState.java +++ b/api/src/main/java/com/cloud/resource/ResourceState.java @@ -30,7 +30,8 @@ public enum ResourceState { PrepareForMaintenance, ErrorInMaintenance, Maintenance, - Error; + Error, + Degraded; public enum Event { InternalCreated("Resource is created"), @@ -45,6 +46,8 @@ public enum Event { ErrorsCorrected("Errors were corrected on a resource attempting to enter maintenance but encountered errors"), Error("An internal error happened"), 
DeleteHost("Admin delete a host"), + DeclareHostDegraded("Admin declares host as Degraded"), + EnableDegradedHost("Admin puts Degraded host into Enabled"), /* * Below events don't cause resource state to change, they are merely @@ -113,11 +116,13 @@ public static boolean canAttemptMaintenance(ResourceState state) { s_fsm.addTransition(ResourceState.Enabled, Event.InternalCreated, ResourceState.Enabled); s_fsm.addTransition(ResourceState.Enabled, Event.Disable, ResourceState.Disabled); s_fsm.addTransition(ResourceState.Enabled, Event.AdminAskMaintenance, ResourceState.PrepareForMaintenance); + s_fsm.addTransition(ResourceState.Enabled, Event.DeclareHostDegraded, ResourceState.Degraded); s_fsm.addTransition(ResourceState.Enabled, Event.InternalEnterMaintenance, ResourceState.Maintenance); s_fsm.addTransition(ResourceState.Enabled, Event.DeleteHost, ResourceState.Disabled); s_fsm.addTransition(ResourceState.Disabled, Event.Enable, ResourceState.Enabled); s_fsm.addTransition(ResourceState.Disabled, Event.Disable, ResourceState.Disabled); s_fsm.addTransition(ResourceState.Disabled, Event.InternalCreated, ResourceState.Disabled); + s_fsm.addTransition(ResourceState.Disabled, Event.DeclareHostDegraded, ResourceState.Degraded); s_fsm.addTransition(ResourceState.PrepareForMaintenance, Event.InternalEnterMaintenance, ResourceState.Maintenance); s_fsm.addTransition(ResourceState.PrepareForMaintenance, Event.AdminCancelMaintenance, ResourceState.Enabled); s_fsm.addTransition(ResourceState.PrepareForMaintenance, Event.UnableToMigrate, ResourceState.ErrorInPrepareForMaintenance); @@ -126,6 +131,7 @@ public static boolean canAttemptMaintenance(ResourceState state) { s_fsm.addTransition(ResourceState.Maintenance, Event.AdminCancelMaintenance, ResourceState.Enabled); s_fsm.addTransition(ResourceState.Maintenance, Event.InternalCreated, ResourceState.Maintenance); s_fsm.addTransition(ResourceState.Maintenance, Event.DeleteHost, ResourceState.Disabled); + 
s_fsm.addTransition(ResourceState.Maintenance, Event.DeclareHostDegraded, ResourceState.Degraded); s_fsm.addTransition(ResourceState.ErrorInPrepareForMaintenance, Event.InternalCreated, ResourceState.ErrorInPrepareForMaintenance); s_fsm.addTransition(ResourceState.ErrorInPrepareForMaintenance, Event.Disable, ResourceState.Disabled); s_fsm.addTransition(ResourceState.ErrorInPrepareForMaintenance, Event.DeleteHost, ResourceState.Disabled); @@ -141,6 +147,8 @@ public static boolean canAttemptMaintenance(ResourceState state) { s_fsm.addTransition(ResourceState.ErrorInMaintenance, Event.AdminCancelMaintenance, ResourceState.Enabled); s_fsm.addTransition(ResourceState.Error, Event.InternalCreated, ResourceState.Error); s_fsm.addTransition(ResourceState.Disabled, Event.DeleteHost, ResourceState.Disabled); - + s_fsm.addTransition(ResourceState.Degraded, Event.DeleteHost, ResourceState.Disabled); + s_fsm.addTransition(ResourceState.Degraded, Event.EnableDegradedHost, ResourceState.Enabled); + s_fsm.addTransition(ResourceState.Degraded, Event.AdminAskMaintenance, ResourceState.Maintenance); } } diff --git a/api/src/main/java/com/cloud/storage/Snapshot.java b/api/src/main/java/com/cloud/storage/Snapshot.java index 2f3a59541d9a..9dc7d45b036e 100644 --- a/api/src/main/java/com/cloud/storage/Snapshot.java +++ b/api/src/main/java/com/cloud/storage/Snapshot.java @@ -26,7 +26,8 @@ public interface Snapshot extends ControlledEntity, Identity, InternalIdentity, StateObject { public enum Type { - MANUAL, RECURRING, TEMPLATE, HOURLY, DAILY, WEEKLY, MONTHLY; + MANUAL, RECURRING, TEMPLATE, HOURLY, DAILY, WEEKLY, MONTHLY, INTERNAL; + // New types should be defined after INTERNAL, and change the max value private int max = 8; public void setMax(int max) { @@ -71,6 +72,7 @@ enum LocationType { } public static final long MANUAL_POLICY_ID = 0L; + public static final long INTERNAL_POLICY_ID = 7L; @Override long getAccountId(); diff --git a/api/src/main/java/com/cloud/storage/Storage.java 
b/api/src/main/java/com/cloud/storage/Storage.java index 362cc2cac296..407935919ca2 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -76,6 +76,16 @@ public String getFileExtension() { } + public static enum Capability { + HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"); + + private final String capability; + + private Capability(String capability) { + this.capability = capability; + } + } + public static enum ProvisioningType { THIN("thin"), SPARSE("sparse"), diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java index 4b18739b55dc..bb086ad05cbf 100644 --- a/api/src/main/java/com/cloud/storage/StorageService.java +++ b/api/src/main/java/com/cloud/storage/StorageService.java @@ -26,8 +26,8 @@ import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; -import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import com.cloud.exception.DiscoveryException; import com.cloud.exception.InsufficientCapacityException; @@ -105,6 +105,8 @@ public interface StorageService { ImageStore updateImageStoreStatus(Long id, Boolean readonly); + void updateStorageCapabilities(Long poolId, boolean failOnChecks); + StoragePool syncStoragePool(SyncStoragePoolCmd cmd); } diff --git a/api/src/main/java/com/cloud/storage/VolumeApiService.java b/api/src/main/java/com/cloud/storage/VolumeApiService.java index c9a5139043f6..5eb93ca9556c 100644 --- a/api/src/main/java/com/cloud/storage/VolumeApiService.java +++ b/api/src/main/java/com/cloud/storage/VolumeApiService.java @@ -22,7 +22,6 @@ import java.util.Map; import 
com.cloud.exception.StorageUnavailableException; -import org.apache.cloudstack.api.command.user.vm.CloneVMCmd; import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; @@ -94,7 +93,7 @@ public interface VolumeApiService { Volume detachVolumeViaDestroyVM(long vmId, long volumeId); - Volume cloneDataVolume(CloneVMCmd cmd, long snapshotId, Volume volume) throws StorageUnavailableException; + Volume cloneDataVolume(long vmId, long snapshotId, Volume volume) throws StorageUnavailableException; Volume detachVolumeFromVM(DetachVolumeCmd cmd); @@ -105,7 +104,7 @@ Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account acc Volume updateVolume(long volumeId, String path, String state, Long storageId, Boolean displayVolume, String customId, long owner, String chainInfo); - Volume attachVolumeToVm(CloneVMCmd cmd, Long volumeId, Long deviceId); + Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId); /** * Extracts the volume to a particular location. 
diff --git a/api/src/main/java/com/cloud/template/TemplateApiService.java b/api/src/main/java/com/cloud/template/TemplateApiService.java index fd45499ef6b4..60b30745a8b0 100644 --- a/api/src/main/java/com/cloud/template/TemplateApiService.java +++ b/api/src/main/java/com/cloud/template/TemplateApiService.java @@ -20,7 +20,9 @@ import java.net.URISyntaxException; import java.util.List; +import com.cloud.storage.Snapshot; import com.cloud.storage.VolumeApiService; +import com.cloud.uservm.UserVm; import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd; import org.apache.cloudstack.api.BaseUpdateTemplateOrIsoPermissionsCmd; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; @@ -101,12 +103,14 @@ public interface TemplateApiService { boolean updateTemplateOrIsoPermissions(BaseUpdateTemplateOrIsoPermissionsCmd cmd); + Snapshot createSnapshotFromTemplateOwner(long vmId, UserVm curVm, Account templateOwner, VolumeApiService volumeService) throws ResourceAllocationException; + /** * create a template record for later usage of creating a real template by createPrivateTemplate * */ - VirtualMachineTemplate createPrivateTemplateRecord(CloneVMCmd cmd, Account templateOwner, VolumeApiService serviceObj) throws ResourceAllocationException; + VirtualMachineTemplate createPrivateTemplateRecord(CloneVMCmd cmd, Account templateOwner, VolumeApiService serviceObj, Snapshot snapshot) throws ResourceAllocationException; - VirtualMachineTemplate createPrivateTemplate(CloneVMCmd cmd) throws CloudRuntimeException; + VirtualMachineTemplate createPrivateTemplate(CloneVMCmd cmd, long snapshotId, long templateId) throws CloudRuntimeException; VirtualMachineTemplate createPrivateTemplateRecord(CreateTemplateCmd cmd, Account templateOwner) throws ResourceAllocationException; diff --git a/api/src/main/java/com/cloud/user/AccountService.java b/api/src/main/java/com/cloud/user/AccountService.java index 4e3733bb5a49..98b1618a8da8 100644 --- 
a/api/src/main/java/com/cloud/user/AccountService.java +++ b/api/src/main/java/com/cloud/user/AccountService.java @@ -121,4 +121,6 @@ UserAccount createUserAccount(String userName, String password, String firstName UserAccount getUserAccountById(Long userId); public Map getKeys(GetUserKeysCmd cmd); + + public Map getKeys(Long userId); } diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index da8c2373ca79..7bdbdfc802ef 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -98,8 +98,9 @@ public interface UserVmService { * */ Optional cloneVirtualMachine(CloneVMCmd cmd, VolumeApiService volumeService, SnapshotApiService snapshotService) throws ResourceUnavailableException, ConcurrentOperationException, InsufficientCapacityException, ResourceAllocationException; - void checkCloneCondition(CloneVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException, ResourceAllocationException; + void validateCloneCondition(CloneVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException, ResourceAllocationException; + void prepareCloneVirtualMachine(CloneVMCmd cmd) throws ResourceAllocationException, InsufficientCapacityException, ResourceUnavailableException; /** * Resets the password of a virtual machine. 
* @@ -444,7 +445,7 @@ UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffe UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException; - UserVm recordVirtualMachineToDB(CloneVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, + UserVm recordVirtualMachineToDB(CloneVMCmd cmd, long templateId) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException; UserVm getUserVm(long vmId); diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index b060b5a21762..48386a45bf35 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -29,6 +29,7 @@ public class ApiConstants { public static final String ANNOTATION = "annotation"; public static final String API_KEY = "apikey"; public static final String ASYNC_BACKUP = "asyncbackup"; + public static final String AUTO_SELECT = "autoselect"; public static final String USER_API_KEY = "userapikey"; public static final String APPLIED = "applied"; public static final String LIST_LB_VMIPS = "lbvmips"; @@ -156,6 +157,7 @@ public class ApiConstants { public static final String FIRSTNAME = "firstname"; public static final String FORCED = "forced"; public static final String FORCED_DESTROY_LOCAL_STORAGE = "forcedestroylocalstorage"; + public static final String FORCE_DELETE_HOST = "forcedeletehost"; public static final String FORMAT = "format"; public static final String FOR_VIRTUAL_NETWORK = "forvirtualnetwork"; public static final String FOR_SYSTEM_VMS = "forsystemvms"; @@ -357,6 +359,7 @@ public class ApiConstants { public static final String SWAP_OWNER 
= "swapowner"; public static final String SYSTEM_VM_TYPE = "systemvmtype"; public static final String TAGS = "tags"; + public static final String STORAGE_TAGS = "storagetags"; public static final String TARGET_IQN = "targetiqn"; public static final String TEMPLATE_FILTER = "templatefilter"; public static final String TEMPLATE_ID = "templateid"; diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseListCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseListCmd.java index 36fa36fcfc9a..bcebbb860033 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseListCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseListCmd.java @@ -94,7 +94,7 @@ public Long getPageSizeVal() { if (pageSizeInt != null) { defaultPageSize = pageSizeInt.longValue(); } - if (defaultPageSize.longValue() == s_pageSizeUnlimited) { + if (s_pageSizeUnlimited.equals(defaultPageSize)) { defaultPageSize = null; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelHostAsDegradedCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelHostAsDegradedCmd.java new file mode 100644 index 000000000000..98557dd710ab --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelHostAsDegradedCmd.java @@ -0,0 +1,113 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.host; + +import com.cloud.event.EventTypes; +import com.cloud.host.Host; +import com.cloud.utils.fsm.NoTransitionException; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.ApiArgValidator; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandJobType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.context.CallContext; + +@APICommand(name = "cancelHostAsDegraded", + description = "Cancel host status from 'Degraded'. 
Host will transition back to status 'Enabled'.", + since = "4.16.0.0", + responseObject = HostResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class CancelHostAsDegradedCmd extends BaseAsyncCmd { + + private static final String COMMAND_RESPONSE_NAME = "cancelhostasdegradedresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = BaseCmd.CommandType.UUID, entityType = HostResponse.class, description = "host ID", required = true, validations = {ApiArgValidator.PositiveNumber}) + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return COMMAND_RESPONSE_NAME; + } + + public static String getResultObjectName() { + return "host"; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + @Override + public String getEventType() { + return EventTypes.EVENT_CANCEL_HOST_DEGRADED; + } + + @Override + public String getEventDescription() { + return "cancelling Degraded status of host: " + getId(); + } + + @Override + public ApiCommandJobType getInstanceType() { + return ApiCommandJobType.Host; + } + + @Override + public Long getInstanceId() { + return getId(); + } + + @Override + public void execute() { + Host host; + try { + host = _resourceService.cancelHostAsDegraded(this); + } catch (NoTransitionException exception) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to 
cancel Degraded status of host due to: " + exception.getMessage()); + } + + HostResponse response = _responseGenerator.createHostResponse(host); + response.setResponseName(COMMAND_RESPONSE_NAME); + this.setResponseObject(response); + } + +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeclareHostAsDegradedCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeclareHostAsDegradedCmd.java new file mode 100644 index 000000000000..bdf440fc054f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeclareHostAsDegradedCmd.java @@ -0,0 +1,113 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.host; + +import com.cloud.event.EventTypes; +import com.cloud.host.Host; +import com.cloud.utils.fsm.NoTransitionException; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.ApiArgValidator; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandJobType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.context.CallContext; + +@APICommand(name = "declareHostAsDegraded", + description = "Declare host as 'Degraded'. Host must be in 'Disconnected' or 'Alert' state. The ADMIN must be sure that there are no VMs running on the respective host otherwise this command might corrupt VMs that were running on the 'Degraded' host.", + since = "4.16.0.0", + responseObject = HostResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class DeclareHostAsDegradedCmd extends BaseAsyncCmd { + + private static final String COMMAND_RESPONSE_NAME = "declarehostasdegradedresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = BaseCmd.CommandType.UUID, entityType = HostResponse.class, description = "host ID", required = true, validations = {ApiArgValidator.PositiveNumber}) + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + 
///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return COMMAND_RESPONSE_NAME; + } + + public static String getResultObjectName() { + return "host"; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + @Override + public String getEventType() { + return EventTypes.EVENT_DECLARE_HOST_DEGRADED; + } + + @Override + public String getEventDescription() { + return "declaring host: " + getId() + " as Degraded"; + } + + @Override + public ApiCommandJobType getInstanceType() { + return ApiCommandJobType.Host; + } + + @Override + public Long getInstanceId() { + return getId(); + } + + @Override + public void execute() { + Host host; + try { + host = _resourceService.declareHostAsDegraded(this); + } catch (NoTransitionException exception) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to declare host as Degraded due to: " + exception.getMessage()); + } + + HostResponse response = _responseGenerator.createHostResponse(host); + response.setResponseName(COMMAND_RESPONSE_NAME); + this.setResponseObject(response); + } + +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java index 43a0666e934a..4212a0059e20 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java @@ -19,12 +19,14 @@ import java.util.ArrayList; import java.util.List; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import 
org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.BaseCmd.CommandType; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.log4j.Logger; @@ -71,6 +73,20 @@ public class UpdateServiceOfferingCmd extends BaseCmd { since = "4.13") private String zoneIds; + @Parameter(name = ApiConstants.STORAGE_TAGS, + type = CommandType.STRING, + description = "comma-separated list of tags for the service offering, tags should match with existing storage pool tags", + authorized = {RoleType.Admin}, + since = "4.16") + private String storageTags; + + @Parameter(name = ApiConstants.HOST_TAGS, + type = CommandType.STRING, + description = "the host tag for this service offering.", + authorized = {RoleType.Admin}, + since = "4.16") + private String hostTags; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -151,6 +167,14 @@ public List getZoneIds() { return validZoneIds; } + public String getStorageTags() { + return storageTags; + } + + public String getHostTags() { + return hostTags; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java index ed123dbb6c37..2450ac7cd6ba 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java @@ -99,7 +99,10 @@ public Long getId() { return id; } - ///////////////////////////////////////////////////// + public void setId(Long id) { + this.id = id; 
+ } +///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStorageCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStorageCapabilitiesCmd.java new file mode 100644 index 000000000000..b6fb03dd7985 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStorageCapabilitiesCmd.java @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.storage; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import java.util.Locale; + +@APICommand(name = UpdateStorageCapabilitiesCmd.APINAME, description = "Syncs capabilities of storage pools", + responseObject = StoragePoolResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.16.0") +public class UpdateStorageCapabilitiesCmd extends BaseCmd { + public static final String APINAME = "updateStorageCapabilities"; + private static final Logger LOG = Logger.getLogger(UpdateStorageCapabilitiesCmd.class.getName()); + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, required = true, description = "Storage pool id") + private Long poolId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getPoolId() { + return poolId; + } + + public void setPoolId(Long poolId) { + this.poolId = poolId; + } + + ///////////////////////////////////////////////////// + 
/////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + _storageService.updateStorageCapabilities(poolId, true); + ListStoragePoolsCmd listStoragePoolCmd = new ListStoragePoolsCmd(); + listStoragePoolCmd.setId(poolId); + ListResponse listResponse = _queryService.searchForStoragePools(listStoragePoolCmd); + listResponse.setResponseName(getCommandName()); + this.setResponseObject(listResponse); + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase(Locale.ROOT) + "response" ; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java index 50129a580b31..decc722e86f1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.SystemVmResponse; import org.apache.cloudstack.context.CallContext; +import org.apache.commons.lang.BooleanUtils; import org.apache.log4j.Logger; import com.cloud.event.EventTypes; @@ -75,6 +76,12 @@ public class MigrateSystemVMCmd extends BaseAsyncCmd { description = "Destination storage pool ID to migrate VM volumes to. 
Required for migrating the root disk volume") private Long storageId; + @Parameter(name = ApiConstants.AUTO_SELECT, + since = "4.16.0", + type = CommandType.BOOLEAN, + description = "Automatically select a destination host which do not require storage migration, if hostId and storageId are not specified. false by default") + private Boolean autoSelect; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -91,6 +98,10 @@ public Long getStorageId() { return storageId; } + public Boolean isAutoSelect() { + return BooleanUtils.isNotFalse(autoSelect); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -122,27 +133,14 @@ public String getEventDescription() { @Override public void execute() { - if (getHostId() == null && getStorageId() == null) { - throw new InvalidParameterValueException("Either hostId or storageId must be specified"); - } - if (getHostId() != null && getStorageId() != null) { throw new InvalidParameterValueException("Only one of hostId and storageId can be specified"); } + try { //FIXME : Should not be calling UserVmService to migrate all types of VMs - need a generic VM layer VirtualMachine migratedVm = null; - if (getHostId() != null) { - Host destinationHost = _resourceService.getHost(getHostId()); - if (destinationHost == null) { - throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id=" + getHostId()); - } - if (destinationHost.getType() != Host.Type.Routing) { - throw new InvalidParameterValueException("The specified host(" + destinationHost.getName() + ") is not suitable to migrate the VM, please specify another one"); - } - CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to host Id: " + getHostId()); - migratedVm = 
_userVmService.migrateVirtualMachineWithVolume(getVirtualMachineId(), destinationHost, new HashMap()); - } else if (getStorageId() != null) { + if (getStorageId() != null) { // OfflineMigration performed when this parameter is specified StoragePool destStoragePool = _storageService.getStoragePool(getStorageId()); if (destStoragePool == null) { @@ -150,6 +148,25 @@ public void execute() { } CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to storage pool Id: " + getStorageId()); migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool); + } else { + Host destinationHost = null; + if (getHostId() != null) { + destinationHost =_resourceService.getHost(getHostId()); + if (destinationHost == null) { + throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id=" + getHostId()); + } + if (destinationHost.getType() != Host.Type.Routing) { + throw new InvalidParameterValueException("The specified host(" + destinationHost.getName() + ") is not suitable to migrate the VM, please specify another one"); + } + } else if (! 
isAutoSelect()) { + throw new InvalidParameterValueException("Please specify a host or storage as destination, or pass 'autoselect=true' to automatically select a destination host which do not require storage migration"); + } + CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to host Id: " + getHostId()); + if (destinationHost == null) { + migratedVm = _userVmService.migrateVirtualMachine(getVirtualMachineId(), null); + } else { + migratedVm = _userVmService.migrateVirtualMachineWithVolume(getVirtualMachineId(), destinationHost, new HashMap()); + } } if (migratedVm != null) { // return the generic system VM instance response diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java index 9f73ae586a08..2c68d86f4450 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; +import org.apache.commons.lang.BooleanUtils; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -60,7 +61,7 @@ public class MigrateVMCmd extends BaseAsyncCmd { type = CommandType.UUID, entityType = HostResponse.class, required = false, - description = "Destination Host ID to migrate VM to. Required for live migrating a VM from host to host") + description = "Destination Host ID to migrate VM to.") private Long hostId; @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, @@ -77,6 +78,12 @@ public class MigrateVMCmd extends BaseAsyncCmd { description = "Destination storage pool ID to migrate VM volumes to. 
Required for migrating the root disk volume") private Long storageId; + @Parameter(name = ApiConstants.AUTO_SELECT, + since = "4.16.0", + type = CommandType.BOOLEAN, + description = "Automatically select a destination host which do not require storage migration, if hostId and storageId are not specified. false by default") + private Boolean autoSelect; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -93,6 +100,10 @@ public Long getStoragePoolId() { return storageId; } + public Boolean isAutoSelect() { + return BooleanUtils.isNotFalse(autoSelect); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -132,10 +143,6 @@ public String getEventDescription() { @Override public void execute() { - if (getHostId() == null && getStoragePoolId() == null) { - throw new InvalidParameterValueException("Either hostId or storageId must be specified"); - } - if (getHostId() != null && getStoragePoolId() != null) { throw new InvalidParameterValueException("Only one of hostId and storageId can be specified"); } @@ -146,17 +153,6 @@ public void execute() { } Host destinationHost = null; - if (getHostId() != null) { - destinationHost = _resourceService.getHost(getHostId()); - if (destinationHost == null) { - throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id=" + getHostId()); - } - if (destinationHost.getType() != Host.Type.Routing) { - throw new InvalidParameterValueException("The specified host(" + destinationHost.getName() + ") is not suitable to migrate the VM, please specify another one"); - } - CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to host Id: " + getHostId()); - } - // OfflineMigration performed when this parameter is specified StoragePool destStoragePool = null; if 
(getStoragePoolId() != null) { @@ -165,13 +161,24 @@ public void execute() { throw new InvalidParameterValueException("Unable to find the storage pool to migrate the VM"); } CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to storage pool Id: " + getStoragePoolId()); + } else if (getHostId() != null) { + destinationHost = _resourceService.getHost(getHostId()); + if (destinationHost == null) { + throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id=" + getHostId()); + } + if (destinationHost.getType() != Host.Type.Routing) { + throw new InvalidParameterValueException("The specified host(" + destinationHost.getName() + ") is not suitable to migrate the VM, please specify another one"); + } + CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to host Id: " + getHostId()); + } else if (! isAutoSelect()) { + throw new InvalidParameterValueException("Please specify a host or storage as destination, or pass 'autoselect=true' to automatically select a destination host which do not require storage migration"); } try { VirtualMachine migratedVm = null; - if (getHostId() != null) { + if (getStoragePoolId() == null) { migratedVm = _userVmService.migrateVirtualMachine(getVirtualMachineId(), destinationHost); - } else if (getStoragePoolId() != null) { + } else { migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool); } if (migratedVm != null) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java index e0310a184122..ea5657cf9657 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java @@ -73,7 +73,7 @@ public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd 
implements Firewal @Parameter(name = ApiConstants.END_PORT, type = CommandType.INTEGER, description = "the ending port of firewall rule") private Integer publicEndPort; - @Parameter(name = ApiConstants.CIDR_LIST, type = CommandType.LIST, collectionType = CommandType.STRING, description = "the CIDR list to forward traffic from. Multiple entries must be separated by a single comma character (,). This parameter is deprecated. Do not use.") + @Parameter(name = ApiConstants.CIDR_LIST, type = CommandType.LIST, collectionType = CommandType.STRING, description = "the CIDR list to forward traffic from. Multiple entries must be separated by a single comma character (,).") private List cidrlist; @Parameter(name = ApiConstants.ICMP_TYPE, type = CommandType.INTEGER, description = "type of the ICMP message being sent") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java index 283da3cd8e41..6c1e133d57c6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java @@ -107,7 +107,7 @@ public class CreateLoadBalancerRuleCmd extends BaseAsyncCreateCmd /*implements L @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "the domain ID associated with the load balancer") private Long domainId; - @Parameter(name = ApiConstants.CIDR_LIST, type = CommandType.LIST, collectionType = CommandType.STRING, description = "the CIDR list to forward traffic from. Multiple entries must be separated by a single comma character (,).") + @Parameter(name = ApiConstants.CIDR_LIST, type = CommandType.LIST, collectionType = CommandType.STRING, description = "the CIDR list to forward traffic from. 
Multiple entries must be separated by a single comma character (,). This parameter is deprecated. Do not use.") private List cidrlist; @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class, description = "The guest network this " diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java index 7b66bd9b5051..1e65a413fd1c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java @@ -74,7 +74,7 @@ public class CreateIpForwardingRuleCmd extends BaseAsyncCreateCmd implements Sta description = "if true, firewall rule for source/end public port is automatically created; if false - firewall rule has to be created explicitly. Has value true by default") private Boolean openFirewall; - @Parameter(name = ApiConstants.CIDR_LIST, type = CommandType.LIST, collectionType = CommandType.STRING, description = "the CIDR list to forward traffic from. Multiple entries must be separated by a single comma character (,).") + @Parameter(name = ApiConstants.CIDR_LIST, type = CommandType.LIST, collectionType = CommandType.STRING, description = "the CIDR list to forward traffic from. Multiple entries must be separated by a single comma character (,). This parameter is deprecated. 
Do not use.") private List cidrlist; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CloneVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CloneVMCmd.java index 136f5b9ce087..33314ac7ac89 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CloneVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CloneVMCmd.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.api.command.user.vm; import com.cloud.event.EventTypes; @@ -9,6 +25,7 @@ import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.uservm.UserVm; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -16,24 +33,24 @@ import org.apache.cloudstack.api.ApiCommandJobType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; -import org.apache.cloudstack.api.BaseAsyncCreateCustomIdCmd; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.user.UserCmd; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.UserVmResponse; -//import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.context.CallContext; import org.apache.log4j.Logger; import java.util.Optional; -@APICommand(name = "cloneVirtualMachine", responseObject = UserVmResponse.class, description = "clone a virtual VM in full clone mode", - responseView = ResponseObject.ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true, entityType = {VirtualMachine.class}) -public class CloneVMCmd extends BaseAsyncCreateCustomIdCmd implements UserCmd { +@APICommand(name = "cloneVirtualMachine", responseObject = UserVmResponse.class, description = "clone a virtual VM", + responseView = ResponseObject.ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true, entityType = {VirtualMachine.class}, since="4.16.0") +public class CloneVMCmd extends BaseAsyncCreateCmd implements UserCmd { public static final Logger s_logger = Logger.getLogger(CloneVMCmd.class.getName()); 
private static final String s_name = "clonevirtualmachineresponse"; + private static final String CLONE_IDENTIFIER = "Clone"; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -41,7 +58,10 @@ public class CloneVMCmd extends BaseAsyncCreateCustomIdCmd implements UserCmd { @ACL(accessType = AccessType.OperateEntry) @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, type = CommandType.UUID, entityType=UserVmResponse.class, required = true, description = "The ID of the virtual machine") - private Long id; + private Long virtualmachineid; + + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "name of the cloned virtual machine") + private String name; //Owner information @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the virtual machine. Must be used with domainId.") @@ -50,10 +70,6 @@ public class CloneVMCmd extends BaseAsyncCreateCustomIdCmd implements UserCmd { @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "an optional domainId for the virtual machine. 
If the account parameter is used, domainId must also be used.") private Long domainId; - private Long temporaryTemlateId; - - private Long temporarySnapShotId; - public String getAccountName() { return accountName; } @@ -62,8 +78,16 @@ public Long getDomainId() { return domainId; } + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + public Long getId() { - return this.id; + return this.virtualmachineid; } @Override public String getEventType() { @@ -80,52 +104,22 @@ public String getEventDescription() { return "Cloning user VM: " + this._uuidMgr.getUuid(VirtualMachine.class, getId()); } - public Long getTemporaryTemlateId() { - return this.temporaryTemlateId; - } - - public void setTemporarySnapShotId(Long snapshotId) { - this.temporarySnapShotId = snapshotId; - } - - public Long getTemporarySnapShotId() { - return temporarySnapShotId; - } - - - public void setTemporaryTemlateId(long tempId) { - this.temporaryTemlateId = tempId; - } - @Override public void create() throws ResourceAllocationException { try { - _userVmService.checkCloneCondition(this); - VirtualMachineTemplate template = _templateService.createPrivateTemplateRecord(this, _accountService.getAccount(getEntityOwnerId()), _volumeService); - if (template == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "failed to create a template to db"); - } - s_logger.info("The template id recorded is: " + template.getId()); - setTemporaryTemlateId(template.getId()); - _templateService.createPrivateTemplate(this); - _snapshotService.deleteSnapshot(getTemporarySnapShotId()); - UserVm vmRecord = _userVmService.recordVirtualMachineToDB(this); - if (vmRecord == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "unable to record a new VM to db!"); - } - setEntityId(vmRecord.getId()); - setEntityUuid(vmRecord.getUuid()); - } catch (ResourceUnavailableException | InsufficientCapacityException e) { + 
_userVmService.validateCloneCondition(this); + _userVmService.prepareCloneVirtualMachine(this); + } + catch (ResourceUnavailableException | InsufficientCapacityException e) { s_logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, e.getMessage()); } catch (InvalidParameterValueException e) { s_logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); - } finally { - if (getTemporaryTemlateId() != null) { - // TODO: delete template in the service - s_logger.warn("clearing the temporary template: " + getTemporaryTemlateId()); - } + } catch (ServerApiException e) { + throw new ServerApiException(e.getErrorCode(), e.getDescription()); + } catch (CloudRuntimeException e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } @@ -134,11 +128,14 @@ public boolean isPublic() { } public String getVMName() { - return getTargetVM().getInstanceName(); + if (getName() == null) { + return getTargetVM().getInstanceName() + "-" + CLONE_IDENTIFIER; + } + return getName(); } public String getTemplateName() { - return getVMName() + "-QA"; + return (getVMName() + "-" + _uuidMgr.generateUuid(VirtualMachineTemplate.class, null)).substring(0, 32); } @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index 733bddb45149..12a4a02c0264 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -150,7 +150,9 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG + "The parameter is required and respected only when hypervisor info is not set on the ISO/Template passed to the call") private String hypervisor; - @Parameter(name = ApiConstants.USER_DATA, type = CommandType.STRING, description = "an optional 
binary data that can be sent to the virtual machine upon a successful deployment. This binary data must be base64 encoded before adding it to the request. Using HTTP GET (via querystring), you can send up to 2KB of data after base64 encoding. Using HTTP POST(via POST body), you can send up to 32K of data after base64 encoding.", length = 32768) + @Parameter(name = ApiConstants.USER_DATA, type = CommandType.STRING, + description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. This binary data must be base64 encoded before adding it to the request. Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. Using HTTP POST(via POST body), you can send up to 1MB of data after base64 encoding.", + length = 1048576) private String userData; @Parameter(name = ApiConstants.SSH_KEYPAIR, type = CommandType.STRING, description = "name of the ssh key pair used to login to the virtual machine") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java index 38d1a5d5dd4b..38289537bc64 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java @@ -80,8 +80,13 @@ public class UpdateVMCmd extends BaseCustomIdCmd implements SecurityGroupAction, @Parameter(name = ApiConstants.USER_DATA, type = CommandType.STRING, - description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. This binary data must be base64 encoded before adding it to the request. Using HTTP GET (via querystring), you can send up to 2KB of data after base64 encoding. 
Using HTTP POST(via POST body), you can send up to 32K of data after base64 encoding.", - length = 32768) + description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. " + + "This binary data must be base64 encoded before adding it to the request. " + + "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " + + "Using HTTP POST(via POST body), you can send up to 1MB of data after base64 encoding. " + + "You also need to change vm.userdata.max.length value", + length = 1048576, + since = "4.16.0") private String userData; @Parameter(name = ApiConstants.DISPLAY_VM, type = CommandType.BOOLEAN, description = "an optional field, whether to the display the vm to the end user or not.", authorized = {RoleType.Admin}) private Boolean displayVm; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java index 140bdad717f7..33cbb46485c0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.context.CallContext; import com.cloud.event.EventTypes; +import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.VpnUser; import com.cloud.user.Account; @@ -110,19 +111,31 @@ public String getEventType() { @Override public void execute() { Account owner = _accountService.getAccount(getEntityOwnerId()); - boolean result = _ravService.removeVpnUser(owner.getId(), userName, CallContext.current().getCallingAccount()); + long ownerId = owner.getId(); + boolean result = _ravService.removeVpnUser(ownerId, userName, CallContext.current().getCallingAccount()); if (!result) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to remove vpn user"); + String errorMessage = String.format("Failed to remove VPN 
user=[%s]. VPN owner id=[%s].", userName, ownerId); + s_logger.error(errorMessage); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMessage); } + boolean appliedVpnUsers = false; + try { - if (!_ravService.applyVpnUsers(owner.getId(), userName)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to apply vpn user removal"); - } - }catch (Exception ex) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to remove vpn user due to resource unavailable"); + appliedVpnUsers = _ravService.applyVpnUsers(ownerId, userName); + } catch (ResourceUnavailableException ex) { + String errorMessage = String.format("Failed to refresh VPN user=[%s] due to resource unavailable. VPN owner id=[%s].", userName, ownerId); + s_logger.error(errorMessage, ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMessage, ex); + } + + if (!appliedVpnUsers) { + String errorMessage = String.format("Failed to refresh VPN user=[%s]. VPN owner id=[%s].", userName, ownerId); + s_logger.debug(errorMessage); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMessage); } + SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java index a1337144a6ca..9464317f779b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java @@ -16,6 +16,7 @@ // under the License. 
package org.apache.cloudstack.api.response; +import java.util.Date; import java.util.List; import java.util.Map; import java.util.Set; @@ -246,6 +247,10 @@ public class NetworkResponse extends BaseResponse implements ControlledEntityRes @Param(description = "If the network has redundant routers enabled", since = "4.11.1") private Boolean redundantRouter; + @SerializedName(ApiConstants.CREATED) + @Param(description = "the date this network was created", since = "4.16.0") + private Date created; + public Boolean getDisplayNetwork() { return displayNetwork; } @@ -482,4 +487,12 @@ public String getVpcName() { public void setVpcName(String vpcName) { this.vpcName = vpcName; } + + public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ProjectResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ProjectResponse.java index 47ebab8756fc..7f14fce30078 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ProjectResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ProjectResponse.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.api.response; import java.util.ArrayList; +import java.util.Date; import java.util.List; import java.util.Map; @@ -207,6 +208,10 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou @Param(description = "the total number of virtual machines running for this project", since = "4.2.0") private Integer vmRunning; + @SerializedName(ApiConstants.CREATED) + @Param(description = "the date this project was created", since = "4.16.0") + private Date created; + public void setId(String id) { this.id = id; } @@ -421,4 +426,12 @@ public void setSecondaryStorageAvailable(String secondaryStorageAvailable) { public void setOwners(List> owners) { this.owners = owners; } + + public Date getCreated() { + return created; + } + + public void setCreated(Date 
created) { + this.created = created; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java index 59bd72ce88e6..42f89b1b8a92 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java @@ -76,7 +76,7 @@ public class ServiceOfferingResponse extends BaseResponse { @Param(description = "true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk") private Boolean isVolatile; - @SerializedName("tags") + @SerializedName("storagetags") @Param(description = "the tags for the service offering") private String tags; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/UnmanagedInstanceResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/UnmanagedInstanceResponse.java index 5167f1788ce1..e866b19e1c13 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/UnmanagedInstanceResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/UnmanagedInstanceResponse.java @@ -43,6 +43,10 @@ public class UnmanagedInstanceResponse extends BaseResponse { @Param(description = "the ID of the host to which virtual machine belongs") private String hostId; + @SerializedName(ApiConstants.HOST_NAME) + @Param(description = "the name of the host to which virtual machine belongs") + private String hostName; + @SerializedName(ApiConstants.POWER_STATE) @Param(description = "the power state of the virtual machine") private String powerState; @@ -108,6 +112,14 @@ public void setHostId(String hostId) { this.hostId = hostId; } + public String getHostName() { + return hostName; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + public String getPowerState() { return powerState; } diff --git 
a/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java index cb0d6fa0cc72..d2b9e2373cad 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java @@ -16,10 +16,12 @@ // under the License. package org.apache.cloudstack.api.response; +import java.util.Comparator; import java.util.Date; import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; +import java.util.TreeSet; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -80,6 +82,10 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co @Param(description = "the date when this virtual machine was created") private Date created; + @SerializedName("lastupdated") + @Param(description="the date when this virtual machine was updated last time", since="4.16.0") + private Date lastUpdated; + @SerializedName(ApiConstants.STATE) @Param(description = "the state of the virtual machine") private String state; @@ -316,7 +322,7 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co public UserVmResponse() { securityGroupList = new LinkedHashSet(); - nics = new LinkedHashSet(); + nics = new TreeSet<>(Comparator.comparingInt(x -> Integer.parseInt(x.getDeviceId()))); tags = new LinkedHashSet(); tagIds = new LinkedHashSet(); affinityGroupList = new LinkedHashSet(); @@ -909,4 +915,12 @@ public String getOsDisplayName() { public String getPoolType() { return poolType; } public void setPoolType(String poolType) { this.poolType = poolType; } + + public void setLastUpdated(Date lastUpdated) { + this.lastUpdated = lastUpdated; + } + + public Date getLastUpdated() { + return lastUpdated; + } } diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java 
b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java index 860ecdc3ba14..95675f2bf349 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java @@ -29,6 +29,8 @@ public enum PowerState { private String name; + private String internalCSName; + private PowerState powerState; private Integer cpuCores; @@ -55,6 +57,14 @@ public void setName(String name) { this.name = name; } + public String getInternalCSName() { + return internalCSName; + } + + public void setInternalCSName(String internalCSName) { + this.internalCSName = internalCSName; + } + public PowerState getPowerState() { return powerState; } diff --git a/core/src/main/java/com/cloud/agent/api/GetStoragePoolCapabilitiesAnswer.java b/core/src/main/java/com/cloud/agent/api/GetStoragePoolCapabilitiesAnswer.java new file mode 100644 index 000000000000..65db9b6a7bae --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/GetStoragePoolCapabilitiesAnswer.java @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api; + +import java.util.HashMap; +import java.util.Map; + +public class GetStoragePoolCapabilitiesAnswer extends Answer { + + private Map poolDetails; + + public GetStoragePoolCapabilitiesAnswer(GetStoragePoolCapabilitiesCommand cmd) { + super(cmd); + poolDetails = new HashMap<>(); + } + + public void setResult(boolean result){ + this.result = result; + } + + public void setDetails(String details){ + this.details = details; + } + + public Map getPoolDetails() { + return poolDetails; + } + + public void setPoolDetails(Map poolDetails) { + this.poolDetails = poolDetails; + } + +} diff --git a/core/src/main/java/com/cloud/agent/api/GetStoragePoolCapabilitiesCommand.java b/core/src/main/java/com/cloud/agent/api/GetStoragePoolCapabilitiesCommand.java new file mode 100644 index 000000000000..b7dd731df3eb --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/GetStoragePoolCapabilitiesCommand.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api; + +import com.cloud.agent.api.to.StorageFilerTO; + +public class GetStoragePoolCapabilitiesCommand extends Command { + + public StorageFilerTO getPool() { + return pool; + } + + public void setPool(StorageFilerTO pool) { + this.pool = pool; + } + + private StorageFilerTO pool; + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index 0bb5b7977703..9df2a6c955bc 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -52,6 +52,7 @@ public class PrimaryDataStoreTO implements DataStoreTO { private Map details; private static final String pathSeparator = "/"; private Boolean fullCloneFlag; + private Boolean diskProvisioningStrictnessFlag; private final boolean isManaged; public PrimaryDataStoreTO(PrimaryDataStore dataStore) { @@ -163,4 +164,12 @@ public void setFullCloneFlag(Boolean fullCloneFlag) { public boolean isManaged() { return isManaged; } + + public Boolean getDiskProvisioningStrictnessFlag() { + return diskProvisioningStrictnessFlag; + } + + public void setDiskProvisioningStrictnessFlag(Boolean diskProvisioningStrictnessFlag) { + this.diskProvisioningStrictnessFlag = diskProvisioningStrictnessFlag; + } } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java index c8fcf5f3df5c..fde71fe0d4c1 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java @@ -60,5 +60,5 @@ public interface StoragePoolAllocator 
extends Adapter { static int RETURN_UPTO_ALL = -1; - List reorderPools(List pools, VirtualMachineProfile vmProfile, DeploymentPlan plan); + List reorderPools(List pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh); } diff --git a/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java b/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java index be29a5dc57b0..6a90e74987f1 100644 --- a/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java +++ b/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java @@ -35,6 +35,12 @@ public interface AlertManager extends Manager, AlertService { "Alert", "0.75", "Percentage (as a value between 0 and 1) of allocated storage utilization above which alerts will be sent about low storage available.", true, ConfigKey.Scope.Cluster, null); + public static final ConfigKey AlertSmtpUseStartTLS = new ConfigKey("Advanced", Boolean.class, "alert.smtp.useStartTLS", "false", + "If set to true and if we enable security via alert.smtp.useAuth, this will enable StartTLS to secure the connection.", true); + + public static final ConfigKey AlertSmtpEnabledSecurityProtocols = new ConfigKey("Advanced", String.class, "alert.smtp.enabledSecurityProtocols", "", + "White-space separated security protocols; ex: \"TLSv1 TLSv1.1\". 
Supported protocols: SSLv2Hello, SSLv3, TLSv1, TLSv1.1 and TLSv1.2", true); + void clearAlert(AlertType alertType, long dataCenterId, long podId); void recalculateCapacity(); diff --git a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java index e347c61cf018..44de85edeef5 100644 --- a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java +++ b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java @@ -89,6 +89,9 @@ public interface CapacityManager { ConfigKey.Scope.ImageStore, null); + static final ConfigKey SecondaryStorageCapacityThreshold = new ConfigKey("Advanced", Float.class, "secondary.storage.capacity.threshold", "0.90", + "Percentage (as a value between 0 and 1) of secondary storage capacity threshold.", true); + public boolean releaseVmCapacity(VirtualMachine vm, boolean moveFromReserved, boolean moveToReservered, Long hostId); void allocateVmCapacity(VirtualMachine vm, boolean fromLastHost); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index b9a45f0dcdc3..7976b3131672 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -145,6 +145,12 @@ public interface StorageManager extends StorageService { ConfigKey MaxDataMigrationWaitTime = new ConfigKey("Advanced", Integer.class, "max.data.migration.wait.time", "15", "Maximum wait time for a data migration task before spawning a new SSVM", false, ConfigKey.Scope.Global); + ConfigKey DiskProvisioningStrictness = new ConfigKey("Storage", Boolean.class, "disk.provisioning.type.strictness", "false", + "If set to true, the disk is created only when there is a suitable storage pool that supports the disk provisioning type specified by the 
service/disk offering. " + + "If set to false, the disk is created with a disk provisioning type supported by the pool. Default value is false, and this is currently supported for VMware only.", + true, ConfigKey.Scope.Zone); + ConfigKey PreferredStoragePool = new ConfigKey(String.class, "preferred.storage.pool", "Advanced", "", + "The UUID of preferred storage pool for allocation.", true, ConfigKey.Scope.Account, null); /** * Returns a comma separated list of tags for the specified storage pool diff --git a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java index 7207a96a9efa..a3d9c1b79f1a 100644 --- a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java +++ b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java @@ -18,17 +18,17 @@ import java.util.List; -import com.cloud.agent.api.to.DatadiskTO; -import com.cloud.deploy.DeployDestination; -import com.cloud.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.dc.DataCenterVO; +import com.cloud.deploy.DeployDestination; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; +import com.cloud.storage.DataStoreRole; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; @@ -48,7 +48,8 @@ public interface TemplateManager { static final ConfigKey TemplatePreloaderPoolSize = new ConfigKey("Advanced", Integer.class, TemplatePreloaderPoolSizeCK, "8", "Size of the TemplateManager threadpool", false, ConfigKey.Scope.Global); - + static final String 
VMWARE_TOOLS_ISO = "vmware-tools.iso"; + static final String XS_TOOLS_ISO = "xs-tools.iso"; /** * Prepares a template for vm creation for a certain storage pool. diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index ab53fd464880..2046adafb1d4 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.engine.orchestration; +import com.cloud.capacity.CapacityManager; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; @@ -101,7 +102,6 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra true, ConfigKey.Scope.Global); Integer numConcurrentCopyTasksPerSSVM = 2; - private double imageStoreCapacityThreshold = 0.90; @Override public String getConfigComponentName() { @@ -404,7 +404,7 @@ private boolean shouldMigrate(DataObject chosenFile, Long srcDatastoreId, Long d private boolean storageCapacityBelowThreshold(Map> storageCapacities, Long destStoreId) { Pair imageStoreCapacity = storageCapacities.get(destStoreId); long usedCapacity = imageStoreCapacity.second() - imageStoreCapacity.first(); - if (imageStoreCapacity != null && (usedCapacity / (imageStoreCapacity.second() * 1.0)) <= imageStoreCapacityThreshold) { + if (imageStoreCapacity != null && (usedCapacity / (imageStoreCapacity.second() * 1.0)) <= CapacityManager.SecondaryStorageCapacityThreshold.value()) { s_logger.debug("image store: " + destStoreId + " has sufficient capacity to proceed with migration of file"); return true; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java 
b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 1c4aead5941f..e1c6bf9753d8 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -26,6 +26,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -300,6 +301,34 @@ public VolumeVO allocateDuplicateVolumeVO(Volume oldVol, Long templateId) { return _volsDao.persist(newVol); } + private Optional getMatchingStoragePool(String preferredPoolId, List storagePools) { + if (preferredPoolId == null) { + return Optional.empty(); + } + return storagePools.stream() + .filter(pool -> pool.getUuid().equalsIgnoreCase(preferredPoolId)) + .findFirst(); + } + + private Optional getPreferredStoragePool(List poolList, VirtualMachine vm) { + String accountStoragePoolUuid = null; + if (vm != null) { + accountStoragePoolUuid = StorageManager.PreferredStoragePool.valueIn(vm.getAccountId()); + } + Optional storagePool = getMatchingStoragePool(accountStoragePoolUuid, poolList); + + if (storagePool.isPresent()) { + s_logger.debug("A storage pool is specified for this account, so we will use this storage pool for allocation: " + + storagePool.get().getUuid()); + } else { + String globalStoragePoolUuid = StorageManager.PreferredStoragePool.value(); + storagePool = getMatchingStoragePool(globalStoragePoolUuid, poolList); + storagePool.ifPresent(pool -> s_logger.debug("A storage pool is specified in global setting, so we will use this storage pool for allocation: " + + pool.getUuid())); + } + return storagePool; + } + @Override public StoragePool findStoragePool(DiskProfile dskCh, DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, final Set avoid) { Long 
podId = null; @@ -321,9 +350,13 @@ public StoragePool findStoragePool(DiskProfile dskCh, DataCenter dc, Pod pod, Lo } DataCenterDeployment plan = new DataCenterDeployment(dc.getId(), podId, clusterId, hostId, null, null); - final List poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, 1); + final List poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, StoragePoolAllocator.RETURN_UPTO_ALL); if (poolList != null && !poolList.isEmpty()) { - return (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary); + // Check if the preferred storage pool can be used. If yes, use it. + Optional storagePool = getPreferredStoragePool(poolList, vm); + + return (storagePool.isPresent()) ? (StoragePool) this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary) : + (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary); } } return null; @@ -349,7 +382,7 @@ public StoragePool findChildDataStoreInDataStoreCluster(DataCenter dc, Pod pod, VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); for (StoragePoolAllocator allocator : _storagePoolAllocators) { DataCenterDeployment plan = new DataCenterDeployment(dc.getId(), podId, clusterId, hostId, null, null); - final List poolList = allocator.reorderPools(suitablePools, profile, plan); + final List poolList = allocator.reorderPools(suitablePools, profile, plan, null); if (poolList != null && !poolList.isEmpty()) { return (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary); diff --git a/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java b/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java index 38121a72d028..d0f3192c60c2 100644 --- a/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java @@ -476,5 +476,4 @@ public PartitionType partitionType() { public String toString() { return 
String.format("Zone {\"id\": \"%s\", \"name\": \"%s\", \"uuid\": \"%s\"}", id, name, uuid); } - } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index af06fcc31f79..12207da5e69b 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -138,4 +138,6 @@ public interface HostDao extends GenericDao, StateDao listByClusterAndHypervisorType(long clusterId, HypervisorType hypervisorType); HostVO findByName(String name); + + List listHostsWithActiveVMs(long offeringId); } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index e58df1defd01..b19f717f27ea 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -79,6 +79,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao private static final Logger state_logger = Logger.getLogger(ResourceState.class); private static final String LIST_CLUSTERID_FOR_HOST_TAG = "select distinct cluster_id from host join host_tags on host.id = host_tags.host_id and host_tags.tag = ?"; + private static final String GET_HOSTS_OF_ACTIVE_VMS = "select h.id " + + "from vm_instance vm " + + "join host h on (vm.host_id=h.id) " + + "where vm.service_offering_id= ? 
and vm.state not in (\"Destroyed\", \"Expunging\", \"Error\") group by h.id"; protected SearchBuilder TypePodDcStatusSearch; @@ -1197,6 +1201,27 @@ public List listClustersByHostTag(String hostTagOnOffering) { } } + @Override + public List listHostsWithActiveVMs(long offeringId) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList<>(); + StringBuilder sql = new StringBuilder(GET_HOSTS_OF_ACTIVE_VMS); + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1, offeringId); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + result.add(toEntityBean(rs, false)); + } + return result; + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + } + @Override public List listAllHostsByType(Host.Type type) { SearchCriteria sc = TypeSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java index 3061d305d05b..83034b3fdbe0 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java @@ -241,6 +241,11 @@ public void setRollingRestart(boolean rollingRestart) { this.rollingRestart = rollingRestart; } + @Override + public Date getCreated() { + return created; + } + @Override public Class getEntityType() { return Vpc.class; diff --git a/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java index 7868be2ad69a..67b341a93618 100644 --- a/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java @@ -129,7 +129,6 @@ public void 
deleteByOfferingId(long networkOfferingId) { @Override public List listProvidersForServiceForNetworkOffering(long networkOfferingId, Service service) { SearchCriteria sc = ProvidersSearch.create(); - ; sc.setParameters("networkOfferingId", networkOfferingId); sc.setParameters("service", service.getName()); @@ -140,21 +139,16 @@ public List listProvidersForServiceForNetworkOffering(long networkOfferi @Override public boolean isProviderForNetworkOffering(long networkOfferingId, Provider provider) { SearchCriteria sc = AllFieldsSearch.create(); - ; sc.setParameters("networkOfferingId", networkOfferingId); sc.setParameters("provider", provider.getName()); - if (findOneBy(sc) != null) { - return true; - } - return false; + return findOneBy(sc) != null; } @Override public List listServicesForNetworkOffering(long networkOfferingId) { SearchCriteria sc = ServicesSearch.create(); - ; sc.setParameters("networkOfferingId", networkOfferingId); return customSearch(sc, null); } diff --git a/engine/schema/src/main/java/com/cloud/vm/ConsoleProxyVO.java b/engine/schema/src/main/java/com/cloud/vm/ConsoleProxyVO.java index fdce20c9fa89..729499284a99 100644 --- a/engine/schema/src/main/java/com/cloud/vm/ConsoleProxyVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/ConsoleProxyVO.java @@ -149,4 +149,8 @@ public int getPort() { return port; } + @Override + public String toString() { + return String.format("Console %s", super.toString()); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java b/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java index e3950340469a..f02380a0471d 100644 --- a/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java @@ -38,7 +38,7 @@ public class UserVmVO extends VMInstanceVO implements UserVm { @Column(name = "iso_id", nullable = true, length = 17) private Long isoId = null; - @Column(name = "user_data", updatable = true, nullable = true, length = 32768) + @Column(name = 
"user_data", updatable = true, nullable = true, length = 1048576) @Basic(fetch = FetchType.LAZY) private String userData; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java index f56fdb85e58f..80fddc9bd946 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java @@ -132,4 +132,5 @@ public interface PrimaryDataStoreDao extends GenericDao { List findPoolsByStorageType(String storageType); + List listStoragePoolsWithActiveVolumesByOfferingId(long offeringid); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index 6b07ef95ca3f..2ab95bb8cfc1 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -74,6 +74,11 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase protected final String TagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? 
and ("; protected final String TagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?"; + private static final String GET_STORAGE_POOLS_OF_VOLUMES_WITHOUT_OR_NOT_HAVING_TAGS = "select s.id " + + "from volumes vol " + + "join storage_pool s on vol.pool_id=s.id " + + "where vol.disk_offering_id= ? and vol.state not in (\"Destroy\", \"Error\", \"Expunging\") group by s.id"; + /** * Used in method findPoolsByDetailsOrTagsInternal */ @@ -589,4 +594,25 @@ public List findPoolsByStorageType(String storageType) { sc.setParameters("poolType", storageType); return listBy(sc); } + + @Override + public List listStoragePoolsWithActiveVolumesByOfferingId(long offeringId) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList<>(); + StringBuilder sql = new StringBuilder(GET_STORAGE_POOLS_OF_VOLUMES_WITHOUT_OR_NOT_HAVING_TAGS); + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1, offeringId); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + result.add(toEntityBean(rs, false)); + } + return result; + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolDetailVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolDetailVO.java index 8a746ff9d1d1..8c1428bbd157 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolDetailVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolDetailVO.java @@ -70,6 +70,10 @@ public String getName() { return name; } + public void setValue(String value) { + this.value = value; + } + @Override public String getValue() { return value; diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreDao.java index 0e4e380c7538..b3b2ece90437 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreDao.java @@ -55,4 +55,6 @@ public interface VolumeDataStoreDao extends GenericDao, boolean updateVolumeId(long srcVolId, long destVolId); List listVolumeDownloadUrlsByZoneId(long zoneId); + + List listByVolume(long volumeId, long storeId); } diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index ad13853fc1f4..cc43cc61a030 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -392,6 +392,191 @@ from `cloud`.`resource_count` secondary_storage_count ON domain.id = secondary_storage_count.domain_id and secondary_storage_count.type = 'secondary_storage'; + +DROP VIEW IF EXISTS `cloud`.`user_vm_view`; +CREATE + VIEW `user_vm_view` AS +SELECT + `vm_instance`.`id` AS `id`, + `vm_instance`.`name` AS `name`, + `user_vm`.`display_name` AS `display_name`, + `user_vm`.`user_data` AS `user_data`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name`, + `instance_group`.`id` AS `instance_group_id`, + `instance_group`.`uuid` AS `instance_group_uuid`, + `instance_group`.`name` AS `instance_group_name`, + 
`vm_instance`.`uuid` AS `uuid`, + `vm_instance`.`user_id` AS `user_id`, + `vm_instance`.`last_host_id` AS `last_host_id`, + `vm_instance`.`vm_type` AS `type`, + `vm_instance`.`limit_cpu_use` AS `limit_cpu_use`, + `vm_instance`.`created` AS `created`, + `vm_instance`.`state` AS `state`, + `vm_instance`.`update_time` AS `update_time`, + `vm_instance`.`removed` AS `removed`, + `vm_instance`.`ha_enabled` AS `ha_enabled`, + `vm_instance`.`hypervisor_type` AS `hypervisor_type`, + `vm_instance`.`instance_name` AS `instance_name`, + `vm_instance`.`guest_os_id` AS `guest_os_id`, + `vm_instance`.`display_vm` AS `display_vm`, + `guest_os`.`uuid` AS `guest_os_uuid`, + `vm_instance`.`pod_id` AS `pod_id`, + `host_pod_ref`.`uuid` AS `pod_uuid`, + `vm_instance`.`private_ip_address` AS `private_ip_address`, + `vm_instance`.`private_mac_address` AS `private_mac_address`, + `vm_instance`.`vm_type` AS `vm_type`, + `data_center`.`id` AS `data_center_id`, + `data_center`.`uuid` AS `data_center_uuid`, + `data_center`.`name` AS `data_center_name`, + `data_center`.`is_security_group_enabled` AS `security_group_enabled`, + `data_center`.`networktype` AS `data_center_type`, + `host`.`id` AS `host_id`, + `host`.`uuid` AS `host_uuid`, + `host`.`name` AS `host_name`, + `vm_template`.`id` AS `template_id`, + `vm_template`.`uuid` AS `template_uuid`, + `vm_template`.`name` AS `template_name`, + `vm_template`.`display_text` AS `template_display_text`, + `vm_template`.`enable_password` AS `password_enabled`, + `iso`.`id` AS `iso_id`, + `iso`.`uuid` AS `iso_uuid`, + `iso`.`name` AS `iso_name`, + `iso`.`display_text` AS `iso_display_text`, + `service_offering`.`id` AS `service_offering_id`, + `svc_disk_offering`.`uuid` AS `service_offering_uuid`, + `disk_offering`.`uuid` AS `disk_offering_uuid`, + `disk_offering`.`id` AS `disk_offering_id`, + (CASE + WHEN ISNULL(`service_offering`.`cpu`) THEN `custom_cpu`.`value` + ELSE `service_offering`.`cpu` + END) AS `cpu`, + (CASE + WHEN 
ISNULL(`service_offering`.`speed`) THEN `custom_speed`.`value` + ELSE `service_offering`.`speed` + END) AS `speed`, + (CASE + WHEN ISNULL(`service_offering`.`ram_size`) THEN `custom_ram_size`.`value` + ELSE `service_offering`.`ram_size` + END) AS `ram_size`, + `backup_offering`.`uuid` AS `backup_offering_uuid`, + `backup_offering`.`id` AS `backup_offering_id`, + `svc_disk_offering`.`name` AS `service_offering_name`, + `disk_offering`.`name` AS `disk_offering_name`, + `backup_offering`.`name` AS `backup_offering_name`, + `storage_pool`.`id` AS `pool_id`, + `storage_pool`.`uuid` AS `pool_uuid`, + `storage_pool`.`pool_type` AS `pool_type`, + `volumes`.`id` AS `volume_id`, + `volumes`.`uuid` AS `volume_uuid`, + `volumes`.`device_id` AS `volume_device_id`, + `volumes`.`volume_type` AS `volume_type`, + `security_group`.`id` AS `security_group_id`, + `security_group`.`uuid` AS `security_group_uuid`, + `security_group`.`name` AS `security_group_name`, + `security_group`.`description` AS `security_group_description`, + `nics`.`id` AS `nic_id`, + `nics`.`uuid` AS `nic_uuid`, + `nics`.`device_id` AS `nic_device_id`, + `nics`.`network_id` AS `network_id`, + `nics`.`ip4_address` AS `ip_address`, + `nics`.`ip6_address` AS `ip6_address`, + `nics`.`ip6_gateway` AS `ip6_gateway`, + `nics`.`ip6_cidr` AS `ip6_cidr`, + `nics`.`default_nic` AS `is_default_nic`, + `nics`.`gateway` AS `gateway`, + `nics`.`netmask` AS `netmask`, + `nics`.`mac_address` AS `mac_address`, + `nics`.`broadcast_uri` AS `broadcast_uri`, + `nics`.`isolation_uri` AS `isolation_uri`, + `vpc`.`id` AS `vpc_id`, + `vpc`.`uuid` AS `vpc_uuid`, + `networks`.`uuid` AS `network_uuid`, + `networks`.`name` AS `network_name`, + `networks`.`traffic_type` AS `traffic_type`, + `networks`.`guest_type` AS `guest_type`, + `user_ip_address`.`id` AS `public_ip_id`, + `user_ip_address`.`uuid` AS `public_ip_uuid`, + `user_ip_address`.`public_ip_address` AS `public_ip_address`, + `ssh_keypairs`.`keypair_name` AS `keypair_name`, + 
`resource_tags`.`id` AS `tag_id`, + `resource_tags`.`uuid` AS `tag_uuid`, + `resource_tags`.`key` AS `tag_key`, + `resource_tags`.`value` AS `tag_value`, + `resource_tags`.`domain_id` AS `tag_domain_id`, + `domain`.`uuid` AS `tag_domain_uuid`, + `domain`.`name` AS `tag_domain_name`, + `resource_tags`.`account_id` AS `tag_account_id`, + `account`.`account_name` AS `tag_account_name`, + `resource_tags`.`resource_id` AS `tag_resource_id`, + `resource_tags`.`resource_uuid` AS `tag_resource_uuid`, + `resource_tags`.`resource_type` AS `tag_resource_type`, + `resource_tags`.`customer` AS `tag_customer`, + `async_job`.`id` AS `job_id`, + `async_job`.`uuid` AS `job_uuid`, + `async_job`.`job_status` AS `job_status`, + `async_job`.`account_id` AS `job_account_id`, + `affinity_group`.`id` AS `affinity_group_id`, + `affinity_group`.`uuid` AS `affinity_group_uuid`, + `affinity_group`.`name` AS `affinity_group_name`, + `affinity_group`.`description` AS `affinity_group_description`, + `vm_instance`.`dynamically_scalable` AS `dynamically_scalable` +FROM + (((((((((((((((((((((((((((((((((`user_vm` + JOIN `vm_instance` ON (((`vm_instance`.`id` = `user_vm`.`id`) + AND ISNULL(`vm_instance`.`removed`)))) + JOIN `account` ON ((`vm_instance`.`account_id` = `account`.`id`))) + JOIN `domain` ON ((`vm_instance`.`domain_id` = `domain`.`id`))) + LEFT JOIN `guest_os` ON ((`vm_instance`.`guest_os_id` = `guest_os`.`id`))) + LEFT JOIN `host_pod_ref` ON ((`vm_instance`.`pod_id` = `host_pod_ref`.`id`))) + LEFT JOIN `projects` ON ((`projects`.`project_account_id` = `account`.`id`))) + LEFT JOIN `instance_group_vm_map` ON ((`vm_instance`.`id` = `instance_group_vm_map`.`instance_id`))) + LEFT JOIN `instance_group` ON ((`instance_group_vm_map`.`group_id` = `instance_group`.`id`))) + LEFT JOIN `data_center` ON ((`vm_instance`.`data_center_id` = `data_center`.`id`))) + LEFT JOIN `host` ON ((`vm_instance`.`host_id` = `host`.`id`))) + LEFT JOIN `vm_template` ON ((`vm_instance`.`vm_template_id` = 
`vm_template`.`id`))) + LEFT JOIN `vm_template` `iso` ON ((`iso`.`id` = `user_vm`.`iso_id`))) + LEFT JOIN `service_offering` ON ((`vm_instance`.`service_offering_id` = `service_offering`.`id`))) + LEFT JOIN `disk_offering` `svc_disk_offering` ON ((`vm_instance`.`service_offering_id` = `svc_disk_offering`.`id`))) + LEFT JOIN `disk_offering` ON ((`vm_instance`.`disk_offering_id` = `disk_offering`.`id`))) + LEFT JOIN `backup_offering` ON ((`vm_instance`.`backup_offering_id` = `backup_offering`.`id`))) + LEFT JOIN `volumes` ON ((`vm_instance`.`id` = `volumes`.`instance_id`))) + LEFT JOIN `storage_pool` ON ((`volumes`.`pool_id` = `storage_pool`.`id`))) + LEFT JOIN `security_group_vm_map` ON ((`vm_instance`.`id` = `security_group_vm_map`.`instance_id`))) + LEFT JOIN `security_group` ON ((`security_group_vm_map`.`security_group_id` = `security_group`.`id`))) + LEFT JOIN `nics` ON (((`vm_instance`.`id` = `nics`.`instance_id`) + AND ISNULL(`nics`.`removed`)))) + LEFT JOIN `networks` ON ((`nics`.`network_id` = `networks`.`id`))) + LEFT JOIN `vpc` ON (((`networks`.`vpc_id` = `vpc`.`id`) + AND ISNULL(`vpc`.`removed`)))) + LEFT JOIN `user_ip_address` ON ((`user_ip_address`.`vm_id` = `vm_instance`.`id`))) + LEFT JOIN `user_vm_details` `ssh_details` ON (((`ssh_details`.`vm_id` = `vm_instance`.`id`) + AND (`ssh_details`.`name` = 'SSH.PublicKey')))) + LEFT JOIN `ssh_keypairs` ON (((`ssh_keypairs`.`public_key` = `ssh_details`.`value`) + AND (`ssh_keypairs`.`account_id` = `account`.`id`)))) + LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_instance`.`id`) + AND (`resource_tags`.`resource_type` = 'UserVm')))) + LEFT JOIN `async_job` ON (((`async_job`.`instance_id` = `vm_instance`.`id`) + AND (`async_job`.`instance_type` = 'VirtualMachine') + AND (`async_job`.`job_status` = 0)))) + LEFT JOIN `affinity_group_vm_map` ON ((`vm_instance`.`id` = `affinity_group_vm_map`.`instance_id`))) + LEFT JOIN `affinity_group` ON ((`affinity_group_vm_map`.`affinity_group_id` = 
`affinity_group`.`id`))) + LEFT JOIN `user_vm_details` `custom_cpu` ON (((`custom_cpu`.`vm_id` = `vm_instance`.`id`) + AND (`custom_cpu`.`name` = 'CpuNumber')))) + LEFT JOIN `user_vm_details` `custom_speed` ON (((`custom_speed`.`vm_id` = `vm_instance`.`id`) + AND (`custom_speed`.`name` = 'CpuSpeed')))) + LEFT JOIN `user_vm_details` `custom_ram_size` ON (((`custom_ram_size`.`vm_id` = `vm_instance`.`id`) + AND (`custom_ram_size`.`name` = 'memory')))); + -- Update name for global configuration user.vm.readonly.ui.details Update configuration set name='user.vm.readonly.details' where name='user.vm.readonly.ui.details'; @@ -511,3 +696,64 @@ CREATE VIEW `cloud`.`host_view` AS `cloud`.`user` ON `user`.`uuid` = `last_annotation_view`.`user_uuid` GROUP BY `host`.`id`; + +-- PR#4699 Drop the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` if it already exist. +DROP PROCEDURE IF EXISTS `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING`; + +-- PR#4699 Create the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` to add guest_os and guest_os_hypervisor mapping. 
+CREATE PROCEDURE `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` ( + IN guest_os_category_id bigint(20) unsigned, + IN guest_os_display_name VARCHAR(255), + IN guest_os_hypervisor_hypervisor_type VARCHAR(32), + IN guest_os_hypervisor_hypervisor_version VARCHAR(32), + IN guest_os_hypervisor_guest_os_name VARCHAR(255) +) +BEGIN + INSERT INTO cloud.guest_os (uuid, category_id, display_name, created) + SELECT UUID(), guest_os_category_id, guest_os_display_name, now() + FROM DUAL + WHERE not exists( SELECT 1 + FROM cloud.guest_os + WHERE cloud.guest_os.category_id = guest_os_category_id + AND cloud.guest_os.display_name = guest_os_display_name) + +; INSERT INTO cloud.guest_os_hypervisor (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created) + SELECT UUID(), guest_os_hypervisor_hypervisor_type, guest_os_hypervisor_hypervisor_version, guest_os_hypervisor_guest_os_name, guest_os.id, now() + FROM cloud.guest_os + WHERE guest_os.category_id = guest_os_category_id + AND guest_os.display_name = guest_os_display_name + AND NOT EXISTS (SELECT 1 + FROM cloud.guest_os_hypervisor as hypervisor + WHERE hypervisor_type = guest_os_hypervisor_hypervisor_type + AND hypervisor_version = guest_os_hypervisor_hypervisor_version + AND hypervisor.guest_os_id = guest_os.id + AND hypervisor.guest_os_name = guest_os_hypervisor_guest_os_name) +;END; + +-- PR#4699 Call procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` to add new data to guest_os and guest_os_hypervisor. 
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 20.04 LTS', 'KVM', 'default', 'Ubuntu 20.04 LTS'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 21.04', 'KVM', 'default', 'Ubuntu 21.04'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'pfSense 2.4', 'KVM', 'default', 'pfSense 2.4'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'OpenBSD 6.7', 'KVM', 'default', 'OpenBSD 6.7'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'OpenBSD 6.8', 'KVM', 'default', 'OpenBSD 6.8'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'AlmaLinux 8.3', 'KVM', 'default', 'AlmaLinux 8.3'); + +-- Alter value column of *_details table to prevent NULL values +UPDATE cloud.account_details SET value='' WHERE value IS NULL; +ALTER TABLE cloud.account_details MODIFY value varchar(255) NOT NULL; +UPDATE cloud.cluster_details SET value='' WHERE value IS NULL; +ALTER TABLE cloud.cluster_details MODIFY value varchar(255) NOT NULL; +UPDATE cloud.data_center_details SET value='' WHERE value IS NULL; +ALTER TABLE cloud.data_center_details MODIFY value varchar(1024) NOT NULL; +UPDATE cloud.domain_details SET value='' WHERE value IS NULL; +ALTER TABLE cloud.domain_details MODIFY value varchar(255) NOT NULL; +UPDATE cloud.image_store_details SET value='' WHERE value IS NULL; +ALTER TABLE cloud.image_store_details MODIFY value varchar(255) NOT NULL; +UPDATE cloud.storage_pool_details SET value='' WHERE value IS NULL; +ALTER TABLE cloud.storage_pool_details MODIFY value varchar(255) NOT NULL; +UPDATE cloud.template_deploy_as_is_details SET value='' WHERE value IS NULL; +ALTER TABLE cloud.template_deploy_as_is_details MODIFY value text NOT NULL; +UPDATE cloud.user_vm_deploy_as_is_details SET value='' WHERE value IS NULL; +ALTER TABLE cloud.user_vm_deploy_as_is_details MODIFY value text NOT NULL; +UPDATE cloud.user_vm_details SET value='' WHERE value IS NULL; +ALTER TABLE cloud.user_vm_details MODIFY value varchar(5120) NOT NULL; diff --git 
a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index c49ffba0b82b..51e0c97a21e5 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.motion; import java.util.HashMap; +import java.util.List; import java.util.Map; import javax.inject.Inject; @@ -44,6 +45,8 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.log4j.Logger; @@ -85,6 +88,11 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { DataStoreManager dataStoreMgr; @Inject StorageCacheManager cacheMgr; + @Inject + VolumeDataStoreDao volumeDataStoreDao; + + @Inject + StorageManager storageManager; @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { @@ -156,7 +164,7 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host destHo srcForCopy = cacheData = cacheMgr.createCacheObject(srcData, destScope); } - CopyCommand cmd = new CopyCommand(srcForCopy.getTO(), addFullCloneFlagOnVMwareDest(destData.getTO()), primaryStorageDownloadWait, + CopyCommand cmd = new CopyCommand(srcForCopy.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(destData.getTO()), primaryStorageDownloadWait, 
VirtualMachineManager.ExecuteInSequence.value()); EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcForCopy, destData); if (ep == null) { @@ -210,18 +218,20 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host destHo * @param dataTO Dest data store TO * @return dataTO including fullCloneFlag, if provided */ - protected DataTO addFullCloneFlagOnVMwareDest(DataTO dataTO) { + protected DataTO addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(DataTO dataTO) { if (dataTO != null && dataTO.getHypervisorType().equals(Hypervisor.HypervisorType.VMware)){ DataStoreTO dataStoreTO = dataTO.getDataStore(); if (dataStoreTO != null && dataStoreTO instanceof PrimaryDataStoreTO){ PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) dataStoreTO; - Boolean value = CapacityManager.VmwareCreateCloneFull.valueIn(primaryDataStoreTO.getId()); - primaryDataStoreTO.setFullCloneFlag(value); + primaryDataStoreTO.setFullCloneFlag(CapacityManager.VmwareCreateCloneFull.valueIn(primaryDataStoreTO.getId())); + StoragePool pool = storageManager.getStoragePool(primaryDataStoreTO.getId()); + primaryDataStoreTO.setDiskProvisioningStrictnessFlag(storageManager.DiskProvisioningStrictness.valueIn(pool.getDataCenterId())); } } return dataTO; } + protected Answer copyObject(DataObject srcData, DataObject destData) { return copyObject(srcData, destData, null); } @@ -278,7 +288,7 @@ protected Answer copyVolumeFromSnapshot(DataObject snapObj, DataObject volObj) { ep = selector.select(srcData, volObj); } - CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneFlagOnVMwareDest(volObj.getTO()), _createVolumeFromSnapshotWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(volObj.getTO()), _createVolumeFromSnapshotWait, VirtualMachineManager.ExecuteInSequence.value()); Answer answer = null; if (ep == 
null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; @@ -301,7 +311,7 @@ protected Answer copyVolumeFromSnapshot(DataObject snapObj, DataObject volObj) { } protected Answer cloneVolume(DataObject template, DataObject volume) { - CopyCommand cmd = new CopyCommand(template.getTO(), addFullCloneFlagOnVMwareDest(volume.getTO()), 0, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(template.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(volume.getTO()), 0, VirtualMachineManager.ExecuteInSequence.value()); try { EndPoint ep = selector.select(volume.getDataStore()); Answer answer = null; @@ -319,6 +329,15 @@ protected Answer cloneVolume(DataObject template, DataObject volume) { } } + private void deleteVolumeOnSecondaryStore(DataObject objectInStore) { + ImageStoreEntity store = (ImageStoreEntity) objectInStore.getDataStore(); + store.delete(objectInStore); + List volumesOnStore = volumeDataStoreDao.listByVolume(objectInStore.getId(), store.getId()); + for (VolumeDataStoreVO volume : volumesOnStore) { + volumeDataStoreDao.remove(volume.getId()); + } + } + protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) { String value = configDao.getValue(Config.CopyVolumeWait.key()); int _copyvolumewait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); @@ -354,10 +373,9 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) } DataObject objOnImageStore = imageStore.create(srcData); - objOnImageStore.processEvent(Event.CreateOnlyRequested); - Answer answer = null; try { + objOnImageStore.processEvent(Event.CreateOnlyRequested); answer = copyObject(srcData, objOnImageStore); if (answer == null || !answer.getResult()) { @@ -373,7 +391,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) objOnImageStore.processEvent(Event.CopyingRequested); - CopyCommand cmd = new 
CopyCommand(objOnImageStore.getTO(), addFullCloneFlagOnVMwareDest(destData.getTO()), _copyvolumewait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(objOnImageStore.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(destData.getTO()), _copyvolumewait, VirtualMachineManager.ExecuteInSequence.value()); EndPoint ep = selector.select(objOnImageStore, destData); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; @@ -396,11 +414,12 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) objOnImageStore.processEvent(Event.OperationFailed); imageStore.delete(objOnImageStore); } + s_logger.error("Failed to perform operation: "+ e.getLocalizedMessage()); throw e; } objOnImageStore.processEvent(Event.OperationSuccessed); - imageStore.delete(objOnImageStore); + deleteVolumeOnSecondaryStore(objOnImageStore); return answer; } else { DataObject cacheData = cacheMgr.createCacheObject(srcData, destScope); @@ -526,7 +545,7 @@ protected Answer createTemplateFromSnapshot(DataObject srcData, DataObject destD ep = selector.select(srcData, destData); } - CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneFlagOnVMwareDest(destData.getTO()), _createprivatetemplatefromsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(destData.getTO()), _createprivatetemplatefromsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; @@ -562,7 +581,7 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) { Scope selectedScope = pickCacheScopeForCopy(srcData, destData); cacheData = cacheMgr.getCacheObject(srcData, selectedScope); - CopyCommand cmd = new CopyCommand(srcData.getTO(), 
addFullCloneFlagOnVMwareDest(destData.getTO()), _backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(destData.getTO()), _backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); cmd.setCacheTO(cacheData.getTO()); cmd.setOptions(options); EndPoint ep = selector.select(srcData, destData); @@ -574,7 +593,7 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) { answer = ep.sendMessage(cmd); } } else { - addFullCloneFlagOnVMwareDest(destData.getTO()); + addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(destData.getTO()); CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); cmd.setOptions(options); EndPoint ep = selector.select(srcData, destData, StorageAction.BACKUPSNAPSHOT); diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java index dccb6b445e56..cd46fc511f1f 100755 --- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java +++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java @@ -27,6 +27,8 @@ import static org.mockito.Mockito.never; import static org.mockito.Mockito.any; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.junit.Before; @@ -57,6 +59,10 @@ public class AncientDataMotionStrategyTest { PrimaryDataStoreTO dataStoreTO; @Mock ConfigKey vmwareKey; + @Mock + StorageManager storageManager; + @Mock + StoragePool storagePool; private static final long 
POOL_ID = 1l; private static final Boolean FULL_CLONE_FLAG = true; @@ -72,6 +78,7 @@ public void setup() throws Exception { when(dataTO.getHypervisorType()).thenReturn(HypervisorType.VMware); when(dataTO.getDataStore()).thenReturn(dataStoreTO); when(dataStoreTO.getId()).thenReturn(POOL_ID); + when(storageManager.getStoragePool(POOL_ID)).thenReturn(storagePool); } private void replaceVmwareCreateCloneFullField() throws Exception { @@ -86,7 +93,7 @@ private void replaceVmwareCreateCloneFullField() throws Exception { @Test public void testAddFullCloneFlagOnVMwareDest(){ - strategy.addFullCloneFlagOnVMwareDest(dataTO); + strategy.addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(dataTO); verify(dataStoreTO).setFullCloneFlag(FULL_CLONE_FLAG); } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index b8788fbef988..a763a8bf0de1 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -218,8 +218,11 @@ public SnapshotResult takeSnapshot(SnapshotInfo snap) { try { result = future.get(); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_ON_PRIMARY, snap.getAccountId(), snap.getDataCenterId(), snap.getId(), - snap.getName(), null, null, snapshotOnPrimary.getSize(), snapshotOnPrimary.getSize(), snap.getClass().getName(), snap.getUuid()); + SnapshotVO snapVO = _snapshotDao.findById(snap.getId()); + if (snapVO == null || snapVO.getsnapshotType() != Snapshot.Type.INTERNAL.ordinal()) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_ON_PRIMARY, snap.getAccountId(), snap.getDataCenterId(), snap.getId(), + snap.getName(), null, null, snapshotOnPrimary.getSize(), snapshotOnPrimary.getSize(), snap.getClass().getName(), 
snap.getUuid()); + } return result; } catch (InterruptedException e) { s_logger.debug("Failed to create snapshot", e); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 10d39ee88fa9..af206a7378ec 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -26,12 +26,17 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.storage.StoragePoolStatus; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; + import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import com.cloud.capacity.Capacity; import com.cloud.capacity.dao.CapacityDao; @@ -39,12 +44,10 @@ import com.cloud.dc.dao.ClusterDao; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.exception.StorageUnavailableException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StorageUtil; import 
com.cloud.storage.Volume; import com.cloud.storage.dao.VolumeDao; @@ -68,6 +71,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement @Inject private ClusterDao clusterDao; @Inject private StorageManager storageMgr; @Inject private StorageUtil storageUtil; + @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -96,7 +100,7 @@ public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile @Override public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) { List pools = select(dskCh, vmProfile, plan, avoid, returnUpTo, bypassStorageTypeCheck); - return reorderPools(pools, vmProfile, plan); + return reorderPools(pools, vmProfile, plan, dskCh); } protected List reorderPoolsByCapacity(DeploymentPlan plan, @@ -163,7 +167,7 @@ protected List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, L } @Override - public List reorderPools(List pools, VirtualMachineProfile vmProfile, DeploymentPlan plan) { + public List reorderPools(List pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh) { if (pools == null) { return null; } @@ -180,9 +184,41 @@ public List reorderPools(List pools, VirtualMachinePro } else if(allocationAlgorithm.equals("firstfitleastconsumed")){ pools = reorderPoolsByCapacity(plan, pools); } + + if (vmProfile.getVirtualMachine() == null) { + s_logger.trace("The VM is null, skipping pools reordering by disk provisioning type."); + return pools; + } + + if (vmProfile.getHypervisorType() == HypervisorType.VMware && + !storageMgr.DiskProvisioningStrictness.valueIn(plan.getDataCenterId())) { + pools = reorderPoolsByDiskProvisioningType(pools, dskCh); + } + return pools; } + private List reorderPoolsByDiskProvisioningType(List pools, DiskProfile diskProfile) { + if (diskProfile != null && 
diskProfile.getProvisioningType() != null && !diskProfile.getProvisioningType().equals(Storage.ProvisioningType.THIN)) { + List reorderedPools = new ArrayList<>(); + int preferredIndex = 0; + for (StoragePool pool : pools) { + StoragePoolDetailVO hardwareAcceleration = storagePoolDetailsDao.findDetail(pool.getId(), Storage.Capability.HARDWARE_ACCELERATION.toString()); + if (pool.getPoolType() == Storage.StoragePoolType.NetworkFilesystem && + (hardwareAcceleration == null || !hardwareAcceleration.getValue().equals("true"))) { + // add to the bottom of the list + reorderedPools.add(pool); + } else { + // add to the top of the list + reorderedPools.add(preferredIndex++, pool); + } + } + return reorderedPools; + } else { + return pools; + } + } + protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) { if (s_logger.isDebugEnabled()) { @@ -211,6 +247,10 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, return false; } + if (!checkDiskProvisioningSupport(dskCh, pool)) { + return false; + } + if(!checkHypervisorCompatibility(dskCh.getHypervisorType(), dskCh.getType(), pool.getPoolType())){ return false; } @@ -253,6 +293,18 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) && storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool, plan.getClusterId()); } + private boolean checkDiskProvisioningSupport(DiskProfile dskCh, StoragePool pool) { + if (dskCh.getHypervisorType() != null && dskCh.getHypervisorType() == HypervisorType.VMware && pool.getPoolType() == Storage.StoragePoolType.NetworkFilesystem && + storageMgr.DiskProvisioningStrictness.valueIn(pool.getDataCenterId())) { + StoragePoolDetailVO hardwareAcceleration = storagePoolDetailsDao.findDetail(pool.getId(), Storage.Capability.HARDWARE_ACCELERATION.toString()); + if (dskCh.getProvisioningType() == null || 
!dskCh.getProvisioningType().equals(Storage.ProvisioningType.THIN) && + (hardwareAcceleration == null || hardwareAcceleration.getValue() == null || !hardwareAcceleration.getValue().equals("true"))) { + return false; + } + } + return true; + } + /* Check StoragePool and Volume type compatibility for the hypervisor */ diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java index 34854d5d5980..dca2e9a862e1 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java @@ -62,7 +62,6 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase uploadVolumeStateSearch; private static final String EXPIRE_DOWNLOAD_URLS_FOR_ZONE = "update volume_store_ref set download_url_created=? where download_url_created is not null and store_id in (select id from image_store where data_center_id=?)"; - @Inject DataStoreManager storeMgr; @Inject @@ -119,7 +118,6 @@ public boolean configure(String name, Map params) throws Configu uploadVolumeStateSearch.join("volumeOnlySearch", volumeOnlySearch, volumeOnlySearch.entity().getId(), uploadVolumeStateSearch.entity().getVolumeId(), JoinType.LEFT); uploadVolumeStateSearch.and("destroyed", uploadVolumeStateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ); uploadVolumeStateSearch.done(); - return true; } @@ -332,6 +330,15 @@ public List listVolumeDownloadUrlsByZoneId(long zoneId) { return listBy(sc); } + @Override + public List listByVolume(long volumeId, long storeId) { + SearchCriteria sc = storeVolumeSearch.create(); + sc.setParameters("store_id", storeId); + sc.setParameters("volume_id", volumeId); + sc.setParameters("destroyed", false); + return listBy(sc); + } + @Override public List listUploadedVolumesByStoreId(long id) { SearchCriteria 
sc = uploadVolumeSearch.create(); @@ -340,7 +347,6 @@ public List listUploadedVolumesByStoreId(long id) { return listIncludingRemovedBy(sc); } - @Override public void expireDnldUrlsForZone(Long dcId){ TransactionLegacy txn = TransactionLegacy.currentTxn(); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index eb2262f0298d..30cd7ac3f2f1 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -26,10 +26,11 @@ import com.cloud.exception.StorageConflictException; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.StorageService; import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.storage.StorageManager; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; @@ -59,6 +60,8 @@ public class DefaultHostListener implements HypervisorHostListener { StoragePoolDetailsDao storagePoolDetailsDao; @Inject StorageManager storageManager; + @Inject + StorageService storageService; @Override public boolean hostAdded(long hostId) { @@ -67,7 +70,7 @@ public boolean hostAdded(long hostId) { @Override public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { - StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); 
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); final Answer answer = agentMgr.easySend(hostId, cmd); @@ -84,7 +87,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + pool.getId() + "Host=" + hostId; - ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer)answer; + ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; if (mspAnswer.getLocalDatastoreName() != null && pool.isShared()) { String datastoreName = mspAnswer.getLocalDatastoreName(); List localStoragePools = this.primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName); @@ -103,6 +106,8 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep storageManager.syncDatastoreClusterStoragePool(poolId, ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), hostId); } + storageService.updateStorageCapabilities(poolId, false); + s_logger.info("Connection established between storage pool " + pool + " and host " + hostId); return true; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAChecker.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAChecker.java index 723335b97f7b..5ceaef2bb197 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAChecker.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAChecker.java @@ -16,9 +16,9 @@ // under the License. 
package com.cloud.hypervisor.kvm.resource; -import java.util.ArrayList; import java.util.List; import java.util.concurrent.Callable; +import java.util.stream.Collectors; import org.apache.log4j.Logger; @@ -27,13 +27,13 @@ public class KVMHAChecker extends KVMHABase implements Callable { private static final Logger s_logger = Logger.getLogger(KVMHAChecker.class); - private List _pools; - private String _hostIP; - private long _heartBeatCheckerTimeout = 360000; /* 6 minutes */ + private List nfsStoragePools; + private String hostIp; + private long heartBeatCheckerTimeout = 360000; // 6 minutes public KVMHAChecker(List pools, String host) { - this._pools = pools; - this._hostIP = host; + this.nfsStoragePools = pools; + this.hostIp = host; } /* @@ -42,35 +42,40 @@ public KVMHAChecker(List pools, String host) { */ @Override public Boolean checkingHeartBeat() { - List results = new ArrayList(); - for (NfsStoragePool pool : _pools) { - Script cmd = new Script(s_heartBeatPath, _heartBeatCheckerTimeout, s_logger); + boolean validResult = false; + + String hostAndPools = String.format("host IP [%s] in pools [%s]", hostIp, nfsStoragePools.stream().map(pool -> pool._poolIp).collect(Collectors.joining(", "))); + + s_logger.debug(String.format("Checking heart beat with KVMHAChecker for %s", hostAndPools)); + + for (NfsStoragePool pool : nfsStoragePools) { + Script cmd = new Script(s_heartBeatPath, heartBeatCheckerTimeout, s_logger); cmd.add("-i", pool._poolIp); cmd.add("-p", pool._poolMountSourcePath); cmd.add("-m", pool._mountDestPath); - cmd.add("-h", _hostIP); + cmd.add("-h", hostIp); cmd.add("-r"); cmd.add("-t", String.valueOf(_heartBeatUpdateFreq / 1000)); OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); String result = cmd.execute(parser); - s_logger.debug("KVMHAChecker pool: " + pool._poolIp); - s_logger.debug("KVMHAChecker result: " + result); - s_logger.debug("KVMHAChecker parser: " + parser.getLine()); - if (result == null && 
parser.getLine().contains("> DEAD <")) { - s_logger.debug("read heartbeat failed: "); - results.add(false); + String parsedLine = parser.getLine(); + + s_logger.debug(String.format("Checking heart beat with KVMHAChecker [{command=\"%s\", result: \"%s\", log: \"%s\", pool: \"%s\"}].", cmd.toString(), result, parsedLine, + pool._poolIp)); + + if (result == null && parsedLine.contains("DEAD")) { + s_logger.warn(String.format("Checking heart beat with KVMHAChecker command [%s] returned [%s]. [%s]. It may cause a shutdown of host IP [%s].", cmd.toString(), + result, parsedLine, hostIp)); } else { - results.add(true); + validResult = true; } } - for (Boolean r : results) { - if (r) { - return true; - } + if (!validResult) { + s_logger.warn(String.format("All checks with KVMHAChecker for %s considered it as dead. It may cause a shutdown of the host.", hostAndPools)); } - return false; + return validResult; } @Override diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java index 8a11b7fc962c..a939abe3bbe5 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java @@ -16,8 +16,9 @@ // under the License. 
package com.cloud.hypervisor.kvm.resource; +import com.cloud.agent.properties.AgentProperties; +import com.cloud.agent.properties.AgentPropertiesFileHandler; import com.cloud.utils.script.Script; -import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -32,17 +33,20 @@ import java.util.concurrent.ConcurrentHashMap; public class KVMHAMonitor extends KVMHABase implements Runnable { + private static final Logger s_logger = Logger.getLogger(KVMHAMonitor.class); - private final Map _storagePool = new ConcurrentHashMap(); + private final Map storagePool = new ConcurrentHashMap<>(); - private final String _hostIP; /* private ip address */ + private final String hostPrivateIp; public KVMHAMonitor(NfsStoragePool pool, String host, String scriptPath) { if (pool != null) { - _storagePool.put(pool._poolUUID, pool); + storagePool.put(pool._poolUUID, pool); } - _hostIP = host; + hostPrivateIp = host; configureHeartBeatPath(scriptPath); + + _heartBeatUpdateTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HEARTBEAT_UPDATE_TIMEOUT); } private static synchronized void configureHeartBeatPath(String scriptPath) { @@ -50,135 +54,129 @@ private static synchronized void configureHeartBeatPath(String scriptPath) { } public void addStoragePool(NfsStoragePool pool) { - synchronized (_storagePool) { - _storagePool.put(pool._poolUUID, pool); + synchronized (storagePool) { + storagePool.put(pool._poolUUID, pool); } } public void removeStoragePool(String uuid) { - synchronized (_storagePool) { - NfsStoragePool pool = _storagePool.get(uuid); + synchronized (storagePool) { + NfsStoragePool pool = storagePool.get(uuid); if (pool != null) { Script.runSimpleBashScript("umount " + pool._mountDestPath); - _storagePool.remove(uuid); + storagePool.remove(uuid); } } } public List getStoragePools() { - synchronized (_storagePool) { - return new 
ArrayList(_storagePool.values()); + synchronized (storagePool) { + return new ArrayList<>(storagePool.values()); } } public NfsStoragePool getStoragePool(String uuid) { - synchronized (_storagePool) { - return _storagePool.get(uuid); + synchronized (storagePool) { + return storagePool.get(uuid); } } - private class Monitor extends ManagedContextRunnable { + protected void runHeartBeat() { + synchronized (storagePool) { + Set removedPools = new HashSet<>(); + for (String uuid : storagePool.keySet()) { + NfsStoragePool primaryStoragePool = storagePool.get(uuid); + StoragePool storage; + try { + Connect conn = LibvirtConnection.getConnection(); + storage = conn.storagePoolLookupByUUIDString(uuid); + if (storage == null || storage.getInfo().state != StoragePoolState.VIR_STORAGE_POOL_RUNNING) { + if (storage == null) { + s_logger.debug(String.format("Libvirt storage pool [%s] not found, removing from HA list.", uuid)); + } else { + s_logger.debug(String.format("Libvirt storage pool [%s] found, but not running, removing from HA list.", uuid)); + } - @Override - protected void runInContext() { - synchronized (_storagePool) { - Set removedPools = new HashSet(); - for (String uuid : _storagePool.keySet()) { - NfsStoragePool primaryStoragePool = _storagePool.get(uuid); + removedPools.add(uuid); + continue; + } - // check for any that have been deregistered with libvirt and - // skip,remove them + s_logger.debug(String.format("Found NFS storage pool [%s] in libvirt, continuing.", uuid)); - StoragePool storage = null; - try { - Connect conn = LibvirtConnection.getConnection(); - storage = conn.storagePoolLookupByUUIDString(uuid); - if (storage == null) { - s_logger.debug("Libvirt storage pool " + uuid + " not found, removing from HA list"); - removedPools.add(uuid); - continue; + } catch (LibvirtException e) { + s_logger.debug(String.format("Failed to lookup libvirt storage pool [%s].", uuid), e); - } else if (storage.getInfo().state != 
StoragePoolState.VIR_STORAGE_POOL_RUNNING) { - s_logger.debug("Libvirt storage pool " + uuid + " found, but not running, removing from HA list"); + if (e.toString().contains("pool not found")) { + s_logger.debug(String.format("Removing pool [%s] from HA monitor since it was deleted.", uuid)); + removedPools.add(uuid); + continue; + } - removedPools.add(uuid); - continue; - } - s_logger.debug("Found NFS storage pool " + uuid + " in libvirt, continuing"); + } - } catch (LibvirtException e) { - s_logger.debug("Failed to lookup libvirt storage pool " + uuid + " due to: " + e); + String result = null; + for (int i = 1; i <= _heartBeatUpdateMaxTries; i++) { + Script cmd = createHeartBeatCommand(primaryStoragePool, hostPrivateIp, true); + result = cmd.execute(); - // we only want to remove pool if it's not found, not if libvirt - // connection fails - if (e.toString().contains("pool not found")) { - s_logger.debug("removing pool from HA monitor since it was deleted"); - removedPools.add(uuid); - continue; - } - } + s_logger.debug(String.format("The command (%s), to the pool [%s], has the result [%s].", cmd.toString(), uuid, result)); - String result = null; - // Try multiple times, but sleep in between tries to ensure it isn't a short lived transient error - for (int i = 1; i <= _heartBeatUpdateMaxTries; i++) { - Script cmd = new Script(s_heartBeatPath, _heartBeatUpdateTimeout, s_logger); - cmd.add("-i", primaryStoragePool._poolIp); - cmd.add("-p", primaryStoragePool._poolMountSourcePath); - cmd.add("-m", primaryStoragePool._mountDestPath); - cmd.add("-h", _hostIP); - result = cmd.execute(); - if (result != null) { - s_logger.warn("write heartbeat failed: " + result + ", try: " + i + " of " + _heartBeatUpdateMaxTries); - try { - Thread.sleep(_heartBeatUpdateRetrySleep); - } catch (InterruptedException e) { - s_logger.debug("[ignored] interupted between heartbeat retries."); - } - } else { - break; + if (result != null) { + s_logger.warn(String.format("Write heartbeat for 
pool [%s] failed: %s; try: %s of %s.", uuid, result, i, _heartBeatUpdateMaxTries)); + try { + Thread.sleep(_heartBeatUpdateRetrySleep); + } catch (InterruptedException e) { + s_logger.debug("[IGNORED] Interrupted between heartbeat retries.", e); } + } else { + break; } - if (result != null) { - // Stop cloudstack-agent if can't write to heartbeat file. - // This will raise an alert on the mgmt server - s_logger.warn("write heartbeat failed: " + result + "; stopping cloudstack-agent"); - Script cmd = new Script(s_heartBeatPath, _heartBeatUpdateTimeout, s_logger); - cmd.add("-i", primaryStoragePool._poolIp); - cmd.add("-p", primaryStoragePool._poolMountSourcePath); - cmd.add("-m", primaryStoragePool._mountDestPath); - cmd.add("-c"); - result = cmd.execute(); - } } - if (!removedPools.isEmpty()) { - for (String uuid : removedPools) { - removeStoragePool(uuid); - } + if (result != null) { + s_logger.warn(String.format("Write heartbeat for pool [%s] failed: %s; stopping cloudstack-agent.", uuid, result)); + Script cmd = createHeartBeatCommand(primaryStoragePool, null, false); + result = cmd.execute(); } } + if (!removedPools.isEmpty()) { + for (String uuid : removedPools) { + removeStoragePool(uuid); + } + } } + + } + + private Script createHeartBeatCommand(NfsStoragePool primaryStoragePool, String hostPrivateIp, boolean hostValidation) { + Script cmd = new Script(s_heartBeatPath, _heartBeatUpdateTimeout, s_logger); + cmd.add("-i", primaryStoragePool._poolIp); + cmd.add("-p", primaryStoragePool._poolMountSourcePath); + cmd.add("-m", primaryStoragePool._mountDestPath); + + if (hostValidation) { + cmd.add("-h", hostPrivateIp); + } + + if (!hostValidation) { + cmd.add("-c"); + } + + return cmd; } @Override public void run() { - // s_logger.addAppender(new org.apache.log4j.ConsoleAppender(new - // org.apache.log4j.PatternLayout(), "System.out")); while (true) { - Thread monitorThread = new Thread(new Monitor()); - monitorThread.start(); - try { - monitorThread.join(); - } 
catch (InterruptedException e) { - s_logger.debug("[ignored] interupted joining monitor."); - } + + runHeartBeat(); try { Thread.sleep(_heartBeatUpdateFreq); } catch (InterruptedException e) { - s_logger.debug("[ignored] interupted between heartbeats."); + s_logger.debug("[IGNORED] Interrupted between heartbeats.", e); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 338a41b6c256..df729cbe355b 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -109,6 +109,8 @@ import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.dao.impl.PropertiesStorage; +import com.cloud.agent.properties.AgentProperties; +import com.cloud.agent.properties.AgentPropertiesFileHandler; import com.cloud.agent.resource.virtualnetwork.VRScripts; import com.cloud.agent.resource.virtualnetwork.VirtualRouterDeployer; import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource; @@ -209,6 +211,71 @@ public class LibvirtComputingResource extends ServerResourceBase implements ServerResource, VirtualRouterDeployer { private static final Logger s_logger = Logger.getLogger(LibvirtComputingResource.class); + private static final String LEGACY = "legacy"; + private static final String SECURE = "secure"; + + /** + * Machine type. + */ + private static final String PC = "pc"; + private static final String VIRT = "virt"; + + /** + * Possible devices to add to VM. + */ + private static final String TABLET = "tablet"; + private static final String USB = "usb"; + private static final String MOUSE = "mouse"; + private static final String KEYBOARD = "keyboard"; + + /** + * Policies used by VM. 
+ */ + private static final String RESTART = "restart"; + private static final String DESTROY = "destroy"; + + private static final String KVMCLOCK = "kvmclock"; + private static final String HYPERVCLOCK = "hypervclock"; + private static final String WINDOWS = "Windows"; + private static final String Q35 = "q35"; + private static final String PTY = "pty"; + private static final String VNC = "vnc"; + + /** + * Acronym of System Management Mode. Perform low-level system management operations while an OS is running. + */ + private static final String SMM = "smm"; + /** + * Acronym of Advanced Configuration and Power Interface.
+ * Provides an open standard that operating systems can use to discover and configure + * computer hardware components, to perform power management. + */ + private static final String ACPI = "acpi"; + /** + * Acronym of Advanced Programmable Interrupt Controllers.
+ * With an I/O APIC, operating systems can use more than 16 interrupt requests (IRQs) + * and therefore avoid IRQ sharing for improved reliability. + */ + private static final String APIC = "apic"; + /** + * Acronym of Physical Address Extension. Feature implemented in modern x86 processors.
+ * PAE extends memory addressing capabilities, allowing more than 4 GB of random access memory (RAM) to be used. + */ + private static final String PAE = "pae"; + /** + * Libvirt supports guest CPU mode since 0.9.10. + */ + private static final int MIN_LIBVIRT_VERSION_FOR_GUEST_CPU_MODE = 9010; + /** + * The CPU tune element provides details of the CPU tunable parameters for the domain.
+ * It is supported since Libvirt 0.9.0 + */ + private static final int MIN_LIBVIRT_VERSION_FOR_GUEST_CPU_TUNE = 9000; + /** + * Constant that defines ARM64 (aarch64) guest architectures. + */ + private static final String AARCH64 = "aarch64"; + private String _modifyVlanPath; private String _versionstringpath; private String _patchScriptPath; @@ -346,6 +413,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv protected MemStat _memStat = new MemStat(_dom0MinMem, _dom0OvercommitMem); private final LibvirtUtilitiesHelper libvirtUtilitiesHelper = new LibvirtUtilitiesHelper(); + protected Boolean enableManuallySettingCpuTopologyOnKvmVm = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.ENABLE_MANUALLY_SETTING_CPU_TOPOLOGY_ON_KVM_VM); + protected long getHypervisorLibvirtVersion() { return _hypervisorLibvirtVersion; } @@ -2239,8 +2308,13 @@ protected void enlightenWindowsVm(VirtualMachineTO vmTO, FeaturesDef features) { } } + /** + * Creates VM KVM definitions from virtual machine transfer object specifications. 
+ */ public LibvirtVMDef createVMFromSpec(final VirtualMachineTO vmTO) { - final LibvirtVMDef vm = new LibvirtVMDef(); + s_logger.debug(String.format("Creating VM from specifications [%s]", vmTO.toString())); + + LibvirtVMDef vm = new LibvirtVMDef(); vm.setDomainName(vmTO.getName()); String uuid = vmTO.getUuid(); uuid = getUuid(uuid); @@ -2251,215 +2325,311 @@ public LibvirtVMDef createVMFromSpec(final VirtualMachineTO vmTO) { Map customParams = vmTO.getDetails(); boolean isUefiEnabled = false; boolean isSecureBoot = false; - String bootMode =null; + String bootMode = null; + if (MapUtils.isNotEmpty(customParams) && customParams.containsKey(GuestDef.BootType.UEFI.toString())) { isUefiEnabled = true; - bootMode = customParams.get(GuestDef.BootType.UEFI.toString()); - if (StringUtils.isNotBlank(bootMode) && "secure".equalsIgnoreCase(bootMode)) { + s_logger.debug(String.format("Enabled UEFI for VM UUID [%s].", uuid)); + + if (isSecureMode(customParams.get(GuestDef.BootType.UEFI.toString()))) { + s_logger.debug(String.format("Enabled Secure Boot for VM UUID [%s].", uuid)); isSecureBoot = true; } } Map extraConfig = vmTO.getExtraConfig(); if (dpdkSupport && (!extraConfig.containsKey(DpdkHelper.DPDK_NUMA) || !extraConfig.containsKey(DpdkHelper.DPDK_HUGE_PAGES))) { - s_logger.info("DPDK is enabled but it needs extra configurations for CPU NUMA and Huge Pages for VM deployment"); + s_logger.info(String.format("DPDK is enabled for VM [%s], but it needs extra configurations for CPU NUMA and Huge Pages for VM deployment.", vmTO.toString())); } + configureVM(vmTO, vm, customParams, isUefiEnabled, isSecureBoot, bootMode, extraConfig, uuid); + return vm; + } - final GuestDef guest = new GuestDef(); + /** + * Configures created VM from specification, adding the necessary components to VM. 
+ */ + private void configureVM(VirtualMachineTO vmTO, LibvirtVMDef vm, Map customParams, boolean isUefiEnabled, boolean isSecureBoot, String bootMode, + Map extraConfig, String uuid) { + s_logger.debug(String.format("Configuring VM with UUID [%s].", uuid)); - if (HypervisorType.LXC == _hypervisorType && VirtualMachine.Type.User == vmTO.getType()) { - // LXC domain is only valid for user VMs. Use KVM for system VMs. - guest.setGuestType(GuestDef.GuestType.LXC); - vm.setHvsType(HypervisorType.LXC.toString().toLowerCase()); - } else { - guest.setGuestType(GuestDef.GuestType.KVM); - vm.setHvsType(HypervisorType.KVM.toString().toLowerCase()); - vm.setLibvirtVersion(_hypervisorLibvirtVersion); - vm.setQemuVersion(_hypervisorQemuVersion); + GuestDef guest = createGuestFromSpec(vmTO, vm, uuid, customParams); + if (isUefiEnabled) { + configureGuestIfUefiEnabled(isSecureBoot, bootMode, guest); } - guest.setGuestArch(_guestCpuArch != null ? _guestCpuArch : vmTO.getArch()); - guest.setMachineType(_guestCpuArch != null && _guestCpuArch.equals("aarch64") ? 
"virt" : "pc"); - guest.setBootType(GuestDef.BootType.BIOS); - if (MapUtils.isNotEmpty(customParams) && customParams.containsKey(GuestDef.BootType.UEFI.toString())) { - guest.setBootType(GuestDef.BootType.UEFI); - guest.setBootMode(GuestDef.BootMode.LEGACY); - guest.setMachineType("q35"); - if (StringUtils.isNotBlank(customParams.get(GuestDef.BootType.UEFI.toString())) && "secure".equalsIgnoreCase(customParams.get(GuestDef.BootType.UEFI.toString()))) { - guest.setBootMode(GuestDef.BootMode.SECURE); // setting to secure mode - } + + vm.addComp(guest); + vm.addComp(createGuestResourceDef(vmTO)); + + int vcpus = vmTO.getCpus(); + if (!extraConfig.containsKey(DpdkHelper.DPDK_NUMA)) { + vm.addComp(createCpuModeDef(vmTO, vcpus)); } - guest.setUuid(uuid); - guest.setBootOrder(GuestDef.BootOrder.CDROM); - guest.setBootOrder(GuestDef.BootOrder.HARDISK); - if (isUefiEnabled) { - if (_uefiProperties.getProperty(GuestDef.GUEST_LOADER_SECURE) != null && "secure".equalsIgnoreCase(bootMode)) { - guest.setLoader(_uefiProperties.getProperty(GuestDef.GUEST_LOADER_SECURE)); - } + if (_hypervisorLibvirtVersion >= MIN_LIBVIRT_VERSION_FOR_GUEST_CPU_TUNE) { + vm.addComp(createCpuTuneDef(vmTO)); + } - if (_uefiProperties.getProperty(GuestDef.GUEST_LOADER_LEGACY) != null && "legacy".equalsIgnoreCase(bootMode)) { - guest.setLoader(_uefiProperties.getProperty(GuestDef.GUEST_LOADER_LEGACY)); - } + FeaturesDef features = createFeaturesDef(customParams, isUefiEnabled, isSecureBoot); + enlightenWindowsVm(vmTO, features); + vm.addComp(features); - if (_uefiProperties.getProperty(GuestDef.GUEST_NVRAM_PATH) != null) { - guest.setNvram(_uefiProperties.getProperty(GuestDef.GUEST_NVRAM_PATH)); - } + vm.addComp(createTermPolicy()); + vm.addComp(createClockDef(vmTO)); + vm.addComp(createDevicesDef(vmTO, guest, vcpus, isUefiEnabled)); - if (isSecureBoot) { - if (_uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE) != null && "secure".equalsIgnoreCase(bootMode)) { - 
guest.setNvramTemplate(_uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE)); - } - } else { - if (_uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY) != null) { - guest.setNvramTemplate(_uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY)); - } - } + addExtraConfigsToVM(vmTO, vm, extraConfig); + } + + /** + * Adds extra configuration to User VM Domain XML before starting. + */ + private void addExtraConfigsToVM(VirtualMachineTO vmTO, LibvirtVMDef vm, Map extraConfig) { + if (MapUtils.isNotEmpty(extraConfig) && VirtualMachine.Type.User.equals(vmTO.getType())) { + s_logger.debug(String.format("Appending extra configuration data [%s] to guest VM [%s] domain XML.", extraConfig, vmTO.toString())); + addExtraConfigComponent(extraConfig, vm); } + } - vm.addComp(guest); + /** + * Adds devices components to VM. + */ + protected DevicesDef createDevicesDef(VirtualMachineTO vmTO, GuestDef guest, int vcpus, boolean isUefiEnabled) { + DevicesDef devices = new DevicesDef(); + devices.setEmulatorPath(_hypervisorPath); + devices.setGuestType(guest.getGuestType()); + devices.addDevice(createSerialDef()); - final GuestResourceDef grd = new GuestResourceDef(); + if (_rngEnable) { + devices.addDevice(createRngDef()); + } - if (vmTO.getMinRam() != vmTO.getMaxRam() && !_noMemBalloon) { - grd.setMemBalloning(true); - grd.setCurrentMem(vmTO.getMinRam() / 1024); - grd.setMemorySize(vmTO.getMaxRam() / 1024); - } else { - grd.setMemorySize(vmTO.getMaxRam() / 1024); + devices.addDevice(createChannelDef(vmTO)); + devices.addDevice(createWatchDogDef()); + devices.addDevice(createVideoDef()); + devices.addDevice(createConsoleDef()); + devices.addDevice(createGraphicDef(vmTO)); + devices.addDevice(createTabletInputDef()); + + if (isGuestAarch64()) { + createArm64UsbDef(devices); } - final int vcpus = vmTO.getCpus(); - grd.setVcpuNum(vcpus); - vm.addComp(grd); - if (!extraConfig.containsKey(DpdkHelper.DPDK_NUMA)) { - final CpuModeDef cmd = new CpuModeDef(); 
- cmd.setMode(_guestCpuMode); - cmd.setModel(_guestCpuModel); - if (vmTO.getType() == VirtualMachine.Type.User) { - cmd.setFeatures(_cpuFeatures); - } - setCpuTopology(cmd, vcpus, vmTO.getDetails()); - vm.addComp(cmd); + DiskDef.DiskBus busT = getDiskModelFromVMDetail(vmTO); + if (busT == null) { + busT = getGuestDiskModel(vmTO.getPlatformEmulator(), isUefiEnabled); + } + + if (busT == DiskDef.DiskBus.SCSI) { + devices.addDevice(createSCSIDef(vcpus)); } + return devices; + } - if (_hypervisorLibvirtVersion >= 9000) { - final CpuTuneDef ctd = new CpuTuneDef(); - /** - A 4.0.X/4.1.X management server doesn't send the correct JSON - command for getMinSpeed, it only sends a 'speed' field. + protected WatchDogDef createWatchDogDef() { + return new WatchDogDef(_watchDogAction, _watchDogModel); + } - So if getMinSpeed() returns null we fall back to getSpeed(). + protected void createArm64UsbDef(DevicesDef devices) { + devices.addDevice(new InputDef(KEYBOARD, USB)); + devices.addDevice(new InputDef(MOUSE, USB)); + devices.addDevice(new LibvirtVMDef.USBDef((short)0, 0, 5, 0, 0)); + } - This way a >4.1 agent can work communicate a <=4.1 management server + protected InputDef createTabletInputDef() { + return new InputDef(TABLET, USB); + } - This change is due to the overcommit feature in 4.2 - */ - if (vmTO.getMinSpeed() != null) { - ctd.setShares(vmTO.getCpus() * vmTO.getMinSpeed()); - } else { - ctd.setShares(vmTO.getCpus() * vmTO.getSpeed()); - } + /** + * Creates a Libvirt Graphic Definition with the VM's password and VNC address. + */ + protected GraphicDef createGraphicDef(VirtualMachineTO vmTO) { + return new GraphicDef(VNC, (short)0, true, vmTO.getVncAddr(), vmTO.getVncPassword(), null); + } - setQuotaAndPeriod(vmTO, ctd); + /** + * Adds a Virtio channel for the Qemu Guest Agent tools. + */ + protected ChannelDef createChannelDef(VirtualMachineTO vmTO) { + File virtIoChannel = Paths.get(_qemuSocketsPath.getPath(), vmTO.getName() + "." 
+ _qemuGuestAgentSocketName).toFile(); + return new ChannelDef(_qemuGuestAgentSocketName, ChannelDef.ChannelType.UNIX, virtIoChannel); + } - vm.addComp(ctd); - } + /** + * Creates Virtio SCSI controller.
+ * The respective Virtio SCSI XML definition is generated only if the VM's Disk Bus is of ISCSI. + */ + protected SCSIDef createSCSIDef(int vcpus) { + return new SCSIDef((short)0, 0, 0, 9, 0, vcpus); + } - final FeaturesDef features = new FeaturesDef(); - features.addFeatures("pae"); - features.addFeatures("apic"); - features.addFeatures("acpi"); - if (isUefiEnabled && isSecureMode(customParams.get(GuestDef.BootType.UEFI.toString()))) { - features.addFeatures("smm"); - } + protected ConsoleDef createConsoleDef() { + return new ConsoleDef(PTY, null, null, (short)0); + } - //KVM hyperv enlightenment features based on OS Type - enlightenWindowsVm(vmTO, features); + protected VideoDef createVideoDef() { + return new VideoDef(_videoHw, _videoRam); + } - vm.addComp(features); + protected RngDef createRngDef() { + return new RngDef(_rngPath, _rngBackendModel, _rngRateBytes, _rngRatePeriod); + } - final TermPolicy term = new TermPolicy(); - term.setCrashPolicy("destroy"); - term.setPowerOffPolicy("destroy"); - term.setRebootPolicy("restart"); - vm.addComp(term); + protected SerialDef createSerialDef() { + return new SerialDef(PTY, null, (short)0); + } - final ClockDef clock = new ClockDef(); - if (vmTO.getOs().startsWith("Windows")) { + protected ClockDef createClockDef(final VirtualMachineTO vmTO) { + ClockDef clock = new ClockDef(); + if (org.apache.commons.lang.StringUtils.startsWith(vmTO.getOs(), WINDOWS)) { clock.setClockOffset(ClockDef.ClockOffset.LOCALTIME); - clock.setTimer("hypervclock", null, null); - } else if (vmTO.getType() != VirtualMachine.Type.User || isGuestPVEnabled(vmTO.getOs())) { - if (_hypervisorLibvirtVersion >= 9 * 1000 + 10) { - clock.setTimer("kvmclock", null, null, _noKvmClock); - } + clock.setTimer(HYPERVCLOCK, null, null); + } else if ((vmTO.getType() != VirtualMachine.Type.User || isGuestPVEnabled(vmTO.getOs())) && _hypervisorLibvirtVersion >= MIN_LIBVIRT_VERSION_FOR_GUEST_CPU_MODE) { + clock.setTimer(KVMCLOCK, null, null, _noKvmClock); } + 
return clock; + } - vm.addComp(clock); + protected TermPolicy createTermPolicy() { + TermPolicy term = new TermPolicy(); + term.setCrashPolicy(DESTROY); + term.setPowerOffPolicy(DESTROY); + term.setRebootPolicy(RESTART); + return term; + } - final DevicesDef devices = new DevicesDef(); - devices.setEmulatorPath(_hypervisorPath); - devices.setGuestType(guest.getGuestType()); + protected FeaturesDef createFeaturesDef(Map customParams, boolean isUefiEnabled, boolean isSecureBoot) { + FeaturesDef features = new FeaturesDef(); + features.addFeatures(PAE); + features.addFeatures(APIC); + features.addFeatures(ACPI); + if (isUefiEnabled && isSecureBoot) { + features.addFeatures(SMM); + } + return features; + } - final SerialDef serial = new SerialDef("pty", null, (short)0); - devices.addDevice(serial); + /** + * A 4.0.X/4.1.X management server doesn't send the correct JSON + * command for getMinSpeed, it only sends a 'speed' field.
+ * So, to create a cpu tune, if getMinSpeed() returns null we fall back to getSpeed().
+ * This way a >4.1 agent can communicate with a <=4.1 management server.
+ * This change is due to the overcommit feature in 4.2. + */ + protected CpuTuneDef createCpuTuneDef(VirtualMachineTO vmTO) { + CpuTuneDef ctd = new CpuTuneDef(); + int shares = vmTO.getCpus() * (vmTO.getMinSpeed() != null ? vmTO.getMinSpeed() : vmTO.getSpeed()); + ctd.setShares(shares); + setQuotaAndPeriod(vmTO, ctd); + return ctd; + } - if (_rngEnable) { - final RngDef rngDevice = new RngDef(_rngPath, _rngBackendModel, _rngRateBytes, _rngRatePeriod); - devices.addDevice(rngDevice); + private CpuModeDef createCpuModeDef(VirtualMachineTO vmTO, int vcpus) { + final CpuModeDef cmd = new CpuModeDef(); + cmd.setMode(_guestCpuMode); + cmd.setModel(_guestCpuModel); + if (VirtualMachine.Type.User.equals(vmTO.getType())) { + cmd.setFeatures(_cpuFeatures); } + setCpuTopology(cmd, vcpus, vmTO.getDetails()); + return cmd; + } - /* Add a VirtIO channel for the Qemu Guest Agent tools */ - File virtIoChannel = Paths.get(_qemuSocketsPath.getPath(), vmTO.getName() + "." + _qemuGuestAgentSocketName).toFile(); - devices.addDevice(new ChannelDef(_qemuGuestAgentSocketName, ChannelDef.ChannelType.UNIX, virtIoChannel)); - - devices.addDevice(new WatchDogDef(_watchDogAction, _watchDogModel)); + /** + * Creates guest resources based in VM specification. 
+ */ + protected GuestResourceDef createGuestResourceDef(VirtualMachineTO vmTO) { + GuestResourceDef grd = new GuestResourceDef(); - final VideoDef videoCard = new VideoDef(_videoHw, _videoRam); - devices.addDevice(videoCard); + grd.setMemorySize(vmTO.getMaxRam() / 1024); + if (vmTO.getMinRam() != vmTO.getMaxRam() && !_noMemBalloon) { + grd.setMemBalloning(true); + grd.setCurrentMem(vmTO.getMinRam() / 1024); + } + grd.setVcpuNum(vmTO.getCpus()); + return grd; + } - final ConsoleDef console = new ConsoleDef("pty", null, null, (short)0); - devices.addDevice(console); + private void configureGuestIfUefiEnabled(boolean isSecureBoot, String bootMode, GuestDef guest) { + setGuestLoader(bootMode, SECURE, guest, GuestDef.GUEST_LOADER_SECURE); + setGuestLoader(bootMode, LEGACY, guest, GuestDef.GUEST_LOADER_LEGACY); - //add the VNC port passwd here, get the passwd from the vmInstance. - final String passwd = vmTO.getVncPassword(); - final GraphicDef grap = new GraphicDef("vnc", (short)0, true, vmTO.getVncAddr(), passwd, null); - devices.addDevice(grap); + if (isUefiPropertieNotNull(GuestDef.GUEST_NVRAM_PATH)) { + guest.setNvram(_uefiProperties.getProperty(GuestDef.GUEST_NVRAM_PATH)); + } - final InputDef input = new InputDef("tablet", "usb"); - devices.addDevice(input); + if (isSecureBoot && isUefiPropertieNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE) && SECURE.equalsIgnoreCase(bootMode)) { + guest.setNvramTemplate(_uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE)); + } else if (isUefiPropertieNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY)) { + guest.setNvramTemplate(_uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY)); + } + } - // Add an explicit USB devices for ARM64 - if (_guestCpuArch != null && _guestCpuArch.equals("aarch64")) { - devices.addDevice(new InputDef("keyboard", "usb")); - devices.addDevice(new InputDef("mouse", "usb")); - devices.addDevice(new LibvirtVMDef.USBDef((short)0, 0, 5, 0, 0)); + private void setGuestLoader(String 
bootMode, String mode, GuestDef guest, String propertie) { + if (isUefiPropertieNotNull(propertie) && mode.equalsIgnoreCase(bootMode)) { + guest.setLoader(_uefiProperties.getProperty(propertie)); } + } - DiskDef.DiskBus busT = getDiskModelFromVMDetail(vmTO); + private boolean isUefiPropertieNotNull(String propertie) { + return _uefiProperties.getProperty(propertie) != null; + } - if (busT == null) { - busT = getGuestDiskModel(vmTO.getPlatformEmulator(), isUefiEnabled); - } + private boolean isGuestAarch64() { + return AARCH64.equals(_guestCpuArch); + } - // If we're using virtio scsi, then we need to add a virtual scsi controller - if (busT == DiskDef.DiskBus.SCSI) { - final SCSIDef sd = new SCSIDef((short)0, 0, 0, 9, 0, vcpus); - devices.addDevice(sd); - } + /** + * Creates a guest definition from a VM specification. + */ + protected GuestDef createGuestFromSpec(VirtualMachineTO vmTO, LibvirtVMDef vm, String uuid, Map customParams) { + GuestDef guest = new GuestDef(); - vm.addComp(devices); + configureGuestAndVMHypervisorType(vmTO, vm, guest); + guest.setGuestArch(_guestCpuArch != null ? _guestCpuArch : vmTO.getArch()); + guest.setMachineType(isGuestAarch64() ? 
VIRT : PC); + guest.setBootType(GuestDef.BootType.BIOS); + if (MapUtils.isNotEmpty(customParams) && customParams.containsKey(GuestDef.BootType.UEFI.toString())) { + guest.setBootType(GuestDef.BootType.UEFI); + guest.setBootMode(GuestDef.BootMode.LEGACY); + guest.setMachineType(Q35); + if (SECURE.equalsIgnoreCase(customParams.get(GuestDef.BootType.UEFI.toString()))) { + guest.setBootMode(GuestDef.BootMode.SECURE); + } + } + guest.setUuid(uuid); + guest.setBootOrder(GuestDef.BootOrder.CDROM); + guest.setBootOrder(GuestDef.BootOrder.HARDISK); + return guest; + } - // Add extra configuration to User VM Domain XML before starting - if (vmTO.getType().equals(VirtualMachine.Type.User) && MapUtils.isNotEmpty(extraConfig)) { - s_logger.info("Appending extra configuration data to guest VM domain XML"); - addExtraConfigComponent(extraConfig, vm); + protected void configureGuestAndVMHypervisorType(VirtualMachineTO vmTO, LibvirtVMDef vm, GuestDef guest) { + if (HypervisorType.LXC == _hypervisorType && VirtualMachine.Type.User.equals(vmTO.getType())) { + configureGuestAndUserVMToUseLXC(vm, guest); + } else { + configureGuestAndSystemVMToUseKVM(vm, guest); } + } - return vm; + /** + * KVM domain is only valid for system VMs. Use LXC for user VMs. + */ + private void configureGuestAndSystemVMToUseKVM(LibvirtVMDef vm, GuestDef guest) { + guest.setGuestType(GuestDef.GuestType.KVM); + vm.setHvsType(HypervisorType.KVM.toString().toLowerCase()); + vm.setLibvirtVersion(_hypervisorLibvirtVersion); + vm.setQemuVersion(_hypervisorQemuVersion); + } + + /** + * LXC domain is only valid for user VMs. Use KVM for system VMs. 
+ */ + private void configureGuestAndUserVMToUseLXC(LibvirtVMDef vm, GuestDef guest) { + guest.setGuestType(GuestDef.GuestType.LXC); + vm.setHvsType(HypervisorType.LXC.toString().toLowerCase()); } /** - * Add extra configurations (if any) as a String component to the domain XML + * Adds extra configurations (if any) as a String component to the domain XML */ protected void addExtraConfigComponent(Map extraConfig, LibvirtVMDef vm) { if (MapUtils.isNotEmpty(extraConfig)) { @@ -4336,6 +4506,11 @@ public boolean isSecureMode(String bootMode) { } private void setCpuTopology(CpuModeDef cmd, int vcpus, Map details) { + if (!enableManuallySettingCpuTopologyOnKvmVm) { + s_logger.debug(String.format("Skipping manually setting CPU topology on VM's XML due to it is disabled in agent.properties {\"property\": \"%s\", \"value\": %s}.", + AgentProperties.ENABLE_MANUALLY_SETTING_CPU_TOPOLOGY_ON_KVM_VM.getName(), enableManuallySettingCpuTopologyOnKvmVm)); + return; + } // multi cores per socket, for larger core configs int numCoresPerSocket = -1; if (details != null) { diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java index 1020ff86e3ae..86f30bf3bc3d 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -79,6 +79,7 @@ import org.mockito.BDDMockito; import org.mockito.Mock; import org.mockito.Mockito; +import org.mockito.Spy; import org.mockito.invocation.InvocationOnMock; import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PowerMockIgnore; @@ -160,11 +161,27 @@ import com.cloud.agent.api.to.VolumeTO; import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource; import 
com.cloud.exception.InternalErrorException; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.kvm.resource.KVMHABase.NfsStoragePool; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ChannelDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ClockDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ConsoleDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.CpuTuneDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DevicesDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.FeaturesDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.GraphicDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.GuestDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.GuestDef.GuestType; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.GuestResourceDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InputDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.RngDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.SCSIDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.SerialDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.TermPolicy; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.VideoDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.WatchDogDef; import com.cloud.hypervisor.kvm.resource.wrapper.LibvirtRequestWrapper; import com.cloud.hypervisor.kvm.resource.wrapper.LibvirtUtilitiesHelper; import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; @@ -201,6 +218,8 @@ public class LibvirtComputingResourceTest { VirtualMachineTO vmTO; @Mock LibvirtVMDef vmDef; + @Spy + private LibvirtComputingResource libvirtComputingResourceSpy = Mockito.spy(LibvirtComputingResource.class); private final static long HYPERVISOR_LIBVIRT_VERSION_SUPPORTS_IOURING = 6003000; private final static long 
HYPERVISOR_QEMU_VERSION_SUPPORTS_IOURING = 5000000; @@ -217,6 +236,7 @@ public class LibvirtComputingResourceTest { @Before public void setup() throws Exception { + libvirtComputingResourceSpy._qemuSocketsPath = new File("/var/run/qemu"); Scanner scanner = new Scanner(memInfo); PowerMockito.whenNew(Scanner.class).withAnyArguments().thenReturn(scanner); } @@ -243,15 +263,13 @@ public void testCreateVMFromSpecLegacy() { final String vncAddr = ""; final String vncPassword = "mySuperSecretPassword"; - final LibvirtComputingResource lcr = new LibvirtComputingResource(); - lcr._qemuSocketsPath = new File("/var/run/qemu"); - final VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, speed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword); to.setVncAddr(vncAddr); to.setArch("x86_64"); to.setUuid("b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9"); + to.setVcpuMaxLimit(cpus + 1); - final LibvirtVMDef vm = lcr.createVMFromSpec(to); + final LibvirtVMDef vm = libvirtComputingResourceSpy.createVMFromSpec(to); vm.setHvsType(hyperVisorType); verifyVm(to, vm); @@ -276,15 +294,13 @@ public void testCreateVMFromSpecWithTopology6() { final String vncAddr = ""; final String vncPassword = "mySuperSecretPassword"; - final LibvirtComputingResource lcr = new LibvirtComputingResource(); - lcr._qemuSocketsPath = new File("/var/run/qemu"); - final VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword); to.setVncAddr(vncAddr); to.setArch("x86_64"); to.setUuid("b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9"); + to.setVcpuMaxLimit(cpus + 1); - final LibvirtVMDef vm = lcr.createVMFromSpec(to); + final LibvirtVMDef vm = libvirtComputingResourceSpy.createVMFromSpec(to); vm.setHvsType(hyperVisorType); verifyVm(to, vm); @@ -309,14 +325,11 @@ public void testCreateVMFromSpecWithTopology4() { final String vncAddr = ""; final String vncPassword = 
"mySuperSecretPassword"; - final LibvirtComputingResource lcr = new LibvirtComputingResource(); - lcr._qemuSocketsPath = new File("/var/run/qemu"); - final VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword); to.setVncAddr(vncAddr); to.setUuid("b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9"); - final LibvirtVMDef vm = lcr.createVMFromSpec(to); + LibvirtVMDef vm = libvirtComputingResourceSpy.createVMFromSpec(to); vm.setHvsType(hyperVisorType); verifyVm(to, vm); @@ -331,59 +344,388 @@ public void testCreateVMFromSpecWithTopology4() { */ @Test public void testCreateVMFromSpec() { - final int id = random.nextInt(65534); - final String name = "test-instance-1"; + VirtualMachineTO to = createDefaultVM(false); + final LibvirtVMDef vm = libvirtComputingResourceSpy.createVMFromSpec(to); + vm.setHvsType(hyperVisorType); - final int cpus = random.nextInt(2) + 1; - final int minSpeed = 1024; - final int maxSpeed = 2048; - final int minRam = 256 * 1024; - final int maxRam = 512 * 1024; + verifyVm(to, vm); + } - final String os = "Ubuntu"; + @Test + public void testCreateGuestFromSpecWithoutCustomParam() { + VirtualMachineTO to = createDefaultVM(false); + LibvirtVMDef vm = new LibvirtVMDef(); + GuestDef guestDef = libvirtComputingResourceSpy.createGuestFromSpec(to, vm, to.getUuid(), null); + verifySysInfo(guestDef, "smbios", to.getUuid(), "pc"); + Assert.assertEquals(GuestDef.BootType.BIOS, guestDef.getBootType()); + Assert.assertNull(guestDef.getBootMode()); + } - final String vncAddr = ""; - final String vncPassword = "mySuperSecretPassword"; + @Test + public void testCreateGuestFromSpecWithCustomParamAndUefi() { + VirtualMachineTO to = createDefaultVM(false); - final LibvirtComputingResource lcr = new LibvirtComputingResource(); - lcr._qemuSocketsPath = new File("/var/run/qemu"); + Map extraConfig = new HashMap<>(); + 
extraConfig.put(GuestDef.BootType.UEFI.toString(), "legacy"); - final VirtualMachineTO to = - new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword); - to.setVncAddr(vncAddr); + LibvirtVMDef vm = new LibvirtVMDef(); + + GuestDef guestDef = libvirtComputingResourceSpy.createGuestFromSpec(to, vm, to.getUuid(), extraConfig); + verifySysInfo(guestDef, "smbios", to.getUuid(), "q35"); + Assert.assertEquals(GuestDef.BootType.UEFI, guestDef.getBootType()); + Assert.assertEquals(GuestDef.BootMode.LEGACY, guestDef.getBootMode()); + } + + @Test + public void testCreateGuestFromSpecWithCustomParamUefiAndSecure() { + VirtualMachineTO to = createDefaultVM(false); + + Map extraConfig = new HashMap<>(); + extraConfig.put(GuestDef.BootType.UEFI.toString(), "secure"); + + LibvirtVMDef vm = new LibvirtVMDef(); + + GuestDef guestDef = libvirtComputingResourceSpy.createGuestFromSpec(to, vm, to.getUuid(), extraConfig); + verifySysInfo(guestDef, "smbios", to.getUuid(), "q35"); + Assert.assertEquals(GuestDef.BootType.UEFI, guestDef.getBootType()); + Assert.assertEquals(GuestDef.BootMode.SECURE, guestDef.getBootMode()); + } + + @Test + public void testCreateGuestResourceDef() { + VirtualMachineTO to = createDefaultVM(false); + + GuestResourceDef guestResourceDef = libvirtComputingResourceSpy.createGuestResourceDef(to); + verifyGuestResourceDef(guestResourceDef, to); + } + + @Test + public void testCreateDevicesDef() { + VirtualMachineTO to = createDefaultVM(false); + + GuestDef guest = new GuestDef(); + guest.setGuestType(GuestType.KVM); + + DevicesDef devicesDef = libvirtComputingResourceSpy.createDevicesDef(to, guest, to.getCpus() + 1, false); + verifyDevices(devicesDef, to); + } + + @Test + public void testCreateDevicesWithSCSIDisk() { + VirtualMachineTO to = createDefaultVM(false); + to.setDetails(new HashMap<>()); + libvirtComputingResourceSpy._guestCpuArch = "aarch64"; + + GuestDef guest = 
new GuestDef(); + guest.setGuestType(GuestType.KVM); + + DevicesDef devicesDef = libvirtComputingResourceSpy.createDevicesDef(to, guest, to.getCpus() + 1, false); + verifyDevices(devicesDef, to); + + Document domainDoc = parse(devicesDef.toString()); + assertNodeExists(domainDoc, "/devices/controller[@type='scsi']"); + assertNodeExists(domainDoc, "/devices/controller[@model='virtio-scsi']"); + assertNodeExists(domainDoc, "/devices/controller/address[@type='pci']"); + assertNodeExists(domainDoc, "/devices/controller/driver[@queues='" + (to.getCpus() + 1) + "']"); + } + + @Test + public void testConfigureGuestAndSystemVMToUseKVM() { + VirtualMachineTO to = createDefaultVM(false); + libvirtComputingResourceSpy._hypervisorLibvirtVersion = 100; + libvirtComputingResourceSpy._hypervisorQemuVersion = 10; + LibvirtVMDef vm = new LibvirtVMDef(); + + GuestDef guestFromSpec = libvirtComputingResourceSpy.createGuestFromSpec(to, vm, to.getUuid(), null); + Assert.assertEquals(GuestDef.GuestType.KVM, guestFromSpec.getGuestType()); + Assert.assertEquals(HypervisorType.KVM.toString().toLowerCase(), vm.getHvsType()); + } + + @Test + public void testConfigureGuestAndUserVMToUseLXC() { + VirtualMachineTO to = createDefaultVM(false); + libvirtComputingResourceSpy._hypervisorType = HypervisorType.LXC; + LibvirtVMDef vm = new LibvirtVMDef(); + + GuestDef guestFromSpec = libvirtComputingResourceSpy.createGuestFromSpec(to, vm, to.getUuid(), null); + Assert.assertEquals(GuestDef.GuestType.LXC, guestFromSpec.getGuestType()); + Assert.assertEquals(HypervisorType.LXC.toString().toLowerCase(), vm.getHvsType()); + } + + @Test + public void testCreateCpuTuneDefWithoutQuotaAndPeriod() { + VirtualMachineTO to = createDefaultVM(false); + + CpuTuneDef cpuTuneDef = libvirtComputingResourceSpy.createCpuTuneDef(to); + Document domainDoc = parse(cpuTuneDef.toString()); + assertXpath(domainDoc, "/cputune/shares/text()", String.valueOf(cpuTuneDef.getShares())); + } + + @Test + public void 
testCreateCpuTuneDefWithQuotaAndPeriod() { + VirtualMachineTO to = createDefaultVM(true); + to.setCpuQuotaPercentage(10.0); + + CpuTuneDef cpuTuneDef = libvirtComputingResourceSpy.createCpuTuneDef(to); + Document domainDoc = parse(cpuTuneDef.toString()); + assertXpath(domainDoc, "/cputune/shares/text()", String.valueOf(cpuTuneDef.getShares())); + assertXpath(domainDoc, "/cputune/quota/text()", String.valueOf(cpuTuneDef.getQuota())); + assertXpath(domainDoc, "/cputune/period/text()", String.valueOf(cpuTuneDef.getPeriod())); + } + + @Test + public void testCreateCpuTuneDefWithMinQuota() { + VirtualMachineTO to = createDefaultVM(true); + to.setCpuQuotaPercentage(0.01); + + CpuTuneDef cpuTuneDef = libvirtComputingResourceSpy.createCpuTuneDef(to); + Document domainDoc = parse(cpuTuneDef.toString()); + assertXpath(domainDoc, "/cputune/shares/text()", String.valueOf(cpuTuneDef.getShares())); + assertXpath(domainDoc, "/cputune/quota/text()", "1000"); + assertXpath(domainDoc, "/cputune/period/text()", String.valueOf(cpuTuneDef.getPeriod())); + } + + @Test + public void testCreateDefaultClockDef() { + VirtualMachineTO to = createDefaultVM(false); + + ClockDef clockDef = libvirtComputingResourceSpy.createClockDef(to); + Document domainDoc = parse(clockDef.toString()); + + assertXpath(domainDoc, "/clock/@offset", "utc"); + } + + @Test + public void testCreateClockDefWindows() { + VirtualMachineTO to = createDefaultVM(false); + to.setOs("Windows"); + + ClockDef clockDef = libvirtComputingResourceSpy.createClockDef(to); + Document domainDoc = parse(clockDef.toString()); + + assertXpath(domainDoc, "/clock/@offset", "localtime"); + assertXpath(domainDoc, "/clock/timer/@name", "hypervclock"); + assertXpath(domainDoc, "/clock/timer/@present", "yes"); + } + + @Test + public void testCreateClockDefKvmclock() { + VirtualMachineTO to = createDefaultVM(false); + libvirtComputingResourceSpy._hypervisorLibvirtVersion = 9020; + + ClockDef clockDef = 
libvirtComputingResourceSpy.createClockDef(to); + Document domainDoc = parse(clockDef.toString()); + + assertXpath(domainDoc, "/clock/@offset", "utc"); + assertXpath(domainDoc, "/clock/timer/@name", "kvmclock"); + } + + @Test + public void testCreateTermPolicy() { + TermPolicy termPolicy = libvirtComputingResourceSpy.createTermPolicy(); + + String xml = "\n" + termPolicy.toString() + ""; + Document domainDoc = parse(xml); + + assertXpath(domainDoc, "/terms/on_reboot/text()", "restart"); + assertXpath(domainDoc, "/terms/on_poweroff/text()", "destroy"); + assertXpath(domainDoc, "/terms/on_crash/text()", "destroy"); + } + + @Test + public void testCreateFeaturesDef() { + VirtualMachineTO to = createDefaultVM(false); + FeaturesDef featuresDef = libvirtComputingResourceSpy.createFeaturesDef(null, false, false); + + String xml = "" + featuresDef.toString() + ""; + Document domainDoc = parse(xml); + + verifyFeatures(domainDoc); + } + + @Test + public void testCreateFeaturesDefWithUefi() { + VirtualMachineTO to = createDefaultVM(false); + HashMap extraConfig = new HashMap<>(); + extraConfig.put(GuestDef.BootType.UEFI.toString(), ""); + + FeaturesDef featuresDef = libvirtComputingResourceSpy.createFeaturesDef(extraConfig, true, true); + + String xml = "" + featuresDef.toString() + ""; + Document domainDoc = parse(xml); + + verifyFeatures(domainDoc); + } + + @Test + public void testCreateWatchDog() { + WatchDogDef watchDogDef = libvirtComputingResourceSpy.createWatchDogDef(); + verifyWatchDogDevices(parse(watchDogDef.toString()), ""); + } + + @Test + public void testCreateArm64UsbDef() { + DevicesDef devicesDef = new DevicesDef(); + + libvirtComputingResourceSpy.createArm64UsbDef(devicesDef); + Document domainDoc = parse(devicesDef.toString()); + + assertXpath(domainDoc, "/devices/controller/@type", "usb"); + assertXpath(domainDoc, "/devices/controller/@model", "qemu-xhci"); + assertXpath(domainDoc, "/devices/controller/address/@type", "pci"); + assertNodeExists(domainDoc, 
"/devices/input[@type='keyboard']"); + assertNodeExists(domainDoc, "/devices/input[@bus='usb']"); + assertNodeExists(domainDoc, "/devices/input[@type='mouse']"); + assertNodeExists(domainDoc, "/devices/input[@bus='usb']"); + } + + @Test + public void testCreateInputDef() { + InputDef inputDef = libvirtComputingResourceSpy.createTabletInputDef(); + verifyTabletInputDevice(parse(inputDef.toString()), ""); + } + + @Test + public void testCreateGraphicDef() { + VirtualMachineTO to = createDefaultVM(false); + GraphicDef graphicDef = libvirtComputingResourceSpy.createGraphicDef(to); + verifyGraphicsDevices(to, parse(graphicDef.toString()), ""); + } + + @Test + public void testCreateChannelDef() { + VirtualMachineTO to = createDefaultVM(false); + ChannelDef channelDef = libvirtComputingResourceSpy.createChannelDef(to); + verifyChannelDevices(to, parse(channelDef.toString()), ""); + } + + @Test + public void testCreateSCSIDef() { + VirtualMachineTO to = createDefaultVM(false); + + SCSIDef scsiDef = libvirtComputingResourceSpy.createSCSIDef(to.getCpus()); + Document domainDoc = parse(scsiDef.toString()); + verifyScsi(to, domainDoc, ""); + } + + @Test + public void testCreateConsoleDef() { + VirtualMachineTO to = createDefaultVM(false); + ConsoleDef consoleDef = libvirtComputingResourceSpy.createConsoleDef(); + verifyConsoleDevices(parse(consoleDef.toString()), ""); + } + + @Test + public void testCreateVideoDef() { + VirtualMachineTO to = createDefaultVM(false); + libvirtComputingResourceSpy._videoRam = 200; + libvirtComputingResourceSpy._videoHw = "vGPU"; + + VideoDef videoDef = libvirtComputingResourceSpy.createVideoDef(); + Document domainDoc = parse(videoDef.toString()); + assertXpath(domainDoc, "/video/model/@type", "vGPU"); + assertXpath(domainDoc, "/video/model/@vram", "200"); + } + + @Test + public void testCreateRngDef() { + VirtualMachineTO to = createDefaultVM(false); + RngDef rngDef = libvirtComputingResourceSpy.createRngDef(); + Document domainDoc = 
parse(rngDef.toString()); + + assertXpath(domainDoc, "/rng/@model", "virtio"); + assertXpath(domainDoc, "/rng/rate/@period", "1000"); + assertXpath(domainDoc, "/rng/rate/@bytes", "2048"); + assertXpath(domainDoc, "/rng/backend/@model", "random"); + assertXpath(domainDoc, "/rng/backend/text()", "/dev/random"); + } + + @Test + public void testCreateSerialDef() { + VirtualMachineTO to = createDefaultVM(false); + SerialDef serialDef = libvirtComputingResourceSpy.createSerialDef(); + verifySerialDevices(parse(serialDef.toString()), ""); + } + + private VirtualMachineTO createDefaultVM(boolean limitCpuUse) { + int id = random.nextInt(65534); + String name = "test-instance-1"; + + int cpus = random.nextInt(2) + 1; + int minSpeed = 1024; + int maxSpeed = 2048; + int minRam = 256 * 1024; + int maxRam = 512 * 1024; + + String os = "Ubuntu"; + String vncAddr = ""; + String vncPassword = "mySuperSecretPassword"; + + final VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, false, limitCpuUse, + vncPassword); to.setArch("x86_64"); + to.setVncAddr(vncAddr); to.setUuid("b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9"); + to.setVcpuMaxLimit(cpus + 1); - final LibvirtVMDef vm = lcr.createVMFromSpec(to); - vm.setHvsType(hyperVisorType); + return to; + } - verifyVm(to, vm); + private void verifyGuestResourceDef(GuestResourceDef guestResourceDef, VirtualMachineTO to) { + String xml = "" + guestResourceDef.toString() + ""; + Document domainDoc = parse(xml); + + String minRam = String.valueOf(to.getMinRam() / 1024); + verifyMemory(to, domainDoc, minRam); + assertNodeExists(domainDoc, "/domain/vcpu"); + verifyMemballoonDevices(domainDoc); + verifyVcpu(to, domainDoc); } - private void verifyVm(final VirtualMachineTO to, final LibvirtVMDef vm) { - final Document domainDoc = parse(vm.toString()); - assertXpath(domainDoc, "/domain/@type", vm.getHvsType()); - assertXpath(domainDoc, "/domain/name/text()", 
to.getName()); - assertXpath(domainDoc, "/domain/uuid/text()", to.getUuid()); - assertXpath(domainDoc, "/domain/description/text()", to.getOs()); - assertXpath(domainDoc, "/domain/clock/@offset", "utc"); - assertNodeExists(domainDoc, "/domain/features/pae"); - assertNodeExists(domainDoc, "/domain/features/apic"); - assertNodeExists(domainDoc, "/domain/features/acpi"); - assertXpath(domainDoc, "/domain/devices/serial/@type", "pty"); - assertXpath(domainDoc, "/domain/devices/serial/target/@port", "0"); - assertXpath(domainDoc, "/domain/devices/graphics/@type", "vnc"); - assertXpath(domainDoc, "/domain/devices/graphics/@listen", to.getVncAddr()); - assertXpath(domainDoc, "/domain/devices/graphics/@autoport", "yes"); - assertXpath(domainDoc, "/domain/devices/graphics/@passwd", to.getVncPassword()); + private void verifyVm(VirtualMachineTO to, LibvirtVMDef vm) { + Document domainDoc = parse(vm.toString()); + verifyHeader(domainDoc, vm.getHvsType(), to.getName(), to.getUuid(), to.getOs()); + verifyFeatures(domainDoc); + verifyClock(domainDoc); + verifySerialDevices(domainDoc, "/domain/devices"); + verifyGraphicsDevices(to, domainDoc, "/domain/devices"); + verifyConsoleDevices(domainDoc, "/domain/devices"); + verifyTabletInputDevice(domainDoc, "/domain/devices"); + verifyChannelDevices(to, domainDoc, "/domain/devices"); + + String minRam = String.valueOf(to.getMinRam() / 1024); + verifyMemory(to, domainDoc, minRam); + assertNodeExists(domainDoc, "/domain/cpu"); + + verifyMemballoonDevices(domainDoc); + verifyVcpu(to, domainDoc); + verifyOsType(domainDoc); + verifyOsBoot(domainDoc); + verifyPoliticOn_(domainDoc); + verifyWatchDogDevices(domainDoc, "/domain/devices"); + } + + private void verifyMemballoonDevices(Document domainDoc) { + assertXpath(domainDoc, "/domain/devices/memballoon/@model", "virtio"); + } + + private void verifyVcpu(VirtualMachineTO to, Document domainDoc) { + assertXpath(domainDoc, "/domain/vcpu/text()", String.valueOf(to.getCpus())); + } - 
assertXpath(domainDoc, "/domain/devices/console/@type", "pty"); - assertXpath(domainDoc, "/domain/devices/console/target/@port", "0"); - assertXpath(domainDoc, "/domain/devices/input/@type", "tablet"); - assertXpath(domainDoc, "/domain/devices/input/@bus", "usb"); + private void verifyMemory(VirtualMachineTO to, Document domainDoc, String minRam) { + assertXpath(domainDoc, "/domain/memory/text()", String.valueOf(to.getMaxRam() / 1024)); + assertXpath(domainDoc, "/domain/currentMemory/text()", minRam); + } + + private void verifyWatchDogDevices(Document domainDoc, String prefix) { + assertXpath(domainDoc, prefix + "/watchdog/@model", "i6300esb"); + assertXpath(domainDoc, prefix + "/watchdog/@action", "none"); + } - assertNodeExists(domainDoc, "/domain/devices/channel"); - assertXpath(domainDoc, "/domain/devices/channel/@type", ChannelDef.ChannelType.UNIX.toString()); + private void verifyChannelDevices(VirtualMachineTO to, Document domainDoc, String prefix) { + assertNodeExists(domainDoc, prefix + "/channel"); + assertXpath(domainDoc, prefix + "/channel/@type", ChannelDef.ChannelType.UNIX.toString()); /* The configure() method of LibvirtComputingResource has not been called, so the default path for the sockets @@ -392,28 +734,93 @@ The configure() method of LibvirtComputingResource has not been called, so the d Calling configure is also not possible since that looks for certain files on the system which are not present during testing */ - assertXpath(domainDoc, "/domain/devices/channel/source/@path", "/var/run/qemu/" + to.getName() + ".org.qemu.guest_agent.0"); - assertXpath(domainDoc, "/domain/devices/channel/target/@name", "org.qemu.guest_agent.0"); + assertXpath(domainDoc, prefix + "/channel/source/@path", "/var/run/qemu/" + to.getName() + ".org.qemu.guest_agent.0"); + assertXpath(domainDoc, prefix + "/channel/target/@name", "org.qemu.guest_agent.0"); + } - assertXpath(domainDoc, "/domain/memory/text()", String.valueOf( to.getMaxRam() / 1024 )); - 
assertXpath(domainDoc, "/domain/currentMemory/text()", String.valueOf( to.getMinRam() / 1024 )); + private void verifyTabletInputDevice(Document domainDoc, String prefix) { + assertXpath(domainDoc, prefix + "/input/@type", "tablet"); + assertXpath(domainDoc, prefix + "/input/@bus", "usb"); + } - assertXpath(domainDoc, "/domain/devices/memballoon/@model", "virtio"); - assertXpath(domainDoc, "/domain/vcpu/text()", String.valueOf(to.getCpus())); + private void verifyConsoleDevices(Document domainDoc, String prefix) { + assertXpath(domainDoc, prefix + "/console/@type", "pty"); + assertXpath(domainDoc, prefix + "/console/target/@port", "0"); + } - assertXpath(domainDoc, "/domain/os/type/@machine", "pc"); - assertXpath(domainDoc, "/domain/os/type/text()", "hvm"); + private void verifyScsi(VirtualMachineTO to, Document domainDoc, String prefix) { + assertXpath(domainDoc, prefix + "/controller/@type", "scsi"); + assertXpath(domainDoc, prefix + "/controller/@model", "virtio-scsi"); + assertXpath(domainDoc, prefix + "/controller/address/@type", "pci"); + assertXpath(domainDoc, prefix + "/controller/driver/@queues", String.valueOf(to.getCpus())); + } - assertNodeExists(domainDoc, "/domain/cpu"); + private void verifyClock(Document domainDoc) { + assertXpath(domainDoc, "/domain/clock/@offset", "utc"); + } + + private void verifyGraphicsDevices(VirtualMachineTO to, Document domainDoc, String prefix) { + assertXpath(domainDoc, prefix + "/graphics/@type", "vnc"); + assertXpath(domainDoc, prefix + "/graphics/@listen", to.getVncAddr()); + assertXpath(domainDoc, prefix + "/graphics/@autoport", "yes"); + assertXpath(domainDoc, prefix + "/graphics/@passwd", to.getVncPassword()); + } + + private void verifySerialDevices(Document domainDoc, String prefix) { + assertXpath(domainDoc, prefix + "/serial/@type", "pty"); + assertXpath(domainDoc, prefix + "/serial/target/@port", "0"); + } + + private void verifyOsBoot(Document domainDoc) { assertNodeExists(domainDoc, 
"/domain/os/boot[@dev='cdrom']"); assertNodeExists(domainDoc, "/domain/os/boot[@dev='hd']"); + } + + private void verifyOsType(Document domainDoc) { + assertXpath(domainDoc, "/domain/os/type/@machine", "pc"); + assertXpath(domainDoc, "/domain/os/type/text()", "hvm"); + } + private void verifyPoliticOn_(Document domainDoc) { assertXpath(domainDoc, "/domain/on_reboot/text()", "restart"); assertXpath(domainDoc, "/domain/on_poweroff/text()", "destroy"); assertXpath(domainDoc, "/domain/on_crash/text()", "destroy"); + } + + private void verifyFeatures(Document domainDoc) { + assertNodeExists(domainDoc, "/domain/features/pae"); + assertNodeExists(domainDoc, "/domain/features/apic"); + assertNodeExists(domainDoc, "/domain/features/acpi"); + } + + private void verifyHeader(Document domainDoc, String hvsType, String name, String uuid, String os) { + assertXpath(domainDoc, "/domain/@type", hvsType); + assertXpath(domainDoc, "/domain/name/text()", name); + assertXpath(domainDoc, "/domain/uuid/text()", uuid); + assertXpath(domainDoc, "/domain/description/text()", os); + } + + private void verifyDevices(DevicesDef devicesDef, VirtualMachineTO to) { + Document domainDoc = parse(devicesDef.toString()); + + verifyWatchDogDevices(domainDoc, "/devices"); + verifyConsoleDevices(domainDoc, "/devices"); + verifySerialDevices(domainDoc, "/devices"); + verifyGraphicsDevices(to, domainDoc, "/devices"); + verifyChannelDevices(to, domainDoc, "/devices"); + verifyTabletInputDevice(domainDoc, "/devices"); + } + + private void verifySysInfo(GuestDef guestDef, String type, String uuid, String machine) { + // Wrap in a synthetic root element: guestDef.toString() produces two top-level elements, which is not well-formed XML and would make parse() fail. 
+ String xml = "\n" + guestDef.toString() + ""; - assertXpath(domainDoc, "/domain/devices/watchdog/@model", "i6300esb"); - assertXpath(domainDoc, "/domain/devices/watchdog/@action", "none"); + Document domainDoc = parse(xml); + assertXpath(domainDoc, "/guestdef/sysinfo/@type", type); + assertNodeExists(domainDoc, "/guestdef/sysinfo/system/entry[@name='manufacturer']"); + assertNodeExists(domainDoc, "/guestdef/sysinfo/system/entry[@name='product']"); + assertXpath(domainDoc, "/guestdef/sysinfo/system/entry[@name='uuid']/text()", uuid); + assertXpath(domainDoc, "/guestdef/os/type/@machine", machine); } static Document parse(final String input) { diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index 1102f9d1ce87..f1f3b709cfc4 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -27,11 +27,8 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; - -import com.vmware.vim25.ManagedObjectReference; - import org.apache.cloudstack.api.ApiConstants; +import org.apache.log4j.Logger; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; @@ -75,9 +72,11 @@ import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.template.TemplateManager; import com.cloud.user.Account; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; +import com.vmware.vim25.ManagedObjectReference; public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter { private static final Logger s_logger = Logger.getLogger(VmwareServerDiscoverer.class); @@ -563,7 
+562,7 @@ public boolean configure(String name, Map params) throws Configu } private void createVmwareToolsIso() { - String isoName = "vmware-tools.iso"; + String isoName = TemplateManager.VMWARE_TOOLS_ISO; VMTemplateVO tmplt = _tmpltDao.findByTemplateName(isoName); Long id; if (tmplt == null) { diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index e2a7969af060..0248903173ba 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -94,6 +94,8 @@ import com.cloud.agent.api.DeleteVMSnapshotCommand; import com.cloud.agent.api.GetHostStatsAnswer; import com.cloud.agent.api.GetHostStatsCommand; +import com.cloud.agent.api.GetStoragePoolCapabilitiesAnswer; +import com.cloud.agent.api.GetStoragePoolCapabilitiesCommand; import com.cloud.agent.api.GetStorageStatsAnswer; import com.cloud.agent.api.GetStorageStatsCommand; import com.cloud.agent.api.GetUnmanagedInstancesAnswer; @@ -101,12 +103,12 @@ import com.cloud.agent.api.GetVmDiskStatsAnswer; import com.cloud.agent.api.GetVmDiskStatsCommand; import com.cloud.agent.api.GetVmIpAddressCommand; -import com.cloud.agent.api.GetVmVncTicketCommand; -import com.cloud.agent.api.GetVmVncTicketAnswer; import com.cloud.agent.api.GetVmNetworkStatsAnswer; import com.cloud.agent.api.GetVmNetworkStatsCommand; import com.cloud.agent.api.GetVmStatsAnswer; import com.cloud.agent.api.GetVmStatsCommand; +import com.cloud.agent.api.GetVmVncTicketAnswer; +import com.cloud.agent.api.GetVmVncTicketCommand; import com.cloud.agent.api.GetVncPortAnswer; import com.cloud.agent.api.GetVncPortCommand; import com.cloud.agent.api.GetVolumeStatsAnswer; @@ -259,6 +261,7 @@ import 
com.cloud.storage.resource.VmwareStorageProcessor.VmwareStorageProcessorConfigurableFields; import com.cloud.storage.resource.VmwareStorageSubsystemCommandHandler; import com.cloud.storage.template.TemplateProp; +import com.cloud.template.TemplateManager; import com.cloud.utils.DateUtil; import com.cloud.utils.ExecutionResult; import com.cloud.utils.NumbersUtil; @@ -296,6 +299,8 @@ import com.vmware.vim25.GuestInfo; import com.vmware.vim25.GuestNicInfo; import com.vmware.vim25.HostCapability; +import com.vmware.vim25.HostConfigInfo; +import com.vmware.vim25.HostFileSystemMountInfo; import com.vmware.vim25.HostHostBusAdapter; import com.vmware.vim25.HostInternetScsiHba; import com.vmware.vim25.HostPortGroupSpec; @@ -505,6 +510,8 @@ public Answer executeRequest(Command cmd) { answer = execute((ModifyTargetsCommand) cmd); } else if (clz == ModifyStoragePoolCommand.class) { answer = execute((ModifyStoragePoolCommand) cmd); + } else if (clz == GetStoragePoolCapabilitiesCommand.class) { + answer = execute((GetStoragePoolCapabilitiesCommand) cmd); } else if (clz == DeleteStoragePoolCommand.class) { answer = execute((DeleteStoragePoolCommand) cmd); } else if (clz == CopyVolumeCommand.class) { @@ -694,6 +701,9 @@ protected EnumMap examineStora if (dest.isFullCloneFlag() != null) { paramsCopy.put(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG, dest.isFullCloneFlag().booleanValue()); } + if (dest.getDiskProvisioningStrictnessFlag() != null) { + paramsCopy.put(VmwareStorageProcessorConfigurableFields.DISK_PROVISIONING_STRICTNESS, dest.getDiskProvisioningStrictnessFlag().booleanValue()); + } } } return paramsCopy; @@ -1983,15 +1993,22 @@ protected StartAnswer execute(StartCommand cmd) { } // Check for hotadd settings - vmConfigSpec.setMemoryHotAddEnabled(vmMo.isMemoryHotAddSupported(guestOsId)); - + vmConfigSpec.setMemoryHotAddEnabled(vmMo.isMemoryHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()); String hostApiVersion = ((HostMO) 
hyperHost).getHostAboutInfo().getApiVersion(); if (numCoresPerSocket > 1 && hostApiVersion.compareTo("5.0") < 0) { s_logger.warn("Dynamic scaling of CPU is not supported for Virtual Machines with multi-core vCPUs in case of ESXi hosts 4.1 and prior. Hence CpuHotAdd will not be" + " enabled for Virtual Machine: " + vmInternalCSName); vmConfigSpec.setCpuHotAddEnabled(false); } else { - vmConfigSpec.setCpuHotAddEnabled(vmMo.isCpuHotAddSupported(guestOsId)); + vmConfigSpec.setCpuHotAddEnabled(vmMo.isCpuHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()); + } + + if(!vmMo.isMemoryHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()){ + s_logger.warn("hotadd of memory is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName); + } + + if(!vmMo.isCpuHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()){ + s_logger.warn("hotadd of cpu is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName); } configNestedHVSupport(vmMo, vmSpec, vmConfigSpec); @@ -5045,6 +5062,63 @@ protected Answer execute(ModifyStoragePoolCommand cmd) { } } + protected Answer execute(GetStoragePoolCapabilitiesCommand cmd) { + + try { + + VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); + + HostMO host = (HostMO) hyperHost; + + StorageFilerTO pool = cmd.getPool(); + + ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid()); + + if (morDatastore == null) { + morDatastore = hyperHost.mountDatastore((pool.getType() == StoragePoolType.VMFS || pool.getType() == StoragePoolType.PreSetup || pool.getType() == StoragePoolType.DatastoreCluster), pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", ""), true); + } + + assert (morDatastore != null); + + DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore); + + GetStoragePoolCapabilitiesAnswer answer = new 
GetStoragePoolCapabilitiesAnswer(cmd); + + boolean hardwareAccelerationSupportForDataStore = getHardwareAccelerationSupportForDataStore(host.getMor(), dsMo.getName()); + Map poolDetails = answer.getPoolDetails(); + poolDetails.put(Storage.Capability.HARDWARE_ACCELERATION.toString(), String.valueOf(hardwareAccelerationSupportForDataStore)); + answer.setPoolDetails(poolDetails); + answer.setResult(true); + + return answer; + } catch (Throwable e) { + if (e instanceof RemoteException) { + s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); + + invalidateServiceContext(); + } + + String msg = "GetStoragePoolCapabilitiesCommand failed due to " + VmwareHelper.getExceptionMessage(e); + + s_logger.error(msg, e); + GetStoragePoolCapabilitiesAnswer answer = new GetStoragePoolCapabilitiesAnswer(cmd); + answer.setResult(false); + answer.setDetails(msg); + return answer; + } + } + + private boolean getHardwareAccelerationSupportForDataStore(ManagedObjectReference host, String dataStoreName) throws Exception { + HostConfigInfo config = getServiceContext().getVimClient().getDynamicProperty(host, "config"); + List mountInfoList = config.getFileSystemVolume().getMountInfo(); + for (HostFileSystemMountInfo hostFileSystemMountInfo: mountInfoList) { + if ( hostFileSystemMountInfo.getVolume().getName().equals(dataStoreName) ) { + return hostFileSystemMountInfo.getVStorageSupport().equals("vStorageSupported"); + } + } + return false; + } + private void handleTargets(boolean add, ModifyTargetsCommand.TargetTypeToRemove targetTypeToRemove, boolean isRemoveAsync, List> targets, List hosts) { if (targets != null && targets.size() > 0) { @@ -5115,7 +5189,7 @@ protected AttachIsoAnswer execute(AttachIsoCommand cmd) { String storeUrl = cmd.getStoreUrl(); if (storeUrl == null) { - if (!cmd.getIsoPath().equalsIgnoreCase("vmware-tools.iso")) { + if (!cmd.getIsoPath().equalsIgnoreCase(TemplateManager.VMWARE_TOOLS_ISO)) { String msg = "ISO store root url is 
not found in AttachIsoCommand"; s_logger.error(msg); throw new Exception(msg); @@ -7203,6 +7277,7 @@ private UnmanagedInstanceTO getUnmanagedInstance(VmwareHypervisorHost hyperHost, try { instance = new UnmanagedInstanceTO(); instance.setName(vmMo.getVmName()); + instance.setInternalCSName(vmMo.getInternalCSName()); instance.setCpuCores(vmMo.getConfigSummary().getNumCpu()); instance.setCpuCoresPerSocket(vmMo.getCoresPerSocket()); instance.setCpuSpeed(vmMo.getConfigSummary().getCpuReservation()); @@ -7474,7 +7549,7 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h } else { String msg = String.format("Successfully migrated VM: %s with its storage to target datastore(s)", vmName); if (targetHyperHost != null) { - msg = String.format("% from host %s to %s", msg, sourceHyperHost.getHyperHostName(), targetHyperHost.getHyperHostName()); + msg = String.format("%s from host %s to %s", msg, sourceHyperHost.getHyperHostName(), targetHyperHost.getHyperHostName()); } s_logger.debug(msg); } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java index da6713746b84..97c328674963 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -92,9 +92,11 @@ import com.cloud.storage.DataStoreRole; import com.cloud.storage.JavaStorageLayer; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.StorageLayer; import com.cloud.storage.Volume; import com.cloud.storage.template.OVAProcessor; +import com.cloud.template.TemplateManager; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; @@ -128,7 +130,6 @@ import 
com.vmware.vim25.VirtualDeviceConfigSpecOperation; import com.vmware.vim25.VirtualDisk; import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo; -import com.vmware.vim25.VirtualDiskType; import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VmConfigInfo; import com.vmware.vim25.VmfsDatastoreExpandSpec; @@ -137,7 +138,7 @@ public class VmwareStorageProcessor implements StorageProcessor { public enum VmwareStorageProcessorConfigurableFields { - NFS_VERSION("nfsVersion"), FULL_CLONE_FLAG("fullCloneFlag"); + NFS_VERSION("nfsVersion"), FULL_CLONE_FLAG("fullCloneFlag"), DISK_PROVISIONING_STRICTNESS("diskProvisioningStrictness"); private String name; @@ -156,6 +157,7 @@ public String getName() { private final VmwareHostService hostService; private boolean _fullCloneFlag; + private boolean _diskProvisioningStrictness; private final VmwareStorageMount mountService; private final VmwareResource resource; private final Integer _timeout; @@ -775,10 +777,10 @@ private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dc } private boolean createVMFullClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, DatastoreMO dsMo, String vmdkName, ManagedObjectReference morDatastore, - ManagedObjectReference morPool) throws Exception { + ManagedObjectReference morPool, ProvisioningType diskProvisioningType) throws Exception { s_logger.info("creating full clone from template"); - if (!vmTemplate.createFullClone(vmdkName, dcMo.getVmFolder(), morPool, morDatastore)) { + if (!vmTemplate.createFullClone(vmdkName, dcMo.getVmFolder(), morPool, morDatastore, diskProvisioningType)) { String msg = "Unable to create full clone from the template"; s_logger.error(msg); @@ -866,7 +868,7 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) { if (dsMo.getDatastoreType().equalsIgnoreCase("VVOL")) { vmdkFileBaseName = cloneVMforVvols(context, hyperHost, template, vmTemplate, volume, dcMo, dsMo); } else { - vmdkFileBaseName = createVMFolderWithVMName(context, 
hyperHost, template, vmTemplate, volume, dcMo, dsMo, searchExcludedFolders); + vmdkFileBaseName = createVMAndFolderWithVMName(context, hyperHost, template, vmTemplate, volume, dcMo, dsMo, searchExcludedFolders); } } // restoreVM - move the new ROOT disk into corresponding VM folder @@ -915,9 +917,12 @@ private String cloneVMforVvols(VmwareContext context, VmwareHypervisorHost hyper if (volume.getVolumeType() == Volume.Type.DATADISK) vmName = volume.getName(); if (!_fullCloneFlag) { + if (_diskProvisioningStrictness && volume.getProvisioningType() != ProvisioningType.THIN) { + throw new CloudRuntimeException("Unable to create linked clones with strict disk provisioning enabled"); + } createVMLinkedClone(vmTemplate, dcMo, vmName, morDatastore, morPool); } else { - createVMFullClone(vmTemplate, dcMo, dsMo, vmName, morDatastore, morPool); + createVMFullClone(vmTemplate, dcMo, dsMo, vmName, morDatastore, morPool, volume.getProvisioningType()); } VirtualMachineMO vmMo = new ClusterMO(context, morCluster).findVmOnHyperHost(vmName); @@ -931,21 +936,24 @@ private String cloneVMforVvols(VmwareContext context, VmwareHypervisorHost hyper return vmdkFileBaseName; } - private String createVMFolderWithVMName(VmwareContext context, VmwareHypervisorHost hyperHost, TemplateObjectTO template, - VirtualMachineMO vmTemplate, VolumeObjectTO volume, DatacenterMO dcMo, DatastoreMO dsMo, - String searchExcludedFolders) throws Exception { + private String createVMAndFolderWithVMName(VmwareContext context, VmwareHypervisorHost hyperHost, TemplateObjectTO template, + VirtualMachineMO vmTemplate, VolumeObjectTO volume, DatacenterMO dcMo, DatastoreMO dsMo, + String searchExcludedFolders) throws Exception { String vmdkName = volume.getName(); try { ManagedObjectReference morDatastore = dsMo.getMor(); ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool(); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); - if (template.getSize() != null){ + if 
(template.getSize() != null) { _fullCloneFlag = volume.getSize() > template.getSize() ? true : _fullCloneFlag; } if (!_fullCloneFlag) { + if (_diskProvisioningStrictness && volume.getProvisioningType() != ProvisioningType.THIN) { + throw new CloudRuntimeException("Unable to create linked clones with strict disk provisioning enabled"); + } createVMLinkedClone(vmTemplate, dcMo, vmdkName, morDatastore, morPool); } else { - createVMFullClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool); + createVMFullClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool, volume.getProvisioningType()); } VirtualMachineMO vmMo = new ClusterMO(context, morCluster).findVmOnHyperHost(vmdkName); @@ -956,7 +964,7 @@ private String createVMFolderWithVMName(VmwareContext context, VmwareHypervisorH String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.VMWARE, !_fullCloneFlag); String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, !_fullCloneFlag); - for (int i=0; i template.getSize() || _fullCloneFlag; } if (!_fullCloneFlag) { + if (_diskProvisioningStrictness && volume.getProvisioningType() != ProvisioningType.THIN) { + throw new CloudRuntimeException("Unable to create linked clones with strict disk provisioning enabled"); + } createVMLinkedClone(vmMo, dcMo, cloneName, morDatastore, morPool); } else { - createVMFullClone(vmMo, dcMo, dsMo, cloneName, morDatastore, morPool); + createVMFullClone(vmMo, dcMo, dsMo, cloneName, morDatastore, morPool, volume.getProvisioningType()); } } @@ -2430,7 +2441,7 @@ private Answer attachIso(DiskTO disk, boolean isAttach, String vmName, boolean f storeUrl = nfsImageStore.getUrl(); } if (storeUrl == null) { - if (!iso.getName().equalsIgnoreCase("vmware-tools.iso")) { + if 
(!iso.getName().equalsIgnoreCase(TemplateManager.VMWARE_TOOLS_ISO)) { String msg = "ISO store root url is not found in AttachIsoCommand"; s_logger.error(msg); throw new Exception(msg); @@ -2534,7 +2545,7 @@ public Answer createVolume(CreateObjectCommand cmd) { try { VirtualStorageObjectManagerMO vStorageObjectManagerMO = new VirtualStorageObjectManagerMO(context); - VStorageObject virtualDisk = vStorageObjectManagerMO.createDisk(morDatastore, VirtualDiskType.THIN, volume.getSize(), volumeDatastorePath, volumeUuid); + VStorageObject virtualDisk = vStorageObjectManagerMO.createDisk(morDatastore, volume.getProvisioningType(), volume.getSize(), volumeDatastorePath, volumeUuid); DatastoreFile file = new DatastoreFile(((BaseConfigInfoDiskFileBackingInfo)virtualDisk.getConfig().getBacking()).getFilePath()); newVol.setPath(file.getFileBaseName()); newVol.setSize(volume.getSize()); @@ -3889,6 +3900,11 @@ void setFullCloneFlag(boolean value){ s_logger.debug("VmwareProcessor instance - create full clone = " + (value ? "TRUE" : "FALSE")); } + void setDiskProvisioningStrictness(boolean value){ + this._diskProvisioningStrictness = value; + s_logger.debug("VmwareProcessor instance - diskProvisioningStrictness = " + (value ? 
"TRUE" : "FALSE")); + } + @Override public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) { return null; @@ -3950,9 +3966,9 @@ public VirtualMachineMO cloneVMFromTemplate(VmwareHypervisorHost hyperHost, Stri } s_logger.info("Cloning VM " + cloneName + " from template " + templateName + " into datastore " + templatePrimaryStoreUuid); if (!_fullCloneFlag) { - createVMLinkedClone(templateMo, dcMo, cloneName, morDatastore, morPool); + createVMLinkedClone(templateMo, dcMo, cloneName, morDatastore, morPool, null); } else { - createVMFullClone(templateMo, dcMo, dsMo, cloneName, morDatastore, morPool); + createVMFullClone(templateMo, dcMo, dsMo, cloneName, morDatastore, morPool, null); } VirtualMachineMO vm = dcMo.findVm(cloneName); if (vm == null) { diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java index 122a034288d4..15caa1d878ea 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java @@ -83,6 +83,10 @@ public boolean reconfigureStorageProcessor(EnumMap params2 = _resource.examineStorageSubSystemCommandFullCloneFlagForVmware(storageCmd, params); verify(destDataTO).getDataStore(); verify(destDataStoreTO, times(2)).isFullCloneFlag(); - assertEquals(1, params2.size()); + assertEquals(2, params2.size()); assertEquals(FULL_CLONE_FLAG, params2.get(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG)); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java index 23af5f2131fd..c2a0969c9d3c 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java @@ -84,6 +84,7 @@ import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.template.TemplateManager; import com.cloud.user.Account; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.QueryBuilder; @@ -118,7 +119,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L @Inject private HostPodDao _podDao; - private String xenServerIsoName = "xs-tools.iso"; + private String xenServerIsoName = TemplateManager.XS_TOOLS_ISO; private String xenServerIsoDisplayText = "XenServer Tools Installer ISO (xen-pv-drv-iso)"; protected XcpServerDiscoverer() { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java index 295905cd863a..8aaa25fed47c 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java @@ -16,6 +16,8 @@ // under the License. 
package com.cloud.hypervisor.xenserver.resource; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; @@ -124,6 +126,7 @@ import com.cloud.storage.VolumeVO; import com.cloud.storage.resource.StorageSubsystemCommandHandler; import com.cloud.storage.resource.StorageSubsystemCommandHandlerBase; +import com.cloud.template.TemplateManager; import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.utils.ExecutionResult; import com.cloud.utils.NumbersUtil; @@ -164,8 +167,6 @@ import com.xensource.xenapi.VM; import com.xensource.xenapi.XenAPIObject; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; - /** * CitrixResourceBase encapsulates the calls to the XenServer Xapi process to * perform the required functionalities for CloudStack. @@ -222,8 +223,7 @@ public String toString() { private static final Logger s_logger = Logger.getLogger(CitrixResourceBase.class); protected static final HashMap s_powerStatesTable; - private String xenServer70plusGuestToolsName = "guest-tools.iso"; - private String xenServerBefore70GuestToolsName = "xs-tools.iso"; + public static final String XS_TOOLS_ISO_AFTER_70 = "guest-tools.iso"; static { s_powerStatesTable = new HashMap(); @@ -2666,11 +2666,10 @@ public VDI getIsoVDIByURL(final Connection conn, final String vmName, final Stri * Retrieve the actual ISO 'name-label' to be used. * We based our decision on XenServer version. *
    - *
  • for XenServer 7.0+, we use {@value #xenServer70plusGuestToolsName}; - *
  • for versions before 7.0, we use {@value #xenServerBefore70GuestToolsName}. + *
  • for XenServer 7.0+, we use {@value #XS_TOOLS_ISO_AFTER_70}; + *
  • for versions before 7.0, we use {@value TemplateManager#XS_TOOLS_ISO}. *
* - * For XCP we always use {@value #xenServerBefore70GuestToolsName}. */ protected String getActualIsoTemplate(Connection conn) throws XenAPIException, XmlRpcException { Host host = Host.getByUuid(conn, _host.getUuid()); @@ -2680,9 +2679,9 @@ protected String getActualIsoTemplate(Connection conn) throws XenAPIException, X String[] items = xenVersion.split("\\."); if ((xenBrand.equals("XenServer") || xenBrand.equals("XCP-ng")) && Integer.parseInt(items[0]) >= 7) { - return xenServer70plusGuestToolsName; + return XS_TOOLS_ISO_AFTER_70; } - return xenServerBefore70GuestToolsName; + return TemplateManager.XS_TOOLS_ISO; } public String getLabel() { diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscovererTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscovererTest.java index a3082c609877..f8188afd6828 100644 --- a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscovererTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscovererTest.java @@ -29,6 +29,7 @@ import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.template.TemplateManager; @RunWith(MockitoJUnitRunner.class) public class XcpServerDiscovererTest { @@ -42,13 +43,13 @@ public class XcpServerDiscovererTest { @Test public void createXenServerToolsIsoEntryInDatabaseTestNoEntryFound() { - Mockito.when(vmTemplateDao.findByTemplateName("xs-tools.iso")).thenReturn(null); + Mockito.when(vmTemplateDao.findByTemplateName(TemplateManager.XS_TOOLS_ISO)).thenReturn(null); Mockito.when(vmTemplateDao.getNextInSequence(Long.class, "id")).thenReturn(1L); xcpServerDiscoverer.createXenServerToolsIsoEntryInDatabase(); InOrder inOrder = Mockito.inOrder(vmTemplateDao); - 
inOrder.verify(vmTemplateDao).findByTemplateName("xs-tools.iso"); + inOrder.verify(vmTemplateDao).findByTemplateName(TemplateManager.XS_TOOLS_ISO); inOrder.verify(vmTemplateDao).getNextInSequence(Long.class, "id"); inOrder.verify(vmTemplateDao).persist(Mockito.any(VMTemplateVO.class)); } @@ -56,13 +57,13 @@ public void createXenServerToolsIsoEntryInDatabaseTestNoEntryFound() { @Test public void createXenServerToolsIsoEntryInDatabaseTestEntryAlreadyExist() { VMTemplateVO vmTemplateVOMock = Mockito.mock(VMTemplateVO.class); - Mockito.when(vmTemplateDao.findByTemplateName("xs-tools.iso")).thenReturn(vmTemplateVOMock); + Mockito.when(vmTemplateDao.findByTemplateName(TemplateManager.XS_TOOLS_ISO)).thenReturn(vmTemplateVOMock); Mockito.when(vmTemplateVOMock.getId()).thenReturn(1L); xcpServerDiscoverer.createXenServerToolsIsoEntryInDatabase(); InOrder inOrder = Mockito.inOrder(vmTemplateDao, vmTemplateVOMock); - inOrder.verify(vmTemplateDao).findByTemplateName("xs-tools.iso"); + inOrder.verify(vmTemplateDao).findByTemplateName(TemplateManager.XS_TOOLS_ISO); inOrder.verify(vmTemplateDao, Mockito.times(0)).getNextInSequence(Long.class, "id"); inOrder.verify(vmTemplateVOMock).setTemplateType(TemplateType.PERHOST); inOrder.verify(vmTemplateVOMock).setUrl(null); diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBaseTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBaseTest.java index b34bba09e807..b1a89c9da826 100644 --- a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBaseTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBaseTest.java @@ -42,6 +42,7 @@ import com.cloud.hypervisor.xenserver.resource.CitrixResourceBase.SRType; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Storage.StorageResourceType; +import 
com.cloud.template.TemplateManager; import com.cloud.utils.script.Script; import com.xensource.xenapi.Connection; import com.xensource.xenapi.Host; @@ -149,7 +150,7 @@ public void actualIsoTemplateTestXcpHots() throws XenAPIException, XmlRpcExcepti String returnedIsoTemplateName = citrixResourceBase.getActualIsoTemplate(connectionMock); - Assert.assertEquals("xs-tools.iso", returnedIsoTemplateName); + Assert.assertEquals(TemplateManager.XS_TOOLS_ISO, returnedIsoTemplateName); } @Test @@ -159,7 +160,7 @@ public void actualIsoTemplateTestXenServerBefore70() throws XenAPIException, Xml String returnedIsoTemplateName = citrixResourceBase.getActualIsoTemplate(connectionMock); - Assert.assertEquals("xs-tools.iso", returnedIsoTemplateName); + Assert.assertEquals(TemplateManager.XS_TOOLS_ISO, returnedIsoTemplateName); } @Test @@ -169,7 +170,7 @@ public void actualIsoTemplateTestXenServer70() throws XenAPIException, XmlRpcExc String returnedIsoTemplateName = citrixResourceBase.getActualIsoTemplate(connectionMock); - Assert.assertEquals("guest-tools.iso", returnedIsoTemplateName); + Assert.assertEquals(CitrixResourceBase.XS_TOOLS_ISO_AFTER_70, returnedIsoTemplateName); } @Test @@ -179,7 +180,7 @@ public void actualIsoTemplateTestXenServer71() throws XenAPIException, XmlRpcExc String returnedIsoTemplateName = citrixResourceBase.getActualIsoTemplate(connectionMock); - Assert.assertEquals("guest-tools.iso", returnedIsoTemplateName); + Assert.assertEquals(CitrixResourceBase.XS_TOOLS_ISO_AFTER_70, returnedIsoTemplateName); } @Test diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index a384a07d7b34..7e52d98bcc4b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -32,6 +32,7 @@ import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.UUID; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -53,6 +54,7 @@ import org.apache.cloudstack.api.response.KubernetesClusterResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; @@ -134,7 +136,11 @@ import com.cloud.user.AccountManager; import com.cloud.user.AccountService; import com.cloud.user.SSHKeyPairVO; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.user.UserVO; import com.cloud.user.dao.SSHKeyPairDao; +import com.cloud.user.dao.UserDao; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.ComponentContext; @@ -198,6 +204,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne @Inject protected AccountManager accountManager; @Inject + protected UserDao userDao; + @Inject protected VMInstanceDao vmInstanceDao; @Inject protected UserVmJoinDao userVmJoinDao; @@ -644,7 +652,17 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes return response; } + private void validateEndpointUrl() { + String csUrl = ApiServiceConfiguration.ApiServletPath.value(); + if (csUrl == null || csUrl.contains("localhost")) { + String error = String.format("Global setting %s has to be set to the Management Server's API end point", + ApiServiceConfiguration.ApiServletPath.key()); + throw new InvalidParameterValueException(error); + } + } + private 
void validateKubernetesClusterCreateParameters(final CreateKubernetesClusterCmd cmd) throws CloudRuntimeException { + validateEndpointUrl(); final String name = cmd.getName(); final Long zoneId = cmd.getZoneId(); final Long kubernetesVersionId = cmd.getKubernetesVersionId(); @@ -927,6 +945,8 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesClusterCmd cmd) { // Validate parameters + validateEndpointUrl(); + final Long kubernetesClusterId = cmd.getId(); final Long upgradeVersionId = cmd.getKubernetesVersionId(); if (kubernetesClusterId == null || kubernetesClusterId < 1L) { @@ -1093,6 +1113,9 @@ public boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate startWorker = ComponentContext.inject(startWorker); if (onCreate) { // Start for Kubernetes cluster in 'Created' state + Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId()); + String[] keys = getServiceUserKeys(owner); + startWorker.setKeys(keys); return startWorker.startKubernetesClusterOnCreate(); } else { // Start for Kubernetes cluster in 'Stopped' state. 
Resources are already provisioned, just need to be started @@ -1100,6 +1123,29 @@ public boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate } } + private String[] getServiceUserKeys(Account owner) { + if (owner == null) { + owner = CallContext.current().getCallingAccount(); + } + String username = owner.getAccountName() + "-" + KUBEADMIN_ACCOUNT_NAME; + UserAccount kubeadmin = accountService.getActiveUserAccount(username, owner.getDomainId()); + String[] keys = null; + if (kubeadmin == null) { + User kube = userDao.persist(new UserVO(owner.getAccountId(), username, UUID.randomUUID().toString(), owner.getAccountName(), + KUBEADMIN_ACCOUNT_NAME, "kubeadmin", null, UUID.randomUUID().toString(), User.Source.UNKNOWN)); + keys = accountService.createApiKeyAndSecretKey(kube.getId()); + } else { + String apiKey = kubeadmin.getApiKey(); + String secretKey = kubeadmin.getSecretKey(); + if (Strings.isNullOrEmpty(apiKey) || Strings.isNullOrEmpty(secretKey)) { + keys = accountService.createApiKeyAndSecretKey(kubeadmin.getId()); + } else { + keys = new String[]{apiKey, secretKey}; + } + } + return keys; + } + @Override public boolean stopKubernetesCluster(long kubernetesClusterId) throws CloudRuntimeException { if (!KubernetesServiceEnabled.value()) { @@ -1240,9 +1286,12 @@ public boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } validateKubernetesClusterUpgradeParameters(cmd); + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId()); + Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId()); + String[] keys = getServiceUserKeys(owner); KubernetesClusterUpgradeWorker upgradeWorker = new KubernetesClusterUpgradeWorker(kubernetesClusterDao.findById(cmd.getId()), - kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), this); + kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), 
this, keys); upgradeWorker = ComponentContext.inject(upgradeWorker); return upgradeWorker.upgradeCluster(); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java index db5ab91b3d11..07939ddb101a 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java @@ -34,6 +34,7 @@ public interface KubernetesClusterService extends PluggableService, Configurable static final String MIN_KUBERNETES_VERSION_HA_SUPPORT = "1.16.0"; static final int MIN_KUBERNETES_CLUSTER_NODE_CPU = 2; static final int MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE = 2048; + static final String KUBEADMIN_ACCOUNT_NAME = "kubeadmin"; static final ConfigKey KubernetesServiceEnabled = new ConfigKey("Advanced", Boolean.class, "cloud.kubernetes.service.enabled", diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 5f663dff7849..5426e9cd242e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -17,7 +17,9 @@ package com.cloud.kubernetes.cluster.actionworkers; +import java.io.BufferedWriter; import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -28,6 +30,7 @@ import org.apache.cloudstack.api.ApiConstants; 
import org.apache.cloudstack.ca.CAManager; +import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.commons.collections.CollectionUtils; @@ -50,6 +53,7 @@ import com.cloud.network.IpAddress; import com.cloud.network.IpAddressManager; import com.cloud.network.Network; +import com.cloud.network.Network.GuestType; import com.cloud.network.NetworkModel; import com.cloud.network.dao.NetworkDao; import com.cloud.service.dao.ServiceOfferingDao; @@ -70,6 +74,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; +import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.UserVmService; import com.cloud.vm.dao.UserVmDao; import com.google.common.base.Strings; @@ -127,12 +132,22 @@ public class KubernetesClusterActionWorker { protected String publicIpAddress; protected int sshPort; + + protected final String deploySecretsScriptFilename = "deploy-cloudstack-secret"; + protected final String deployProviderScriptFilename = "deploy-provider"; + protected final String scriptPath = "/opt/bin/"; + protected File deploySecretsScriptFile; + protected File deployProviderScriptFile; + protected KubernetesClusterManagerImpl manager; + protected String[] keys; + protected KubernetesClusterActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) { this.kubernetesCluster = kubernetesCluster; this.kubernetesClusterDao = clusterManager.kubernetesClusterDao; this.kubernetesClusterDetailsDao = clusterManager.kubernetesClusterDetailsDao; this.kubernetesClusterVmMapDao = clusterManager.kubernetesClusterVmMapDao; this.kubernetesSupportedVersionDao = clusterManager.kubernetesSupportedVersionDao; + this.manager = clusterManager; } protected void init() { @@ -380,4 +395,108 @@ 
protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Eve return false; } } + + protected boolean createCloudStackSecret(String[] keys) { + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + try { + final String command = String.format("sudo %s/%s -u '%s' -k '%s' -s '%s'", + scriptPath, deploySecretsScriptFilename, ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]); + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, command, 10000, 10000, 60000); + return result.first(); + } catch (Exception e) { + String msg = String.format("Failed to add cloudstack-secret to Kubernetes cluster: %s", kubernetesCluster.getName()); + LOGGER.warn(msg, e); + } + return false; + } + + protected File retrieveScriptFile(String filename) { + File file = null; + try { + String data = readResourceFile("/script/" + filename); + file = File.createTempFile(filename, ".sh"); + BufferedWriter writer = new BufferedWriter(new FileWriter(file)); + writer.write(data); + writer.close(); + } catch (IOException e) { + logAndThrow(Level.ERROR, String.format("Kubernetes Cluster %s : Failed to to fetch script %s", + kubernetesCluster.getName(), filename), e); + } + return file; + } + + protected void retrieveScriptFiles() { + deploySecretsScriptFile = retrieveScriptFile(deploySecretsScriptFilename); + deployProviderScriptFile = retrieveScriptFile(deployProviderScriptFilename); + } + + protected void copyScripts(String nodeAddress, final int sshPort) { + try { + SshHelper.scpTo(nodeAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + "~/", deploySecretsScriptFile.getAbsolutePath(), "0755"); + SshHelper.scpTo(nodeAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + "~/", deployProviderScriptFile.getAbsolutePath(), "0755"); + String cmdStr = 
String.format("sudo mv ~/%s %s/%s", deploySecretsScriptFile.getName(), scriptPath, deploySecretsScriptFilename); + SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + cmdStr, 10000, 10000, 10 * 60 * 1000); + cmdStr = String.format("sudo mv ~/%s %s/%s", deployProviderScriptFile.getName(), scriptPath, deployProviderScriptFilename); + SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, + cmdStr, 10000, 10000, 10 * 60 * 1000); + } catch (Exception e) { + throw new CloudRuntimeException(e); + } + } + + protected boolean deployProvider() { + Network network = networkDao.findById(kubernetesCluster.getNetworkId()); + // Since the provider creates IP addresses, don't deploy it unless the underlying network supports it + if (network.getGuestType() != GuestType.Isolated) { + logMessage(Level.INFO, String.format("Skipping adding the provider as %s is not on an isolated network", + kubernetesCluster.getName()), null); + return true; + } + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + try { + String command = String.format("sudo %s/%s", scriptPath, deployProviderScriptFilename); + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, command, 10000, 10000, 60000); + + // Maybe the file isn't present. Try and copy it + if (!result.first()) { + logMessage(Level.INFO, "Provider files missing. Adding them now", null); + retrieveScriptFiles(); + copyScripts(publicIpAddress, sshPort); + + if (!createCloudStackSecret(keys)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", + kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + + // If at first you don't succeed ... 
+ result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, command, 10000, 10000, 60000); + if (!result.first()) { + throw new CloudRuntimeException(result.second()); + } + } + return true; + } catch (Exception e) { + String msg = String.format("Failed to deploy kubernetes provider: %s : %s", kubernetesCluster.getName(), e.getMessage()); + logAndThrow(Level.ERROR, msg); + return false; + } + } + + public void setKeys(String[] keys) { + this.keys = keys; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 9a30fdd82357..072094e7c031 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -571,6 +571,7 @@ public boolean startKubernetesClusterOnCreate() { if (!isKubernetesClusterDashboardServiceRunning(true, startTimeoutTime)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } + deployProvider(); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); return true; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index 86c5c8ed70bc..e8b61d4a26da 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -50,9 +50,11 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke public KubernetesClusterUpgradeWorker(final KubernetesCluster kubernetesCluster, final KubernetesSupportedVersion upgradeVersion, - final KubernetesClusterManagerImpl clusterManager) { + final KubernetesClusterManagerImpl clusterManager, + final String[] keys) { super(kubernetesCluster, clusterManager); this.upgradeVersion = upgradeVersion; + this.keys = keys; } private void retrieveUpgradeScriptFile() { @@ -110,6 +112,8 @@ private void upgradeKubernetesClusterNodes() { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, upgrade action timed out", kubernetesCluster.getName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } try { + int port = (sshPort == CLUSTER_NODES_DEFAULT_START_SSH_PORT) ? 
sshPort + i : sshPort; + deployProvider(); result = runInstallScriptOnVM(vm, i); } catch (Exception e) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to upgrade Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml index a8650ac957b3..601df21454d4 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml @@ -125,6 +125,10 @@ write-files: done <<< "$output" setup_complete=true fi + if [ -e "${BINARIES_DIR}/provider.yaml" ]; then + mkdir -p /opt/provider + cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml + fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then eject "${iso_drive_path}" diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml index c2cecc4a0994..44a78a346e1f 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml @@ -147,6 +147,10 @@ write-files: fi mkdir -p "${K8S_CONFIG_SCRIPTS_COPY_DIR}" cp ${BINARIES_DIR}/*.yaml "${K8S_CONFIG_SCRIPTS_COPY_DIR}" + if [ -e "${BINARIES_DIR}/provider.yaml" ]; then + mkdir -p /opt/provider + cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml + fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then 
eject "${iso_drive_path}" diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml index f65cc9c82963..03ed7013a4ca 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml @@ -125,6 +125,10 @@ write-files: done <<< "$output" setup_complete=true fi + if [ -e "${BINARIES_DIR}/provider.yaml" ]; then + mkdir -p /opt/provider + cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml + fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then eject "${iso_drive_path}" diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret new file mode 100755 index 000000000000..9356f8a03f14 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret @@ -0,0 +1,68 @@ +#!/bin/bash -e +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +function usage() { + cat << USAGE +Usage: ./deploy-cloudstack-secret [OPTIONS]... +To deploy the keys needed for the cloudstack kubernetes provider. +Arguments: + -u, --url string ID of the cluster + -k, --key string API Key + -s, --secret string Secret Key +Other arguments: + -h, --help Display this help message and exit +Examples: + ./deploy-cloudstack-secret -u http://localhost:8080 -k abcd -s efgh +USAGE + exit 0 +} +API_URL="" +API_KEY="" +SECRET_KEY="" +while [ -n "$1" ]; do + case "$1" in + -h | --help) + usage + ;; + -u | --url) + API_URL=$2 + shift 2 + ;; + -k | --key) + API_KEY=$2 + shift 2 + ;; + -s | --secret) + SECRET_KEY=$2 + shift 2 + ;; + -*|*) + echo "ERROR: no such option $1. -h or --help for help" + exit 1 + ;; + esac +done +cat > /tmp/cloud-config < listVmMetrics(List vmResponses) { metricsResponse.setDiskRead(vmResponse.getDiskKbsRead()); metricsResponse.setDiskWrite(vmResponse.getDiskKbsWrite()); metricsResponse.setDiskIopsTotal(vmResponse.getDiskIORead(), vmResponse.getDiskIOWrite()); + metricsResponse.setLastUpdated(vmResponse.getLastUpdated()); metricsResponses.add(metricsResponse); } return metricsResponses; diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java index 68ff2e803789..fb1eaa198bc7 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java @@ -457,6 +457,12 @@ public Map getKeys(GetUserKeysCmd cmd){ return null; } + + @Override + public Map getKeys(Long userId) { + return null; + } + @Override public void checkAccess(User user, ControlledEntity entity) throws PermissionDeniedException { diff 
--git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index 1b2e41ab3867..f39b17076e7a 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -359,6 +359,7 @@ public DataStore initialize(Map dsInfos) { parameters.setName(poolName); parameters.setClusterId(clusterId); parameters.setProviderName(providerName); + parameters.setHypervisorType(hypervisorType); return dataStoreHelper.createPrimaryDataStore(parameters); } @@ -404,6 +405,7 @@ protected boolean createStoragePool(long hostId, StoragePool pool) { CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); final Answer answer = agentMgr.easySend(hostId, cmd); if (answer != null && answer.getResult()) { + storageMgr.updateStorageCapabilities(pool.getId(), false); return true; } else { primaryDataStoreDao.expunge(pool.getId()); diff --git a/scripts/util/create-kubernetes-binaries-iso.sh b/scripts/util/create-kubernetes-binaries-iso.sh index 67062bea4c41..241b45e8b33a 100755 --- a/scripts/util/create-kubernetes-binaries-iso.sh +++ b/scripts/util/create-kubernetes-binaries-iso.sh @@ -77,6 +77,11 @@ echo "Downloading dashboard config ${DASHBORAD_CONFIG_URL}" dashboard_conf_file="${working_dir}/dashboard.yaml" curl -sSL ${DASHBORAD_CONFIG_URL} -o ${dashboard_conf_file} +PROVIDER_URL="https://raw.githubusercontent.com/apache/cloudstack-kubernetes-provider/main/deployment.yaml" +echo "Downloading kubernetes cluster provider ${PROVIDER_URL}" +provider_conf_file="${working_dir}/provider.yaml" +curl -sSL ${PROVIDER_URL} -o 
${provider_conf_file} + echo "Fetching k8s docker images..." docker -v if [ $? -ne 0 ]; then @@ -102,6 +107,9 @@ do output=`printf "%s\n" ${output} ${images}` done +provider_image=`grep "image:" ${provider_conf_file} | cut -d ':' -f2- | tr -d ' '` +output=`printf "%s\n" ${output} ${provider_image}` + while read -r line; do echo "Downloading docker image $line ---" sudo docker pull "$line" diff --git a/scripts/vm/hypervisor/kvm/kvmheartbeat.sh b/scripts/vm/hypervisor/kvm/kvmheartbeat.sh index df2e54db85ad..a931d94aaf28 100755 --- a/scripts/vm/hypervisor/kvm/kvmheartbeat.sh +++ b/scripts/vm/hypervisor/kvm/kvmheartbeat.sh @@ -138,7 +138,7 @@ check_hbLog() { diff=`expr $now - $hb` if [ $diff -gt $interval ] then - return 1 + return $diff fi return 0 } @@ -146,11 +146,12 @@ check_hbLog() { if [ "$rflag" == "1" ] then check_hbLog - if [ $? == 0 ] + diff=$? + if [ $diff == 0 ] then echo "=====> ALIVE <=====" else - echo "=====> DEAD <======" + echo "=====> Considering host as DEAD because last write on [$hbFile] was [$diff] seconds ago, but the max interval is [$interval] <======" fi exit 0 elif [ "$cflag" == "1" ] diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index f6e4360b9816..5ba8c26009c3 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -759,7 +759,8 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {CPUCapacityThreshold, MemoryCapacityThreshold, StorageAllocatedCapacityThreshold, StorageCapacityThreshold}; + return new ConfigKey[] {CPUCapacityThreshold, MemoryCapacityThreshold, StorageAllocatedCapacityThreshold, StorageCapacityThreshold, AlertSmtpEnabledSecurityProtocols, + AlertSmtpUseStartTLS}; } @Override diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java 
b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index b84e10d35a04..d72d9405b6e2 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -2354,6 +2354,7 @@ public NetworkResponse createNetworkResponse(ResponseView view, Network network) } response.setExternalId(network.getExternalId()); response.setRedundantRouter(network.isRedundant()); + response.setCreated(network.getCreated()); response.setObjectName("network"); return response; } @@ -2963,6 +2964,7 @@ public VpcResponse createVpcResponse(ResponseView view, Vpc vpc) { response.setId(vpc.getUuid()); response.setName(vpc.getName()); response.setDisplayText(vpc.getDisplayText()); + response.setCreated(vpc.getCreated()); response.setState(vpc.getState().name()); VpcOffering voff = ApiDBUtils.findVpcOfferingById(vpc.getVpcOfferingId()); if (voff != null) { diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 0c8a7f794355..bc1a9fa4b956 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -31,8 +31,6 @@ import javax.inject.Inject; -import com.cloud.storage.dao.VMTemplateDetailsDao; -import com.cloud.vm.VirtualMachineManager; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -119,6 +117,8 @@ import org.apache.cloudstack.query.QueryService; import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import 
org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.commons.collections.CollectionUtils; @@ -218,6 +218,7 @@ import com.cloud.storage.Volume; import com.cloud.storage.dao.StoragePoolTagsDao; import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.template.VirtualMachineTemplate.State; @@ -244,6 +245,7 @@ import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.UserVmDao; @@ -421,6 +423,9 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Inject private PrimaryDataStoreDao _storagePoolDao; + @Inject + private StoragePoolDetailsDao _storagePoolDetailsDao; + @Inject private ProjectInvitationDao projectInvitationDao; @@ -2420,7 +2425,16 @@ public ListResponse searchForStoragePools(ListStoragePoolsC if (store != null) { DataStoreDriver driver = store.getDriver(); if (driver != null && driver.getCapabilities() != null) { - poolResponse.setCaps(driver.getCapabilities()); + Map caps = driver.getCapabilities(); + if (Storage.StoragePoolType.NetworkFilesystem.toString().equals(poolResponse.getType()) && + HypervisorType.VMware.toString().equals(poolResponse.getHypervisor())) { + StoragePoolVO pool = _storagePoolDao.findPoolByUUID(poolResponse.getId()); + StoragePoolDetailVO detail = _storagePoolDetailsDao.findDetail(pool.getId(), Storage.Capability.HARDWARE_ACCELERATION.toString()); + if (detail != null) { + caps.put(Storage.Capability.HARDWARE_ACCELERATION.toString(), detail.getValue()); + } + } + poolResponse.setCaps(caps); } } } @@ -3649,7 +3663,7 @@ private Pair, Integer> templateChecks(boolean isIso, List zoneSc = _templateJoinDao.createSearchCriteria(); zoneSc.addOr("dataCenterId", 
SearchCriteria.Op.EQ, zoneId); zoneSc.addOr("dataStoreScope", SearchCriteria.Op.EQ, ScopeType.REGION); - // handle the case where xs-tools.iso and vmware-tools.iso do not + // handle the case where TemplateManager.XS_TOOLS_ISO and TemplateManager.VMWARE_TOOLS_ISO do not // have data_center information in template_view SearchCriteria isoPerhostSc = _templateJoinDao.createSearchCriteria(); isoPerhostSc.addAnd("format", SearchCriteria.Op.EQ, ImageFormat.ISO); diff --git a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java index 0c258d1966a4..ee3e0f21829d 100644 --- a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java @@ -88,6 +88,7 @@ public NetworkOfferingResponse newNetworkOfferingResponse(NetworkOffering offeri networkOfferingResponse.setConcurrentConnections(offering.getConcurrentConnections()); networkOfferingResponse.setSupportsStrechedL2Subnet(offering.isSupportingStrechedL2()); networkOfferingResponse.setSupportsPublicAccess(offering.isSupportingPublicAccess()); + networkOfferingResponse.setCreated(offering.getCreated()); if (offering.getGuestType() != null) { networkOfferingResponse.setGuestIpType(offering.getGuestType().toString()); } diff --git a/server/src/main/java/com/cloud/api/query/dao/ProjectJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/ProjectJoinDaoImpl.java index 3f384f3c0451..d893a5ca37a9 100644 --- a/server/src/main/java/com/cloud/api/query/dao/ProjectJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/ProjectJoinDaoImpl.java @@ -110,6 +110,7 @@ public ProjectResponse newProjectResponse(EnumSet details, Projec ownersList.add(ownerDetails); } response.setOwners(ownersList); + response.setCreated(proj.getCreated()); // update tag information List tags = 
ApiDBUtils.listResourceTagViewByResourceUUID(proj.getUuid(), ResourceObjectType.Project); diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 8527ac0ece95..6ce2a0bc0c9e 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -26,15 +26,7 @@ import javax.inject.Inject; -import com.cloud.deployasis.DeployAsIsConstants; -import com.cloud.deployasis.TemplateDeployAsIsDetailVO; -import com.cloud.deployasis.dao.TemplateDeployAsIsDetailsDao; import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; -import org.apache.cloudstack.utils.security.DigestHelper; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.response.ChildTemplateResponse; import org.apache.cloudstack.api.response.TemplateResponse; @@ -43,13 +35,20 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.cloudstack.utils.security.DigestHelper; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.ResourceTagJoinVO; import com.cloud.api.query.vo.TemplateJoinVO; +import com.cloud.deployasis.DeployAsIsConstants; +import com.cloud.deployasis.TemplateDeployAsIsDetailVO; +import 
com.cloud.deployasis.dao.TemplateDeployAsIsDetailsDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage; import com.cloud.storage.Storage.TemplateType; @@ -368,7 +367,7 @@ public TemplateResponse newIsoResponse(TemplateJoinVO iso) { isoResponse.setCreated(iso.getCreatedOnStore()); isoResponse.setDynamicallyScalable(iso.isDynamicallyScalable()); if (iso.getTemplateType() == TemplateType.PERHOST) { - // for xs-tools.iso and vmware-tools.iso, we didn't download, but is ready to use. + // for TemplateManager.XS_TOOLS_ISO and TemplateManager.VMWARE_TOOLS_ISO, we didn't download, but is ready to use. isoResponse.setReady(true); } else { isoResponse.setReady(iso.getState() == ObjectInDataStoreStateMachine.State.Ready); diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 09eaee356e7e..f0a29db961f4 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -138,6 +138,7 @@ public UserVmResponse newUserVmResponse(ResponseView view, String objectName, Us userVmResponse.setDomainName(userVm.getDomainName()); userVmResponse.setCreated(userVm.getCreated()); + userVmResponse.setLastUpdated(userVm.getLastUpdated()); userVmResponse.setDisplayVm(userVm.isDisplayVm()); if (userVm.getState() != null) { @@ -277,6 +278,7 @@ public UserVmResponse newUserVmResponse(ResponseView view, String objectName, Us nicResponse.setType(userVm.getGuestType().toString()); } nicResponse.setIsDefault(userVm.isDefaultNic()); + nicResponse.setDeviceId(String.valueOf(userVm.getNicDeviceId())); List secondaryIps = ApiDBUtils.findNicSecondaryIps(userVm.getNicId()); if (secondaryIps != null) { List ipList = new ArrayList(); @@ -437,6 +439,7 @@ public UserVmResponse setUserVmResponse(ResponseView view, UserVmResponse userVm } /*17: default*/ 
nicResponse.setIsDefault(uvo.isDefaultNic()); + nicResponse.setDeviceId(String.valueOf(uvo.getNicDeviceId())); List secondaryIps = ApiDBUtils.findNicSecondaryIps(uvo.getNicId()); if (secondaryIps != null) { List ipList = new ArrayList(); diff --git a/server/src/main/java/com/cloud/api/query/vo/UserVmJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/UserVmJoinVO.java index 6d48bec716be..0ef33a919abf 100644 --- a/server/src/main/java/com/cloud/api/query/vo/UserVmJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/UserVmJoinVO.java @@ -108,6 +108,9 @@ public class UserVmJoinVO extends BaseViewWithTagInformationVO implements Contro @Column(name = GenericDao.REMOVED_COLUMN) private Date removed; + @Column(name="update_time") + private Date lastUpdated; + @Column(name = "instance_name", updatable = true, nullable = false) private String instanceName; @@ -276,6 +279,9 @@ public class UserVmJoinVO extends BaseViewWithTagInformationVO implements Contro @Column(name = "nic_uuid") private String nicUuid; + @Column(name = "nic_device_id") + private Integer nicDeviceId = null; + @Column(name = "is_default_nic") private boolean isDefaultNic; @@ -488,6 +494,10 @@ public Date getRemoved() { return removed; } + public Date getLastUpdated() { + return lastUpdated; + } + public String getInstanceName() { return instanceName; } @@ -668,6 +678,10 @@ public long getNicId() { return nicId; } + public Integer getNicDeviceId() { + return nicDeviceId; + } + public boolean isDefaultNic() { return isDefaultNic; } diff --git a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index 05ea9fd94ca2..181b1db3f4a8 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -1252,6 +1252,6 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] 
{CpuOverprovisioningFactor, MemOverprovisioningFactor, StorageCapacityDisableThreshold, StorageOverprovisioningFactor, - StorageAllocatedCapacityDisableThreshold, StorageOperationsExcludeCluster, VmwareCreateCloneFull, ImageStoreNFSVersion}; + StorageAllocatedCapacityDisableThreshold, StorageOperationsExcludeCluster, VmwareCreateCloneFull, ImageStoreNFSVersion, SecondaryStorageCapacityThreshold}; } } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index f431945c6321..c0b1e2dc1c1c 100755 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -161,7 +161,9 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.gpu.GPU; +import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; +import com.cloud.host.dao.HostTagsDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.IpAddressManager; import com.cloud.network.Network; @@ -211,6 +213,7 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.Volume; import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.StoragePoolTagsDao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.test.IPRangeConfig; @@ -392,6 +395,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati private VMTemplateZoneDao templateZoneDao; @Inject VsphereStoragePolicyDao vsphereStoragePolicyDao; + @Inject + HostTagsDao hostTagDao; + @Inject + StoragePoolTagsDao storagePoolTagDao; // FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao? 
@@ -404,6 +411,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati protected Set configValuesForValidation; private Set weightBasedParametersForValidation; private Set overprovisioningFactorsForValidation; + public static final String VM_USERDATA_MAX_LENGTH_STRING = "vm.userdata.max.length"; public static final ConfigKey SystemVMUseLocalStorage = new ConfigKey(Boolean.class, "system.vm.use.local.storage", "Advanced", "false", "Indicates whether to use local storage pools or shared storage pools for system VMs.", false, ConfigKey.Scope.Zone, null); @@ -423,6 +431,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati "Indicates whether the host in down state can be put into maintenance state so thats its not enabled after it comes back.", true, ConfigKey.Scope.Zone, null); + public static final ConfigKey VM_USERDATA_MAX_LENGTH = new ConfigKey("Advanced", Integer.class, VM_USERDATA_MAX_LENGTH_STRING, "32768", + "Max length of vm userdata after base64 decoding. 
Default is 32768 and maximum is 1048576", true); + private static final String IOPS_READ_RATE = "IOPS Read"; private static final String IOPS_WRITE_RATE = "IOPS Write"; private static final String BYTES_READ_RATE = "Bytes Read"; @@ -477,6 +488,7 @@ private void populateConfigValuesForValidationSet() { configValuesForValidation.add(StorageManager.STORAGE_POOL_DISK_WAIT.key()); configValuesForValidation.add(StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.key()); configValuesForValidation.add(StorageManager.STORAGE_POOL_CLIENT_MAX_CONNECTIONS.key()); + configValuesForValidation.add(VM_USERDATA_MAX_LENGTH_STRING); } private void weightBasedParametersForValidation() { @@ -497,6 +509,7 @@ private void weightBasedParametersForValidation() { weightBasedParametersForValidation.add(DeploymentClusterPlanner.ClusterMemoryCapacityDisableThreshold.key()); weightBasedParametersForValidation.add(Config.AgentLoadThreshold.key()); weightBasedParametersForValidation.add(Config.VmUserDispersionWeight.key()); + weightBasedParametersForValidation.add(CapacityManager.SecondaryStorageCapacityThreshold.key()); } @@ -950,6 +963,11 @@ private String validateConfigurationValue(final String name, String value, final throw new InvalidParameterValueException("Please enter a value less than 257 for the configuration parameter:" + name); } } + if (VM_USERDATA_MAX_LENGTH_STRING.equalsIgnoreCase(name)) { + if (val > 1048576) { + throw new InvalidParameterValueException("Please enter a value less than 1048576 for the configuration parameter:" + name); + } + } } catch (final NumberFormatException e) { s_logger.error("There was an error trying to parse the integer value for:" + name); throw new InvalidParameterValueException("There was an error trying to parse the integer value for:" + name); @@ -2705,6 +2723,8 @@ public ServiceOffering updateServiceOffering(final UpdateServiceOfferingCmd cmd) Long userId = CallContext.current().getCallingUserId(); final List domainIds = cmd.getDomainIds(); final List 
zoneIds = cmd.getZoneIds(); + String storageTags = cmd.getStorageTags(); + String hostTags = cmd.getHostTags(); if (userId == null) { userId = Long.valueOf(User.UID_SYSTEM); @@ -2786,7 +2806,7 @@ public ServiceOffering updateServiceOffering(final UpdateServiceOfferingCmd cmd) throw new InvalidParameterValueException(String.format("Unable to update service offering: %s by id user: %s because it is not root-admin or domain-admin", offeringHandle.getUuid(), user.getUuid())); } - final boolean updateNeeded = name != null || displayText != null || sortKey != null; + final boolean updateNeeded = name != null || displayText != null || sortKey != null || storageTags != null || hostTags != null; final boolean detailsUpdateNeeded = !filteredDomainIds.equals(existingDomainIds) || !filteredZoneIds.equals(existingZoneIds); if (!updateNeeded && !detailsUpdateNeeded) { return _serviceOfferingDao.findById(id); @@ -2806,29 +2826,9 @@ public ServiceOffering updateServiceOffering(final UpdateServiceOfferingCmd cmd) offering.setSortKey(sortKey); } - // Note: tag editing commented out for now; keeping the code intact, - // might need to re-enable in next releases - // if (tags != null) - // { - // if (tags.trim().isEmpty() && offeringHandle.getTags() == null) - // { - // //no new tags; no existing tags - // offering.setTagsArray(csvTagsToList(null)); - // } - // else if (!tags.trim().isEmpty() && offeringHandle.getTags() != null) - // { - // //new tags + existing tags - // List oldTags = csvTagsToList(offeringHandle.getTags()); - // List newTags = csvTagsToList(tags); - // oldTags.addAll(newTags); - // offering.setTagsArray(oldTags); - // } - // else if(!tags.trim().isEmpty()) - // { - // //new tags; NO existing tags - // offering.setTagsArray(csvTagsToList(tags)); - // } - // } + updateOfferingTagsIfIsNotNull(storageTags, offering); + + updateServiceOfferingHostTagsIfNotNull(hostTags, offering); if (updateNeeded && !_serviceOfferingDao.update(id, offering)) { return null; @@ -3283,7 
+3283,7 @@ public DiskOffering updateDiskOffering(final UpdateDiskOfferingCmd cmd) { diskOffering.setDisplayOffering(displayDiskOffering); } - updateDiskOfferingTagsIfIsNotNull(tags, diskOffering); + updateOfferingTagsIfIsNotNull(tags, diskOffering); validateMaxRateEqualsOrGreater(iopsReadRate, iopsReadRateMax, IOPS_READ_RATE); validateMaxRateEqualsOrGreater(iopsWriteRate, iopsWriteRateMax, IOPS_WRITE_RATE); @@ -3335,22 +3335,63 @@ public DiskOffering updateDiskOffering(final UpdateDiskOfferingCmd cmd) { } /** - * Check the tags parameters to the diskOffering + * Check the tags parameters to the disk/service offering *
    *
     *  <li>If tags is null, do nothing and return.</li>
-    *  <li>If tags is not null, set tag to the diskOffering.</li>
-    *  <li>If tags is an blank string, set null on diskOffering tag.</li>
+    *  <li>If tags is not null, will set tag to the disk/service offering if the pools with active volumes have the new tags.</li>
+    *  <li>If tags is a blank string, set null on disk/service offering tag.</li>
     * </ul>
*/ - protected void updateDiskOfferingTagsIfIsNotNull(String tags, DiskOfferingVO diskOffering) { + protected void updateOfferingTagsIfIsNotNull(String tags, DiskOfferingVO diskOffering) { if (tags == null) { return; } if (StringUtils.isNotBlank(tags)) { + tags = StringUtils.cleanupTags(tags); + List pools = _storagePoolDao.listStoragePoolsWithActiveVolumesByOfferingId(diskOffering.getId()); + if (CollectionUtils.isNotEmpty(pools)) { + List listOfTags = Arrays.asList(tags.split(",")); + for (StoragePoolVO storagePoolVO : pools) { + List tagsOnPool = storagePoolTagDao.getStoragePoolTags(storagePoolVO.getId()); + if (CollectionUtils.isEmpty(tagsOnPool) || !tagsOnPool.containsAll(listOfTags)) { + throw new InvalidParameterValueException(String.format("There are active volumes using offering [%s], and the pools [%s] don't have the new tags", diskOffering.getId(), pools)); + } + } + } diskOffering.setTags(tags); } else { diskOffering.setTags(null); } } + /** + * Check the host tags parameters to the service offering + *
+    * <ul>
+    *  <li>If host tags is null, do nothing and return.</li>
+    *  <li>If host tags is not null, will set host tag to the service offering if the hosts with active VMs have the new tags.</li>
+    *  <li>If host tags is a blank string, set null on service offering tag.</li>
+    * </ul>
+ */ + protected void updateServiceOfferingHostTagsIfNotNull(String hostTags, ServiceOfferingVO offering) { + if (hostTags == null) { + return; + } + if (StringUtils.isNotBlank(hostTags)) { + hostTags = StringUtils.cleanupTags(hostTags); + List hosts = _hostDao.listHostsWithActiveVMs(offering.getId()); + if (CollectionUtils.isNotEmpty(hosts)) { + List listOfHostTags = Arrays.asList(hostTags.split(",")); + for (HostVO host : hosts) { + List tagsOnHost = hostTagDao.gethostTags(host.getId()); + if (CollectionUtils.isEmpty(tagsOnHost) || !tagsOnHost.containsAll(listOfHostTags)) { + throw new InvalidParameterValueException(String.format("There are active VMs using offering [%s], and the hosts [%s] don't have the new tags", offering.getId(), hosts)); + } + } + } + offering.setHostTag(hostTags); + } else { + offering.setHostTag(null); + } + } + /** * Check if it needs to update any parameter when updateDiskoffering is called * Verify if name or displayText are not blank, tags is not null, sortkey and displayDiskOffering is not null @@ -6488,6 +6529,7 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] {SystemVMUseLocalStorage, IOPS_MAX_READ_LENGTH, IOPS_MAX_WRITE_LENGTH, - BYTES_MAX_READ_LENGTH, BYTES_MAX_WRITE_LENGTH, ADD_HOST_ON_SERVICE_RESTART_KVM, SET_HOST_DOWN_TO_MAINTENANCE}; + BYTES_MAX_READ_LENGTH, BYTES_MAX_WRITE_LENGTH, ADD_HOST_ON_SERVICE_RESTART_KVM, SET_HOST_DOWN_TO_MAINTENANCE, + VM_USERDATA_MAX_LENGTH}; } } diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 3a89d9641c57..6cc0ace1e056 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -69,10 +69,10 @@ import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; import 
com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.host.HostVO; @@ -143,111 +143,114 @@ import com.cloud.vm.dao.VMInstanceDao; import com.google.gson.Gson; import com.google.gson.GsonBuilder; - -// -// Possible console proxy state transition cases -// Stopped --> Starting -> Running -// HA -> Stopped -> Starting -> Running -// Migrating -> Running (if previous state is Running before it enters into Migrating state -// Migrating -> Stopped (if previous state is not Running before it enters into Migrating state) -// Running -> HA (if agent lost connection) -// Stopped -> Destroyed -// -// Starting, HA, Migrating, Running state are all counted as "Open" for available capacity calculation -// because sooner or later, it will be driven into Running state -// +import com.google.gson.JsonParseException; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; +import org.apache.commons.lang3.BooleanUtils; + +/** + * Class to manage console proxys.

+ * Possible console proxy state transition cases:
+ * - Stopped -> Starting -> Running
+ * - HA -> Stopped -> Starting -> Running
+ * - Migrating -> Running (if previous state is Running before it enters into Migrating state)
+ * - Migrating -> Stopped (if previous state is not Running before it enters into Migrating state)
+ * - Running -> HA (if agent lost connection)
+ * - Stopped -> Destroyed
+ * + * Starting, HA, Migrating and Running states are all counted as Open for available capacity calculation because sooner or later, it will be driven into Running state + **/ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxyManager, VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter, Configurable { private static final Logger s_logger = Logger.getLogger(ConsoleProxyManagerImpl.class); - private static final int DEFAULT_CAPACITY_SCAN_INTERVAL = 30000; // 30 seconds - private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC = 180; // 3 minutes + private static final int DEFAULT_CAPACITY_SCAN_INTERVAL_IN_MILLISECONDS = 30000; + private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC_IN_SECONDS = 180; + private static final int STARTUP_DELAY_IN_MILLISECONDS = 60000; - private static final int STARTUP_DELAY = 60000; // 60 seconds + private int consoleProxyPort = ConsoleProxyManager.DEFAULT_PROXY_VNC_PORT; - private int _consoleProxyPort = ConsoleProxyManager.DEFAULT_PROXY_VNC_PORT; + private int managementPort = 8250; - private int _mgmtPort = 8250; - - private List _consoleProxyAllocators; + private List consoleProxyAllocators; @Inject - private ConsoleProxyDao _consoleProxyDao; + private ConsoleProxyDao consoleProxyDao; @Inject - private DataCenterDao _dcDao; + private DataCenterDao dataCenterDao; @Inject - private VMTemplateDao _templateDao; + private VMTemplateDao vmTemplateDao; @Inject - private HostPodDao _podDao; + private HostPodDao hostPodDao; @Inject - private HostDao _hostDao; + private HostDao hostDao; @Inject - private ConfigurationDao _configDao; + private ConfigurationDao configurationDao; @Inject - private VMInstanceDao _instanceDao; + private VMInstanceDao vmInstanceDao; @Inject - private TemplateDataStoreDao _vmTemplateStoreDao; + private TemplateDataStoreDao templateDataStoreDao; @Inject - private AgentManager _agentMgr; + private AgentManager agentManager; @Inject - private 
NetworkOrchestrationService _networkMgr; + private NetworkOrchestrationService networkOrchestrationService; @Inject - private NetworkModel _networkModel; + private NetworkModel networkModel; @Inject - private AccountManager _accountMgr; + private AccountManager accountManager; @Inject - private ServiceOfferingDao _offeringDao; + private ServiceOfferingDao serviceOfferingDao; @Inject - private NetworkOfferingDao _networkOfferingDao; + private NetworkOfferingDao networkOfferingDao; @Inject - private PrimaryDataStoreDao _storagePoolDao; + private PrimaryDataStoreDao primaryDataStoreDao; @Inject - private UserVmDetailsDao _vmDetailsDao; + private UserVmDetailsDao userVmDetailsDao; @Inject - private ResourceManager _resourceMgr; + private ResourceManager resourceManager; @Inject - private NetworkDao _networkDao; + private NetworkDao networkDao; @Inject - private RulesManager _rulesMgr; + private RulesManager rulesManager; @Inject - private IPAddressDao _ipAddressDao; + private IPAddressDao ipAddressDao; @Inject - private KeysManager _keysMgr; + private KeysManager keysManager; @Inject - private VirtualMachineManager _itMgr; + private VirtualMachineManager virtualMachineManager; @Inject private IndirectAgentLB indirectAgentLB; - private ConsoleProxyListener _listener; + private ConsoleProxyListener consoleProxyListener; - private ServiceOfferingVO _serviceOffering; + private ServiceOfferingVO serviceOfferingVO; - /* - * private final ExecutorService _requestHandlerScheduler = Executors.newCachedThreadPool(new - * NamedThreadFactory("Request-handler")); - */ - private long _capacityScanInterval = DEFAULT_CAPACITY_SCAN_INTERVAL; - private int _capacityPerProxy = ConsoleProxyManager.DEFAULT_PROXY_CAPACITY; - private int _standbyCapacity = ConsoleProxyManager.DEFAULT_STANDBY_CAPACITY; + private long capacityScanInterval = DEFAULT_CAPACITY_SCAN_INTERVAL_IN_MILLISECONDS; + private int capacityPerProxy = ConsoleProxyManager.DEFAULT_PROXY_CAPACITY; + private int standbyCapacity 
= ConsoleProxyManager.DEFAULT_STANDBY_CAPACITY; + + private boolean useStorageVm; + private boolean disableRpFilter = false; + private String instance; + + private int proxySessionTimeoutValue = DEFAULT_PROXY_SESSION_TIMEOUT; + private boolean sslEnabled = false; + private String consoleProxyUrlDomain; - private boolean _useStorageVm; - private boolean _disableRpFilter = false; - private String _instance; + private SystemVmLoadScanner loadScanner; + private Map zoneHostInfoMap; + private Map zoneProxyCountMap; + private Map zoneVmCountMap; - private int _proxySessionTimeoutValue = DEFAULT_PROXY_SESSION_TIMEOUT; - private boolean _sslEnabled = false; - private String _consoleProxyUrlDomain; + private String staticPublicIp; + private int staticPort; - // global load picture at zone basis - private SystemVmLoadScanner _loadScanner; - private Map _zoneHostInfoMap; // map - private Map _zoneProxyCountMap; // map - private Map _zoneVmCountMap; // map + private final GlobalLock allocProxyLock = GlobalLock.getInternLock(getAllocProxyLockName()); - private String _staticPublicIp; - private int _staticPort; + protected Gson jsonParser = new GsonBuilder().setVersion(1.3).create(); - private final GlobalLock _allocProxyLock = GlobalLock.getInternLock(getAllocProxyLockName()); + protected Set availableVmStateOnAssignProxy = new HashSet<>(Arrays.asList(State.Starting, State.Running, State.Stopping, State.Migrating)); @Inject private KeystoreDao _ksDao; @@ -262,50 +265,13 @@ public VmBasedAgentHook(VMInstanceDao instanceDao, HostDao hostDao, Configuratio @Override public void onLoadReport(ConsoleProxyLoadReportCommand cmd) { - if (cmd.getLoadInfo() == null) { - return; - } - - ConsoleProxyStatus status = null; - try { - GsonBuilder gb = new GsonBuilder(); - gb.setVersion(1.3); - Gson gson = gb.create(); - status = gson.fromJson(cmd.getLoadInfo(), ConsoleProxyStatus.class); - } catch (Throwable e) { - s_logger.warn("Unable to parse load info from proxy, proxy vm id : " + 
cmd.getProxyVmId() + ", info : " + cmd.getLoadInfo()); - } - - if (status != null) { - int count = 0; - if (status.getConnections() != null) { - count = status.getConnections().length; - } - - byte[] details = null; - if (cmd.getLoadInfo() != null) { - details = cmd.getLoadInfo().getBytes(Charset.forName("US-ASCII")); - } - _consoleProxyDao.update(cmd.getProxyVmId(), count, DateUtil.currentGMTTime(), details); - } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Unable to get console proxy load info, id : " + cmd.getProxyVmId()); - } - - _consoleProxyDao.update(cmd.getProxyVmId(), 0, DateUtil.currentGMTTime(), null); - } + updateConsoleProxyStatus(cmd.getLoadInfo(), cmd.getProxyVmId()); } @Override public void onAgentDisconnect(long agentId, com.cloud.host.Status state) { if (state == com.cloud.host.Status.Alert || state == com.cloud.host.Status.Disconnected) { - // be it either in alert or in disconnected state, the agent - // process - // may be gone in the VM, - // we will be reacting to stop the corresponding VM and let the - // scan - // process to HostVO host = _hostDao.findById(agentId); if (host.getType() == Type.ConsoleProxy) { String name = host.getName(); @@ -314,40 +280,18 @@ public void onAgentDisconnect(long agentId, com.cloud.host.Status state) { } if (name != null && name.startsWith("v-")) { String[] tokens = name.split("-"); - long proxyVmId = 0; + long proxyVmId; + String tokenSecondElement = tokens[1]; try { - proxyVmId = Long.parseLong(tokens[1]); + proxyVmId = Long.parseLong(tokenSecondElement); } catch (NumberFormatException e) { - s_logger.error("Unexpected exception " + e.getMessage(), e); + s_logger.error(String.format("[%s] is not a valid number, unable to parse [%s].", tokenSecondElement, e.getMessage()), e); return; } - final ConsoleProxyVO proxy = _consoleProxyDao.findById(proxyVmId); - if (proxy != null) { - - // Disable this feature for now, as it conflicts - // with - // the case of allowing user to reboot console proxy - 
// when rebooting happens, we will receive - // disconnect - // here and we can't enter into stopping process, - // as when the rebooted one comes up, it will kick - // off a - // newly started one and trigger the process - // continue on forever - - /* - * _capacityScanScheduler.execute(new Runnable() { - * public void run() { if(s_logger.isInfoEnabled()) - * s_logger.info("Stop console proxy " + - * proxy.getName() + - * " VM because of that the agent running inside it has disconnected" - * ); stopProxy(proxy.getId()); } }); - */ - } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); - } + final ConsoleProxyVO proxy = consoleProxyDao.findById(proxyVmId); + if (proxy == null && s_logger.isInfoEnabled()) { + s_logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); } } else { assert (false) : "Invalid console proxy name: " + name; @@ -360,7 +304,7 @@ public void onAgentDisconnect(long agentId, com.cloud.host.Status state) { @Override protected HostVO findConsoleProxyHost(StartupProxyCommand startupCmd) { long proxyVmId = startupCmd.getProxyVmId(); - ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(proxyVmId); + ConsoleProxyVO consoleProxy = consoleProxyDao.findById(proxyVmId); if (consoleProxy == null) { s_logger.info("Proxy " + proxyVmId + " is no longer in DB, skip sending startup command"); return null; @@ -380,43 +324,42 @@ public ConsoleProxyInfo assignProxy(final long dataCenterId, final long vmId) { } if (proxy.getPublicIpAddress() == null) { - s_logger.warn("Assigned console proxy does not have a valid public IP address"); + s_logger.warn(String.format("Assigned console proxy [%s] does not have a valid public IP address.", proxy.toString())); return null; } KeystoreVO ksVo = _ksDao.findByName(ConsoleProxyManager.CERTIFICATE_NAME); if (proxy.isSslEnabled() && ksVo == 
null) { - s_logger.warn("SSL enabled for console proxy but no server certificate found in database"); + s_logger.warn(String.format("SSL is enabled for console proxy [%s] but no server certificate found in database.", proxy.toString())); } - if (_staticPublicIp == null) { - return new ConsoleProxyInfo(proxy.isSslEnabled(), proxy.getPublicIpAddress(), _consoleProxyPort, proxy.getPort(), _consoleProxyUrlDomain); + if (staticPublicIp == null) { + return new ConsoleProxyInfo(proxy.isSslEnabled(), proxy.getPublicIpAddress(), consoleProxyPort, proxy.getPort(), consoleProxyUrlDomain); } else { - return new ConsoleProxyInfo(proxy.isSslEnabled(), _staticPublicIp, _consoleProxyPort, _staticPort, _consoleProxyUrlDomain); + return new ConsoleProxyInfo(proxy.isSslEnabled(), staticPublicIp, consoleProxyPort, staticPort, consoleProxyUrlDomain); } } public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { ConsoleProxyVO proxy = null; - VMInstanceVO vm = _instanceDao.findById(vmId); + VMInstanceVO vm = vmInstanceDao.findById(vmId); if (vm == null) { s_logger.warn("VM " + vmId + " no longer exists, return a null proxy for vm:" + vmId); return null; } - if (vm != null && vm.getState() != State.Starting && vm.getState() != State.Running - && vm.getState() != State.Stopping && vm.getState() != State.Migrating) { + if (!availableVmStateOnAssignProxy.contains(vm.getState())) { if (s_logger.isInfoEnabled()) { - s_logger.info("Detected that vm : " + vmId + " is not currently in starting or running or stopping or migrating state, we will fail the proxy assignment for it"); + s_logger.info(String.format("Detected that %s is not currently in \"Starting\", \"Running\", \"Stopping\" or \"Migrating\" state, it will fail the proxy assignment.", vm.toString())); } return null; } - if (_allocProxyLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC)) { + if (allocProxyLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC_IN_SECONDS)) { try { if (vm.getProxyId() != null) { - proxy = 
_consoleProxyDao.findById(vm.getProxyId()); + proxy = consoleProxyDao.findById(vm.getProxyId()); if (proxy != null) { if (!isInAssignableState(proxy)) { @@ -425,12 +368,12 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { } proxy = null; } else { - if (_consoleProxyDao.getProxyActiveLoad(proxy.getId()) < _capacityPerProxy || hasPreviousSession(proxy, vm)) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Assign previous allocated console proxy for user vm : " + vmId); + if (consoleProxyDao.getProxyActiveLoad(proxy.getId()) < capacityPerProxy || hasPreviousSession(proxy, vm)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Assign previous allocated console proxy for user vm : " + vmId); } - if (proxy.getActiveSession() >= _capacityPerProxy) { + if (proxy.getActiveSession() >= capacityPerProxy) { s_logger.warn("Assign overloaded proxy to user VM as previous session exists, user vm : " + vmId); } } else { @@ -444,7 +387,7 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { proxy = assignProxyFromRunningPool(dataCenterId); } } finally { - _allocProxyLock.unlock(); + allocProxyLock.unlock(); } } else { s_logger.error("Unable to acquire synchronization lock to get/allocate proxy resource for vm :" + vmId + @@ -456,13 +399,12 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { return null; } - // if it is a new assignment or a changed assignment, update the record - if (vm.getProxyId() == null || vm.getProxyId().longValue() != proxy.getId()) { - _instanceDao.updateProxyId(vmId, proxy.getId(), DateUtil.currentGMTTime()); + if (vm.getProxyId() == null || vm.getProxyId() != proxy.getId()) { + vmInstanceDao.updateProxyId(vmId, proxy.getId(), DateUtil.currentGMTTime()); } - proxy.setSslEnabled(_sslEnabled); - if (_sslEnabled) { + proxy.setSslEnabled(sslEnabled); + if (sslEnabled) { proxy.setPort(443); } else { proxy.setPort(80); @@ -472,57 +414,40 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, 
long vmId) { } private static boolean isInAssignableState(ConsoleProxyVO proxy) { - // console proxies that are in states of being able to serve user VM - State state = proxy.getState(); - if (state == State.Running) { - return true; - } - - return false; + return proxy.getState() == State.Running; } private boolean hasPreviousSession(ConsoleProxyVO proxy, VMInstanceVO vm) { ConsoleProxyStatus status = null; try { - GsonBuilder gb = new GsonBuilder(); - gb.setVersion(1.3); - Gson gson = gb.create(); - - byte[] details = proxy.getSessionDetails(); - status = gson.fromJson(details != null ? new String(details, Charset.forName("US-ASCII")) : null, ConsoleProxyStatus.class); - } catch (Throwable e) { - s_logger.warn("Unable to parse proxy session details : " + Arrays.toString(proxy.getSessionDetails())); + byte[] detailsInBytes = proxy.getSessionDetails(); + String details = detailsInBytes != null ? new String(detailsInBytes, Charset.forName("US-ASCII")) : null; + status = parseJsonToConsoleProxyStatus(details); + } catch (JsonParseException e) { + s_logger.warn(String.format("Unable to parse proxy [%s] session details [%s] due to [%s].", proxy.toString(), Arrays.toString(proxy.getSessionDetails()), e.getMessage()), e); } if (status != null && status.getConnections() != null) { ConsoleProxyConnectionInfo[] connections = status.getConnections(); - for (int i = 0; i < connections.length; i++) { + for (ConsoleProxyConnectionInfo connection : connections) { long taggedVmId = 0; - if (connections[i].tag != null) { + if (connection.tag != null) { try { - taggedVmId = Long.parseLong(connections[i].tag); + taggedVmId = Long.parseLong(connection.tag); } catch (NumberFormatException e) { - s_logger.warn("Unable to parse console proxy connection info passed through tag: " + connections[i].tag, e); + s_logger.warn(String.format("Unable to parse console proxy connection info passed through tag [%s] due to [%s].", connection.tag, e.getMessage()), e); } } + if (taggedVmId == 
vm.getId()) { return true; } } - // - // even if we are not in the list, it may because we haven't - // received load-update yet - // wait until session time - // - if (DateUtil.currentGMTTime().getTime() - vm.getProxyAssignTime().getTime() < _proxySessionTimeoutValue) { - return true; - } - - return false; + return DateUtil.currentGMTTime().getTime() - vm.getProxyAssignTime().getTime() < proxySessionTimeoutValue; } else { - s_logger.error("No proxy load info on an overloaded proxy ?"); + s_logger.warn(String.format("Unable to retrieve load info from proxy [%s] on an overloaded proxy.", proxy.toString())); return false; } } @@ -530,104 +455,89 @@ private boolean hasPreviousSession(ConsoleProxyVO proxy, VMInstanceVO vm) { @Override public ConsoleProxyVO startProxy(long proxyVmId, boolean ignoreRestartSetting) { try { - ConsoleProxyVO proxy = _consoleProxyDao.findById(proxyVmId); + ConsoleProxyVO proxy = consoleProxyDao.findById(proxyVmId); if (proxy.getState() == VirtualMachine.State.Running) { return proxy; } - String restart = _configDao.getValue(Config.ConsoleProxyRestart.key()); + String restart = configurationDao.getValue(Config.ConsoleProxyRestart.key()); if (!ignoreRestartSetting && restart != null && restart.equalsIgnoreCase("false")) { return null; } if (proxy.getState() == VirtualMachine.State.Stopped) { - _itMgr.advanceStart(proxy.getUuid(), null, null); - proxy = _consoleProxyDao.findById(proxy.getId()); + virtualMachineManager.advanceStart(proxy.getUuid(), null, null); + proxy = consoleProxyDao.findById(proxy.getId()); return proxy; } - // For VMs that are in Stopping, Starting, Migrating state, let client to wait by returning null - // as sooner or later, Starting/Migrating state will be transited to Running and Stopping will be transited - // to Stopped to allow Starting of it - s_logger.warn("Console proxy is not in correct state to be started: " + proxy.getState()); - return null; - } catch (StorageUnavailableException e) { - 
s_logger.warn("Exception while trying to start console proxy", e); - return null; - } catch (InsufficientCapacityException e) { - s_logger.warn("Exception while trying to start console proxy", e); - return null; - } catch (ResourceUnavailableException e) { - s_logger.warn("Exception while trying to start console proxy", e); - return null; - } catch (ConcurrentOperationException e) { - s_logger.warn("Runtime Exception while trying to start console proxy", e); - return null; - } catch (CloudRuntimeException e) { - s_logger.warn("Runtime Exception while trying to start console proxy", e); - return null; - } catch (OperationTimedoutException e) { - s_logger.warn("Runtime Exception while trying to start console proxy", e); - return null; + s_logger.warn(String.format("Console proxy [%s] must be in \"Stopped\" state to start proxy. Current state [%s].", proxy.toString(), proxy.getState())); + } catch ( ConcurrentOperationException | InsufficientCapacityException | OperationTimedoutException | ResourceUnavailableException ex) { + s_logger.warn(String.format("Unable to start proxy [%s] due to [%s].", proxyVmId, ex.getMessage()), ex); } + + return null; } public ConsoleProxyVO assignProxyFromRunningPool(long dataCenterId) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Assign console proxy from running pool for request from data center : " + dataCenterId); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Assign console proxy from running pool for request from data center : " + dataCenterId); } ConsoleProxyAllocator allocator = getCurrentAllocator(); assert (allocator != null); - List runningList = _consoleProxyDao.getProxyListInStates(dataCenterId, State.Running); + List runningList = consoleProxyDao.getProxyListInStates(dataCenterId, State.Running); if (runningList != null && runningList.size() > 0) { Iterator it = runningList.iterator(); while (it.hasNext()) { ConsoleProxyVO proxy = it.next(); - if (proxy.getActiveSession() >= _capacityPerProxy) { + if 
(proxy.getActiveSession() >= capacityPerProxy) { it.remove(); } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Running proxy pool size : " + runningList.size()); - for (ConsoleProxyVO proxy : runningList) { - s_logger.trace("Running proxy instance : " + proxy.getHostName()); - } + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("Running [%s] proxy instances [%s].", runningList.size(), runningList.stream().map(proxy -> proxy.toString()).collect(Collectors.joining(", ")))); } - List> l = _consoleProxyDao.getProxyLoadMatrix(); - Map loadInfo = new HashMap(); + List> l = consoleProxyDao.getProxyLoadMatrix(); + Map loadInfo = new HashMap<>(); if (l != null) { for (Pair p : l) { - loadInfo.put(p.first(), p.second()); + Long proxyId = p.first(); + Integer countRunningVms = p.second(); + + loadInfo.put(proxyId, countRunningVms); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Running proxy instance allocation load { proxy id : " + p.first() + ", load : " + p.second() + "}"); + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("Running proxy instance allocation {\"proxyId\": %s, \"countRunningVms\": %s}.", proxyId, countRunningVms)); } + } } + Long allocated = allocator.allocProxy(runningList, loadInfo, dataCenterId); + if (allocated == null) { - s_logger.debug("Unable to find a console proxy "); + s_logger.debug(String.format("Console proxy not found, unable to assign console proxy from running pool for request from zone [%s].", dataCenterId)); return null; } - return _consoleProxyDao.findById(allocated); + + return consoleProxyDao.findById(allocated); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Empty running proxy pool for now in data center : " + dataCenterId); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Empty running proxy pool for now in data center : " + dataCenterId); } + } + return null; } public ConsoleProxyVO assignProxyFromStoppedPool(long dataCenterId) { - // practically treat all console 
proxy VM that is not in Running state but can be entering into Running state as - // candidates - // this is to prevent launching unneccessary console proxy VMs because of temporarily unavailable state - List l = _consoleProxyDao.getProxyListInStates(dataCenterId, State.Starting, State.Stopped, State.Migrating, State.Stopping); - if (l != null && l.size() > 0) { + List l = consoleProxyDao.getProxyListInStates(dataCenterId, State.Starting, State.Stopped, State.Migrating, State.Stopping); + if (CollectionUtils.isNotEmpty(l)) { return l.get(0); } @@ -641,13 +551,13 @@ public ConsoleProxyVO startNew(long dataCenterId) throws ConcurrentOperationExce } if (!allowToLaunchNew(dataCenterId)) { - s_logger.warn("The number of launched console proxy on zone " + dataCenterId + " has reached to limit"); + String configKey = Config.ConsoleProxyLaunchMax.key(); + s_logger.warn(String.format("The number of launched console proxys on zone [%s] has reached the limit [%s]. Limit set in [%s].", dataCenterId, configurationDao.getValue(configKey), configKey)); return null; } - VMTemplateVO template = null; - HypervisorType availableHypervisor = _resourceMgr.getAvailableHypervisor(dataCenterId); - template = _templateDao.findSystemVMReadyTemplate(dataCenterId, availableHypervisor); + HypervisorType availableHypervisor = resourceManager.getAvailableHypervisor(dataCenterId); + VMTemplateVO template = vmTemplateDao.findSystemVMReadyTemplate(dataCenterId, availableHypervisor); if (template == null) { throw new CloudRuntimeException("Not able to find the System templates or not downloaded in zone " + dataCenterId); } @@ -656,13 +566,13 @@ public ConsoleProxyVO startNew(long dataCenterId) throws ConcurrentOperationExce long proxyVmId = (Long)context.get("proxyVmId"); if (proxyVmId == 0) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Creating proxy instance failed, data center id : " + dataCenterId); + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("Unable to create 
proxy instance in zone [%s].", dataCenterId)); } return null; } - ConsoleProxyVO proxy = _consoleProxyDao.findById(proxyVmId); + ConsoleProxyVO proxy = consoleProxyDao.findById(proxyVmId); if (proxy != null) { SubscriptionMgr.getInstance().notifySubscribers(ConsoleProxyManager.ALERT_SUBJECT, this, new ConsoleProxyAlertEventArgs(ConsoleProxyAlertEventArgs.PROXY_CREATED, dataCenterId, proxy.getId(), proxy, null)); @@ -704,7 +614,7 @@ protected NetworkVO getDefaultNetworkForAdvancedZone(DataCenter dc) { } if (dc.isSecurityGroupEnabled()) { - List networks = _networkDao.listByZoneSecurityGroup(dc.getId()); + List networks = networkDao.listByZoneSecurityGroup(dc.getId()); if (CollectionUtils.isEmpty(networks)) { throw new CloudRuntimeException("Can not found security enabled network in SG Zone " + dc); } @@ -713,9 +623,8 @@ protected NetworkVO getDefaultNetworkForAdvancedZone(DataCenter dc) { } else { TrafficType defaultTrafficType = TrafficType.Public; - List defaultNetworks = _networkDao.listByZoneAndTrafficType(dc.getId(), defaultTrafficType); + List defaultNetworks = networkDao.listByZoneAndTrafficType(dc.getId(), defaultTrafficType); - // api should never allow this situation to happen if (defaultNetworks.size() != 1) { throw new CloudRuntimeException("Found " + defaultNetworks.size() + " networks of type " + defaultTrafficType + " when expect to find 1"); } @@ -737,9 +646,8 @@ protected NetworkVO getDefaultNetworkForBasicZone(DataCenter dc) { } TrafficType defaultTrafficType = TrafficType.Guest; - List defaultNetworks = _networkDao.listByZoneAndTrafficType(dc.getId(), defaultTrafficType); + List defaultNetworks = networkDao.listByZoneAndTrafficType(dc.getId(), defaultTrafficType); - // api should never allow this situation to happen if (defaultNetworks.size() != 1) { throw new CloudRuntimeException("Found " + defaultNetworks.size() + " networks of type " + defaultTrafficType + " when expect to find 1"); } @@ -749,48 +657,49 @@ protected NetworkVO 
getDefaultNetworkForBasicZone(DataCenter dc) { protected Map createProxyInstance(long dataCenterId, VMTemplateVO template) throws ConcurrentOperationException { - long id = _consoleProxyDao.getNextInSequence(Long.class, "id"); - String name = VirtualMachineName.getConsoleProxyName(id, _instance); - DataCenterVO dc = _dcDao.findById(dataCenterId); - Account systemAcct = _accountMgr.getSystemAccount(); + long id = consoleProxyDao.getNextInSequence(Long.class, "id"); + String name = VirtualMachineName.getConsoleProxyName(id, instance); + DataCenterVO dc = dataCenterDao.findById(dataCenterId); + Account systemAcct = accountManager.getSystemAccount(); DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); NetworkVO defaultNetwork = getDefaultNetworkForCreation(dc); List offerings = - _networkModel.getSystemAccountNetworkOfferings(NetworkOffering.SystemControlNetwork, NetworkOffering.SystemManagementNetwork); - LinkedHashMap> networks = new LinkedHashMap>(offerings.size() + 1); + networkModel.getSystemAccountNetworkOfferings(NetworkOffering.SystemControlNetwork, NetworkOffering.SystemManagementNetwork); + LinkedHashMap> networks = new LinkedHashMap<>(offerings.size() + 1); NicProfile defaultNic = new NicProfile(); defaultNic.setDefaultNic(true); defaultNic.setDeviceId(2); - networks.put(_networkMgr.setupNetwork(systemAcct, _networkOfferingDao.findById(defaultNetwork.getNetworkOfferingId()), plan, null, null, false).get(0), - new ArrayList(Arrays.asList(defaultNic))); + networks.put(networkOrchestrationService.setupNetwork(systemAcct, networkOfferingDao.findById(defaultNetwork.getNetworkOfferingId()), plan, null, null, false).get(0), + new ArrayList<>(Arrays.asList(defaultNic))); for (NetworkOffering offering : offerings) { - networks.put(_networkMgr.setupNetwork(systemAcct, offering, plan, null, null, false).get(0), new ArrayList()); + networks.put(networkOrchestrationService.setupNetwork(systemAcct, offering, plan, null, null, false).get(0), new 
ArrayList<>()); } - ServiceOfferingVO serviceOffering = _serviceOffering; + ServiceOfferingVO serviceOffering = serviceOfferingVO; if (serviceOffering == null) { - serviceOffering = _offeringDao.findDefaultSystemOffering(ServiceOffering.consoleProxyDefaultOffUniqueName, ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId)); + serviceOffering = serviceOfferingDao.findDefaultSystemOffering(ServiceOffering.consoleProxyDefaultOffUniqueName, ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId)); } ConsoleProxyVO proxy = new ConsoleProxyVO(id, serviceOffering.getId(), name, template.getId(), template.getHypervisorType(), template.getGuestOSId(), dataCenterId, - systemAcct.getDomainId(), systemAcct.getId(), _accountMgr.getSystemUser().getId(), 0, serviceOffering.isOfferHA()); + systemAcct.getDomainId(), systemAcct.getId(), accountManager.getSystemUser().getId(), 0, serviceOffering.isOfferHA()); proxy.setDynamicallyScalable(template.isDynamicallyScalable()); - proxy = _consoleProxyDao.persist(proxy); + proxy = consoleProxyDao.persist(proxy); try { - _itMgr.allocate(name, template, serviceOffering, networks, plan, null); + virtualMachineManager.allocate(name, template, serviceOffering, networks, plan, null); } catch (InsufficientCapacityException e) { - s_logger.warn("InsufficientCapacity", e); - throw new CloudRuntimeException("Insufficient capacity exception", e); + String message = String.format("Unable to allocate proxy [%s] on zone [%s] due to [%s].", proxy.toString(), dataCenterId, e.getMessage()); + s_logger.warn(message, e); + throw new CloudRuntimeException(message, e); } - Map context = new HashMap(); + Map context = new HashMap<>(); context.put("dc", dc); - HostPodVO pod = _podDao.findById(proxy.getPodIdToDeployIn()); + HostPodVO pod = hostPodDao.findById(proxy.getPodIdToDeployIn()); context.put("pod", pod); context.put("proxyVmId", proxy.getId()); @@ -798,8 +707,7 @@ protected Map createProxyInstance(long dataCenterId, 
VMTemplateV } private ConsoleProxyAllocator getCurrentAllocator() { - // for now, only one adapter is supported - for (ConsoleProxyAllocator allocator : _consoleProxyAllocators) { + for (ConsoleProxyAllocator allocator : consoleProxyAllocators) { return allocator; } @@ -807,48 +715,12 @@ private ConsoleProxyAllocator getCurrentAllocator() { } public void onLoadAnswer(ConsoleProxyLoadAnswer answer) { - if (answer.getDetails() == null) { - return; - } - - ConsoleProxyStatus status = null; - try { - GsonBuilder gb = new GsonBuilder(); - gb.setVersion(1.3); - Gson gson = gb.create(); - status = gson.fromJson(answer.getDetails(), ConsoleProxyStatus.class); - } catch (Throwable e) { - s_logger.warn("Unable to parse load info from proxy, proxy vm id : " + answer.getProxyVmId() + ", info : " + answer.getDetails()); - } - - if (status != null) { - int count = 0; - if (status.getConnections() != null) { - count = status.getConnections().length; - } - - byte[] details = null; - if (answer.getDetails() != null) { - details = answer.getDetails().getBytes(Charset.forName("US-ASCII")); - } - _consoleProxyDao.update(answer.getProxyVmId(), count, DateUtil.currentGMTTime(), details); - } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Unable to get console proxy load info, id : " + answer.getProxyVmId()); - } - - _consoleProxyDao.update(answer.getProxyVmId(), 0, DateUtil.currentGMTTime(), null); - // TODO : something is wrong with the VM, restart it? 
- } + updateConsoleProxyStatus(answer.getDetails(), answer.getProxyVmId()); } public void handleAgentDisconnect(long agentId, com.cloud.host.Status state) { if (state == com.cloud.host.Status.Alert || state == com.cloud.host.Status.Disconnected) { - // be it either in alert or in disconnected state, the agent process - // may be gone in the VM, - // we will be reacting to stop the corresponding VM and let the scan - // process to - HostVO host = _hostDao.findById(agentId); + HostVO host = hostDao.findById(agentId); if (host.getType() == Type.ConsoleProxy) { String name = host.getName(); if (s_logger.isInfoEnabled()) { @@ -856,7 +728,7 @@ public void handleAgentDisconnect(long agentId, com.cloud.host.Status state) { } if (name != null && name.startsWith("v-")) { String[] tokens = name.split("-"); - long proxyVmId = 0; + long proxyVmId; try { proxyVmId = Long.parseLong(tokens[1]); } catch (NumberFormatException e) { @@ -864,28 +736,9 @@ public void handleAgentDisconnect(long agentId, com.cloud.host.Status state) { return; } - final ConsoleProxyVO proxy = _consoleProxyDao.findById(proxyVmId); - if (proxy != null) { - - // Disable this feature for now, as it conflicts with - // the case of allowing user to reboot console proxy - // when rebooting happens, we will receive disconnect - // here and we can't enter into stopping process, - // as when the rebooted one comes up, it will kick off a - // newly started one and trigger the process - // continue on forever - - /* - * _capacityScanScheduler.execute(new Runnable() { public void run() { - * if(s_logger.isInfoEnabled()) - * s_logger.info("Stop console proxy " + proxy.getName() + - * " VM because of that the agent running inside it has disconnected" ); - * stopProxy(proxy.getId()); } }); - */ - } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); - } + final ConsoleProxyVO proxy = 
consoleProxyDao.findById(proxyVmId); + if (proxy == null && s_logger.isInfoEnabled()) { + s_logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); } } else { assert (false) : "Invalid console proxy name: " + name; @@ -896,16 +749,12 @@ public void handleAgentDisconnect(long agentId, com.cloud.host.Status state) { private boolean reserveStandbyCapacity() { ConsoleProxyManagementState state = getManagementState(); - if (state == null || state != ConsoleProxyManagementState.Auto) { - return false; - } - - return true; + return !(state == null || state != ConsoleProxyManagementState.Auto); } private boolean isConsoleProxyVmRequired(long dcId) { - DataCenterVO dc = _dcDao.findById(dcId); - _dcDao.loadDetails(dc); + DataCenterVO dc = dataCenterDao.findById(dcId); + dataCenterDao.loadDetails(dc); String cpvmReq = dc.getDetail(ZoneConfig.EnableConsoleProxyVm.key()); if (cpvmReq != null) { return Boolean.parseBoolean(cpvmReq); @@ -921,26 +770,21 @@ private boolean allowToLaunchNew(long dcId) { return false; } List l = - _consoleProxyDao.getProxyListInStates(dcId, VirtualMachine.State.Starting, VirtualMachine.State.Running, VirtualMachine.State.Stopping, + consoleProxyDao.getProxyListInStates(dcId, VirtualMachine.State.Starting, VirtualMachine.State.Running, VirtualMachine.State.Stopping, VirtualMachine.State.Stopped, VirtualMachine.State.Migrating, VirtualMachine.State.Shutdown, VirtualMachine.State.Unknown); - String value = _configDao.getValue(Config.ConsoleProxyLaunchMax.key()); + String value = configurationDao.getValue(Config.ConsoleProxyLaunchMax.key()); int launchLimit = NumbersUtil.parseInt(value, 10); return l.size() < launchLimit; } private boolean checkCapacity(ConsoleProxyLoadInfo proxyCountInfo, ConsoleProxyLoadInfo vmCountInfo) { - - if (proxyCountInfo.getCount() * _capacityPerProxy - vmCountInfo.getCount() <= _standbyCapacity) { - return false; - } - - return true; + return 
proxyCountInfo.getCount() * capacityPerProxy - vmCountInfo.getCount() > standbyCapacity; } private void allocCapacity(long dataCenterId) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Allocate console proxy standby capacity for data center : " + dataCenterId); + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("Allocating console proxy standby capacity for zone [%s].", dataCenterId)); } ConsoleProxyVO proxy = null; @@ -953,13 +797,13 @@ private void allocCapacity(long dataCenterId) { s_logger.info("No stopped console proxy is available, need to allocate a new console proxy"); } - if (_allocProxyLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC)) { + if (allocProxyLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC_IN_SECONDS)) { try { proxy = startNew(dataCenterId); } catch (ConcurrentOperationException e) { - s_logger.info("Concurrent operation exception caught " + e); + s_logger.warn(String.format("Unable to start new console proxy on zone [%s] due to [%s].", dataCenterId, e.getMessage()), e); } finally { - _allocProxyLock.unlock(); + allocProxyLock.unlock(); } } else { if (s_logger.isInfoEnabled()) { @@ -995,10 +839,9 @@ private void allocCapacity(long dataCenterId) { } } catch (Exception e) { errorString = e.getMessage(); + s_logger.warn(String.format("Unable to allocate console proxy standby capacity for zone [%s] due to [%s].", dataCenterId, e.getMessage()), e); throw e; } finally { - // TODO - For now put all the alerts as creation failure. Distinguish between creation vs start failure in future. - // Also add failure reason since startvm masks some of them. 
if (proxy == null || proxy.getState() != State.Running) SubscriptionMgr.getInstance().notifySubscribers(ConsoleProxyManager.ALERT_SUBJECT, this, new ConsoleProxyAlertEventArgs(ConsoleProxyAlertEventArgs.PROXY_CREATE_FAILURE, dataCenterId, 0l, null, errorString)); @@ -1006,7 +849,7 @@ private void allocCapacity(long dataCenterId) { } public boolean isZoneReady(Map zoneHostInfoMap, long dataCenterId) { - List hosts = _hostDao.listByDataCenterId(dataCenterId); + List hosts = hostDao.listByDataCenterId(dataCenterId); if (CollectionUtils.isEmpty(hosts)) { if (s_logger.isDebugEnabled()) { s_logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state"); @@ -1015,28 +858,24 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen } ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenterId); if (zoneHostInfo != null && isZoneHostReady(zoneHostInfo)) { - VMTemplateVO template = _templateDao.findSystemVMReadyTemplate(dataCenterId, HypervisorType.Any); + VMTemplateVO template = vmTemplateDao.findSystemVMReadyTemplate(dataCenterId, HypervisorType.Any); if (template == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("System vm template is not ready at data center " + dataCenterId + ", wait until it is ready to launch console proxy vm"); } return false; } - TemplateDataStoreVO templateHostRef = null; + TemplateDataStoreVO templateHostRef; if (template.isDirectDownload()) { - templateHostRef = _vmTemplateStoreDao.findByTemplate(template.getId(), DataStoreRole.Image); + templateHostRef = templateDataStoreDao.findByTemplate(template.getId(), DataStoreRole.Image); } else { - templateHostRef = _vmTemplateStoreDao.findByTemplateZoneDownloadStatus(template.getId(), dataCenterId, Status.DOWNLOADED); + templateHostRef = templateDataStoreDao.findByTemplateZoneDownloadStatus(template.getId(), dataCenterId, Status.DOWNLOADED); } if (templateHostRef != null) { - boolean useLocalStorage = false; - Boolean useLocal = 
ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId); - if (useLocal != null) { - useLocalStorage = useLocal.booleanValue(); - } - List> l = _consoleProxyDao.getDatacenterStoragePoolHostInfo(dataCenterId, useLocalStorage); - if (l != null && l.size() > 0 && l.get(0).second().intValue() > 0) { + Boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId)); + List> l = consoleProxyDao.getDatacenterStoragePoolHostInfo(dataCenterId, useLocalStorage); + if (CollectionUtils.isNotEmpty(l) && l.get(0).second() > 0) { return true; } else { if (s_logger.isDebugEnabled()) { @@ -1045,7 +884,7 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen } } else { if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() + " is not ready on secondary storage."); + s_logger.debug(String.format("Zone [%s] is ready, but console proxy template [%s] is not ready on secondary storage.", dataCenterId, template.getId())); } } } @@ -1053,8 +892,8 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen } private boolean isZoneHostReady(ZoneHostInfo zoneHostInfo) { - int expectedFlags = 0; - if (_useStorageVm) { + int expectedFlags; + if (useStorageVm) { expectedFlags = RunningHostInfoAgregator.ZoneHostInfo.ROUTING_HOST_MASK; } else { expectedFlags = RunningHostInfoAgregator.ZoneHostInfo.ALL_HOST_MASK; @@ -1065,7 +904,7 @@ private boolean isZoneHostReady(ZoneHostInfo zoneHostInfo) { private synchronized Map getZoneHostInfo() { Date cutTime = DateUtil.currentGMTTime(); - List l = _hostDao.getRunningHostCounts(new Date(cutTime.getTime() - ClusterManager.HeartbeatThreshold.value())); + List l = hostDao.getRunningHostCounts(new Date(cutTime.getTime() - ClusterManager.HeartbeatThreshold.value())); RunningHostInfoAgregator aggregator = new RunningHostInfoAgregator(); if (l.size() > 0) { @@ -1092,15 +931,15 @@ public boolean stop() { 
s_logger.info("Stop console proxy manager"); } - _loadScanner.stop(); - _allocProxyLock.releaseRef(); - _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); + loadScanner.stop(); + allocProxyLock.releaseRef(); + resourceManager.unregisterResourceStateAdapter(this.getClass().getSimpleName()); return true; } @Override public boolean stopProxy(long proxyVmId) { - ConsoleProxyVO proxy = _consoleProxyDao.findById(proxyVmId); + ConsoleProxyVO proxy = consoleProxyDao.findById(proxyVmId); if (proxy == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Stopping console proxy failed: console proxy " + proxyVmId + " no longer exists"); @@ -1109,13 +948,10 @@ public boolean stopProxy(long proxyVmId) { } try { - _itMgr.stop(proxy.getUuid()); + virtualMachineManager.stop(proxy.getUuid()); return true; - } catch (ResourceUnavailableException e) { - s_logger.warn("Stopping console proxy " + proxy.getHostName() + " failed : exception ", e); - return false; - } catch (CloudRuntimeException e) { - s_logger.warn("Unable to stop proxy ", e); + } catch (CloudRuntimeException | ResourceUnavailableException e) { + s_logger.warn(String.format("Unable to stop console proxy [%s] due to [%s].", proxy.toString(), e.getMessage()), e); return false; } } @@ -1133,29 +969,30 @@ public void setManagementState(final ConsoleProxyManagementState state) { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - _configDao.update(Config.ConsoleProxyManagementLastState.key(), Config.ConsoleProxyManagementLastState.getCategory(), lastState.toString()); - _configDao.update(Config.ConsoleProxyManagementState.key(), Config.ConsoleProxyManagementState.getCategory(), state.toString()); + configurationDao.update(Config.ConsoleProxyManagementLastState.key(), Config.ConsoleProxyManagementLastState.getCategory(), lastState.toString()); + configurationDao.update(Config.ConsoleProxyManagementState.key(), 
Config.ConsoleProxyManagementState.getCategory(), state.toString()); } }); } } catch (Throwable e) { - s_logger.error("Failed to set managment state", e); + s_logger.error(String.format("Unable to set console proxy management state to [%s] due to [%s].", state, e.getMessage()), e); } } @Override public ConsoleProxyManagementState getManagementState() { - String value = _configDao.getValue(Config.ConsoleProxyManagementState.key()); + String configKey = Config.ConsoleProxyManagementState.key(); + String value = configurationDao.getValue(configKey); + if (value != null) { ConsoleProxyManagementState state = ConsoleProxyManagementState.valueOf(value); - if (state == null) { - s_logger.error("Invalid console proxy management state: " + value); + if (state != null) { + return state; } - return state; } - s_logger.error("Invalid console proxy management state: " + value); + s_logger.error(String.format("Value [%s] set in global configuration [%s] is not a valid console proxy management state.", value, configKey)); return null; } @@ -1170,39 +1007,40 @@ public void resumeLastManagementState() { } if (lastState != state) { - _configDao.update(Config.ConsoleProxyManagementState.key(), Config.ConsoleProxyManagementState.getCategory(), lastState.toString()); + configurationDao.update(Config.ConsoleProxyManagementState.key(), Config.ConsoleProxyManagementState.getCategory(), lastState.toString()); } } catch (Throwable e) { - s_logger.error("Failed to resume last management state", e); + s_logger.error(String.format("Unable to resume last management state due to [%s].", e.getMessage()), e); } } private ConsoleProxyManagementState getLastManagementState() { - String value = _configDao.getValue(Config.ConsoleProxyManagementLastState.key()); + String configKey = Config.ConsoleProxyManagementLastState.key(); + String value = configurationDao.getValue(configKey); + if (value != null) { ConsoleProxyManagementState state = ConsoleProxyManagementState.valueOf(value); - if (state == 
null) { - s_logger.error("Invalid console proxy management state: " + value); + if (state != null) { + return state; } - return state; } - s_logger.error("Invalid console proxy management state: " + value); + s_logger.error(String.format("Value [%s] set in global configuration [%s] is not a valid console proxy management state.", value, configKey)); return null; } @Override public boolean rebootProxy(long proxyVmId) { - final ConsoleProxyVO proxy = _consoleProxyDao.findById(proxyVmId); + final ConsoleProxyVO proxy = consoleProxyDao.findById(proxyVmId); if (proxy == null || proxy.getState() == State.Destroyed) { return false; } if (proxy.getState() == State.Running && proxy.getHostId() != null) { - final RebootCommand cmd = new RebootCommand(proxy.getInstanceName(), _itMgr.getExecuteInSequence(proxy.getHypervisorType())); - final Answer answer = _agentMgr.easySend(proxy.getHostId(), cmd); + final RebootCommand cmd = new RebootCommand(proxy.getInstanceName(), virtualMachineManager.getExecuteInSequence(proxy.getHypervisorType())); + final Answer answer = agentManager.easySend(proxy.getHostId(), cmd); if (answer != null && answer.getResult()) { if (s_logger.isDebugEnabled()) { @@ -1227,26 +1065,25 @@ public boolean rebootProxy(long proxyVmId) { @Override public boolean destroyProxy(long vmId) { - ConsoleProxyVO proxy = _consoleProxyDao.findById(vmId); + ConsoleProxyVO proxy = consoleProxyDao.findById(vmId); try { - //expunge the vm - _itMgr.expunge(proxy.getUuid()); + virtualMachineManager.expunge(proxy.getUuid()); proxy.setPublicIpAddress(null); proxy.setPublicMacAddress(null); proxy.setPublicNetmask(null); proxy.setPrivateMacAddress(null); proxy.setPrivateIpAddress(null); - _consoleProxyDao.update(proxy.getId(), proxy); - _consoleProxyDao.remove(vmId); - HostVO host = _hostDao.findByTypeNameAndZoneId(proxy.getDataCenterId(), proxy.getHostName(), Host.Type.ConsoleProxy); + consoleProxyDao.update(proxy.getId(), proxy); + consoleProxyDao.remove(vmId); + HostVO host = 
hostDao.findByTypeNameAndZoneId(proxy.getDataCenterId(), proxy.getHostName(), Host.Type.ConsoleProxy); if (host != null) { - s_logger.debug("Removing host entry for proxy id=" + vmId); - return _hostDao.remove(host.getId()); + s_logger.debug(String.format("Removing host [%s] entry for proxy [%s].", host.toString(), vmId)); + return hostDao.remove(host.getId()); } return true; } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to expunge " + proxy, e); + s_logger.warn(String.format("Unable to destroy console proxy [%s] due to [%s].", proxy, e.getMessage()), e); return false; } } @@ -1261,84 +1098,85 @@ public boolean configure(String name, Map params) throws Configu s_logger.info("Start configuring console proxy manager : " + name); } - Map configs = _configDao.getConfiguration("management-server", params); + Map configs = configurationDao.getConfiguration("management-server", params); String value = configs.get("consoleproxy.sslEnabled"); if (value != null && value.equalsIgnoreCase("true")) { - _sslEnabled = true; + sslEnabled = true; } - _consoleProxyUrlDomain = configs.get(Config.ConsoleProxyUrlDomain.key()); - if( _sslEnabled && (_consoleProxyUrlDomain == null || _consoleProxyUrlDomain.isEmpty())) { + consoleProxyUrlDomain = configs.get(Config.ConsoleProxyUrlDomain.key()); + if( sslEnabled && (consoleProxyUrlDomain == null || consoleProxyUrlDomain.isEmpty())) { s_logger.warn("Empty console proxy domain, explicitly disabling SSL"); - _sslEnabled = false; + sslEnabled = false; } value = configs.get(Config.ConsoleProxyCapacityScanInterval.key()); - _capacityScanInterval = NumbersUtil.parseLong(value, DEFAULT_CAPACITY_SCAN_INTERVAL); + capacityScanInterval = NumbersUtil.parseLong(value, DEFAULT_CAPACITY_SCAN_INTERVAL_IN_MILLISECONDS); - _capacityPerProxy = NumbersUtil.parseInt(configs.get("consoleproxy.session.max"), DEFAULT_PROXY_CAPACITY); - _standbyCapacity = NumbersUtil.parseInt(configs.get("consoleproxy.capacity.standby"), 
DEFAULT_STANDBY_CAPACITY); - _proxySessionTimeoutValue = NumbersUtil.parseInt(configs.get("consoleproxy.session.timeout"), DEFAULT_PROXY_SESSION_TIMEOUT); + capacityPerProxy = NumbersUtil.parseInt(configs.get("consoleproxy.session.max"), DEFAULT_PROXY_CAPACITY); + standbyCapacity = NumbersUtil.parseInt(configs.get("consoleproxy.capacity.standby"), DEFAULT_STANDBY_CAPACITY); + proxySessionTimeoutValue = NumbersUtil.parseInt(configs.get("consoleproxy.session.timeout"), DEFAULT_PROXY_SESSION_TIMEOUT); value = configs.get("consoleproxy.port"); if (value != null) { - _consoleProxyPort = NumbersUtil.parseInt(value, ConsoleProxyManager.DEFAULT_PROXY_VNC_PORT); + consoleProxyPort = NumbersUtil.parseInt(value, ConsoleProxyManager.DEFAULT_PROXY_VNC_PORT); } value = configs.get(Config.ConsoleProxyDisableRpFilter.key()); if (value != null && value.equalsIgnoreCase("true")) { - _disableRpFilter = true; + disableRpFilter = true; } value = configs.get("secondary.storage.vm"); if (value != null && value.equalsIgnoreCase("true")) { - _useStorageVm = true; + useStorageVm = true; } if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy max session soft limit : " + _capacityPerProxy); - s_logger.info("Console proxy standby capacity : " + _standbyCapacity); + s_logger.info("Console proxy max session soft limit : " + capacityPerProxy); + s_logger.info("Console proxy standby capacity : " + standbyCapacity); } - _instance = configs.get("instance.name"); - if (_instance == null) { - _instance = "DEFAULT"; + instance = configs.get("instance.name"); + if (instance == null) { + instance = "DEFAULT"; } - Map agentMgrConfigs = _configDao.getConfiguration("AgentManager", params); + Map agentMgrConfigs = configurationDao.getConfiguration("AgentManager", params); value = agentMgrConfigs.get("port"); - _mgmtPort = NumbersUtil.parseInt(value, 8250); + managementPort = NumbersUtil.parseInt(value, 8250); - _listener = new ConsoleProxyListener(new VmBasedAgentHook(_instanceDao, _hostDao, 
_configDao, _ksMgr, _agentMgr, _keysMgr)); - _agentMgr.registerForHostEvents(_listener, true, true, false); + consoleProxyListener = new ConsoleProxyListener(new VmBasedAgentHook(vmInstanceDao, hostDao, configurationDao, _ksMgr, agentManager, keysManager)); + agentManager.registerForHostEvents(consoleProxyListener, true, true, false); - _itMgr.registerGuru(VirtualMachine.Type.ConsoleProxy, this); + virtualMachineManager.registerGuru(VirtualMachine.Type.ConsoleProxy, this); - //check if there is a default service offering configured - String cpvmSrvcOffIdStr = configs.get(Config.ConsoleProxyServiceOffering.key()); + String configKey = Config.ConsoleProxyServiceOffering.key(); + String cpvmSrvcOffIdStr = configs.get(configKey); if (cpvmSrvcOffIdStr != null) { - _serviceOffering = _offeringDao.findByUuid(cpvmSrvcOffIdStr); - if (_serviceOffering == null) { + serviceOfferingVO = serviceOfferingDao.findByUuid(cpvmSrvcOffIdStr); + if (serviceOfferingVO == null) { try { - _serviceOffering = _offeringDao.findById(Long.parseLong(cpvmSrvcOffIdStr)); + s_logger.debug(String.format("Unable to find a service offering by the UUID for console proxy VM with the value [%s] set in the configuration [%s]. Trying to find by the ID.", cpvmSrvcOffIdStr, configKey)); + serviceOfferingVO = serviceOfferingDao.findById(Long.parseLong(cpvmSrvcOffIdStr)); } catch (NumberFormatException ex) { - s_logger.debug("The system service offering specified by global config is not id, but uuid=" + cpvmSrvcOffIdStr + " for console proxy vm"); + s_logger.warn(String.format("Unable to find a service offering by the ID for console proxy VM with the value [%s] set in the configuration [%s]. The value is not a valid integer number. 
Error: [%s].", cpvmSrvcOffIdStr, configKey, ex.getMessage()), ex); } } - if (_serviceOffering == null) { - s_logger.warn("Can't find system service offering specified by global config, uuid=" + cpvmSrvcOffIdStr + " for console proxy vm"); + if (serviceOfferingVO == null) { + s_logger.warn(String.format("Unable to find a service offering by the UUID or ID for console proxy VM with the value [%s] set in the configuration [%s]", cpvmSrvcOffIdStr, configKey)); } } - if (_serviceOffering == null || !_serviceOffering.isSystemUse()) { - int ramSize = NumbersUtil.parseInt(_configDao.getValue("console.ram.size"), DEFAULT_PROXY_VM_RAMSIZE); - int cpuFreq = NumbersUtil.parseInt(_configDao.getValue("console.cpu.mhz"), DEFAULT_PROXY_VM_CPUMHZ); - List offerings = _offeringDao.createSystemServiceOfferings("System Offering For Console Proxy", + if (serviceOfferingVO == null || !serviceOfferingVO.isSystemUse()) { + int ramSize = NumbersUtil.parseInt(configurationDao.getValue("console.ram.size"), DEFAULT_PROXY_VM_RAMSIZE); + int cpuFreq = NumbersUtil.parseInt(configurationDao.getValue("console.cpu.mhz"), DEFAULT_PROXY_VM_CPUMHZ); + List offerings = serviceOfferingDao.createSystemServiceOfferings("System Offering For Console Proxy", ServiceOffering.consoleProxyDefaultOffUniqueName, 1, ramSize, cpuFreq, 0, 0, false, null, Storage.ProvisioningType.THIN, true, null, true, VirtualMachine.Type.ConsoleProxy, true); - // this can sometimes happen, if DB is manually or programmatically manipulated + if (offerings == null || offerings.size() < 2) { String msg = "Data integrity problem : System Offering For Console Proxy has been removed?"; s_logger.error(msg); @@ -1346,13 +1184,13 @@ public boolean configure(String name, Map params) throws Configu } } - _loadScanner = new SystemVmLoadScanner(this); - _loadScanner.initScan(STARTUP_DELAY, _capacityScanInterval); - _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); + loadScanner = new SystemVmLoadScanner<>(this); 
+ loadScanner.initScan(STARTUP_DELAY_IN_MILLISECONDS, capacityScanInterval); + resourceManager.registerResourceStateAdapter(this.getClass().getSimpleName(), this); - _staticPublicIp = _configDao.getValue("consoleproxy.static.publicIp"); - if (_staticPublicIp != null) { - _staticPort = NumbersUtil.parseInt(_configDao.getValue("consoleproxy.static.port"), 8443); + staticPublicIp = configurationDao.getValue("consoleproxy.static.publicIp"); + if (staticPublicIp != null) { + staticPort = NumbersUtil.parseInt(configurationDao.getValue("consoleproxy.static.port"), 8443); } if (s_logger.isInfoEnabled()) { @@ -1367,34 +1205,34 @@ protected ConsoleProxyManagerImpl() { @Override public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { - ConsoleProxyVO vm = _consoleProxyDao.findById(profile.getId()); - Map details = _vmDetailsDao.listDetailsKeyPairs(vm.getId()); + ConsoleProxyVO vm = consoleProxyDao.findById(profile.getId()); + Map details = userVmDetailsDao.listDetailsKeyPairs(vm.getId()); vm.setDetails(details); StringBuilder buf = profile.getBootArgsBuilder(); buf.append(" template=domP type=consoleproxy"); buf.append(" host=").append(StringUtils.toCSVList(indirectAgentLB.getManagementServerList(dest.getHost().getId(), dest.getDataCenter().getId(), null))); - buf.append(" port=").append(_mgmtPort); + buf.append(" port=").append(managementPort); buf.append(" name=").append(profile.getVirtualMachine().getHostName()); - if (_sslEnabled) { + if (sslEnabled) { buf.append(" premium=true"); } buf.append(" zone=").append(dest.getDataCenter().getId()); buf.append(" pod=").append(dest.getPod().getId()); buf.append(" guid=Proxy.").append(profile.getId()); buf.append(" proxy_vm=").append(profile.getId()); - if (_disableRpFilter) { + if (disableRpFilter) { buf.append(" disable_rp_filter=true"); } boolean externalDhcp = false; - String externalDhcpStr = 
_configDao.getValue("direct.attach.network.externalIpAllocator.enabled"); + String externalDhcpStr = configurationDao.getValue("direct.attach.network.externalIpAllocator.enabled"); if (externalDhcpStr != null && externalDhcpStr.equalsIgnoreCase("true")) { externalDhcp = true; } - if (Boolean.valueOf(_configDao.getValue("system.vm.random.password"))) { - buf.append(" vmpassword=").append(_configDao.getValue("system.vm.password")); + if (Boolean.valueOf(configurationDao.getValue("system.vm.random.password"))) { + buf.append(" vmpassword=").append(configurationDao.getValue("system.vm.password")); } for (NicProfile nic : profile.getNics()) { @@ -1412,7 +1250,7 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl } if (nic.getTrafficType() == TrafficType.Management) { - String mgmt_cidr = _configDao.getValue(Config.ManagementNetwork.key()); + String mgmt_cidr = configurationDao.getValue(Config.ManagementNetwork.key()); if (NetUtils.isValidIp4Cidr(mgmt_cidr)) { buf.append(" mgmtcidr=").append(mgmt_cidr); } @@ -1420,11 +1258,10 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl } } - /* External DHCP mode */ if (externalDhcp) { buf.append(" bootproto=dhcp"); } - DataCenterVO dc = _dcDao.findById(profile.getVirtualMachine().getDataCenterId()); + DataCenterVO dc = dataCenterDao.findById(profile.getVirtualMachine().getDataCenterId()); buf.append(" internaldns1=").append(dc.getInternalDns1()); if (dc.getInternalDns2() != null) { buf.append(" internaldns2=").append(dc.getInternalDns2()); @@ -1447,7 +1284,7 @@ public boolean finalizeDeployment(Commands cmds, VirtualMachineProfile profile, finalizeCommandsOnStart(cmds, profile); - ConsoleProxyVO proxy = _consoleProxyDao.findById(profile.getId()); + ConsoleProxyVO proxy = consoleProxyDao.findById(profile.getId()); DataCenter dc = dest.getDataCenter(); List nics = profile.getNics(); for (NicProfile nic : nics) { @@ -1461,7 +1298,7 @@ public boolean 
finalizeDeployment(Commands cmds, VirtualMachineProfile profile, proxy.setPrivateMacAddress(nic.getMacAddress()); } } - _consoleProxyDao.update(proxy.getId(), proxy); + consoleProxyDao.update(proxy.getId(), proxy); return true; } @@ -1486,7 +1323,6 @@ public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile prof controlNic = managementNic; } - // verify ssh access on management nic for system vm running on HyperV if(profile.getHypervisorType() == HypervisorType.Hyperv) { controlNic = managementNic; } @@ -1501,26 +1337,20 @@ public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile prof public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Commands cmds, ReservationContext context) { CheckSshAnswer answer = (CheckSshAnswer)cmds.getAnswer("checkSsh"); if (answer == null || !answer.getResult()) { - if (answer != null) { - s_logger.warn("Unable to ssh to the VM: " + answer.getDetails()); - } else { - s_logger.warn("Unable to ssh to the VM: null answer"); - } + s_logger.warn(String.format("Unable to use SSH on the VM [%s] due to [%s].", profile.toString(), answer == null ? 
"null answer" : answer.getDetails())); return false; } try { - //get system ip and create static nat rule for the vm in case of basic networking with EIP/ELB - _rulesMgr.getSystemIpAndEnableStaticNatForVm(profile.getVirtualMachine(), false); - IPAddressVO ipaddr = _ipAddressDao.findByAssociatedVmId(profile.getVirtualMachine().getId()); + rulesManager.getSystemIpAndEnableStaticNatForVm(profile.getVirtualMachine(), false); + IPAddressVO ipaddr = ipAddressDao.findByAssociatedVmId(profile.getVirtualMachine().getId()); if (ipaddr != null && ipaddr.getSystem()) { - ConsoleProxyVO consoleVm = _consoleProxyDao.findById(profile.getId()); - // override CPVM guest IP with EIP, so that console url's will be prepared with EIP + ConsoleProxyVO consoleVm = consoleProxyDao.findById(profile.getId()); consoleVm.setPublicIpAddress(ipaddr.getAddress().addr()); - _consoleProxyDao.update(consoleVm.getId(), consoleVm); + consoleProxyDao.update(consoleVm.getId(), consoleVm); } - } catch (Exception ex) { - s_logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex); + } catch (InsufficientAddressCapacityException ex) { + s_logger.warn(String.format("Unable to retrieve system IP and enable static NAT for the VM [%s] due to [%s].", profile.toString(), ex.getMessage()), ex); return false; } @@ -1529,26 +1359,24 @@ public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Command @Override public void finalizeExpunge(VirtualMachine vm) { - ConsoleProxyVO proxy = _consoleProxyDao.findById(vm.getId()); + ConsoleProxyVO proxy = consoleProxyDao.findById(vm.getId()); proxy.setPublicIpAddress(null); proxy.setPublicMacAddress(null); proxy.setPublicNetmask(null); proxy.setPrivateMacAddress(null); proxy.setPrivateIpAddress(null); - _consoleProxyDao.update(proxy.getId(), proxy); + consoleProxyDao.update(proxy.getId(), proxy); } @Override public void finalizeStop(VirtualMachineProfile profile, Answer answer) { - 
//release elastic IP here if assigned - IPAddressVO ip = _ipAddressDao.findByAssociatedVmId(profile.getId()); + IPAddressVO ip = ipAddressDao.findByAssociatedVmId(profile.getId()); if (ip != null && ip.getSystem()) { CallContext ctx = CallContext.current(); try { - _rulesMgr.disableStaticNat(ip.getId(), ctx.getCallingAccount(), ctx.getCallingUserId(), true); - } catch (Exception ex) { - s_logger.warn("Failed to disable static nat and release system ip " + ip + " as a part of vm " + profile.getVirtualMachine() + " stop due to exception ", - ex); + rulesManager.disableStaticNat(ip.getId(), ctx.getCallingAccount(), ctx.getCallingUserId(), true); + } catch (ResourceUnavailableException ex) { + s_logger.error(String.format("Unable to disable static NAT and release system IP [%s] as a part of VM [%s] stop due to [%s].", ip.toString(), profile.toString(), ex.getMessage()), ex); } } } @@ -1560,20 +1388,18 @@ public String getScanHandlerName() { @Override public void onScanStart() { - // to reduce possible number of DB queries for capacity scan, we run following aggregated queries in preparation - // stage - _zoneHostInfoMap = getZoneHostInfo(); + zoneHostInfoMap = getZoneHostInfo(); - _zoneProxyCountMap = new HashMap(); - List listProxyCounts = _consoleProxyDao.getDatacenterProxyLoadMatrix(); + zoneProxyCountMap = new HashMap<>(); + List listProxyCounts = consoleProxyDao.getDatacenterProxyLoadMatrix(); for (ConsoleProxyLoadInfo info : listProxyCounts) { - _zoneProxyCountMap.put(info.getId(), info); + zoneProxyCountMap.put(info.getId(), info); } - _zoneVmCountMap = new HashMap(); - List listVmCounts = _consoleProxyDao.getDatacenterSessionLoadMatrix(); + zoneVmCountMap = new HashMap<>(); + List listVmCounts = consoleProxyDao.getDatacenterSessionLoadMatrix(); for (ConsoleProxyLoadInfo info : listVmCounts) { - _zoneVmCountMap.put(info.getId(), info); + zoneVmCountMap.put(info.getId(), info); } } @@ -1597,15 +1423,14 @@ private void scanManagementState() { } private void 
handleResetSuspending() { - List runningProxies = _consoleProxyDao.getProxyListInStates(State.Running); + List runningProxies = consoleProxyDao.getProxyListInStates(State.Running); for (ConsoleProxyVO proxy : runningProxies) { s_logger.info("Stop console proxy " + proxy.getId() + " because of we are currently in ResetSuspending management mode"); stopProxy(proxy.getId()); } - // check if it is time to resume - List proxiesInTransition = _consoleProxyDao.getProxyListInStates(State.Running, State.Starting, State.Stopping); - if (proxiesInTransition.size() == 0) { + List proxiesInTransition = consoleProxyDao.getProxyListInStates(State.Running, State.Starting, State.Stopping); + if (CollectionUtils.isEmpty(proxiesInTransition)) { s_logger.info("All previous console proxy VMs in transition mode ceased the mode, we will now resume to last management state"); resumeLastManagementState(); } @@ -1613,7 +1438,6 @@ private void handleResetSuspending() { @Override public boolean canScan() { - // take the chance to do management-state management scanManagementState(); if (!reserveStandbyCapacity()) { @@ -1623,8 +1447,8 @@ public boolean canScan() { return false; } - List upPools = _storagePoolDao.listByStatus(StoragePoolStatus.Up); - if (upPools == null || upPools.size() == 0) { + List upPools = primaryDataStoreDao.listByStatus(StoragePoolStatus.Up); + if (CollectionUtils.isEmpty(upPools)) { s_logger.debug("Skip capacity scan as there is no Primary Storage in 'Up' state"); return false; } @@ -1634,7 +1458,7 @@ public boolean canScan() { @Override public Long[] getScannablePools() { - List zones = _dcDao.listEnabledZones(); + List zones = dataCenterDao.listEnabledZones(); Long[] dcIdList = new Long[zones.size()]; int i = 0; @@ -1646,18 +1470,15 @@ public Long[] getScannablePools() { } @Override - public boolean isPoolReadyForScan(Long pool) { - // pool is at zone basis - long dataCenterId = pool.longValue(); - - if (!isZoneReady(_zoneHostInfoMap, dataCenterId)) { + public 
boolean isPoolReadyForScan(Long dataCenterId) { + if (!isZoneReady(zoneHostInfoMap, dataCenterId)) { if (s_logger.isDebugEnabled()) { s_logger.debug("Zone " + dataCenterId + " is not ready to launch console proxy yet"); } return false; } - List l = _consoleProxyDao.getProxyListInStates(VirtualMachine.State.Starting, VirtualMachine.State.Stopping); + List l = consoleProxyDao.getProxyListInStates(VirtualMachine.State.Starting, VirtualMachine.State.Stopping); if (l.size() > 0) { if (s_logger.isDebugEnabled()) { s_logger.debug("Zone " + dataCenterId + " has " + l.size() + " console proxy VM(s) in transition state"); @@ -1673,15 +1494,13 @@ public boolean isPoolReadyForScan(Long pool) { } @Override - public Pair scanPool(Long pool) { - long dataCenterId = pool.longValue(); - - ConsoleProxyLoadInfo proxyInfo = _zoneProxyCountMap.get(dataCenterId); + public Pair scanPool(Long dataCenterId) { + ConsoleProxyLoadInfo proxyInfo = zoneProxyCountMap.get(dataCenterId); if (proxyInfo == null) { - return new Pair(AfterScanAction.nop, null); + return new Pair<>(AfterScanAction.nop, null); } - ConsoleProxyLoadInfo vmInfo = _zoneVmCountMap.get(dataCenterId); + ConsoleProxyLoadInfo vmInfo = zoneVmCountMap.get(dataCenterId); if (vmInfo == null) { vmInfo = new ConsoleProxyLoadInfo(); } @@ -1691,15 +1510,14 @@ public Pair scanPool(Long pool) { s_logger.debug("Expand console proxy standby capacity for zone " + proxyInfo.getName()); } - return new Pair(AfterScanAction.expand, null); + return new Pair<>(AfterScanAction.expand, null); } - return new Pair(AfterScanAction.nop, null); + return new Pair<>(AfterScanAction.nop, null); } @Override - public void expandPool(Long pool, Object actionArgs) { - long dataCenterId = pool.longValue(); + public void expandPool(Long dataCenterId, Object actionArgs) { allocCapacity(dataCenterId); } @@ -1747,12 +1565,12 @@ public void finalizeUnmanage(VirtualMachine vm) { } public List getConsoleProxyAllocators() { - return _consoleProxyAllocators; + return 
consoleProxyAllocators; } @Inject public void setConsoleProxyAllocators(List consoleProxyAllocators) { - _consoleProxyAllocators = consoleProxyAllocators; + this.consoleProxyAllocators = consoleProxyAllocators; } @Override @@ -1765,4 +1583,33 @@ public ConfigKey[] getConfigKeys() { return new ConfigKey[] { NoVncConsoleDefault, NoVncConsoleSourceIpCheckEnabled }; } + protected ConsoleProxyStatus parseJsonToConsoleProxyStatus(String json) throws JsonParseException { + return jsonParser.fromJson(json, ConsoleProxyStatus.class); + } + + protected void updateConsoleProxyStatus(String statusInfo, Long proxyVmId) { + if (statusInfo == null) return; + + ConsoleProxyStatus status = null; + try { + status = parseJsonToConsoleProxyStatus(statusInfo); + } catch (JsonParseException e) { + s_logger.warn(String.format("Unable to parse load info [%s] from proxy {\"vmId\": %s} due to [%s].", statusInfo, proxyVmId, e.getMessage()), e); + } + + int count = 0; + byte[] details = null; + + if (status != null) { + if (status.getConnections() != null) { + count = status.getConnections().length; + } + + details = statusInfo.getBytes(Charset.forName("US-ASCII")); + } else { + s_logger.debug(String.format("Unable to retrieve load info from proxy {\"vmId\": %s}. 
Invalid load info [%s].", proxyVmId, statusInfo)); + } + + consoleProxyDao.update(proxyVmId, count, DateUtil.currentGMTTime(), details); + } } diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index afcaacf01436..a225015ebffd 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -23,6 +23,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.Timer; import java.util.TreeSet; @@ -1674,7 +1675,7 @@ protected Pair>, List> findSuitablePoolsFo for (StoragePoolAllocator allocator : _storagePoolAllocators) { final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo); if (suitablePools != null && !suitablePools.isEmpty()) { - suitableVolumeStoragePools.put(toBeCreated, suitablePools); + checkForPreferredStoragePool(suitablePools, vmProfile.getVirtualMachine(), suitableVolumeStoragePools, toBeCreated); foundPotentialPools = true; break; } @@ -1715,6 +1716,43 @@ protected Pair>, List> findSuitablePoolsFo return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); } + private void checkForPreferredStoragePool(List suitablePools, + VirtualMachine vm, + Map> suitableVolumeStoragePools, + VolumeVO toBeCreated) { + List pools = new ArrayList<>(); + Optional storagePool = getPreferredStoragePool(suitablePools, vm); + storagePool.ifPresent(pools::add); + + pools.addAll(suitablePools); + suitableVolumeStoragePools.put(toBeCreated, pools); + } + + private Optional getMatchingStoragePool(String preferredPoolId, List storagePools) { + if (preferredPoolId == null) { + return Optional.empty(); + } + return storagePools.stream() + .filter(pool -> pool.getUuid().equalsIgnoreCase(preferredPoolId)) + .findFirst(); + } + + 
private Optional getPreferredStoragePool(List poolList, VirtualMachine vm) { + String accountStoragePoolUuid = StorageManager.PreferredStoragePool.valueIn(vm.getAccountId()); + Optional storagePool = getMatchingStoragePool(accountStoragePoolUuid, poolList); + + if (storagePool.isPresent()) { + s_logger.debug("A storage pool is specified for this account, so we will use this storage pool for allocation: " + + storagePool.get().getUuid()); + } else { + String globalStoragePoolUuid = StorageManager.PreferredStoragePool.value(); + storagePool = getMatchingStoragePool(globalStoragePoolUuid, poolList); + storagePool.ifPresent(pool -> s_logger.debug("A storage pool is specified in global setting, so we will use this storage pool for allocation: " + + pool.getUuid())); + } + return storagePool; + } + private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId) { // Check if the zone exists in the system DataCenterVO zone = _dcDao.findById(zoneId); diff --git a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java index 9ba8c1f6fde1..ebdf63560501 100644 --- a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java @@ -840,11 +840,11 @@ public List doInTransaction(TransactionStatus status) throws Insuff } if (vlanUse == VlanType.VirtualNetwork) { - if (dedicatedVlanDbIds != null && !dedicatedVlanDbIds.isEmpty()) { + if (!dedicatedVlanDbIds.isEmpty()) { fetchFromDedicatedRange = true; sc.setParameters("vlanId", dedicatedVlanDbIds.toArray()); errorMessage.append(", vlanId id=" + Arrays.toString(dedicatedVlanDbIds.toArray())); - } else if (nonDedicatedVlanDbIds != null && !nonDedicatedVlanDbIds.isEmpty()) { + } else if (!nonDedicatedVlanDbIds.isEmpty()) { sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray()); errorMessage.append(", vlanId id=" + Arrays.toString(nonDedicatedVlanDbIds.toArray())); } 
else { @@ -904,7 +904,7 @@ public List doInTransaction(TransactionStatus status) throws Insuff if ((!lockOneRow || (lockOneRow && addrs.size() == 0)) && fetchFromDedicatedRange && vlanUse == VlanType.VirtualNetwork) { // Verify if account is allowed to acquire IPs from the system boolean useSystemIps = UseSystemPublicIps.valueIn(owner.getId()); - if (useSystemIps && nonDedicatedVlanDbIds != null && !nonDedicatedVlanDbIds.isEmpty()) { + if (useSystemIps && !nonDedicatedVlanDbIds.isEmpty()) { fetchFromDedicatedRange = false; sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray()); errorMessage.append(", vlanId id=" + Arrays.toString(nonDedicatedVlanDbIds.toArray())); @@ -1130,6 +1130,10 @@ public boolean applyIpAssociations(Network network, boolean postApplyRules, bool return success; } + private String generateErrorMessageForOperationOnDisabledZone(String operation, DataCenter zone) { + return String.format("Cannot %s, %s is currently disabled.", operation, zone); + } + @DB @Override public AcquirePodIpCmdResponse allocatePodIp(String zoneId, String podId) throws ConcurrentOperationException, ResourceAllocationException { @@ -1137,8 +1141,8 @@ public AcquirePodIpCmdResponse allocatePodIp(String zoneId, String podId) throws DataCenter zone = _entityMgr.findByUuid(DataCenter.class, zoneId); Account caller = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - ResourceAllocationException ex = new ResourceAllocationException("Cannot perform this operation, " + "Zone is currently disabled" + "zoneId=" + zone.getUuid(), - ResourceType.network); + ResourceAllocationException ex = new ResourceAllocationException( + generateErrorMessageForOperationOnDisabledZone("allocate Pod IP addresses", zone), ResourceType.network); throw ex; } @@ -1148,7 +1152,7 @@ public AcquirePodIpCmdResponse allocatePodIp(String zoneId, String podId) throws HostPodVO podvo = null; podvo = 
_hpDao.findByUuid(podId); if (podvo == null) - throw new ResourceAllocationException("No sush pod exists", ResourceType.network); + throw new ResourceAllocationException("No such pod exists", ResourceType.network); vo = _privateIPAddressDao.takeIpAddress(zone.getId(), podvo.getId(), 0, caller.getId() + "", false); if(vo == null) @@ -1187,7 +1191,7 @@ public void releasePodIp(Long id) throws CloudRuntimeException { DataCenter zone = _entityMgr.findById(DataCenter.class, ipVO.getDataCenterId()); Account caller = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - throw new CloudRuntimeException("Cannot perform this operation, " + "Zone is currently disabled" + "zoneId=" + ipVO.getDataCenterId()); + throw new CloudRuntimeException(generateErrorMessageForOperationOnDisabledZone("release Pod IP", zone)); } try { _privateIPAddressDao.releasePodIpAddress(id); @@ -1207,7 +1211,7 @@ public IpAddress allocateIp(final Account ipOwner, final boolean isSystem, Accou if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { // zone is of type DataCenter. See DataCenterVO.java. 
- PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, " + "Zone is currently disabled"); + PermissionDeniedException ex = new PermissionDeniedException(generateErrorMessageForOperationOnDisabledZone("allocate IP addresses", zone)); ex.addProxyObject(zone.getUuid(), "zoneId"); throw ex; } @@ -1391,7 +1395,7 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean } if (ipToAssoc.getAssociatedWithNetworkId() != null) { - s_logger.debug("IP " + ipToAssoc + " is already assocaited with network id" + networkId); + s_logger.debug("IP " + ipToAssoc + " is already associated with network id" + networkId); return ipToAssoc; } @@ -1469,7 +1473,7 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean s_logger.warn("Failed to associate ip address, so releasing ip from the database " + ip); _ipAddressDao.markAsUnavailable(ip.getId()); if (!applyIpAssociations(network, true)) { - // if fail to apply ip assciations again, unassign ip address without updating resource + // if fail to apply ip associations again, unassign ip address without updating resource // count and generating usage event as there is no need to keep it in the db _ipAddressDao.unassignIpAddress(ip.getId()); } diff --git a/server/src/main/java/com/cloud/network/NetworkModelImpl.java b/server/src/main/java/com/cloud/network/NetworkModelImpl.java index 32643ca2a48f..001a53411102 100644 --- a/server/src/main/java/com/cloud/network/NetworkModelImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkModelImpl.java @@ -1564,6 +1564,7 @@ public boolean areServicesEnabledInZone(long zoneId, NetworkOffering offering, L if (!checkedProvider.contains(providerName)) { result = result && isProviderEnabledInPhysicalNetwork(physicalNtwkId, providerName); } + checkedProvider.add(providerName); } } diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java 
b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 7ff911393f1a..478d1df3b2ec 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -1459,7 +1459,7 @@ private GetRouterMonitorResultsAnswer fetchAndUpdateRouterHealthChecks(DomainRou return null; } - String controlIP = getRouterControlIP(router); + String controlIP = _routerControlHelper.getRouterControlIp(router.getId()); if (StringUtils.isNotBlank(controlIP) && !controlIP.equals("0.0.0.0")) { final GetRouterMonitorResultsCommand command = new GetRouterMonitorResultsCommand(performFreshChecks, false); command.setAccessDetail(NetworkElementCommand.ROUTER_IP, controlIP); @@ -1491,7 +1491,7 @@ private GetRouterMonitorResultsAnswer performBasicTestsOnRouter(DomainRouterVO r return null; } - String controlIP = getRouterControlIP(router); + String controlIP = _routerControlHelper.getRouterControlIp(router.getId()); if (StringUtils.isNotBlank(controlIP) && !controlIP.equals("0.0.0.0")) { final GetRouterMonitorResultsCommand command = new GetRouterMonitorResultsCommand(false, true); command.setAccessDetail(NetworkElementCommand.ROUTER_IP, controlIP); @@ -1601,7 +1601,7 @@ protected void runInContext() { private SetMonitorServiceCommand createMonitorServiceCommand(DomainRouterVO router, List services, boolean reconfigure, boolean deleteFromProcessedCache) { final SetMonitorServiceCommand command = new SetMonitorServiceCommand(services); - command.setAccessDetail(NetworkElementCommand.ROUTER_IP, getRouterControlIP(router)); + command.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId())); command.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); command.setAccessDetail(SetMonitorServiceCommand.ROUTER_HEALTH_CHECKS_ENABLED, RouterHealthChecksEnabled.value().toString()); 
command.setAccessDetail(SetMonitorServiceCommand.ROUTER_HEALTH_CHECKS_BASIC_INTERVAL, RouterHealthChecksBasicInterval.value().toString()); @@ -1633,7 +1633,7 @@ private boolean updateRouterHealthChecksConfig(DomainRouterVO router) { return false; } - String controlIP = getRouterControlIP(router); + String controlIP = _routerControlHelper.getRouterControlIp(router.getId()); if (StringUtils.isBlank(controlIP) || controlIP.equals("0.0.0.0")) { s_logger.debug("Skipping update data on router " + router.getUuid() + " because controlIp is not correct."); return false; @@ -1844,7 +1844,7 @@ protected void getRouterAlerts() { if (!Boolean.parseBoolean(serviceMonitoringFlag)) { continue; } - String controlIP = getRouterControlIP(router); + String controlIP = _routerControlHelper.getRouterControlIp(router.getId()); if (controlIP != null && !controlIP.equals("0.0.0.0")) { OpRouterMonitorServiceVO opRouterMonitorServiceVO = _opRouterMonitorServiceDao.findById(router.getId()); @@ -1915,29 +1915,6 @@ protected void getRouterAlerts() { } } - private String getRouterControlIP(DomainRouterVO router){ - final DataCenterVO dcVo = _dcDao.findById(router.getDataCenterId()); - String controlIP = null; - - if(router.getHypervisorType() == HypervisorType.VMware && dcVo.getNetworkType() == NetworkType.Basic ){ - - final List nics = _nicDao.listByVmId(router.getId()); - for (final NicVO nic : nics) { - final NetworkVO nc = _networkDao.findById(nic.getNetworkId()); - if (nc.getTrafficType() == TrafficType.Guest && nic.getIPv4Address() != null) { - controlIP = nic.getIPv4Address(); - break; - } - } - s_logger.debug("Vmware with Basic network selected Guest NIC ip as control IP " + controlIP ); - }else{ - controlIP = _routerControlHelper.getRouterControlIp(router.getId()); - } - - s_logger.debug("IP of control NIC " + controlIP ); - return controlIP; - } - @Override public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile, final DeployDestination dest, final 
ReservationContext context) { diff --git a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index b923099b8847..74b53f14f076 100644 --- a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -417,7 +417,9 @@ public VpnUser doInTransaction(TransactionStatus status) { public boolean removeVpnUser(long vpnOwnerId, String username, Account caller) { final VpnUserVO user = _vpnUsersDao.findByAccountAndUsername(vpnOwnerId, username); if (user == null) { - throw new InvalidParameterValueException(String.format("Could not find VPN user=[%s]. VPN owner id=[%s]", username, vpnOwnerId)); + String errorMessage = String.format("Could not find VPN user=[%s]. VPN owner id=[%s]", username, vpnOwnerId); + s_logger.debug(errorMessage); + throw new InvalidParameterValueException(errorMessage); } _accountMgr.checkAccess(caller, null, true, user); diff --git a/server/src/main/java/com/cloud/projects/ProjectManager.java b/server/src/main/java/com/cloud/projects/ProjectManager.java index f5681464615b..8eebfd3d1585 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManager.java +++ b/server/src/main/java/com/cloud/projects/ProjectManager.java @@ -19,8 +19,15 @@ import java.util.List; import com.cloud.user.Account; +import org.apache.cloudstack.framework.config.ConfigKey; public interface ProjectManager extends ProjectService { + public static final ConfigKey ProjectSmtpUseStartTLS = new ConfigKey("Advanced", Boolean.class, "project.smtp.useStartTLS", "false", + "If set to true and if we enable security via project.smtp.useAuth, this will enable StartTLS to secure the conection.", true); + + public static final ConfigKey ProjectSmtpEnabledSecurityProtocols = new ConfigKey("Advanced", String.class, "project.smtp.enabledSecurityProtocols", "", + "White-space separated security 
protocols; ex: \"TLSv1 TLSv1.1\". Supported protocols: SSLv2Hello, SSLv3, TLSv1, TLSv1.1 and TLSv1.2", true); + boolean canAccessProjectAccount(Account caller, long accountId); boolean canModifyProjectAccount(Account caller, long accountId); diff --git a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java index 7cb4674f7a38..6d51c19e0860 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java @@ -82,13 +82,15 @@ import com.cloud.utils.exception.CloudRuntimeException; import java.util.HashSet; import java.util.Set; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.utils.mailing.MailAddress; import org.apache.cloudstack.utils.mailing.SMTPMailProperties; import org.apache.cloudstack.utils.mailing.SMTPMailSender; import org.apache.commons.lang3.BooleanUtils; @Component -public class ProjectManagerImpl extends ManagerBase implements ProjectManager { +public class ProjectManagerImpl extends ManagerBase implements ProjectManager, Configurable { public static final Logger s_logger = Logger.getLogger(ProjectManagerImpl.class); @Inject @@ -1366,4 +1368,13 @@ public boolean allowUserToCreateProject() { return _allowUserToCreateProject; } + @Override + public String getConfigComponentName() { + return ProjectManager.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] {ProjectSmtpEnabledSecurityProtocols, ProjectSmtpUseStartTLS}; + } } diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index 488be99e32da..ad0190ffbad8 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ 
-50,11 +50,14 @@ import org.apache.cloudstack.api.command.admin.cluster.UpdateClusterCmd; import org.apache.cloudstack.api.command.admin.host.AddHostCmd; import org.apache.cloudstack.api.command.admin.host.AddSecondaryStorageCmd; +import org.apache.cloudstack.api.command.admin.host.CancelHostAsDegradedCmd; import org.apache.cloudstack.api.command.admin.host.CancelMaintenanceCmd; import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.host.DeclareHostAsDegradedCmd; import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; + import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -860,7 +863,7 @@ protected boolean doDeleteHost(final long hostId, final boolean isForced, final } _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), host.getDataCenterId()); - if (!isForced && host.getResourceState() != ResourceState.Maintenance) { + if (!canDeleteHost(host) && !isForced) { throw new CloudRuntimeException("Host " + host.getUuid() + " cannot be deleted as it is not in maintenance mode. Either put the host into maintenance or perform a forced deletion."); } @@ -973,6 +976,14 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { return true; } + /** + * Returns true if host can be deleted.
+ * A host can be deleted either if it is in Maintenance or "Degraded" state. + */ + protected boolean canDeleteHost(HostVO host) { + return host.getResourceState() == ResourceState.Maintenance || host.getResourceState() == ResourceState.Degraded; + } + @Override public boolean deleteHost(final long hostId, final boolean isForced, final boolean isForceDeleteStorage) { try { @@ -1448,6 +1459,89 @@ protected boolean isMaintenanceLocalStrategyDefault() { return false; } + /** + * Declares host as Degraded. This method is used in critical situations; e.g. if it is not possible to start host, not even via out-of-band. + */ + @Override + public Host declareHostAsDegraded(final DeclareHostAsDegradedCmd cmd) throws NoTransitionException { + Long hostId = cmd.getId(); + HostVO host = _hostDao.findById(hostId); + + if (host == null || StringUtils.isBlank(host.getName())) { + throw new InvalidParameterValueException(String.format("Host [id:%s] does not exist.", hostId)); + } else if (host.getRemoved() != null){ + throw new InvalidParameterValueException(String.format("Host [id:%s, name:%s] does not exist or it has been removed.", hostId, host.getName())); + } + + if (host.getResourceState() == ResourceState.Degraded) { + throw new NoTransitionException(String.format("Host [id:%s] was already marked as Degraded.", host.getId())); + } + + if (host.getStatus() != Status.Alert && host.getStatus() != Status.Disconnected) { + throw new InvalidParameterValueException( + String.format("Cannot perform declare host [id=%s, name=%s] as 'Degraded' when host is in %s status", host.getId(), host.getName(), host.getStatus())); + } + + try { + resourceStateTransitTo(host, ResourceState.Event.DeclareHostDegraded, _nodeId); + host.setResourceState(ResourceState.Degraded); + } catch (NoTransitionException e) { + s_logger.error(String.format("Cannot transmit host [id:%s, name:%s, state:%s, status:%s] to %s state", host.getId(), host.getName(), host.getState(), host.getStatus(), + 
ResourceState.Event.DeclareHostDegraded), e); + throw e; + } + + scheduleVmsRestart(hostId); + + return host; + } + + /** + * This method assumes that the host is Degraded; therefore it schedule VMs to be re-started by the HA manager. + */ + private void scheduleVmsRestart(Long hostId) { + List allVmsOnHost = _vmDao.listByHostId(hostId); + if (CollectionUtils.isEmpty(allVmsOnHost)) { + s_logger.debug(String.format("Host [id=%s] was marked as Degraded with no allocated VMs, no need to schedule VM restart", hostId)); + } + + s_logger.debug(String.format("Host [id=%s] was marked as Degraded with a total of %s allocated VMs. Triggering HA to start VMs that have HA enabled.", hostId, allVmsOnHost.size())); + for (VMInstanceVO vm : allVmsOnHost) { + State vmState = vm.getState(); + if (vmState == State.Starting || vmState == State.Running || vmState == State.Stopping) { + _haMgr.scheduleRestart(vm, false); + } + } + } + + /** + * Changes a host from 'Degraded' to 'Enabled' ResourceState. + */ + @Override + public Host cancelHostAsDegraded(final CancelHostAsDegradedCmd cmd) throws NoTransitionException { + Long hostId = cmd.getId(); + HostVO host = _hostDao.findById(hostId); + + if (host == null || host.getRemoved() != null) { + throw new InvalidParameterValueException(String.format("Host [id=%s] does not exist", host.getId())); + } + + if (host.getResourceState() != ResourceState.Degraded) { + throw new NoTransitionException( + String.format("Cannot perform cancelHostAsDegraded on host [id=%s, name=%s] when host is in %s state", host.getId(), host.getName(), host.getResourceState())); + } + + try { + resourceStateTransitTo(host, ResourceState.Event.EnableDegradedHost, _nodeId); + host.setResourceState(ResourceState.Enabled); + } catch (NoTransitionException e) { + throw new NoTransitionException( + String.format("Cannot transmit host [id=%s, name=%s, state=%s, status=%s] to %s state", host.getId(), host.getName(), host.getResourceState(), host.getStatus(), + 
ResourceState.Enabled)); + } + return host; + } + /** * Add VNC details as user VM details for each VM in 'vms' (KVM hosts only) */ diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 5ac1aef6c1ab..193cd47e3b93 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -89,7 +89,9 @@ import org.apache.cloudstack.api.command.admin.guest.UpdateGuestOsMappingCmd; import org.apache.cloudstack.api.command.admin.host.AddHostCmd; import org.apache.cloudstack.api.command.admin.host.AddSecondaryStorageCmd; +import org.apache.cloudstack.api.command.admin.host.CancelHostAsDegradedCmd; import org.apache.cloudstack.api.command.admin.host.CancelMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.host.DeclareHostAsDegradedCmd; import org.apache.cloudstack.api.command.admin.host.DeleteHostCmd; import org.apache.cloudstack.api.command.admin.host.FindHostsForMigrationCmd; import org.apache.cloudstack.api.command.admin.host.ListHostTagsCmd; @@ -206,8 +208,8 @@ import org.apache.cloudstack.api.command.admin.storage.PreparePrimaryStorageForMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateCloudToUseObjectStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateStorageCapabilitiesCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; -import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd; import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd; import org.apache.cloudstack.api.command.admin.systemvm.DestroySystemVmCmd; @@ -2129,7 +2131,7 @@ public Pair, Integer> searchForIPAddresses(final ListP } } - final Filter searchFilter = new 
Filter(IPAddressVO.class, "address", false, cmd.getStartIndex(), cmd.getPageSizeVal()); + final Filter searchFilter = new Filter(IPAddressVO.class, "address", false, null, null); final SearchBuilder sb = _publicIpAddressDao.createSearchBuilder(); Long domainId = null; Boolean isRecursive = null; @@ -2215,7 +2217,10 @@ public Pair, Integer> searchForIPAddresses(final ListP sc2.setParameters("ids", freeAddrIds.toArray()); addrs.addAll(_publicIpAddressDao.search(sc2, searchFilter)); // Allocated + Free } - + List wPagination = com.cloud.utils.StringUtils.applyPagination(addrs, cmd.getStartIndex(), cmd.getPageSizeVal()); + if (wPagination != null) { + return new Pair, Integer>(wPagination, addrs.size()); + } return new Pair<>(addrs, addrs.size()); } @@ -2973,6 +2978,8 @@ public List> getCommands() { cmdList.add(AddHostCmd.class); cmdList.add(AddSecondaryStorageCmd.class); cmdList.add(CancelMaintenanceCmd.class); + cmdList.add(CancelHostAsDegradedCmd.class); + cmdList.add(DeclareHostAsDegradedCmd.class); cmdList.add(DeleteHostCmd.class); cmdList.add(ListHostsCmd.class); cmdList.add(ListHostTagsCmd.class); @@ -3038,7 +3045,7 @@ public List> getCommands() { cmdList.add(FindStoragePoolsForMigrationCmd.class); cmdList.add(PreparePrimaryStorageForMaintenanceCmd.class); cmdList.add(UpdateStoragePoolCmd.class); - cmdList.add(SyncStoragePoolCmd.class); + cmdList.add(UpdateStorageCapabilitiesCmd.class); cmdList.add(UpdateImageStoreCmd.class); cmdList.add(DestroySystemVmCmd.class); cmdList.add(ListSystemVMsCmd.class); diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 0da1d78c224d..fd59fb86c762 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -70,6 +70,7 @@ import com.cloud.agent.api.VmNetworkStatsEntry; import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.VolumeStatsEntry; +import 
com.cloud.capacity.CapacityManager; import com.cloud.cluster.ManagementServerHostVO; import com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.dc.Vlan.VlanType; @@ -145,6 +146,7 @@ import com.cloud.vm.dao.VMInstanceDao; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; +import org.apache.commons.io.FileUtils; /** * Provides real time stats for various agent resources up to x seconds @@ -296,8 +298,6 @@ public String toString() { private long volumeStatsInterval = -1L; private long autoScaleStatsInterval = -1L; - private double _imageStoreCapacityThreshold = 0.90; - private String externalStatsPrefix = ""; String externalStatsHost = null; int externalStatsPort = -1; @@ -1375,10 +1375,28 @@ public boolean imageStoreHasEnoughCapacity(DataStore imageStore) { if (!_storageStats.keySet().contains(imageStore.getId())) { // Stats not available for this store yet, can be a new store. Better to assume it has enough capacity? return true; } - StorageStats imageStoreStats = _storageStats.get(imageStore.getId()); - if (imageStoreStats != null && (imageStoreStats.getByteUsed() / (imageStoreStats.getCapacityBytes() * 1.0)) <= _imageStoreCapacityThreshold) { + + long imageStoreId = imageStore.getId(); + StorageStats imageStoreStats = _storageStats.get(imageStoreId); + + if (imageStoreStats == null) { + s_logger.debug(String.format("Stats for image store [%s] not found.", imageStoreId)); + return false; + } + + double totalCapacity = imageStoreStats.getCapacityBytes(); + double usedCapacity = imageStoreStats.getByteUsed(); + double threshold = getImageStoreCapacityThreshold(); + String readableTotalCapacity = FileUtils.byteCountToDisplaySize((long) totalCapacity); + String readableUsedCapacity = FileUtils.byteCountToDisplaySize((long) usedCapacity); + + s_logger.debug(String.format("Verifying image storage [%s]. 
Capacity: total=[%s], used=[%s], threshold=[%s%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100)); + + if (usedCapacity / totalCapacity <= threshold) { return true; } + + s_logger.warn(String.format("Image storage [%s] has not enough capacity. Capacity: total=[%s], used=[%s], threshold=[%s%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100)); return false; } @@ -1611,6 +1629,6 @@ public ConfigKey[] getConfigKeys() { } public double getImageStoreCapacityThreshold() { - return _imageStoreCapacityThreshold; + return CapacityManager.SecondaryStorageCapacityThreshold.value(); } } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 743e90f4eaee..01932169a38e 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -43,6 +43,8 @@ import javax.inject.Inject; +import com.cloud.agent.api.GetStoragePoolCapabilitiesAnswer; +import com.cloud.agent.api.GetStoragePoolCapabilitiesCommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; @@ -98,6 +100,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; @@ -316,8 +319,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject SnapshotService 
_snapshotService; @Inject + public StorageService storageService; + @Inject StoragePoolTagsDao _storagePoolTagsDao; @Inject + PrimaryDataStoreDao primaryStoreDao; + @Inject DiskOfferingDetailsDao _diskOfferingDetailsDao; @Inject ServiceOfferingDetailsDao _serviceOfferingDetailsDao; @@ -1578,6 +1585,7 @@ public void cleanupSecondaryStorage(boolean recurring) { for (DataStore store : imageStores) { try { List destroyedStoreVOs = _volumeStoreDao.listDestroyed(store.getId()); + destroyedStoreVOs.addAll(_volumeDataStoreDao.listByVolumeState(Volume.State.Expunged)); s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + store.getName()); for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) { if (s_logger.isDebugEnabled()) { @@ -2773,6 +2781,77 @@ public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { return imageStoreVO; } + /** + * @param poolId - Storage pool id for pool to update. + * @param failOnChecks - If true, throw an error if pool type and state checks fail. + */ + @Override + public void updateStorageCapabilities(Long poolId, boolean failOnChecks) { + StoragePoolVO pool = _storagePoolDao.findById(poolId); + + if (pool == null) { + throw new CloudRuntimeException("Primary storage not found for id: " + poolId); + } + + // Only checking NFS for now - required for disk provisioning type support for vmware. 
+ if (pool.getPoolType() != StoragePoolType.NetworkFilesystem) { + if (failOnChecks) { + throw new CloudRuntimeException("Storage capabilities update only supported on NFS storage mounted."); + } + return; + } + + if (pool.getStatus() != StoragePoolStatus.Initialized && pool.getStatus() != StoragePoolStatus.Up) { + if (failOnChecks){ + throw new CloudRuntimeException("Primary storage is not in the right state to update capabilities"); + } + return; + } + + HypervisorType hypervisor = pool.getHypervisor(); + + if (hypervisor == null){ + if (pool.getClusterId() != null) { + ClusterVO cluster = _clusterDao.findById(pool.getClusterId()); + hypervisor = cluster.getHypervisorType(); + } + } + + if (!HypervisorType.VMware.equals(hypervisor)) { + if (failOnChecks) { + throw new CloudRuntimeException("Storage capabilities update only supported on VMWare."); + } + return; + } + + // find the host + List poolIds = new ArrayList(); + poolIds.add(pool.getId()); + List hosts = _storagePoolHostDao.findHostsConnectedToPools(poolIds); + if (hosts.size() > 0) { + GetStoragePoolCapabilitiesCommand cmd = new GetStoragePoolCapabilitiesCommand(); + cmd.setPool(new StorageFilerTO(pool)); + GetStoragePoolCapabilitiesAnswer answer = (GetStoragePoolCapabilitiesAnswer) _agentMgr.easySend(hosts.get(0), cmd); + if (answer.getPoolDetails() != null && answer.getPoolDetails().containsKey(Storage.Capability.HARDWARE_ACCELERATION.toString())) { + StoragePoolDetailVO hardwareAccelerationSupported = _storagePoolDetailsDao.findDetail(pool.getId(), Storage.Capability.HARDWARE_ACCELERATION.toString()); + if (hardwareAccelerationSupported == null) { + StoragePoolDetailVO storagePoolDetailVO = new StoragePoolDetailVO(pool.getId(), Storage.Capability.HARDWARE_ACCELERATION.toString(), answer.getPoolDetails().get(Storage.Capability.HARDWARE_ACCELERATION.toString()), false); + _storagePoolDetailsDao.persist(storagePoolDetailVO); + } else { + 
hardwareAccelerationSupported.setValue(answer.getPoolDetails().get(Storage.Capability.HARDWARE_ACCELERATION.toString())); + _storagePoolDetailsDao.update(hardwareAccelerationSupported.getId(), hardwareAccelerationSupported); + } + } else { + if (answer != null && !answer.getResult()) { + s_logger.error("Failed to update storage pool capabilities: " + answer.getDetails()); + if (failOnChecks) { + throw new CloudRuntimeException(answer.getDetails()); + } + } + } + } + } + private void duplicateCacheStoreRecordsToRegionStore(long storeId) { _templateStoreDao.duplicateCacheRecordsOnRegionStore(storeId); _snapshotStoreDao.duplicateCacheRecordsOnRegionStore(storeId); @@ -3132,7 +3211,9 @@ public ConfigKey[] getConfigKeys() { STORAGE_POOL_CLIENT_MAX_CONNECTIONS, PRIMARY_STORAGE_DOWNLOAD_WAIT, SecStorageMaxMigrateSessions, - MaxDataMigrationWaitTime + MaxDataMigrationWaitTime, + DiskProvisioningStrictness, + PreferredStoragePool }; } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 684404a84805..1e34c38626ea 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -32,8 +32,6 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; - -import org.apache.cloudstack.api.command.user.vm.CloneVMCmd; import com.cloud.api.query.dao.ServiceOfferingJoinDao; import com.cloud.api.query.vo.ServiceOfferingJoinVO; import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; @@ -910,8 +908,7 @@ public VolumeVO createVolume(CreateVolumeCmd cmd) { } @Override - public Volume cloneDataVolume(CloneVMCmd cmd, long snapshotId, Volume volume) throws StorageUnavailableException { - long vmId = cmd.getEntityId(); + public Volume cloneDataVolume(long vmId, long snapshotId, Volume volume) throws StorageUnavailableException { return createVolumeFromSnapshot((VolumeVO) volume, 
snapshotId, vmId); } @@ -1120,14 +1117,6 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep * This will be checked again at the hypervisor level where we can see * the actual disk size. */ - if (currentSize > newSize) { - VolumeVO vol = _volsDao.findById(cmd.getEntityId()); - if (vol != null && ImageFormat.QCOW2.equals(vol.getFormat()) && !Volume.State.Allocated.equals(volume.getState())) { - String message = "Unable to shrink volumes of type QCOW2"; - s_logger.warn(message); - throw new InvalidParameterValueException(message); - } - } if (currentSize > newSize && !shrinkOk) { throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize + " would shrink the volume." + "Need to sign off by supplying the shrinkok parameter with value of true."); @@ -1166,10 +1155,6 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); if (userVm != null) { - if (volume.getVolumeType().equals(Volume.Type.ROOT) && userVm.getPowerState() != VirtualMachine.PowerState.PowerOff && hypervisorType == HypervisorType.VMware) { - s_logger.error(" For ROOT volume resize VM should be in Power Off state."); - throw new InvalidParameterValueException("VM current state is : " + userVm.getPowerState() + ". 
But VM should be in " + VirtualMachine.PowerState.PowerOff + " state."); - } // serialize VM operation AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); @@ -1386,7 +1371,7 @@ private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long n return volume; } catch (Exception e) { - throw new CloudRuntimeException("Couldn't resize volume: " + volume.getName() + ", " + e.getMessage(), e); + throw new CloudRuntimeException(String.format("Failed to resize volume operation of volume UUID: [%s] due to - %s", volume.getUuid(), e.getMessage()), e); } } @@ -1562,9 +1547,9 @@ public Volume destroyVolume(long volumeId, Account caller, boolean expunge, bool s_logger.warn("Failed to expunge volume: " + volumeId); return null; } + removeVolume(volume.getId()); } - removeVolume(volume.getId()); return volume; } @@ -1706,10 +1691,6 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device } @Override - public Volume attachVolumeToVm(CloneVMCmd cmd, Long volumeId, Long deviceId) { - return attachVolumeToVM(cmd.getEntityId(), volumeId, deviceId); - } - public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId) { Account caller = CallContext.current().getCallingAccount(); @@ -2420,10 +2401,6 @@ public Volume migrateVolume(MigrateVolumeCmd cmd) { if (diskOffering == null) { throw new CloudRuntimeException("volume '" + vol.getUuid() + "', has no diskoffering. 
Migration target cannot be checked."); } - if (!doesTargetStorageSupportDiskOffering(destPool, diskOffering)) { - throw new CloudRuntimeException(String.format("Migration target pool [%s, tags:%s] has no matching tags for volume [%s, uuid:%s, tags:%s]", destPool.getName(), - getStoragePoolTags(destPool), vol.getName(), vol.getUuid(), diskOffering.getTags())); - } if (liveMigrateVolume && State.Running.equals(vm.getState()) && destPool.getClusterId() != null && srcClusterId != null) { diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 06da5d1f0026..6dcb412355cd 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -588,8 +588,11 @@ public boolean deleteSnapshot(long snapshotId) { if (result) { if (snapshotCheck.getState() == Snapshot.State.BackedUp) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshotCheck.getAccountId(), snapshotCheck.getDataCenterId(), snapshotId, - snapshotCheck.getName(), null, null, 0L, snapshotCheck.getClass().getName(), snapshotCheck.getUuid()); + SnapshotVO snapVO = _snapshotDao.findById(snapshotId); + if (snapVO == null || snapVO.getsnapshotType() != Type.INTERNAL.ordinal()) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshotCheck.getAccountId(), snapshotCheck.getDataCenterId(), snapshotId, + snapshotCheck.getName(), null, null, 0L, snapshotCheck.getClass().getName(), snapshotCheck.getUuid()); + } } if (snapshotCheck.getState() != Snapshot.State.Error && snapshotCheck.getState() != Snapshot.State.Destroyed) { @@ -665,7 +668,7 @@ public Pair, Integer> listSnapshots(ListSnapshotsCmd cm sb.and("snapshotTypeEQ", sb.entity().getsnapshotType(), SearchCriteria.Op.IN); sb.and("snapshotTypeNEQ", sb.entity().getsnapshotType(), SearchCriteria.Op.NEQ); 
sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); - + sb.and("snapshotTypeInternal", sb.entity().getsnapshotType(), SearchCriteria.Op.NEQ); if (tags != null && !tags.isEmpty()) { SearchBuilder tagSearch = _resourceTagDao.createSearchBuilder(); for (int count = 0; count < tags.size(); count++) { @@ -737,7 +740,7 @@ public Pair, Integer> listSnapshots(ListSnapshotsCmd cm // Show only MANUAL and RECURRING snapshot types sc.setParameters("snapshotTypeNEQ", Snapshot.Type.TEMPLATE.ordinal()); } - + sc.setParameters("snapshotTypeInternal", Type.INTERNAL.ordinal()); Pair, Integer> result = _snapshotDao.searchAndCount(sc, searchFilter); return new Pair, Integer>(result.first(), result.second()); } @@ -1027,7 +1030,13 @@ public List findRecurringSnapshotSchedule(ListRecurringSnaps private Type getSnapshotType(Long policyId) { if (policyId.equals(Snapshot.MANUAL_POLICY_ID)) { return Type.MANUAL; - } else { + } + + else if (policyId.equals(Snapshot.INTERNAL_POLICY_ID)) { + return Type.INTERNAL; + } + + else { SnapshotPolicyVO spstPolicyVO = _snapshotPolicyDao.findById(policyId); IntervalType intvType = DateUtil.getIntervalType(spstPolicyVO.getInterval()); return getSnapshotType(intvType); @@ -1180,8 +1189,11 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationExc throw new CloudRuntimeException("Could not find snapshot"); } } - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, - snapshotStoreRef.getPhysicalSize(), volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); + SnapshotVO snapInstance = _snapshotDao.findById(snapshot.getId()); + if (snapInstance == null || snapInstance.getsnapshotType() != Type.INTERNAL.ordinal()) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, + 
snapshotStoreRef.getPhysicalSize(), volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); + } // Correct the resource count of snapshot in case of delta snapshots. _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize() - snapshotStoreRef.getPhysicalSize())); diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 55399b34ddd2..7380186017c9 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -158,9 +158,9 @@ import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuru; import com.cloud.hypervisor.HypervisorGuruManager; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.projects.Project; import com.cloud.projects.ProjectManager; import com.cloud.storage.Storage.ImageFormat; @@ -301,7 +301,6 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Inject private EndPointSelector selector; - private TemplateAdapter getAdapter(HypervisorType type) { TemplateAdapter adapter = null; if (type == HypervisorType.BareMetal) { @@ -1167,10 +1166,11 @@ public boolean attachIso(long isoId, long vmId, boolean forced) { throw new InvalidParameterValueException("Unable to find an ISO with id " + isoId); } - long dcId = vm.getDataCenterId(); - VMTemplateZoneVO exists = _tmpltZoneDao.findByZoneTemplate(dcId, isoId); - if (null == exists) { - throw new InvalidParameterValueException("ISO is not available in the zone the VM is in."); + if (!TemplateType.PERHOST.equals(iso.getTemplateType())) { + VMTemplateZoneVO exists = _tmpltZoneDao.findByZoneTemplate(vm.getDataCenterId(), isoId); + if (null == exists) { + throw new 
InvalidParameterValueException("ISO is not available in the zone the VM is in."); + } } // check permissions @@ -1187,11 +1187,11 @@ public boolean attachIso(long isoId, long vmId, boolean forced) { throw new InvalidParameterValueException("Please specify a VM that is either Stopped or Running."); } - if ("xen-pv-drv-iso".equals(iso.getDisplayText()) && vm.getHypervisorType() != Hypervisor.HypervisorType.XenServer) { + if (XS_TOOLS_ISO.equals(iso.getUniqueName()) && vm.getHypervisorType() != Hypervisor.HypervisorType.XenServer) { throw new InvalidParameterValueException("Cannot attach Xenserver PV drivers to incompatible hypervisor " + vm.getHypervisorType()); } - if ("vmware-tools.iso".equals(iso.getName()) && vm.getHypervisorType() != Hypervisor.HypervisorType.VMware) { + if (VMWARE_TOOLS_ISO.equals(iso.getUniqueName()) && vm.getHypervisorType() != Hypervisor.HypervisorType.VMware) { throw new InvalidParameterValueException("Cannot attach VMware tools drivers to incompatible hypervisor " + vm.getHypervisorType()); } boolean result = attachISOToVM(vmId, userId, isoId, true, forced); @@ -1747,10 +1747,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { @Override @DB @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating actual private template", create = true) - public VirtualMachineTemplate createPrivateTemplate(CloneVMCmd cmd) throws CloudRuntimeException { + public VirtualMachineTemplate createPrivateTemplate(CloneVMCmd cmd, long snapshotId, long templateId) throws CloudRuntimeException { UserVm curVm = cmd.getTargetVM(); - long templateId = cmd.getTemporaryTemlateId(); - long snapshotId = cmd.getTemporarySnapShotId(); final Long accountId = curVm.getAccountId(); Account caller = CallContext.current().getCallingAccount(); List volumes = _volumeDao.findByInstanceAndType(cmd.getId(), Volume.Type.ROOT); @@ -1765,7 +1763,6 @@ public VirtualMachineTemplate createPrivateTemplate(CloneVMCmd cmd) throws Cloud 
VolumeInfo vInfo = _volFactory.getVolume(volumeId); DataStore store = _dataStoreMgr.getImageStoreWithFreeCapacity(zoneId); snapshot = _snapshotDao.findById(snapshotId); -// future = _tmpltSvr.createTemplateFromVolumeAsync(vInfo, cloneTempalateInfp, store); // create template from snapshot DataStoreRole dataStoreRole = ApiResponseHelper.getDataStoreRole(snapshot, _snapshotStoreDao, _dataStoreMgr); SnapshotInfo snapInfo = _snapshotFactory.getSnapshot(snapshotId, dataStoreRole); @@ -1813,6 +1810,12 @@ public VirtualMachineTemplate createPrivateTemplate(CloneVMCmd cmd) throws Cloud s_logger.info("successfully created the template with Id: " + templateId); finalTmpProduct = _tmpltDao.findById(templateId); TemplateDataStoreVO srcTmpltStore = _tmplStoreDao.findByStoreTemplate(store.getId(), templateId); + try { + srcTmpltStore.getSize(); + } catch (NullPointerException e) { + srcTmpltStore.setSize(0L); + _tmplStoreDao.update(srcTmpltStore.getId(), srcTmpltStore); + } UsageEventVO usageEvent = new UsageEventVO(EventTypes.EVENT_TEMPLATE_CREATE, finalTmpProduct.getAccountId(), zoneId, finalTmpProduct.getId(), finalTmpProduct.getName(), null, finalTmpProduct.getSourceTemplateId(), srcTmpltStore.getPhysicalSize(), finalTmpProduct.getSize()); @@ -1858,13 +1861,42 @@ public void doInTransactionWithoutResult(TransactionStatus status) { return null; } + @Override + public Snapshot createSnapshotFromTemplateOwner(long vmId, UserVm curVm, Account templateOwner, VolumeApiService volumeService) throws ResourceAllocationException { + Account caller = CallContext.current().getCallingAccount(); + _accountMgr.checkAccess(caller, null, true, templateOwner); +// UserVm curVm = cmd.getTargetVM(); + Long nextSnapId = _tmpltDao.getNextInSequence(Long.class, "id"); + Long volumeId = _volumeDao.findByInstanceAndType(vmId, Volume.Type.ROOT).get(0).getId(); + VolumeVO volume = _volumeDao.findById(volumeId); + if (volume == null) { + throw new InvalidParameterValueException("Failed to create 
private template record, unable to find root volume " + volumeId); + } + + // check permissions + _accountMgr.checkAccess(caller, null, true, volume); + s_logger.info("Creating snapshot for the template creation"); + SnapshotVO snapshot = (SnapshotVO) volumeService.allocSnapshot(volumeId, Snapshot.INTERNAL_POLICY_ID, curVm.getDisplayName() + "-Clone-" + nextSnapId, null); + if (snapshot == null) { + throw new CloudRuntimeException("Unable to create a snapshot during the template creation recording"); + } + Snapshot snapshotEntity = volumeService.takeSnapshot(volumeId, Snapshot.INTERNAL_POLICY_ID, snapshot.getId(), caller, false, null, false, new HashMap<>()); + if (snapshotEntity == null) { + throw new CloudRuntimeException("Error when creating the snapshot entity"); + } + if (snapshotEntity.getState() != Snapshot.State.BackedUp) { + throw new CloudRuntimeException("Async backup of snapshot happens during the clone for snapshot id: " + snapshot.getId()); + } + return snapshot; + } @Override @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template from clone", create = true) - public VMTemplateVO createPrivateTemplateRecord(CloneVMCmd cmd, Account templateOwner, VolumeApiService volumeService) throws ResourceAllocationException { + public VMTemplateVO createPrivateTemplateRecord(CloneVMCmd cmd, Account templateOwner, VolumeApiService volumeService, Snapshot snapshot) throws ResourceAllocationException { Account caller = CallContext.current().getCallingAccount(); _accountMgr.checkAccess(caller, null, true, templateOwner); String name = cmd.getTemplateName(); if (name.length() > 32) { + name = name.substring(5) + "-QA-Clone"; } @@ -1878,21 +1910,8 @@ public VMTemplateVO createPrivateTemplateRecord(CloneVMCmd cmd, Account template if (volume == null) { throw new InvalidParameterValueException("Failed to create private template record, unable to find root volume " + volumeId); } - // check permissions
_accountMgr.checkAccess(caller, null, true, volume); - - // If private template is created from Volume, check that the volume - // will not be active when the private template is - // created -// if (!_volumeMgr.volumeInactive(volume)) { -// String msg = "Unable to create private template for volume: " + volume.getName() + "; volume is attached to a non-stopped VM, please stop the VM first"; -// if (s_logger.isInfoEnabled()) { -// s_logger.info(msg); -// } -// throw new CloudRuntimeException(msg); -// } - hyperType = _volumeDao.getHypervisorType(volumeId); if (HypervisorType.LXC.equals(hyperType)) { throw new InvalidParameterValueException("Template creation is not supported for LXC volume: " + volumeId); @@ -1906,24 +1925,8 @@ public VMTemplateVO createPrivateTemplateRecord(CloneVMCmd cmd, Account template if (guestOS == null) { throw new InvalidParameterValueException("GuestOS with ID: " + guestOSId + " does not exist."); } - // get snapshot from this step - - Long nextTemplateId = _tmpltDao.getNextInSequence(Long.class, "id"); - s_logger.info("Creating snapshot for the tempalte creation"); - SnapshotVO snapshot = (SnapshotVO) volumeService.allocSnapshot(volumeId, Snapshot.MANUAL_POLICY_ID, curVm.getDisplayName() + "-Clone-" + nextTemplateId, null); - if (snapshot == null) { - throw new CloudRuntimeException("Unable to create a snapshot during the template creation recording"); - } - Snapshot snapshotEntity = volumeService.takeSnapshot(volumeId, Snapshot.MANUAL_POLICY_ID, snapshot.getId(), caller, false, null, false, new HashMap<>()); - if (snapshotEntity == null) { - throw new CloudRuntimeException("Error when creating the snapshot entity"); - } - if (snapshotEntity.getState() != Snapshot.State.BackedUp) { - throw new CloudRuntimeException("Async backup of snapshot happens during the clone for snapshot id: " + snapshot.getId()); - } - cmd.setTemporarySnapShotId(snapshot.getId()); String description = ""; // TODO: add this to clone parameter in the future boolean 
isExtractable = false; Long sourceTemplateId = null; @@ -1948,13 +1951,6 @@ public VMTemplateVO createPrivateTemplateRecord(CloneVMCmd cmd, Account template privateTemplate.setSourceTemplateId(sourceTemplateId); VMTemplateVO template = _tmpltDao.persist(privateTemplate); - // persist this to the template zone area and remember to remove the resource count in the execute phase once in failure or clean up phase -// VMTemplateZoneVO templateZone = new VMTemplateZoneVO(zoneId, template.getId(), new Date()); -// _tmpltZoneDao.persist(templateZone); -// TemplateDataStoreVO voRecord = _tmplStoreDao.createTemplateDirectDownloadEntry(template.getId(), template.getSize()); -// voRecord.setDataStoreId(2); -// _tmplStoreDao.persist(voRecord); - // Increment the number of templates if (template != null) { Map details = new HashMap(); @@ -1988,14 +1984,10 @@ public VMTemplateVO createPrivateTemplateRecord(CloneVMCmd cmd, Account template _resourceLimitMgr.incrementResourceCount(templateOwner.getId(), ResourceType.template); _resourceLimitMgr.incrementResourceCount(templateOwner.getId(), ResourceType.secondary_storage, - snapshot.getSize()); + ((SnapshotVO) snapshot).getSize()); } - if (template != null) { - return template; - } else { - throw new CloudRuntimeException("Failed to create a template"); - } + return template; } @Override diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index b6f4e5e36000..88cd217698a3 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -2431,7 +2431,11 @@ public Pair findUserByApiKey(String apiKey) { @Override public Map getKeys(GetUserKeysCmd cmd) { final long userId = cmd.getID(); + return getKeys(userId); + } + @Override + public Map getKeys(Long userId) { User user = getActiveUser(userId); if (user == null) { throw new InvalidParameterValueException("Unable to find user by 
id"); diff --git a/server/src/main/java/com/cloud/uuididentity/UUIDManagerImpl.java b/server/src/main/java/com/cloud/uuididentity/UUIDManagerImpl.java index 8f3e9a1af5bc..9b6ac8a960a6 100644 --- a/server/src/main/java/com/cloud/uuididentity/UUIDManagerImpl.java +++ b/server/src/main/java/com/cloud/uuididentity/UUIDManagerImpl.java @@ -117,12 +117,16 @@ public String getUuid(Class entityType, Long customId) { if (customId == null) { return null; } + if (entityType == null ) { + throw new InvalidParameterValueException("Unknown entity type"); + } + Identity identity = (Identity) this._entityMgr.findById(entityType, customId); if (identity == null) { - throw new InvalidParameterValueException("Unable to find UUID for id " + customId); + throw new InvalidParameterValueException(String.format("Unable to find UUID for id [%s] of type [%s]", + customId, entityType.getSimpleName())); + } return identity.getUuid(); - } - } diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index d04c85c52ad6..c11aa0f54f7b 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -237,11 +237,13 @@ import com.cloud.event.UsageEventUtils; import com.cloud.event.UsageEventVO; import com.cloud.event.dao.UsageEventDao; +import com.cloud.exception.AffinityConflictException; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.CloudException; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ManagementServerException; import com.cloud.exception.OperationTimedoutException; @@ -348,6 +350,8 @@ import com.cloud.vm.snapshot.VMSnapshotVO; import 
com.cloud.vm.snapshot.dao.VMSnapshotDao; +import static com.cloud.configuration.ConfigurationManagerImpl.VM_USERDATA_MAX_LENGTH; + public class UserVmManagerImpl extends ManagerBase implements UserVmManager, VirtualMachineGuru, UserVmService, Configurable { private static final Logger s_logger = Logger.getLogger(UserVmManagerImpl.class); @@ -532,6 +536,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private BackupDao backupDao; @Inject private BackupManager backupManager; + @Inject + private SnapshotApiService _snapshotService; private ScheduledExecutorService _executor = null; private ScheduledExecutorService _vmIpFetchExecutor = null; @@ -548,7 +554,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private Map vmIdCountMap = new ConcurrentHashMap<>(); private static final int MAX_HTTP_GET_LENGTH = 2 * MAX_USER_DATA_LENGTH_BYTES; - private static final int MAX_HTTP_POST_LENGTH = 16 * MAX_USER_DATA_LENGTH_BYTES; + private static final int NUM_OF_2K_BLOCKS = 512; + private static final int MAX_HTTP_POST_LENGTH = NUM_OF_2K_BLOCKS * MAX_USER_DATA_LENGTH_BYTES; @Inject private OrchestrationService _orchSrvc; @@ -984,8 +991,7 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup, } if (vm.getState() == State.Running && vm.getHostId() != null) { - collectVmDiskStatistics(vm); - collectVmNetworkStatistics(vm); + collectVmDiskAndNetworkStatistics(vm, State.Running); if (forced) { Host vmOnHost = _hostDao.findById(vm.getHostId()); @@ -4478,11 +4484,14 @@ protected String validateUserData(String userData, HTTPMethod httpmethod) { if (!Base64.isBase64(userData)) { throw new InvalidParameterValueException("User data is not base64 encoded"); } - // If GET, use 4K. If POST, support upto 32K. + // If GET, use 4K. If POST, support up to 1M. 
if (httpmethod.equals(HTTPMethod.GET)) { if (userData.length() >= MAX_HTTP_GET_LENGTH) { throw new InvalidParameterValueException("User data is too long for an http GET request"); } + if (userData.length() > VM_USERDATA_MAX_LENGTH.value()) { + throw new InvalidParameterValueException("User data has exceeded configurable max length : " + VM_USERDATA_MAX_LENGTH.value()); + } decodedUserData = Base64.decodeBase64(userData.getBytes()); if (decodedUserData.length > MAX_HTTP_GET_LENGTH) { throw new InvalidParameterValueException("User data is too long for GET request"); @@ -4491,6 +4500,9 @@ protected String validateUserData(String userData, HTTPMethod httpmethod) { if (userData.length() >= MAX_HTTP_POST_LENGTH) { throw new InvalidParameterValueException("User data is too long for an http POST request"); } + if (userData.length() > VM_USERDATA_MAX_LENGTH.value()) { + throw new InvalidParameterValueException("User data has exceeded configurable max length : " + VM_USERDATA_MAX_LENGTH.value()); + } decodedUserData = Base64.decodeBase64(userData.getBytes()); if (decodedUserData.length > MAX_HTTP_POST_LENGTH) { throw new InvalidParameterValueException("User data is too long for POST request"); @@ -4507,7 +4519,7 @@ protected String validateUserData(String userData, HTTPMethod httpmethod) { } @Override - public void checkCloneCondition(CloneVMCmd cmd) throws InvalidParameterValueException, ResourceUnavailableException, CloudRuntimeException, ResourceAllocationException { + public void validateCloneCondition(CloneVMCmd cmd) throws InvalidParameterValueException, ResourceUnavailableException, CloudRuntimeException, ResourceAllocationException { if (cmd.getAccountName() != null && cmd.getDomainId() == null) { throw new InvalidParameterValueException("You must input the domainId together with the account name"); @@ -4528,9 +4540,6 @@ public void checkCloneCondition(CloneVMCmd cmd) throws InvalidParameterValueExce throw new CloudRuntimeException("the VM doesn't exist or not 
registered in management server!"); } UserVmVO vmStatus = _vmDao.findById(cmd.getId()); -// if (vmStatus.state != State.Shutdown && vmStatus.state != State.Stopped) { -// throw new CloudRuntimeException("You should clone an instance that's shutdown!"); -// } if (vmStatus.getHypervisorType() != HypervisorType.KVM && vmStatus.getHypervisorType() != HypervisorType.Simulator) { throw new CloudRuntimeException("The clone operation is only supported on KVM and Simulator!"); } @@ -4550,7 +4559,6 @@ public void checkCloneCondition(CloneVMCmd cmd) throws InvalidParameterValueExce // verify that the VM doesn't expire Map details = curVm.getDetails(); verifyDetails(details); -// Account activeOwner = _accountDao.findById(cmd.getEntityOwnerId()); long zoneId = curVm.getDataCenterId(); DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId); if (zone == null) { @@ -4633,11 +4641,39 @@ private VolumeVO saveDataDiskVolumeFromSnapShot(final Account owner, final Boole }); } + @Override + public void prepareCloneVirtualMachine(CloneVMCmd cmd) throws ResourceAllocationException, ResourceUnavailableException, InsufficientCapacityException { + Long temporarySnapshotId = null; + try { + Account owner = _accountService.getAccount(cmd.getEntityOwnerId()); + Snapshot snapshot = _tmplService.createSnapshotFromTemplateOwner(cmd.getId(), cmd.getTargetVM(), owner, _volumeService); + temporarySnapshotId = snapshot.getId(); + VirtualMachineTemplate template = _tmplService.createPrivateTemplateRecord(cmd, owner, _volumeService, snapshot); + if (template == null) { + throw new CloudRuntimeException("failed to create a template to db"); + } + s_logger.info("The template id recorded is: " + template.getId()); + _tmplService.createPrivateTemplate(cmd, snapshot.getId(), template.getId()); + UserVm vmRecord = recordVirtualMachineToDB(cmd, template.getId()); + if (vmRecord == null) { + throw new CloudRuntimeException("Unable to record the VM to DB!"); + } + cmd.setEntityUuid(vmRecord.getUuid()); 
+ cmd.setEntityId(vmRecord.getId()); + } finally { + if (temporarySnapshotId != null) { + _snapshotService.deleteSnapshot(temporarySnapshotId); + s_logger.warn("clearing the temporary snapshot: " + temporarySnapshotId); + } + } + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VM_CLONE, eventDescription = "clone vm", async = true) public Optional cloneVirtualMachine(CloneVMCmd cmd, VolumeApiService volumeService, SnapshotApiService snapshotService) throws ResourceUnavailableException, ConcurrentOperationException, CloudRuntimeException, InsufficientCapacityException, ResourceAllocationException { long vmId = cmd.getEntityId(); UserVmVO curVm = _vmDao.findById(vmId); + Account curVmAccount = _accountDao.findById(curVm.getAccountId()); // create and attach data disk long targetClonedVmId = cmd.getId(); Account caller = CallContext.current().getCallingAccount(); @@ -4651,12 +4687,12 @@ public Optional cloneVirtualMachine(CloneVMCmd cmd, VolumeApiService vol try { for (VolumeVO dataDisk : dataDisks) { long diskId = dataDisk.getId(); - SnapshotVO dataSnapShot = (SnapshotVO) volumeService.allocSnapshot(diskId, Snapshot.MANUAL_POLICY_ID, "DataDisk-Clone" + dataDisk.getName(), null); + SnapshotVO dataSnapShot = (SnapshotVO) volumeService.allocSnapshot(diskId, Snapshot.INTERNAL_POLICY_ID, "DataDisk-Clone" + dataDisk.getName(), null); if (dataSnapShot == null) { throw new CloudRuntimeException("Unable to allocate snapshot of data disk: " + dataDisk.getId() + " name: " + dataDisk.getName()); } createdSnapshots.add(dataSnapShot); - SnapshotVO snapshotEntity = (SnapshotVO) volumeService.takeSnapshot(diskId, Snapshot.MANUAL_POLICY_ID, dataSnapShot.getId(), caller, false, null, false, new HashMap<>()); + SnapshotVO snapshotEntity = (SnapshotVO) volumeService.takeSnapshot(diskId, Snapshot.INTERNAL_POLICY_ID, dataSnapShot.getId(), caller, false, null, false, new HashMap<>()); if (snapshotEntity == null) { throw new CloudRuntimeException("Error when creating the snapshot 
entity"); } @@ -4672,11 +4708,16 @@ public Optional cloneVirtualMachine(CloneVMCmd cmd, VolumeApiService vol DataCenterVO dataCenter = _dcDao.findById(zoneId); String volumeName = snapshotEntity.getName() + "-DataDisk-Volume"; VolumeVO parentVolume = _volsDao.findByIdIncludingRemoved(snapshotEntity.getVolumeId()); - newDatadisk = saveDataDiskVolumeFromSnapShot(caller, true, zoneId, + newDatadisk = saveDataDiskVolumeFromSnapShot(curVmAccount, true, zoneId, diskOfferingId, provisioningType, size, minIops, maxIops, parentVolume, volumeName, _uuidMgr.generateUuid(Volume.class, null), new HashMap<>()); - VolumeVO volumeEntity = (VolumeVO) volumeService.cloneDataVolume(cmd, snapshotEntity.getId(), newDatadisk); + VolumeVO volumeEntity = (VolumeVO) volumeService.cloneDataVolume(cmd.getEntityId(), snapshotEntity.getId(), newDatadisk); createdVolumes.add(volumeEntity); } + + for (VolumeVO createdVol : createdVolumes) { +// volumeService.attachVolumeToVm(cmd, createdVol.getId(), createdVol.getDeviceId()); + volumeService.attachVolumeToVM(cmd.getEntityId(), createdVol.getId(), createdVol.getDeviceId()); + } } catch (CloudRuntimeException e){ s_logger.warn("data disk process failed during clone, clearing the temporary resources..."); for (VolumeVO dataDiskToClear : createdVolumes) { @@ -4686,6 +4727,7 @@ public Optional cloneVirtualMachine(CloneVMCmd cmd, VolumeApiService vol if (newDatadisk != null) { volumeService.destroyVolume(newDatadisk.getId(), caller, true, false); } + destroyVm(vmId, true); throw new CloudRuntimeException(e.getMessage()); } finally { // clear the temporary data snapshots @@ -4695,16 +4737,6 @@ public Optional cloneVirtualMachine(CloneVMCmd cmd, VolumeApiService vol } } - for (VolumeVO createdVol : createdVolumes) { - try { - volumeService.attachVolumeToVm(cmd, createdVol.getId(), createdVol.getDeviceId()); - } catch (CloudRuntimeException e) { - s_logger.warn("data disk: " + createdVol.getId() + " attachment to VM " + vmId + " failed due to" + 
e.getMessage()); - s_logger.info("Clearing the data disk: " + createdVol.getId()); - volumeService.destroyVolume(createdVol.getId(), caller, true, true); - } - } - // start the VM if successfull Long podId = curVm.getPodIdToDeployIn(); Long clusterId = null; @@ -5707,7 +5739,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE } @Override - public UserVm recordVirtualMachineToDB(CloneVMCmd cmd) throws ConcurrentOperationException, ResourceAllocationException, InsufficientCapacityException, ResourceUnavailableException { + public UserVm recordVirtualMachineToDB(CloneVMCmd cmd, long templateId) throws ConcurrentOperationException, ResourceAllocationException, InsufficientCapacityException, ResourceUnavailableException { //network configurations and check, then create the template UserVm curVm = cmd.getTargetVM(); // check if host is available @@ -5719,14 +5751,9 @@ public UserVm recordVirtualMachineToDB(CloneVMCmd cmd) throws ConcurrentOperatio String keyboard = vmProperties.get(VmDetailConstants.KEYBOARD); HypervisorType hypervisorType = curVm.getHypervisorType(); Account curAccount = _accountDao.findById(curVm.getAccountId()); - long callingUserId = CallContext.current().getCallingUserId(); - Account callerAccount = CallContext.current().getCallingAccount(); -// IpAddress ipAddress = _ipAddrMgr.assignPublicIpAddress(zoneId, curVm.getPodIdToDeployIn(), callerAccount, VlanType.DirectAttached, ) -// IpAddress ipAddress = _ipAddrMgr.allocateIp(curAccount, false, callerAccount, callingUserId, dataCenter, true, null); String ipv6Address = null; String macAddress = null; IpAddresses addr = new IpAddresses(null, ipv6Address, macAddress); -// IpAddresses addr = new IpAddresses("172.20.0.98", ipv6Address, macAddress); long serviceOfferingId = curVm.getServiceOfferingId(); ServiceOffering serviceOffering = _serviceOfferingDao.findById(curVm.getId(), serviceOfferingId); List securityGroupList = 
_securityGroupMgr.getSecurityGroupsForVm(curVm.getId()); @@ -5736,13 +5763,11 @@ public UserVm recordVirtualMachineToDB(CloneVMCmd cmd) throws ConcurrentOperatio String displayName = hostName + "-Clone"; Long diskOfferingId = curVm.getDiskOfferingId(); Long size = null; // mutual exclusive with disk offering id - HTTPMethod httpMethod = cmd.getHttpMethod(); String userData = curVm.getUserData(); String sshKeyPair = null; Map ipToNetoworkMap = null; // Since we've specified Ip boolean isDisplayVM = curVm.isDisplayVm(); boolean dynamicScalingEnabled = curVm.isDynamicallyScalable(); - Long templateId = cmd.getTemporaryTemlateId(); VirtualMachineTemplate template = _entityMgr.findById(VirtualMachineTemplate.class, templateId); if (template == null) { throw new CloudRuntimeException("the temporary template is not created, server error, contact your sys admin"); @@ -5759,22 +5784,27 @@ public UserVm recordVirtualMachineToDB(CloneVMCmd cmd) throws ConcurrentOperatio mapToLong(AffinityGroupVO::getId). boxed(). collect(Collectors.toList()); - if (dataCenter.getNetworkType() == NetworkType.Basic) { - vmResult = createBasicSecurityGroupVirtualMachine(dataCenter, serviceOffering, template, securityGroupIdList, curAccount, hostName, displayName, diskOfferingId, - size, group, hypervisorType, cmd.getHttpMethod(), userData, sshKeyPair, ipToNetoworkMap, addr, isDisplayVM, keyboard, affinityGroupIdList, - curVm.getDetails() == null ? new HashMap<>() : curVm.getDetails(), cmd.getCustomId(), new HashMap<>(), - null, new HashMap<>(), dynamicScalingEnabled); - } else { - if (dataCenter.isSecurityGroupEnabled()) { - vmResult = createAdvancedSecurityGroupVirtualMachine(dataCenter, serviceOffering, template, networkIds, securityGroupIdList, curAccount, hostName, - displayName, diskOfferingId, size, group, hypervisorType, cmd.getHttpMethod(), userData, sshKeyPair, ipToNetoworkMap, addr, isDisplayVM, keyboard, - affinityGroupIdList, curVm.getDetails() == null ? 
new HashMap<>() : curVm.getDetails(), cmd.getCustomId(), new HashMap<>(), + try { + if (dataCenter.getNetworkType() == NetworkType.Basic) { + vmResult = createBasicSecurityGroupVirtualMachine(dataCenter, serviceOffering, template, securityGroupIdList, curAccount, hostName, displayName, diskOfferingId, + size, group, hypervisorType, cmd.getHttpMethod(), userData, sshKeyPair, ipToNetoworkMap, addr, isDisplayVM, keyboard, affinityGroupIdList, + curVm.getDetails() == null ? new HashMap<>() : curVm.getDetails(), null, new HashMap<>(), null, new HashMap<>(), dynamicScalingEnabled); } else { - vmResult = createAdvancedVirtualMachine(dataCenter, serviceOffering, template, networkIds, curAccount, hostName, displayName, diskOfferingId, size, group, - hypervisorType, cmd.getHttpMethod(), userData, sshKeyPair, ipToNetoworkMap, addr, isDisplayVM, keyboard, affinityGroupIdList, curVm.getDetails() == null ? new HashMap<>() : curVm.getDetails(), - cmd.getCustomId(), new HashMap<>(), null, new HashMap<>(), dynamicScalingEnabled); + if (dataCenter.isSecurityGroupEnabled()) { + vmResult = createAdvancedSecurityGroupVirtualMachine(dataCenter, serviceOffering, template, networkIds, securityGroupIdList, curAccount, hostName, + displayName, diskOfferingId, size, group, hypervisorType, cmd.getHttpMethod(), userData, sshKeyPair, ipToNetoworkMap, addr, isDisplayVM, keyboard, + affinityGroupIdList, curVm.getDetails() == null ? new HashMap<>() : curVm.getDetails(), null, new HashMap<>(), + null, new HashMap<>(), dynamicScalingEnabled); + } else { + vmResult = createAdvancedVirtualMachine(dataCenter, serviceOffering, template, networkIds, curAccount, hostName, displayName, diskOfferingId, size, group, + hypervisorType, cmd.getHttpMethod(), userData, sshKeyPair, ipToNetoworkMap, addr, isDisplayVM, keyboard, affinityGroupIdList, curVm.getDetails() == null ? 
new HashMap<>() : curVm.getDetails(), + null, new HashMap<>(), null, new HashMap<>(), dynamicScalingEnabled); + } } + } catch (CloudRuntimeException e) { + _templateMgr.delete(curAccount.getId(), template.getId(), zoneId); + throw new CloudRuntimeException("Unable to create the VM record"); } return vmResult; } @@ -6256,8 +6286,47 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr throw new InvalidParameterValueException("Cannot migrate VM, host with id: " + srcHostId + " for VM not found"); } + DeployDestination dest = null; + if (destinationHost == null) { + dest = chooseVmMigrationDestination(vm, srcHost); + } else { + dest = checkVmMigrationDestination(vm, srcHost, destinationHost); + } - if (destinationHost.getId() == srcHostId) { + // If no suitable destination found then throw exception + if (dest == null) { + throw new CloudRuntimeException("Unable to find suitable destination to migrate VM " + vm.getInstanceName()); + } + + collectVmDiskAndNetworkStatistics(vmId, State.Running); + _itMgr.migrate(vm.getUuid(), srcHostId, dest); + return findMigratedVm(vm.getId(), vm.getType()); + } + + private DeployDestination chooseVmMigrationDestination(VMInstanceVO vm, Host srcHost) { + vm.setLastHostId(null); // Last host does not have higher priority in vm migration + final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()); + final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, offering, null, null); + final Long srcHostId = srcHost.getId(); + final Host host = _hostDao.findById(srcHostId); + final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, null, null); + ExcludeList excludes = new ExcludeList(); + excludes.addHost(srcHostId); + try { + return _planningMgr.planDeployment(profile, plan, excludes, null); + } catch (final AffinityConflictException e2) { + s_logger.warn("Unable to create deployment, 
affinity rules associted to the VM conflict", e2); + throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict"); + } catch (final InsufficientServerCapacityException e3) { + throw new CloudRuntimeException("Unable to find a server to migrate the vm to"); + } + } + + private DeployDestination checkVmMigrationDestination(VMInstanceVO vm, Host srcHost, Host destinationHost) throws VirtualMachineMigrationException { + if (destinationHost == null) { + return null; + } + if (destinationHost.getId() == srcHost.getId()) { throw new InvalidParameterValueException("Cannot migrate VM, VM is already present on this host, please specify valid destination host to migrate the VM"); } @@ -6278,7 +6347,7 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr throw new CloudRuntimeException("Cannot migrate VM, VM is DPDK enabled VM but destination host is not DPDK enabled"); } - checkHostsDedication(vm, srcHostId, destinationHost.getId()); + checkHostsDedication(vm, srcHost.getId(), destinationHost.getId()); // call to core process DataCenterVO dcVO = _dcDao.findById(destinationHost.getDataCenterId()); @@ -6297,19 +6366,14 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr + " already has max Running VMs(count includes system VMs), cannot migrate to this host"); } //check if there are any ongoing volume snapshots on the volumes associated with the VM. 
+ Long vmId = vm.getId(); s_logger.debug("Checking if there are any ongoing snapshots volumes associated with VM with ID " + vmId); if (checkStatusOfVolumeSnapshots(vmId, null)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on volume(s) attached to this VM, VM Migration is not permitted, please try again later."); } s_logger.debug("Found no ongoing snapshots on volumes associated with the vm with id " + vmId); - UserVmVO uservm = _vmDao.findById(vmId); - if (uservm != null) { - collectVmDiskStatistics(uservm); - collectVmNetworkStatistics(uservm); - } - _itMgr.migrate(vm.getUuid(), srcHostId, dest); - return findMigratedVm(vm.getId(), vm.getType()); + return dest; } private boolean isOnSupportedHypevisorForMigration(VMInstanceVO vm) { @@ -6713,7 +6777,7 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported"); } - if (VM_STORAGE_MIGRATION_SUPPORTING_HYPERVISORS.contains(vm.getHypervisorType())) { + if (!VM_STORAGE_MIGRATION_SUPPORTING_HYPERVISORS.contains(vm.getHypervisorType())) { throw new InvalidParameterValueException(String.format("Unsupported hypervisor: %s for VM migration, we support XenServer/VMware/KVM only", vm.getHypervisorType())); } @@ -7672,11 +7736,7 @@ else if (!answer.getResult()) { @Override public void prepareStop(VirtualMachineProfile profile) { - UserVmVO vm = _vmDao.findById(profile.getId()); - if (vm != null && vm.getState() == State.Stopping) { - collectVmDiskStatistics(vm); - collectVmNetworkStatistics(vm); - } + collectVmDiskAndNetworkStatistics(profile.getId(), State.Stopping); } @Override @@ -8019,4 +8079,22 @@ private Network getNetworkForOvfNetworkMapping(DataCenter zone, Account owner) t } return network; } + + private void collectVmDiskAndNetworkStatistics(Long vmId, State expectedState) { + UserVmVO uservm = _vmDao.findById(vmId); + if (uservm != null) { + 
collectVmDiskAndNetworkStatistics(uservm, expectedState); + } else { + s_logger.info(String.format("Skip collecting vm %s disk and network statistics as it is not user vm", uservm)); + } + } + + private void collectVmDiskAndNetworkStatistics(UserVm vm, State expectedState) { + if (expectedState == null || expectedState == vm.getState()) { + collectVmDiskStatistics(vm); + collectVmNetworkStatistics(vm); + } else { + s_logger.warn(String.format("Skip collecting vm %s disk and network statistics as the expected vm state is %s but actual state is %s", vm, expectedState, vm.getState())); + } + } } diff --git a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java index a956cf6727b5..e4e221daca23 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java @@ -29,6 +29,7 @@ import com.cloud.host.Host; import com.cloud.host.dao.HostDao; import com.cloud.org.Cluster; +import com.cloud.resource.ResourceState; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.Transaction; @@ -248,6 +249,13 @@ private boolean isOutOfBandManagementEnabledForHost(Long hostId) { if (hostId == null) { return false; } + + Host host = hostDao.findById(hostId); + if (host == null || host.getResourceState() == ResourceState.Degraded) { + LOG.debug(String.format("Host [id=%s, state=] was removed or placed in Degraded state by the Admin.", hostId, host.getResourceState())); + return false; + } + final OutOfBandManagement outOfBandManagementConfig = outOfBandManagementDao.findByHost(hostId); if (outOfBandManagementConfig == null || !outOfBandManagementConfig.isEnabled()) { return false; diff --git 
a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index 501ca6328020..07cf567b7e37 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -25,17 +25,6 @@ import javax.inject.Inject; -import com.cloud.agent.api.PrepareUnmanageVMInstanceAnswer; -import com.cloud.agent.api.PrepareUnmanageVMInstanceCommand; -import com.cloud.event.ActionEvent; -import com.cloud.exception.UnsupportedServiceException; -import com.cloud.storage.Snapshot; -import com.cloud.storage.SnapshotVO; -import com.cloud.storage.dao.SnapshotDao; -import com.cloud.vm.NicVO; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.snapshot.dao.VMSnapshotDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.ResponseGenerator; @@ -59,12 +48,15 @@ import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.GetUnmanagedInstancesAnswer; import com.cloud.agent.api.GetUnmanagedInstancesCommand; +import com.cloud.agent.api.PrepareUnmanageVMInstanceAnswer; +import com.cloud.agent.api.PrepareUnmanageVMInstanceCommand; import com.cloud.capacity.CapacityManager; import com.cloud.configuration.Config; import com.cloud.configuration.Resource; @@ -75,6 +67,7 @@ import com.cloud.deploy.DeployDestination; import com.cloud.deploy.DeploymentPlanner; import com.cloud.deploy.DeploymentPlanningManager; +import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import 
com.cloud.exception.InsufficientAddressCapacityException; @@ -83,6 +76,7 @@ import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.UnsupportedServiceException; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; @@ -103,6 +97,8 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.GuestOS; import com.cloud.storage.GuestOSHypervisor; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; @@ -112,6 +108,7 @@ import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.GuestOSHypervisorDao; +import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; @@ -127,7 +124,9 @@ import com.cloud.utils.net.NetUtils; import com.cloud.vm.DiskProfile; import com.cloud.vm.NicProfile; +import com.cloud.vm.NicVO; import com.cloud.vm.UserVmManager; +import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; @@ -135,7 +134,9 @@ import com.cloud.vm.VirtualMachineProfileImpl; import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; import com.google.common.base.Strings; import com.google.gson.Gson; @@ -243,6 +244,7 @@ private UnmanagedInstanceResponse createUnmanagedInstanceResponse(UnmanagedInsta } if (host != null) { response.setHostId(host.getUuid()); + response.setHostName(host.getName()); } response.setPowerState(instance.getPowerState().toString()); 
response.setCpuCores(instance.getCpuCores()); @@ -954,6 +956,10 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import VM: %s. %s", unmanagedInstance.getName(), Strings.nullToEmpty(e.getMessage()))); } + String internalCSName = unmanagedInstance.getInternalCSName(); + if(StringUtils.isEmpty(internalCSName)){ + internalCSName = instanceName; + } Map allDetails = new HashMap<>(details); if (validatedServiceOffering.isDynamic()) { allDetails.put(VmDetailConstants.CPU_NUMBER, String.valueOf(validatedServiceOffering.getCpu())); @@ -1000,7 +1006,7 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI powerState = VirtualMachine.PowerState.PowerOn; } try { - userVm = userVmManager.importVM(zone, host, template, instanceName, displayName, owner, + userVm = userVmManager.importVM(zone, host, template, internalCSName, displayName, owner, null, caller, true, null, owner.getAccountId(), userId, validatedServiceOffering, null, hostName, cluster.getHypervisorType(), allDetails, powerState); @@ -1078,6 +1084,10 @@ public ListResponse listUnmanagedInstances(ListUnmana if (cluster.getHypervisorType() != Hypervisor.HypervisorType.VMware) { throw new InvalidParameterValueException(String.format("VM ingestion is currently not supported for hypervisor: %s", cluster.getHypervisorType().toString())); } + String keyword = cmd.getKeyword(); + if (StringUtils.isNotEmpty(keyword)) { + keyword = keyword.toLowerCase(); + } List hosts = resourceManager.listHostsInClusterByStatus(clusterId, Status.Up); List additionalNameFilters = getAdditionalNameFilters(cluster); List responses = new ArrayList<>(); @@ -1097,11 +1107,15 @@ public ListResponse listUnmanagedInstances(ListUnmana continue; } GetUnmanagedInstancesAnswer unmanagedInstancesAnswer = (GetUnmanagedInstancesAnswer) answer; - HashMap unmanagedInstances = new HashMap<>(); - 
unmanagedInstances.putAll(unmanagedInstancesAnswer.getUnmanagedInstances()); + HashMap unmanagedInstances = new HashMap<>(unmanagedInstancesAnswer.getUnmanagedInstances()); Set keys = unmanagedInstances.keySet(); for (String key : keys) { - responses.add(createUnmanagedInstanceResponse(unmanagedInstances.get(key), cluster, host)); + UnmanagedInstanceTO instance = unmanagedInstances.get(key); + if (StringUtils.isNotEmpty(keyword) && + !instance.getName().toLowerCase().contains(keyword)) { + continue; + } + responses.add(createUnmanagedInstanceResponse(instance, cluster, host)); } } ListResponse listResponses = new ListResponse<>(); diff --git a/server/src/test/java/com/cloud/api/query/dao/TemplateJoinDaoImplTest.java b/server/src/test/java/com/cloud/api/query/dao/TemplateJoinDaoImplTest.java index d4cbf910c247..fc4a86f85826 100755 --- a/server/src/test/java/com/cloud/api/query/dao/TemplateJoinDaoImplTest.java +++ b/server/src/test/java/com/cloud/api/query/dao/TemplateJoinDaoImplTest.java @@ -16,9 +16,9 @@ // under the License. 
package com.cloud.api.query.dao; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.storage.Storage; -import com.cloud.user.Account; +import java.util.Date; +import java.util.Map; + import org.apache.cloudstack.api.response.TemplateResponse; import org.junit.Assert; import org.junit.Before; @@ -27,13 +27,14 @@ import org.mockito.InjectMocks; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; +import org.springframework.test.util.ReflectionTestUtils; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.vo.TemplateJoinVO; -import org.springframework.test.util.ReflectionTestUtils; - -import java.util.Date; -import java.util.Map; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.Storage; +import com.cloud.template.TemplateManager; +import com.cloud.user.Account; @RunWith(PowerMockRunner.class) @PrepareForTest(ApiDBUtils.class) @@ -47,7 +48,7 @@ public class TemplateJoinDaoImplTest extends GenericDaoBaseWithTagInformationBas //TemplateJoinVO fields private String uuid = "1234567890abc"; - private String name = "xs-tools.iso"; + private String name = TemplateManager.XS_TOOLS_ISO; private String displayText = "xen-pv-drv-iso"; private boolean publicTemplate = true; private Date created = new Date(); diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index 3d18d1a99c00..f8155f177585 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -991,15 +991,15 @@ public void shouldUpdateBytesRateParametersTests(){ @Test public void updateDiskOfferingTagsIfIsNotNullTestWhenTagsIsNull(){ - Mockito.doNothing().when(configurationMgr).updateDiskOfferingTagsIfIsNotNull(null, diskOfferingVOMock); - this.configurationMgr.updateDiskOfferingTagsIfIsNotNull(null, 
diskOfferingVOMock); - Mockito.verify(configurationMgr, Mockito.times(1)).updateDiskOfferingTagsIfIsNotNull(null, diskOfferingVOMock); + Mockito.doNothing().when(configurationMgr).updateOfferingTagsIfIsNotNull(null, diskOfferingVOMock); + this.configurationMgr.updateOfferingTagsIfIsNotNull(null, diskOfferingVOMock); + Mockito.verify(configurationMgr, Mockito.times(1)).updateOfferingTagsIfIsNotNull(null, diskOfferingVOMock); } @Test public void updateDiskOfferingTagsIfIsNotNullTestWhenTagsIsNotNull(){ String tags = "tags"; - Mockito.doNothing().when(configurationMgr).updateDiskOfferingTagsIfIsNotNull(tags, diskOfferingVOMock); - this.configurationMgr.updateDiskOfferingTagsIfIsNotNull(tags, diskOfferingVOMock); - Mockito.verify(configurationMgr, Mockito.times(1)).updateDiskOfferingTagsIfIsNotNull(tags, diskOfferingVOMock); + Mockito.doNothing().when(configurationMgr).updateOfferingTagsIfIsNotNull(tags, diskOfferingVOMock); + this.configurationMgr.updateOfferingTagsIfIsNotNull(tags, diskOfferingVOMock); + Mockito.verify(configurationMgr, Mockito.times(1)).updateOfferingTagsIfIsNotNull(tags, diskOfferingVOMock); } } diff --git a/server/src/test/java/com/cloud/consoleproxy/ConsoleProxyManagerTest.java b/server/src/test/java/com/cloud/consoleproxy/ConsoleProxyManagerTest.java index 76a3f42766d8..5dc7df807d21 100644 --- a/server/src/test/java/com/cloud/consoleproxy/ConsoleProxyManagerTest.java +++ b/server/src/test/java/com/cloud/consoleproxy/ConsoleProxyManagerTest.java @@ -39,38 +39,42 @@ import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.info.ConsoleProxyStatus; import com.cloud.network.Networks.TrafficType; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.ConsoleProxyVO; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import 
com.google.gson.JsonParseException; public class ConsoleProxyManagerTest { private static final Logger s_logger = Logger.getLogger(ConsoleProxyManagerTest.class); @Mock - GlobalLock globalLock; + GlobalLock globalLockMock; @Mock - ConsoleProxyVO proxyVO; + ConsoleProxyVO consoleProxyVOMock; @Mock - DataCenterDao _dcDao; + DataCenterDao dataCenterDaoMock; @Mock - NetworkDao _networkDao; + NetworkDao networkDaoMock; @Mock - ConsoleProxyManagerImpl cpvmManager; + ConsoleProxyManagerImpl consoleProxyManagerImplMock; @Before public void setup() throws Exception { MockitoAnnotations.initMocks(this); - ReflectionTestUtils.setField(cpvmManager, "_allocProxyLock", globalLock); - ReflectionTestUtils.setField(cpvmManager, "_dcDao", _dcDao); - ReflectionTestUtils.setField(cpvmManager, "_networkDao", _networkDao); - Mockito.doCallRealMethod().when(cpvmManager).expandPool(Mockito.anyLong(), Mockito.anyObject()); - Mockito.doCallRealMethod().when(cpvmManager).getDefaultNetworkForCreation(Mockito.any(DataCenter.class)); - Mockito.doCallRealMethod().when(cpvmManager).getDefaultNetworkForAdvancedZone(Mockito.any(DataCenter.class)); - Mockito.doCallRealMethod().when(cpvmManager).getDefaultNetworkForBasicZone(Mockito.any(DataCenter.class)); + ReflectionTestUtils.setField(consoleProxyManagerImplMock, "allocProxyLock", globalLockMock); + ReflectionTestUtils.setField(consoleProxyManagerImplMock, "dataCenterDao", dataCenterDaoMock); + ReflectionTestUtils.setField(consoleProxyManagerImplMock, "networkDao", networkDaoMock); + Mockito.doCallRealMethod().when(consoleProxyManagerImplMock).expandPool(Mockito.anyLong(), Mockito.anyObject()); + Mockito.doCallRealMethod().when(consoleProxyManagerImplMock).getDefaultNetworkForCreation(Mockito.any(DataCenter.class)); + Mockito.doCallRealMethod().when(consoleProxyManagerImplMock).getDefaultNetworkForAdvancedZone(Mockito.any(DataCenter.class)); + 
Mockito.doCallRealMethod().when(consoleProxyManagerImplMock).getDefaultNetworkForBasicZone(Mockito.any(DataCenter.class)); } @Test @@ -78,15 +82,15 @@ public void testNewCPVMCreation() throws Exception { s_logger.info("Running test for new CPVM creation"); // No existing CPVM - Mockito.when(cpvmManager.assignProxyFromStoppedPool(Mockito.anyLong())).thenReturn(null); + Mockito.when(consoleProxyManagerImplMock.assignProxyFromStoppedPool(Mockito.anyLong())).thenReturn(null); // Allocate a new one - Mockito.when(globalLock.lock(Mockito.anyInt())).thenReturn(true); - Mockito.when(globalLock.unlock()).thenReturn(true); - Mockito.when(cpvmManager.startNew(Mockito.anyLong())).thenReturn(proxyVO); + Mockito.when(globalLockMock.lock(Mockito.anyInt())).thenReturn(true); + Mockito.when(globalLockMock.unlock()).thenReturn(true); + Mockito.when(consoleProxyManagerImplMock.startNew(Mockito.anyLong())).thenReturn(consoleProxyVOMock); // Start CPVM - Mockito.when(cpvmManager.startProxy(Mockito.anyLong(), Mockito.anyBoolean())).thenReturn(proxyVO); + Mockito.when(consoleProxyManagerImplMock.startProxy(Mockito.anyLong(), Mockito.anyBoolean())).thenReturn(consoleProxyVOMock); - cpvmManager.expandPool(new Long(1), new Object()); + consoleProxyManagerImplMock.expandPool(new Long(1), new Object()); } @Test @@ -94,11 +98,11 @@ public void testExistingCPVMStart() throws Exception { s_logger.info("Running test for existing CPVM start"); // CPVM already exists - Mockito.when(cpvmManager.assignProxyFromStoppedPool(Mockito.anyLong())).thenReturn(proxyVO); + Mockito.when(consoleProxyManagerImplMock.assignProxyFromStoppedPool(Mockito.anyLong())).thenReturn(consoleProxyVOMock); // Start CPVM - Mockito.when(cpvmManager.startProxy(Mockito.anyLong(), Mockito.anyBoolean())).thenReturn(proxyVO); + Mockito.when(consoleProxyManagerImplMock.startProxy(Mockito.anyLong(), Mockito.anyBoolean())).thenReturn(consoleProxyVOMock); - cpvmManager.expandPool(new Long(1), new Object()); + 
consoleProxyManagerImplMock.expandPool(new Long(1), new Object()); } @Test @@ -106,13 +110,13 @@ public void testExisingCPVMStartFailure() throws Exception { s_logger.info("Running test for existing CPVM start failure"); // CPVM already exists - Mockito.when(cpvmManager.assignProxyFromStoppedPool(Mockito.anyLong())).thenReturn(proxyVO); + Mockito.when(consoleProxyManagerImplMock.assignProxyFromStoppedPool(Mockito.anyLong())).thenReturn(consoleProxyVOMock); // Start CPVM - Mockito.when(cpvmManager.startProxy(Mockito.anyLong(), Mockito.anyBoolean())).thenReturn(null); + Mockito.when(consoleProxyManagerImplMock.startProxy(Mockito.anyLong(), Mockito.anyBoolean())).thenReturn(null); // Destroy existing CPVM, so that a new one is created subsequently - Mockito.when(cpvmManager.destroyProxy(Mockito.anyLong())).thenReturn(true); + Mockito.when(consoleProxyManagerImplMock.destroyProxy(Mockito.anyLong())).thenReturn(true); - cpvmManager.expandPool(new Long(1), new Object()); + consoleProxyManagerImplMock.expandPool(new Long(1), new Object()); } @Test @@ -121,20 +125,20 @@ public void getDefaultNetworkForAdvancedNonSG() { when(dc.getNetworkType()).thenReturn(NetworkType.Advanced); when(dc.isSecurityGroupEnabled()).thenReturn(false); - when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); + when(dataCenterDaoMock.findById(Mockito.anyLong())).thenReturn(dc); NetworkVO network = Mockito.mock(NetworkVO.class); NetworkVO badNetwork = Mockito.mock(NetworkVO.class); - when(_networkDao.listByZoneAndTrafficType(anyLong(), eq(TrafficType.Public))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), eq(TrafficType.Public))) .thenReturn(Collections.singletonList(network)); - when(_networkDao.listByZoneAndTrafficType(anyLong(), not(eq(TrafficType.Public)))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), not(eq(TrafficType.Public)))) .thenReturn(Collections.singletonList(badNetwork)); - when(_networkDao.listByZoneSecurityGroup(anyLong())) + 
when(networkDaoMock.listByZoneSecurityGroup(anyLong())) .thenReturn(Collections.singletonList(badNetwork)); - NetworkVO returnedNetwork = cpvmManager.getDefaultNetworkForAdvancedZone(dc); + NetworkVO returnedNetwork = consoleProxyManagerImplMock.getDefaultNetworkForAdvancedZone(dc); Assert.assertNotNull(returnedNetwork); Assert.assertEquals(network, returnedNetwork); @@ -147,17 +151,17 @@ public void getDefaultNetworkForAdvancedSG() { when(dc.getNetworkType()).thenReturn(NetworkType.Advanced); when(dc.isSecurityGroupEnabled()).thenReturn(true); - when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); + when(dataCenterDaoMock.findById(Mockito.anyLong())).thenReturn(dc); NetworkVO network = Mockito.mock(NetworkVO.class); NetworkVO badNetwork = Mockito.mock(NetworkVO.class); - when(_networkDao.listByZoneAndTrafficType(anyLong(), any(TrafficType.class))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), any(TrafficType.class))) .thenReturn(Collections.singletonList(badNetwork)); - when(_networkDao.listByZoneSecurityGroup(anyLong())) + when(networkDaoMock.listByZoneSecurityGroup(anyLong())) .thenReturn(Collections.singletonList(network)); - NetworkVO returnedNetwork = cpvmManager.getDefaultNetworkForAdvancedZone(dc); + NetworkVO returnedNetwork = consoleProxyManagerImplMock.getDefaultNetworkForAdvancedZone(dc); Assert.assertEquals(network, returnedNetwork); Assert.assertNotEquals(badNetwork, returnedNetwork); @@ -169,17 +173,17 @@ public void getDefaultNetworkForBasicNonSG() { when(dc.getNetworkType()).thenReturn(NetworkType.Basic); when(dc.isSecurityGroupEnabled()).thenReturn(false); - when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); + when(dataCenterDaoMock.findById(Mockito.anyLong())).thenReturn(dc); NetworkVO network = Mockito.mock(NetworkVO.class); NetworkVO badNetwork = Mockito.mock(NetworkVO.class); - when(_networkDao.listByZoneAndTrafficType(anyLong(), eq(TrafficType.Guest))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), 
eq(TrafficType.Guest))) .thenReturn(Collections.singletonList(network)); - when(_networkDao.listByZoneAndTrafficType(anyLong(), not(eq(TrafficType.Guest)))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), not(eq(TrafficType.Guest)))) .thenReturn(Collections.singletonList(badNetwork)); - NetworkVO returnedNetwork = cpvmManager.getDefaultNetworkForBasicZone(dc); + NetworkVO returnedNetwork = consoleProxyManagerImplMock.getDefaultNetworkForBasicZone(dc); Assert.assertNotNull(returnedNetwork); Assert.assertEquals(network, returnedNetwork); Assert.assertNotEquals(badNetwork, returnedNetwork); @@ -191,17 +195,17 @@ public void getDefaultNetworkForBasicSG() { when(dc.getNetworkType()).thenReturn(NetworkType.Basic); when(dc.isSecurityGroupEnabled()).thenReturn(true); - when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); + when(dataCenterDaoMock.findById(Mockito.anyLong())).thenReturn(dc); NetworkVO network = Mockito.mock(NetworkVO.class); NetworkVO badNetwork = Mockito.mock(NetworkVO.class); - when(_networkDao.listByZoneAndTrafficType(anyLong(), eq(TrafficType.Guest))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), eq(TrafficType.Guest))) .thenReturn(Collections.singletonList(network)); - when(_networkDao.listByZoneAndTrafficType(anyLong(), not(eq(TrafficType.Guest)))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), not(eq(TrafficType.Guest)))) .thenReturn(Collections.singletonList(badNetwork)); - NetworkVO returnedNetwork = cpvmManager.getDefaultNetworkForBasicZone(dc); + NetworkVO returnedNetwork = consoleProxyManagerImplMock.getDefaultNetworkForBasicZone(dc); Assert.assertNotNull(returnedNetwork); Assert.assertEquals(network, returnedNetwork); @@ -215,17 +219,17 @@ public void getDefaultNetworkForBasicSGWrongZoneType() { when(dc.getNetworkType()).thenReturn(NetworkType.Advanced); when(dc.isSecurityGroupEnabled()).thenReturn(true); - when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); + 
when(dataCenterDaoMock.findById(Mockito.anyLong())).thenReturn(dc); NetworkVO network = Mockito.mock(NetworkVO.class); NetworkVO badNetwork = Mockito.mock(NetworkVO.class); - when(_networkDao.listByZoneAndTrafficType(anyLong(), eq(TrafficType.Guest))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), eq(TrafficType.Guest))) .thenReturn(Collections.singletonList(network)); - when(_networkDao.listByZoneAndTrafficType(anyLong(), not(eq(TrafficType.Guest)))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), not(eq(TrafficType.Guest)))) .thenReturn(Collections.singletonList(badNetwork)); - cpvmManager.getDefaultNetworkForBasicZone(dc); + consoleProxyManagerImplMock.getDefaultNetworkForBasicZone(dc); } @Test(expected=CloudRuntimeException.class) @@ -234,16 +238,41 @@ public void getDefaultNetworkForAdvancedWrongZoneType() { when(dc.getNetworkType()).thenReturn(NetworkType.Basic); when(dc.isSecurityGroupEnabled()).thenReturn(true); - when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); + when(dataCenterDaoMock.findById(Mockito.anyLong())).thenReturn(dc); NetworkVO network = Mockito.mock(NetworkVO.class); NetworkVO badNetwork = Mockito.mock(NetworkVO.class); - when(_networkDao.listByZoneAndTrafficType(anyLong(), any(TrafficType.class))) + when(networkDaoMock.listByZoneAndTrafficType(anyLong(), any(TrafficType.class))) .thenReturn(Collections.singletonList(badNetwork)); - when(_networkDao.listByZoneSecurityGroup(anyLong())) + when(networkDaoMock.listByZoneSecurityGroup(anyLong())) .thenReturn(Collections.singletonList(network)); - cpvmManager.getDefaultNetworkForAdvancedZone(dc); + consoleProxyManagerImplMock.getDefaultNetworkForAdvancedZone(dc); + } + + @Test + public void validateParseJsonToConsoleProxyStatusWithValidParamMustReturnValue() { + ConsoleProxyStatus expectedResult = new ConsoleProxyStatus(); + + GsonBuilder gb = new GsonBuilder(); + gb.setVersion(1.3); + Gson gson = gb.create(); + + ConsoleProxyStatus result = new 
ConsoleProxyManagerImpl().parseJsonToConsoleProxyStatus(gson.toJson(expectedResult)); + + Assert.assertArrayEquals(expectedResult.getConnections(), result.getConnections()); + } + + @Test (expected = JsonParseException.class) + public void validateParseJsonToConsoleProxyStatusWithInvalidParamMustThrowJsonParseException() { + new ConsoleProxyManagerImpl().parseJsonToConsoleProxyStatus("Invalid format to throw exception"); + } + + @Test + public void validateParseJsonToConsoleProxyStatusWithNullParamMustReturnNull() { + ConsoleProxyStatus expectedResult = null; + ConsoleProxyStatus result = new ConsoleProxyManagerImpl().parseJsonToConsoleProxyStatus(null); + Assert.assertEquals(expectedResult, result); } } diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index 4e1daa87c346..4d5b5ba584bf 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -33,6 +33,9 @@ import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; +import org.apache.cloudstack.api.command.admin.host.CancelHostAsDegradedCmd; +import org.apache.cloudstack.api.command.admin.host.DeclareHostAsDegradedCmd; + import org.apache.cloudstack.framework.config.ConfigKey; import com.cloud.agent.api.StartupCommand; @@ -142,6 +145,16 @@ public Host maintain(final PrepareForMaintenanceCmd cmd) { return null; } + @Override + public Host declareHostAsDegraded(DeclareHostAsDegradedCmd cmd) { + return null; + } + + @Override + public Host cancelHostAsDegraded(final CancelHostAsDegradedCmd cmd) { + return null; + } + @Override public boolean updateClusterPassword(final UpdateHostPasswordCmd upasscmd) { // TODO Auto-generated method stub diff --git 
a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java index 6faa83bc9107..abc03ad3d702 100644 --- a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java +++ b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java @@ -35,7 +35,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.UUID; +import com.cloud.exception.InvalidParameterValueException; +import org.apache.cloudstack.api.command.admin.host.CancelHostAsDegradedCmd; +import org.apache.cloudstack.api.command.admin.host.DeclareHostAsDegradedCmd; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.junit.Assert; import org.junit.Before; @@ -46,6 +50,7 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; import org.mockito.Spy; +import org.mockito.Mockito; import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; @@ -427,4 +432,95 @@ private void verifyNoChangeInMaintenance() throws NoTransitionException { verify(resourceManager, never()).resourceStateTransitTo(anyObject(), any(), anyLong()); Assert.assertFalse(enterMaintenanceMode); } + + @Test + public void declareHostAsDegradedTestDisconnected() throws NoTransitionException { + prepareAndTestDeclareHostAsDegraded(Status.Disconnected, ResourceState.Enabled, ResourceState.Degraded); + } + + @Test + public void declareHostAsDegradedTestAlert() throws NoTransitionException { + prepareAndTestDeclareHostAsDegraded(Status.Alert, ResourceState.Enabled, ResourceState.Degraded); + } + + @Test(expected = InvalidParameterValueException.class) + public void declareHostAsDegradedExpectNoTransitionException() throws NoTransitionException { + Status[] statusArray = Status.values(); + for (int i = 0; i < statusArray.length - 1; i++) { + if (statusArray[i] != Status.Alert 
&& statusArray[i] != Status.Disconnected) { + prepareAndTestDeclareHostAsDegraded(statusArray[i], ResourceState.Enabled, ResourceState.Enabled); + } + } + } + + @Test(expected = NoTransitionException.class) + public void declareHostAsDegradedTestAlreadyDegraded() throws NoTransitionException { + prepareAndTestDeclareHostAsDegraded(Status.Alert, ResourceState.Degraded, ResourceState.Degraded); + } + + @Test(expected = NoTransitionException.class) + public void declareHostAsDegradedTestOnError() throws NoTransitionException { + prepareAndTestDeclareHostAsDegraded(Status.Alert, ResourceState.Error, ResourceState.Degraded); + } + + @Test(expected = NoTransitionException.class) + public void declareHostAsDegradedTestOnCreating() throws NoTransitionException { + prepareAndTestDeclareHostAsDegraded(Status.Alert, ResourceState.Creating, ResourceState.Degraded); + } + + @Test(expected = NoTransitionException.class) + public void declareHostAsDegradedTestOnErrorInMaintenance() throws NoTransitionException { + prepareAndTestDeclareHostAsDegraded(Status.Alert, ResourceState.ErrorInPrepareForMaintenance, ResourceState.Degraded); + } + + @Test + public void declareHostAsDegradedTestSupportedStates() throws NoTransitionException { + ResourceState[] states = ResourceState.values(); + for (int i = 0; i < states.length - 1; i++) { + if (states[i] == ResourceState.Enabled + || states[i] == ResourceState.Maintenance + || states[i] == ResourceState.Disabled) { + prepareAndTestDeclareHostAsDegraded(Status.Alert, states[i], ResourceState.Degraded); + } + } + } + + private void prepareAndTestDeclareHostAsDegraded(Status hostStatus, ResourceState originalState, ResourceState expectedResourceState) throws NoTransitionException { + DeclareHostAsDegradedCmd declareHostAsDegradedCmd = Mockito.spy(new DeclareHostAsDegradedCmd()); + HostVO hostVo = createDummyHost(hostStatus); + hostVo.setResourceState(originalState); + when(declareHostAsDegradedCmd.getId()).thenReturn(0l); + 
when(hostDao.findById(0l)).thenReturn(hostVo); + + Host result = resourceManager.declareHostAsDegraded(declareHostAsDegradedCmd); + + Assert.assertEquals(expectedResourceState, hostVo.getResourceState()); + } + + @Test + public void cancelHostAsDegradedTest() throws NoTransitionException { + prepareAndTestCancelHostAsDegraded(Status.Alert, ResourceState.Degraded, ResourceState.Enabled); + } + + @Test(expected = NoTransitionException.class) + public void cancelHostAsDegradedTestHostNotDegraded() throws NoTransitionException { + prepareAndTestCancelHostAsDegraded(Status.Alert, ResourceState.Enabled, ResourceState.Enabled); + } + + private void prepareAndTestCancelHostAsDegraded(Status hostStatus, ResourceState originalState, ResourceState expectedResourceState) throws NoTransitionException { + CancelHostAsDegradedCmd cancelHostAsDegradedCmd = Mockito.spy(new CancelHostAsDegradedCmd()); + HostVO hostVo = createDummyHost(hostStatus); + hostVo.setResourceState(originalState); + when(cancelHostAsDegradedCmd.getId()).thenReturn(0l); + when(hostDao.findById(0l)).thenReturn(hostVo); + + Host result = resourceManager.cancelHostAsDegraded(cancelHostAsDegradedCmd); + + Assert.assertEquals(expectedResourceState, hostVo.getResourceState()); + } + + private HostVO createDummyHost(Status hostStatus) { + return new HostVO(1L, "host01", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, UUID.randomUUID().toString(), + hostStatus, "1.0", null, null, 1L, null, 0, 0, null, 0, null); + } } diff --git a/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java b/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java index ea6287d2bcc1..7916007c4065 100644 --- a/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java +++ b/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java @@ -425,6 +425,11 @@ public Map getKeys(GetUserKeysCmd cmd) { return null; } + @Override + public Map getKeys(Long userId) 
{ + return null; + } + @Override public void checkAccess(User user, ControlledEntity entity) throws PermissionDeniedException { diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index ae647f349df7..038768c5b9c1 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -32,13 +32,20 @@ import java.util.List; import java.util.Map; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; +import org.apache.cloudstack.api.command.user.vm.CloneVMCmd; import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; import org.apache.cloudstack.context.CallContext; @@ -57,10 +64,6 @@ import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.exception.InsufficientAddressCapacityException; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.NetworkModel; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; @@ -145,6 +148,9 @@ public class UserVmManagerImplTest { @Mock private VMTemplateDao templateDao; + @Mock + private CloneVMCmd 
cloneVMCommand; + private long vmId = 1l; private static final long GiB_TO_BYTES = 1024 * 1024 * 1024; @@ -571,4 +577,18 @@ public void validateRemoveEncryptedPasswordFromUserVmVoDetails(){ Mockito.verify(userVmVoMock, Mockito.times(1)).setDetails(detailsMock); Mockito.verify(userVmDao, Mockito.times(1)).saveDetails(userVmVoMock); } + + @Test + public void validateCloneCondition() { + Mockito.when(cloneVMCommand.getTargetVM()).thenReturn(null); + Mockito.when(cloneVMCommand.getAccountName()).thenReturn(null); + Mockito.when(cloneVMCommand.getDomainId()).thenReturn(null); + Exception err = null; + try { + userVmManagerImpl.validateCloneCondition(cloneVMCommand); + } catch (CloudRuntimeException | ResourceUnavailableException | ResourceAllocationException e) { + err = e; + } + assertTrue(err instanceof CloudRuntimeException); + } } diff --git a/systemvm/agent/noVNC/app/images/shift.png b/systemvm/agent/noVNC/app/images/shift.png new file mode 100644 index 000000000000..b79d6973c8b7 Binary files /dev/null and b/systemvm/agent/noVNC/app/images/shift.png differ diff --git a/systemvm/agent/noVNC/app/ui.js b/systemvm/agent/noVNC/app/ui.js index 9158c33f317a..1c6a00799c3f 100644 --- a/systemvm/agent/noVNC/app/ui.js +++ b/systemvm/agent/noVNC/app/ui.js @@ -282,6 +282,8 @@ const UI = { .addEventListener('click', UI.toggleExtraKeys); document.getElementById("noVNC_toggle_ctrl_button") .addEventListener('click', UI.toggleCtrl); + document.getElementById("noVNC_toggle_shift_button") + .addEventListener('click', UI.toggleShift); document.getElementById("noVNC_toggle_windows_button") .addEventListener('click', UI.toggleWindows); document.getElementById("noVNC_toggle_alt_button") @@ -1551,6 +1553,17 @@ const UI = { } }, + toggleShift() { + const btn = document.getElementById('noVNC_toggle_shift_button'); + if (btn.classList.contains("noVNC_selected")) { + UI.sendKey(KeyTable.XK_Shift_L, "ShiftLeft", false); + btn.classList.remove("noVNC_selected"); + } else { + 
UI.sendKey(KeyTable.XK_Shift_L, "ShiftLeft", true); + btn.classList.add("noVNC_selected"); + } + }, + toggleWindows() { const btn = document.getElementById('noVNC_toggle_windows_button'); if (btn.classList.contains("noVNC_selected")) { diff --git a/systemvm/agent/noVNC/vnc.html b/systemvm/agent/noVNC/vnc.html index 04c00291abbb..6f1b7998fe44 100644 --- a/systemvm/agent/noVNC/vnc.html +++ b/systemvm/agent/noVNC/vnc.html @@ -107,6 +107,9 @@

no
+ diff --git a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py index 23622fdbf5d0..e0a7bfdf6329 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py @@ -279,8 +279,7 @@ def set_backup(self): CsHelper.execute(cmd2) dev = interface.get_device() - cmd = "%s -C %s" % (self.CONNTRACKD_BIN, self.CONNTRACKD_CONF) - CsHelper.execute("%s -d" % cmd) + CsHelper.service("conntrackd", "restart") CsHelper.service("ipsec", "stop") CsHelper.service("xl2tpd", "stop") diff --git a/test/integration/component/test_rootvolume_resize.py b/test/integration/component/test_rootvolume_resize.py index f5bd47d11465..06b827862302 100644 --- a/test/integration/component/test_rootvolume_resize.py +++ b/test/integration/component/test_rootvolume_resize.py @@ -228,16 +228,14 @@ def chk_volume_resize(self, apiclient, vm): listall='True' ) rootvolume = list_volume_response[0] - if vm.state == "Running" and \ - (vm.hypervisor.lower() == "xenserver" or \ - vm.hypervisor.lower() == "vmware"): + if vm.state == "Running" and vm.hypervisor.lower() == "xenserver": self.virtual_machine.stop(apiclient) time.sleep(self.services["sleep"]) - if vm.hypervisor.lower() == "vmware": - rootdiskcontroller = self.getDiskController(vm) - if rootdiskcontroller!="scsi": - raise Exception("root volume resize only supported on scsi disk ," - "please check rootdiskcontroller type") + if vm.hypervisor.lower() == "vmware": + rootdiskcontroller = self.getDiskController(vm) + if rootdiskcontroller!="scsi": + raise Exception("root volume resize only supported on scsi disk ," + "please check rootdiskcontroller type") rootvolobj = Volume(rootvolume.__dict__) newsize = (rootvolume.size >> 30) + 2 @@ -245,8 +243,7 @@ def chk_volume_resize(self, apiclient, vm): if rootvolume is not None: try: rootvolobj.resize(apiclient, size=newsize) - if vm.hypervisor.lower() == "xenserver" or \ - vm.hypervisor.lower() == "vmware": + if 
vm.hypervisor.lower() == "xenserver": self.virtual_machine.start(apiclient) time.sleep(self.services["sleep"]) ssh = SshClient(self.virtual_machine.ssh_ip, 22, @@ -916,9 +913,7 @@ def test_6_resized_rootvolume_with_lessvalue(self): ) res = validateList(list_volume_response) self.assertNotEqual(res[2], INVALID_INPUT, "listVolumes returned invalid object in response") - if vm.state == "Running" and ( - vm.hypervisor.lower() == "xenserver" or - vm.hypervisor.lower() == "vmware"): + if vm.state == "Running" and vm.hypervisor.lower() == "xenserver": self.virtual_machine.stop(self.apiclient) time.sleep(self.services["sleep"]) @@ -998,9 +993,7 @@ def test_7_usage_events_after_rootvolume_resized_(self): ) res = validateList(list_volume_response) self.assertNotEqual(res[2], INVALID_INPUT, "listVolumes returned invalid object in response") - if vm.state == "Running" and ( - vm.hypervisor.lower() == "xenserver" or - vm.hypervisor.lower() == "vmware"): + if vm.state == "Running" and vm.hypervisor.lower() == "xenserver": self.virtual_machine.stop(self.apiclient) time.sleep(self.services["sleep"]) rootvolume = list_volume_response[0] @@ -1115,9 +1108,7 @@ def test_08_increase_volume_size_within_account_limit(self): ) res = validateList(list_volume_response) self.assertNotEqual(res[2], INVALID_INPUT, "listVolumes returned invalid object in response") - if vm.state == "Running" and \ - (vm.hypervisor.lower() == "xenserver" or - vm.hypervisor.lower() == "vmware"): + if vm.state == "Running" and vm.hypervisor.lower() == "xenserver": self.virtual_machine.stop(self.apiclient) time.sleep(self.services["sleep"]) rootvolume = list_volume_response[0] diff --git a/test/integration/smoke/test_disk_offerings.py b/test/integration/smoke/test_disk_offerings.py index d0d3433e96c1..660dd30024d7 100644 --- a/test/integration/smoke/test_disk_offerings.py +++ b/test/integration/smoke/test_disk_offerings.py @@ -19,7 +19,6 @@ #Import Local Modules import marvin from marvin.cloudstackTestCase import * 
-from marvin.cloudstackAPI import * from marvin.lib.utils import * from marvin.lib.base import * from marvin.lib.common import * @@ -134,7 +133,7 @@ def test_02_create_sparse_type_disk_offering(self): @attr(hypervisor="kvm") @attr(tags = ["advanced", "basic", "eip", "sg", "advancedns", "simulator", "smoke"]) def test_04_create_fat_type_disk_offering(self): - """Test to create a sparse type disk offering""" + """Test to create a sparse type disk offering""" # Validate the following: # 1. createDiskOfferings should return valid info for new offering diff --git a/test/integration/smoke/test_disk_provisioning_types.py b/test/integration/smoke/test_disk_provisioning_types.py new file mode 100644 index 000000000000..c87b2e4969cd --- /dev/null +++ b/test/integration/smoke/test_disk_provisioning_types.py @@ -0,0 +1,149 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from marvin.cloudstackTestCase import cloudstackTestCase, unittest +from marvin.lib.utils import cleanup_resources +from marvin.lib.base import DiskOffering, Iso, Account, VirtualMachine, ServiceOffering, Volume +from marvin.codes import FAILED +from marvin.lib.common import list_disk_offering, get_zone, get_suitable_test_template, get_domain +from marvin.cloudstackAPI import listStoragePools, updateStorageCapabilities +from nose.plugins.attrib import attr + + +class TestDiskProvisioningTypes(cloudstackTestCase): + + def setUp(self): + + if self.testClient.getHypervisorInfo().lower() != "vmware": + raise unittest.SkipTest("VMWare tests only valid on VMWare hypervisor") + + self.services = self.testClient.getParsedTestDataConfig() + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) + self.domain = get_domain(self.apiclient) + self.services['mode'] = self.zone.networktype + self.hypervisor = self.hypervisor = self.testClient.getHypervisorInfo() + + template = get_suitable_test_template( + self.apiclient, + self.zone.id, + self.services["ostype"], + self.hypervisor + ) + + if template == FAILED: + assert False, "get_suitable_test_template() failed to return template with description %s" % self.services["ostype"] + + self.account = Account.create( + self.apiclient, + self.services["account"], + domainid=self.domain.id + ) + + self.services["small"]["zoneid"] = self.zone.id + self.services["small"]["template"] = template.id + + self.services["iso1"]["zoneid"] = self.zone.id + + iso = Iso.create( + self.apiclient, + self.services["iso1"], + account=self.account.name, + domainid=self.account.domainid + ) + + self.cleanup = [ + self.account + ] + + + def tearDown(self): + cleanup_resources(self.apiclient, self.cleanup) + + @attr(tags=["advanced", "basic", "eip", "sg", "advancedns", "smoke"], required_hardware="false") + def 
test_01_vm_with_thin_disk_offering(self): + self.runner("thin") + + @attr(tags=["advanced", "basic", "eip", "sg", "advancedns", "smoke"], required_hardware="false") + def test_02_vm_with_fat_disk_offering(self): + self.runner("fat") + + @attr(tags=["advanced", "basic", "eip", "sg", "advancedns", "smoke"], required_hardware="false") + def test_03_vm_with_sparse_disk_offering(self): + self.runner("sparse") + + @attr(tags=["advanced", "basic", "eip", "sg", "advancedns", "smoke"], required_hardware="false") + def test_05_update_cmd(self): + cmd = listStoragePools.listStoragePoolsCmd() + storagePools = self.apiclient.listStoragePools(cmd) + + for pool in storagePools: + if pool.type == 'NetworkFilesystem': + cmd = updateStorageCapabilities.updateStorageCapabilitiesCmd() + cmd.id = pool.id + response = self.apiclient.updateStorageCapabilities(cmd) + acceleration = getattr(response[0].storagecapabilities, "HARDWARE_ACCELERATION") + self.assertNotEqual( + acceleration, + None, + "Check Updated storage pool capabilities" + ) + + def runner(self, provisioning_type): + self.services["disk_offering"]['provisioningtype'] = provisioning_type + self.services["small"]['size'] = "1" + disk_offering = DiskOffering.create( + self.apiclient, + self.services["disk_offering"], + custom=True, + ) + self.cleanup.append(disk_offering) + + self.debug("Created Disk offering with ID: %s" % disk_offering.id) + + self.services["service_offerings"]["small"]["provisioningtype"] = provisioning_type + small_offering = ServiceOffering.create( + self.apiclient, + self.services["service_offerings"]["small"] + ) + + self.cleanup.append(small_offering) + + self.debug("Created service offering with ID: %s" % small_offering.id) + + virtual_machine = VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=small_offering.id, + diskofferingid=disk_offering.id, + mode=self.services["mode"] + ) + + 
self.debug("Created virtual machine with ID: %s" % virtual_machine.id) + + volumes = Volume.list(self.apiclient, virtualMachineId=virtual_machine.id, listAll='true') + + for volume in volumes: + if volume["type"] == "DATADISK": + VirtualMachine.detach_volume(virtual_machine, self.apiclient, volume) + currentVolume = Volume({}) + currentVolume.id = volume.id + Volume.resize(currentVolume, self.apiclient, size='2') + VirtualMachine.attach_volume(virtual_machine, self.apiclient, volume) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index d78f3a0afb3b..d4be6b8d3c67 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -75,8 +75,12 @@ def setUpClass(cls): cls.kubernetes_version_ids = [] if cls.hypervisorNotSupported == False: - cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient, - name="cloud.kubernetes.service.enabled")[0].value + cls.endpoint_url = Configurations.list(cls.apiclient, name="endpointe.url")[0].value + if "localhost" in cls.endpoint_url: + endpoint_url = "http://%s:%d/client/api " %(cls.mgtSvrDetails["mgtSvrIp"], cls.mgtSvrDetails["port"]) + cls.debug("Setting endpointe.url to %s" %(endpoint_url)) + Configurations.update(cls.apiclient, "endpointe.url", endpoint_url) + cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient, name="cloud.kubernetes.service.enabled")[0].value if cls.initial_configuration_cks_enabled not in ["true", True]: cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server") Configurations.update(cls.apiclient, diff --git a/test/integration/smoke/test_service_offerings.py b/test/integration/smoke/test_service_offerings.py index 8a7682ea462d..3a942a10b62c 100644 --- a/test/integration/smoke/test_service_offerings.py +++ b/test/integration/smoke/test_service_offerings.py @@ -414,6 +414,8 @@ def 
test_02_edit_service_offering(self): # Generate new name & displaytext from random data random_displaytext = random_gen() random_name = random_gen() + random_tag = random_gen() + random_hosttag = random_gen() self.debug("Updating service offering with ID: %s" % self.service_offering_1.id) @@ -423,6 +425,8 @@ def test_02_edit_service_offering(self): cmd.id = self.service_offering_1.id cmd.displaytext = random_displaytext cmd.name = random_name + cmd.storagetags = random_tag + cmd.hosttags = random_hosttag self.apiclient.updateServiceOffering(cmd) list_service_response = list_service_offering( @@ -452,6 +456,17 @@ def test_02_edit_service_offering(self): "Check server name in updateServiceOffering" ) + self.assertEqual( + list_service_response[0].storagetags, + random_tag, + "Check storage tags in updateServiceOffering" + ) + + self.assertEqual( + list_service_response[0].hosttags, + random_hosttag, + "Check host tags in updateServiceOffering" + ) return @attr( diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py index 61b3a22a6c8e..896985dcefae 100644 --- a/test/integration/smoke/test_vm_life_cycle.py +++ b/test/integration/smoke/test_vm_life_cycle.py @@ -876,7 +876,6 @@ def test_11_destroy_vm_and_volumes(self): self.assertEqual(Volume.list(self.apiclient, id=vol1.id), None, "List response contains records when it should not") - class TestSecuredVmMigration(cloudstackTestCase): @classmethod @@ -1226,7 +1225,7 @@ def tearDown(self): def get_target_host(self, virtualmachineid): target_hosts = Host.listForMigration(self.apiclient, - virtualmachineid=virtualmachineid)[0] + virtualmachineid=virtualmachineid) if len(target_hosts) < 1: self.skipTest("No target hosts found") @@ -1252,7 +1251,8 @@ def deploy_vm(self): serviceofferingid=self.small_offering.id, mode=self.services["mode"]) - def migrate_vm_with_pools(self, target_pool, id): + def migrate_vm_to_pool(self, target_pool, id): + cmd = 
migrateVirtualMachine.migrateVirtualMachineCmd() cmd.storageid = target_pool.id @@ -1273,17 +1273,17 @@ def create_volume(self): ) """ - BVT for Vmware Offline VM and Volume Migration + BVT for Vmware Offline and Live VM and Volume Migration """ @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_01_migrate_VM_and_root_volume(self): + def test_01_offline_migrate_VM_and_root_volume(self): """Test VM will be migrated with it's root volume""" # Validate the following # 1. Deploys a VM - # 2. Finds suitable host for migration + # 2. Stops the VM # 3. Finds suitable storage pool for root volume - # 4. Migrate the VM to new host and storage pool and assert migration successful + # 4. Migrate the VM to new storage pool and assert migration successful vm = self.deploy_vm() @@ -1293,19 +1293,19 @@ def test_01_migrate_VM_and_root_volume(self): vm.stop(self.apiclient) - self.migrate_vm_with_pools(target_pool, vm.id) + self.migrate_vm_to_pool(target_pool, vm.id) root_volume = self.get_vm_volumes(vm.id)[0] self.assertEqual(root_volume.storageid, target_pool.id, "Pool ID was not as expected") @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_02_migrate_VM_with_two_data_disks(self): + def test_02_offline_migrate_VM_with_two_data_disks(self): """Test VM will be migrated with it's root volume""" # Validate the following # 1. Deploys a VM and attaches 2 data disks - # 2. Finds suitable host for migration + # 2. Stops the VM # 3. Finds suitable storage pool for volumes - # 4. Migrate the VM to new host and storage pool and assert migration successful + # 4. 
Migrate the VM to new storage pool and assert migration successful vm = self.deploy_vm() @@ -1321,7 +1321,7 @@ def test_02_migrate_VM_with_two_data_disks(self): vm.stop(self.apiclient) - self.migrate_vm_with_pools(target_pool, vm.id) + self.migrate_vm_to_pool(target_pool, vm.id) volume1 = Volume.list(self.apiclient, id=volume1.id)[0] volume2 = Volume.list(self.apiclient, id=volume2.id)[0] @@ -1332,7 +1332,54 @@ def test_02_migrate_VM_with_two_data_disks(self): self.assertEqual(volume2.storageid, target_pool.id, "Pool ID was not as expected") @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") - def test_03_migrate_detached_volume(self): + def test_03_live_migrate_VM_with_two_data_disks(self): + """Test VM will be migrated with it's root volume""" + # Validate the following + # 1. Deploys a VM and attaches 2 data disks + # 2. Finds suitable host for migration + # 3. Finds suitable storage pool for volumes + # 4. Migrate the VM to new host and storage pool and assert migration successful + + vm = self.deploy_vm() + + root_volume = self.get_vm_volumes(vm.id)[0] + volume1 = self.create_volume() + volume2 = self.create_volume() + vm.attach_volume(self.apiclient, volume1) + vm.attach_volume(self.apiclient, volume2) + + target_host = self.get_target_host(vm.id) + target_pool = self.get_target_pool(root_volume.id) + volume1.target_pool = self.get_target_pool(volume1.id) + volume2.target_pool = self.get_target_pool(volume2.id) + + cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd() + cmd.migrateto = [{"volume": str(root_volume.id), "pool": str(target_pool.id)}, + {"volume": str(volume1.id), "pool": str(volume1.target_pool.id)}, + {"volume": str(volume2.id), "pool": str(volume2.target_pool.id)}] + cmd.virtualmachineid = vm.id + cmd.hostid = target_host.id + + response = self.apiclient.migrateVirtualMachineWithVolume(cmd) + + self.assertEqual(Volume.list(self.apiclient, 
id=root_volume.id)[0].storageid, + target_pool.id, + "Pool ID not as expected") + + self.assertEqual(Volume.list(self.apiclient, id=volume1.id)[0].storageid, + volume1.target_pool.id, + "Pool ID not as expected") + + self.assertEqual(Volume.list(self.apiclient, id=volume2.id)[0].storageid, + volume2.target_pool.id, + "Pool ID not as expected") + + self.assertEqual(response.hostid, + target_host.id, + "HostID not as expected") + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false") + def test_04_migrate_detached_volume(self): """Test VM will be migrated with it's root volume""" # Validate the following # 1. Deploys a VM and attaches 1 data disk @@ -1943,3 +1990,139 @@ def test_01_vapps_vm_cycle(self): cmd = destroyVirtualMachine.destroyVirtualMachineCmd() cmd.id = vm.id self.apiclient.destroyVirtualMachine(cmd) + +class TestCloneVM(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestCloneVM, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.hypervisor = testClient.getHypervisorInfo() + + # Get Zone, Domain and templates + domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.services['mode'] = cls.zone.networktype + + # if local storage is enabled, alter the offerings to use localstorage + # this step is needed for devcloud + if cls.zone.localstorageenabled == True: + cls.services["service_offerings"]["tiny"]["storagetype"] = 'local' + cls.services["service_offerings"]["small"]["storagetype"] = 'local' + cls.services["service_offerings"]["medium"]["storagetype"] = 'local' + + template = get_suitable_test_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"], + cls.hypervisor + ) + if template == FAILED: + assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] + + # 
Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + + cls.services["iso1"]["zoneid"] = cls.zone.id + + # Create VMs, NAT Rules etc + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=domain.id + ) + cls._cleanup = [] + cls._cleanup.append(cls.account) + cls.small_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["small"] + ) + cls._cleanup.append(cls.small_offering) + + cls.medium_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["medium"] + ) + cls._cleanup.append(cls.medium_offering) + # create small and large virtual machines + cls.small_virtual_machine = VirtualMachine.create( + cls.apiclient, + cls.services["small"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.small_offering.id, + mode=cls.services["mode"] + ) + cls._cleanup.append(cls.small_virtual_machine) + cls.medium_virtual_machine = VirtualMachine.create( + cls.apiclient, + cls.services["small"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.medium_offering.id, + mode=cls.services["mode"] + ) + cls._cleanup.append(cls.medium_virtual_machine) + cls.virtual_machine = VirtualMachine.create( + cls.apiclient, + cls.services["small"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.small_offering.id, + mode=cls.services["mode"] + ) + cls._cleanup.append(cls.virtual_machine) + + @classmethod + def tearDownClass(cls): + super(TestCloneVM, cls).tearDownClass() + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + def tearDown(self): + try: + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + @attr(tags = ["clone","devcloud", 
"advanced", "smoke", "basic", "sg"], required_hardware="false") + def test_clone_vm_and_volumes(self): + small_disk_offering = DiskOffering.list(self.apiclient, name='Small')[0]; + config = Configurations.list(self.apiclient, + name="kvm.snapshot.enabled" + ) + if config is None: + self.skipTest("Please enable kvm.snapshot.enable global config") + if len(config) == 0 or config[0].value != "true": + self.skipTest("Please enable kvm.snapshot.enable global config") + if self.hypervisor.lower() in ["kvm", "simulator"]: + small_virtual_machine = VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering.id,) + self.cleanup.append(small_virtual_machine) + vol1 = Volume.create( + self.apiclient, + self.services, + account=self.account.name, + diskofferingid=small_disk_offering.id, + domainid=self.account.domainid, + zoneid=self.zone.id + ) + self.cleanup.append(vol1) + small_virtual_machine.attach_volume(self.apiclient, vol1) + self.debug("Clone VM - ID: %s" % small_virtual_machine.id) + try: + clone_response = small_virtual_machine.clone(self.apiclient, small_virtual_machine) + self.cleanup.append(clone_response) + except Exception as e: + self.debug("Clone --" + str(e)) + raise e + self.assertTrue(VirtualMachine.list(self.apiclient, id=clone_response.id) is not None, "vm id should be populated") \ No newline at end of file diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py index e1d419ff150b..130828596821 100644 --- a/test/integration/smoke/test_volumes.py +++ b/test/integration/smoke/test_volumes.py @@ -627,8 +627,8 @@ def test_07_resize_fail(self): if hosts[0].hypervisor == "XenServer": self.virtual_machine.stop(self.apiClient) - elif hosts[0].hypervisor.lower() in ("vmware", "hyperv"): - self.skipTest("Resize Volume is unsupported on VmWare and Hyper-V") + elif hosts[0].hypervisor.lower() == "hyperv": + 
self.skipTest("Resize Volume is unsupported on Hyper-V") # Attempting to resize it should throw an exception, as we're using a non # customisable disk offering, therefore our size parameter should be ignored @@ -659,8 +659,8 @@ def test_08_resize_volume(self): if hosts[0].hypervisor == "XenServer": self.virtual_machine.stop(self.apiClient) - elif hosts[0].hypervisor.lower() in ("vmware", "hyperv"): - self.skipTest("Resize Volume is unsupported on VmWare and Hyper-V") + elif hosts[0].hypervisor.lower() == "hyperv": + self.skipTest("Resize Volume is unsupported on Hyper-V") # resize the data disk self.debug("Resize Volume ID: %s" % self.volume.id) diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 9107b7cba84b..6d7841edb4b4 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -95,7 +95,7 @@ 'StorageMaintenance': 'Storage Pool', 'StoragePool': 'Storage Pool', 'StorageProvider': 'Storage Pool', - 'syncStoragePool': 'Storage Pool', + 'updateStorageCapabilities' : 'Storage Pool', 'SecurityGroup': 'Security Group', 'SSH': 'SSH', 'register': 'Registration', diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py index 916af64d96cc..68153b55f243 100755 --- a/tools/marvin/marvin/lib/base.py +++ b/tools/marvin/marvin/lib/base.py @@ -635,6 +635,9 @@ def create(cls, apiclient, services, templateid=None, accountid=None, if rootdiskcontroller: cmd.details[0]["rootDiskController"] = rootdiskcontroller + if "size" in services: + cmd.size = services["size"] + if group: cmd.group = group @@ -744,6 +747,21 @@ def reboot(self, apiclient, forced=None): if response[0] == FAIL: raise Exception(response[1]) + def clone(self, apiclient, vm): + """"Clone the instance""" + cmd = cloneVirtualMachine.cloneVirtualMachineCmd() + cmd.virtualmachineid = vm.id + if vm.id is None: + cmd.virtualmachineid = self.id + response = apiclient.cloneVirtualMachine(cmd) + temp = self.id + self.id = response.id + state = self.getState(apiclient, 
VirtualMachine.RUNNING) + self.id = temp + if (state[0] == FAIL): + raise Exception(state[1]) + return response + def recover(self, apiclient): """Recover the instance""" cmd = recoverVirtualMachine.recoverVirtualMachineCmd() @@ -2296,6 +2314,9 @@ def create(cls, apiclient, services, tags=None, domainid=None, cacheMode=None, * if "offerha" in services: cmd.offerha = services["offerha"] + if "provisioningtype" in services: + cmd.provisioningtype = services["provisioningtype"] + if "dynamicscalingenabled" in services: cmd.dynamicscalingenabled = services["dynamicscalingenabled"] diff --git a/ui/public/config.json b/ui/public/config.json index 4c9be76b13f8..6c134087101b 100644 --- a/ui/public/config.json +++ b/ui/public/config.json @@ -1,5 +1,12 @@ { "apiBase": "/client/api", + "servers": [ + { + "name": "Local-Server", + "apiHost": "", + "apiBase": "/client/api" + } + ], "docBase": "http://docs.cloudstack.apache.org/en/latest", "appTitle": "CloudStack", "footer": "Licensed under the Apache License, Version 2.0.", @@ -48,5 +55,6 @@ }, "plugins": [], "basicZoneEnabled": true, + "multipleServer": false, "docHelpMappings": {} } diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 3ff927b578ab..dad188df7a58 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -24,8 +24,8 @@ "error.release.dedicate.pod": "Failed to release dedicated pod", "error.release.dedicate.zone": "Failed to release dedicated zone", "error.session.expired": "Your session has expired.", -"error.unable.to.reach.management.server": "Unable to reach Management Server", "error.unable.to.proceed": "Unable to proceed. 
Please contact your administrator", +"error.unable.to.reach.management.server": "Unable to reach Management Server", "error.unresolved.internet.name": "Your internet name cannot be resolved.", "firewall.close": "Firewall", "force.delete.domain.warning": "Warning: Choosing this option will cause the deletion of all child domains and all associated accounts and their resources.", @@ -71,6 +71,13 @@ "label.action.attach.disk.processing": "Attaching Disk....", "label.action.attach.iso": "Attach ISO", "label.action.attach.iso.processing": "Attaching ISO....", +"label.action.bulk.delete.egress.firewall.rules": "Bulk delete egress firewall rules", +"label.action.bulk.delete.firewall.rules": "Bulk delete firewall rules", +"label.action.bulk.delete.isos": "Bulk delete ISOs", +"label.action.bulk.delete.load.balancer.rules": "Bulk delete load balancer rules", +"label.action.bulk.delete.portforward.rules": "Bulk delete Port Forward rules", +"label.action.bulk.delete.templates": "Bulk delete templates", +"label.action.bulk.release.public.ip.address": "Bulk release Public IP Addresses", "label.action.cancel.maintenance.mode": "Cancel Maintenance Mode", "label.action.cancel.maintenance.mode.processing": "Cancelling Maintenance Mode....", "label.action.change.password": "Change Password", @@ -82,10 +89,11 @@ "label.action.copy.iso.processing": "Copying ISO....", "label.action.copy.template": "Copy Template", "label.action.copy.template.processing": "Copying Template....", + "label.action.clone.vm": "Clone VM", +"label.action.create.snapshot.from.vmsnapshot": "Create Snapshot from VM Snapshot", "label.action.create.template.from.vm": "Create Template from VM", "label.action.create.template.from.volume": "Create Template from Volume", "label.action.create.template.processing": "Creating Template....", -"label.action.create.snapshot.from.vmsnapshot": "Create Snapshot from VM Snapshot", "label.action.create.vm": "Create VM", "label.action.create.vm.processing": "Creating VM....", 
"label.action.create.volume": "Create Volume", @@ -99,6 +107,7 @@ "label.action.delete.disk.offering.processing": "Deleting Disk Offering....", "label.action.delete.domain": "Delete Domain", "label.action.delete.domain.processing": "Deleting Domain....", +"label.action.delete.egress.firewall": "Delete egress firewall rule", "label.action.delete.firewall": "Delete firewall rule", "label.action.delete.firewall.processing": "Deleting Firewall....", "label.action.delete.ingress.rule": "Delete Ingress Rule", @@ -204,6 +213,7 @@ "label.action.get.diagnostics": "Get Diagnostics Data", "label.action.image.store.read.only": "Make Image store read-only", "label.action.image.store.read.write": "Make Image store read-write", +"label.action.import.export.instances":"Import-Export Instances", "label.action.iso.permission": "Update ISO Permissions", "label.action.iso.share": "Update ISO Sharing", "label.action.list.nexusvswitch": "List Nexus 1000v", @@ -267,6 +277,8 @@ "label.action.template.share": "Update Template Sharing", "label.action.unmanage.cluster": "Unmanage Cluster", "label.action.unmanage.cluster.processing": "Unmanaging Cluster....", +"label.action.unmanage.instance": "Unmanage Instance", +"label.action.unmanage.instances": "Unmanage Instances", "label.action.unmanage.virtualmachine": "Unmanage VM", "label.action.update.offering.access": "Update Offering Access", "label.action.update.os.preference": "Update OS Preference", @@ -453,6 +465,8 @@ "label.asyncbackup": "Async Backup", "label.author.email": "Author e-mail", "label.author.name": "Author name", +"label.auto.assign.diskoffering.disk.size": "Automatically assign offering matching the disk size", +"label.auto.assign.random.ip": "Automatically assign a random IP address", "label.autoscale": "AutoScale", "label.autoscale.configuration.wizard": "AutoScale Configuration Wizard", "label.availability": "Availability", @@ -581,6 +595,13 @@ "label.configure.ovs": "Configure Ovs", "label.configure.sticky.policy": 
"Configure Sticky Policy", "label.configure.vpc": "Configure VPC", +"label.confirm.delete.egress.firewall.rules": "Please confirm you wish to delete the selected egress firewall rules", +"label.confirm.delete.firewall.rules": "Please confirm you wish to delete the selected firewall rules", +"label.confirm.delete.isos": "Please confirm you wish to delete the selected isos", +"label.confirm.delete.loadbalancer.rules": "Please confirm you wish to delete the selected load balancing rules", +"label.confirm.delete.portforward.rules": "Please confirm you wish to delete the selected port-forward rules", +"label.confirm.delete.templates": "Please confirm you wish to delete the selected templates", +"label.confirm.release.public.ip.addresses": "Please confirm you wish to release the selected public IP addresses", "label.confirmacceptinvitation": "Please confirm you wish to join this project", "label.confirmation": "Confirmation", "label.confirmdeclineinvitation": "Are you sure you want to decline this project invitation?", @@ -593,9 +614,11 @@ "label.console.proxy.vm": "Console Proxy VM", "label.continue": "Continue", "label.continue.install": "Continue with installation", +"label.controlnodes": "Control nodes", "label.copied.clipboard": "Copied to clipboard", "label.copy": "Copy", "label.copy.clipboard": "Copy to clipboard", +"label.copy.setting.success": "Copy success, Please replace theme setting in public/config.js", "label.copy.text": "Copy Text", "label.copyid": "Copy ID", "label.copying.iso": "Copying ISO", @@ -639,7 +662,7 @@ "label.credit": "Credit", "label.crosszones": "Cross Zones", "label.currency": "Currency", -"label.current": "isCurrent", +"label.current": "Current", "label.currentpassword": "Current Password", "label.custom": "Custom", "label.custom.disk.offering": "Custom Disk Offering", @@ -647,6 +670,7 @@ "label.customdisksize": "Custom Disk Size", "label.customunconstrained": "Custom Unconstrained", "label.daily": "Daily", +"label.dark.mode": "Dark mode", 
"label.dashboard": "Dashboard", "label.dashboard.endpoint": "Dashboard endpoint", "label.data.disk": "Data Disk", @@ -693,6 +717,7 @@ "label.delete.opendaylight.device": "Delete OpenDaylight Controller", "label.delete.pa": "Delete Palo Alto", "label.delete.portable.ip.range": "Delete Portable IP Range", +"label.delete.portforward.rules": "Delete Port Forward Rules", "label.delete.project": "Delete project", "label.delete.project.role": "Delete Project Role", "label.delete.role": "Delete Role", @@ -720,6 +745,7 @@ "label.deny": "Deny", "label.deployasis":"Read VM settings from OVA", "label.deploymentplanner": "Deployment planner", +"label.desc.importexportinstancewizard": "Import and export instances to/from an existing VMware cluster.", "label.description": "Description", "label.destcidr": "Destination CIDR", "label.destination": "Destination", @@ -740,9 +766,9 @@ "label.directdownload": "Direct Download", "label.disable.autoscale": "Disable Autoscale", "label.disable.host": "Disable Host", -"label.disable.storage": "Disable Storage Pool", "label.disable.network.offering": "Disable network offering", "label.disable.provider": "Disable provider", +"label.disable.storage": "Disable Storage Pool", "label.disable.vnmc.provider": "Disable VNMC provider", "label.disable.vpc.offering": "Disable VPC offering", "label.disable.vpn": "Disable Remote Access VPN", @@ -757,6 +783,7 @@ "label.disk.offering.access": "Disk offering access", "label.disk.offering.details": "Disk offering details", "label.disk.offerings": "Disk Offerings", +"label.disk.selection": "Disk Selection", "label.disk.size": "Disk Size", "label.disk.volume": "Disk Volume", "label.diskbytesreadrate": "Disk Read Rate (BPS)", @@ -840,8 +867,8 @@ "label.enable.network.offering": "Enable network offering", "label.enable.provider": "Enable provider", "label.enable.s3": "Enable S3-backed Secondary Storage", -"label.enable.swift": "Enable Swift", "label.enable.storage": "Enable Storage Pool", +"label.enable.swift": 
"Enable Swift", "label.enable.vnmc.device": "Enable VNMC device", "label.enable.vnmc.provider": "Enable VNMC provider", "label.enable.vpc.offering": "Enable VPC offering", @@ -875,6 +902,7 @@ "label.esphash": "ESP Hash", "label.esplifetime": "ESP Lifetime (second)", "label.esppolicy": "ESP policy", +"label.esx.host": "ESX/ESXi Host", "label.event": "Event", "label.event.archived": "Event(s) Archived", "label.event.deleted": "Event(s) Deleted", @@ -902,6 +930,7 @@ "label.filterby": "Filter by", "label.fingerprint": "FingerPrint", "label.firewall": "Firewall", +"label.firewallrule": "Firewall Rule", "label.firstname": "First Name", "label.firstname.lower": "firstname", "label.fix.errors": "Fix errors", @@ -1040,6 +1069,7 @@ "label.ikeversion": "IKE Version", "label.images": "Images", "label.import.backup.offering": "Import Backup Offering", +"label.import.instance": "Import Instance", "label.import.offering": "Import Offering", "label.import.role": "Import Role", "label.in.progress": "in progress", @@ -1169,6 +1199,7 @@ "label.isvolatile": "Volatile", "label.item.listing": "Item listing", "label.items": "items", +"label.items.selected": "item(s) selected", "label.japanese.keyboard": "Japanese keyboard", "label.keep": "Keep", "label.keep.colon": "Keep:", @@ -1282,13 +1313,13 @@ "label.manage": "Manage", "label.manage.resources": "Manage Resources", "label.manage.vpn.user": "Manage VPN Users", +"label.managed.instances": "Managed Instances", "label.managedstate": "Managed State", "label.management": "Management", "label.management.ips": "Management IP Addresses", "label.management.server": "Management Server", "label.management.servers": "Management Servers", "label.managementservers": "Number of Management Servers", -"label.controlnodes": "Control nodes", "label.max.primary.storage": "Max. primary (GiB)", "label.max.secondary.storage": "Max. secondary (GiB)", "label.maxcpu": "Max. 
CPU Cores", @@ -1365,6 +1396,8 @@ "label.metrics.network.usage": "Network Usage", "label.metrics.network.write": "Write", "label.metrics.num.cpu.cores": "Cores", +"label.migrate.allowed": "Migrate Allowed", +"label.migrate.auto.select": "AutoSelect", "label.migrate.data.from.image.store": "Migrate Data from Image store", "label.migrate.instance.to": "Migrate instance to", "label.migrate.instance.to.host": "Migrate instance to another host", @@ -1435,6 +1468,7 @@ "label.network.offering.display.text": "Network Offering Display Text", "label.network.offering.name": "Network Offering Name", "label.network.offerings": "Network Offerings", +"label.network.selection": "Network Selection", "label.network.service.providers": "Network Service Providers", "label.networkcidr": "Network CIDR", "label.networkdevicetype": "Type", @@ -1475,6 +1509,7 @@ "label.nfscachepath": "Path", "label.nfscachezoneid": "Zone", "label.nfsserver": "NFS Server", +"label.nic": "NIC", "label.nicadaptertype": "NIC adapter type", "label.nicira.controller.address": "Controller Address", "label.nicira.nvp.details": "Nicira NVP details", @@ -1520,6 +1555,7 @@ "label.opendaylight.controllerdetail": "OpenDaylight Controller Details", "label.opendaylight.controllers": "OpenDaylight Controllers", "label.operation": "Operation", +"label.operation.status": "Operation Status", "label.optional": "Optional", "label.order": "Order", "label.oscategoryid": "OS Preference", @@ -1605,9 +1641,10 @@ "label.portable.ip.ranges": "Portable IP Ranges", "label.portableipaddress": "Portable IPs", "label.portforwarding": "Port Forwarding", +"label.portforwarding.rule": "Port Forwarding Rule", "label.powerflex.gateway": "Gateway", -"label.powerflex.gateway.username": "Gateway Username", "label.powerflex.gateway.password": "Gateway Password", +"label.powerflex.gateway.username": "Gateway Username", "label.powerflex.storage.pool": "Storage Pool", "label.powerstate": "Power State", "label.preferred": "Prefered", @@ -1800,6 
+1837,7 @@ "label.reset": "Reset", "label.reset.ssh.key.pair": "Reset SSH Key Pair", "label.reset.ssh.key.pair.on.vm": "Reset SSH Key Pair on VM", +"label.reset.to.default": "Reset to default", "label.reset.vpn.connection": "Reset VPN connection", "label.resetvm": "Reset VM", "label.resource": "Resource", @@ -1824,20 +1862,21 @@ "label.rolename": "Role", "label.roles": "Roles", "label.roletype": "Role Type", -"label.rootdisksize": "Root disk size (GB)", "label.root.certificate": "Root certificate", "label.root.disk.offering": "Root Disk Offering", "label.root.disk.size": "Root disk size (GB)", +"label.rootdisk": "ROOT disk", "label.rootdiskcontrollertype": "Root disk controller", "label.rootdiskcontrollertypekvm": "Root disk controller", -"label.routerip": "IPv4 address for Router in Shared Network", -"label.routeripv6": "IPv6 address for Router in Shared Network", +"label.rootdisksize": "Root disk size (GB)", "label.router.health.check.last.updated": "Last updated", "label.router.health.check.name": "Check name", "label.router.health.check.success": "Success", "label.router.health.checks": "Health Checks", "label.router.vm.scaled.up": "Router VM Scaled Up", "label.routercount": "Total of Virtual Routers", +"label.routerip": "IPv4 address for Router in Shared Network", +"label.routeripv6": "IPv6 address for Router in Shared Network", "label.routerrequiresupgrade": "Upgrade is required", "label.routertype": "Type", "label.routing.host": "Routing Host", @@ -1845,10 +1884,12 @@ "label.rule.number": "Rule Number", "label.rules": "Rules", "label.rules.file": "Rules File", -"label.rules.file.to.import": "Rule defintions CSV file to import", "label.rules.file.import.description": "Click or drag rule defintions CSV file to import", +"label.rules.file.to.import": "Rule defintions CSV file to import", "label.run.proxy.locally": "Run proxy locally", "label.running": "Running VMs", +"label.s2scustomergatewayid": "Site to Site customer gateway ID", +"label.s2svpngatewayid": 
"Site to Site VPN gateway ID", "label.s3.access.key": "Access Key", "label.s3.bucket": "Bucket", "label.s3.connection.timeout": "Connection Timeout", @@ -1868,9 +1909,10 @@ "label.save.and.continue": "Save and continue", "label.save.changes": "Save changes", "label.save.new.rule": "Save new Rule", +"label.save.setting": "Save setting", "label.saving.processing": "Saving....", -"label.scale.vm": "Scale VM", "label.scale.up.policy": "SCALE UP POLICY", +"label.scale.vm": "Scale VM", "label.scaledown.policy": "ScaleDown Policy", "label.scaleup.policy": "ScaleUp Policy", "label.schedule": "Schedule", @@ -1956,8 +1998,6 @@ "label.shrinkok": "Shrink OK", "label.shutdown.provider": "Shutdown provider", "label.simplified.chinese.keyboard": "Simplified Chinese keyboard", -"label.s2scustomergatewayid": "Site to Site customer gateway ID", -"label.s2svpngatewayid": "Site to Site VPN gateway ID", "label.site.to.site.vpn": "Site-to-site VPN", "label.site.to.site.vpn.connections": "Site-to-site VPN Connections", "label.size": "Size", @@ -2049,10 +2089,10 @@ "label.stop.lb.vm": "Stop LB VM", "label.stopped": "Stopped VMs", "label.storage": "Storage", +"label.storage.migration.required": "Storage Migration Required", "label.storage.tags": "Storage Tags", "label.storage.traffic": "Storage Traffic", "label.storageid": "Primary Storage", -"label.storage.migration.required": "Storage Migration Required", "label.storagemotionenabled": "Storage Motion Enabled", "label.storagepolicy": "Storage policy", "label.storagepool": "Storage Pool", @@ -2101,6 +2141,8 @@ "label.tcp": "TCP", "label.tcp.proxy": "TCP Proxy", "label.template": "Select a template", +"label.template.select.existing": "Select an existing template", +"label.template.temporary.import": "Use a temporary template for import", "label.templatebody": "Body", "label.templatedn": "Select Template", "label.templatefileupload": "Local file", @@ -2115,9 +2157,28 @@ "label.templatetype": "Template Type", "label.tftp.dir": "TFTP 
Directory", "label.tftpdir": "Tftp root directory", +"label.theme.alert": "The settings panel is only visible in the development environment, please save for the changes to take effect.", +"label.theme.color": "Theme Color", +"label.theme.cyan": "Cyan", +"label.theme.dark": "Dark Style", +"label.theme.daybreak.blue": "Daybreak Blue", "label.theme.default": "Default Theme", +"label.theme.dust.red": "Dust Red", +"label.theme.geek.blue": "Geek Blue", +"label.theme.golden.purple": "Golden Purple", "label.theme.grey": "Custom - Grey", +"label.theme.light": "Light Style", "label.theme.lightblue": "Custom - Light Blue", +"label.theme.navigation.bgColor": "Background Color", +"label.theme.navigation.setting": "Navigation setting", +"label.theme.navigation.txtColor": "Text Color", +"label.theme.page.style.setting": "Page style setting", +"label.theme.polar.green": "Polar Green", +"label.theme.project": "Project Style", +"label.theme.project.navigation.setting": "Project Navigation setting", +"label.theme.sunset.orange": "Sunset Orange", +"label.theme.volcano": "Volcano", +"label.theme.white": "White", "label.threshold": "Threshold", "label.thursday": "Thursday", "label.tier.details": "Tier details", @@ -2131,6 +2192,7 @@ "label.to": "to", "label.token": "Token", "label.token.for.dashboard.login": "Token for dashboard login can be retrieved using following command", +"label.tools": "Tools", "label.total": "Total", "label.total.hosts": "Total Hosts", "label.total.memory": "Total Memory", @@ -2157,6 +2219,9 @@ "label.unit": "Usage Unit", "label.unknown": "Unknown", "label.unlimited": "Unlimited", +"label.unmanage.instance": "Unmanage Instance", +"label.unmanaged.instance": "Unmanaged Instance", +"label.unmanaged.instances": "Unmanaged Instances", "label.untagged": "Untagged", "label.update.instance.group": "Update Instance Group", "label.update.physical.network": "Update Physical Network", @@ -2182,8 +2247,8 @@ "label.usageinterface": "Usage Interface", "label.usagename": 
"Usage Type", "label.usageunit": "Unit", -"label.use.local.timezone": "Use Local Timezone", "label.use.kubectl.access.cluster": "kubectl and kubeconfig file to access cluster", +"label.use.local.timezone": "Use Local Timezone", "label.use.vm.ip": "Use VM IP:", "label.use.vm.ips": "Use VM IPs", "label.used": "Used", @@ -2215,7 +2280,6 @@ "label.vcenter.username": "vCenter Username", "label.vcenterdatacenter": "vCenter Datacenter", "label.vcenterdatastore": "vCenter Datastore", -"label.esx.host": "ESX/ESXi Host", "label.vcenterpassword": "vCenter Password", "label.vcenterusername": "vCenter Username", "label.vcipaddress": "vCenter IP Address", @@ -2284,7 +2348,6 @@ "label.vnmc.devices": "VNMC Devices", "label.volgroup": "Volume Group", "label.volume": "Volume", -"label.volumeid": "Volume", "label.volume.details": "Volume details", "label.volume.empty": "No data volumes attached to this VM", "label.volume.ids": "Volume ID's", @@ -2294,6 +2357,7 @@ "label.volumechecksum.description": "Use the hash that you created at the start of the volume upload procedure", "label.volumefileupload": "Local file", "label.volumegroup": "Volume Group", +"label.volumeid": "Volume", "label.volumeids": "Volumes to be deleted", "label.volumelimit": "Volume Limits", "label.volumename": "Volume Name", @@ -2336,6 +2400,9 @@ "label.vspherestoragepolicy": "vSphere Storage Policy", "label.vswitch.name": "vSwitch Name", "label.vswitch.type": "vSwitch Type", +"label.vswitch.type.nexusdvs": "Cisco Nexus 1000v Distributed Virtual Switch", +"label.vswitch.type.vmwaredvs": "VMware vNetwork Distributed Virtual Switch", +"label.vswitch.type.vmwaresvs": "VMware vNetwork Standard Virtual Switch", "label.vswitchguestname": "Guest Traffic vSwitch Name", "label.vswitchguesttype": "Guest Traffic vSwitch Type", "label.vswitchpublicname": "Public Traffic vSwitch Name", @@ -2399,6 +2466,7 @@ "message.action.delete.external.firewall": "Please confirm that you would like to remove this external firewall. 
Warning: If you are planning to add back the same external firewall, you must reset usage data on the device.", "message.action.delete.external.load.balancer": "Please confirm that you would like to remove this external load balancer. Warning: If you are planning to add back the same external load balancer, you must reset usage data on the device.", "message.action.delete.ingress.rule": "Please confirm that you want to delete this ingress rule.", +"message.action.delete.instance.group": "Please confirm that you want to delete the instance group", "message.action.delete.iso": "Please confirm that you want to delete this ISO.", "message.action.delete.iso.for.all.zones": "The ISO is used by all zones. Please confirm that you want to delete it from all zones.", "message.action.delete.network": "Please confirm that you want to delete this network.", @@ -2443,6 +2511,7 @@ "message.action.manage.cluster": "Please confirm that you want to manage the cluster.", "message.action.primarystorage.enable.maintenance.mode": "Warning: placing the primary storage into maintenance mode will cause all VMs using volumes from it to be stopped. Do you want to continue?", "message.action.reboot.instance": "Please confirm that you want to reboot this instance.", + "message.action.clone.instance": "Please confirm that you want to clone this instance", "message.action.reboot.router": "All services provided by this virtual router will be interrupted. Please confirm that you want to reboot this router.", "message.action.reboot.systemvm": "Please confirm that you want to reboot this system VM.", "message.action.recover.volume": "Please confirm that you would like to recover this volume.", @@ -2466,7 +2535,10 @@ "message.action.stop.router": "All services provided by this virtual router will be interrupted. 
Please confirm that you want to stop this router.", "message.action.stop.systemvm": "Please confirm that you want to stop this system VM.", "message.action.unmanage.cluster": "Please confirm that you want to unmanage the cluster.", +"message.action.unmanage.instance": "Please confirm that you want to unmanage the instance.", +"message.action.unmanage.instances": "Please confirm that you want to unmanage the instances.", "message.action.unmanage.virtualmachine": "Please confirm that you want to unmanage the virtual machine.", +"message.action.unmanage.virtualmachines": "Please confirm that you want to unmanage the virtual machines.", "message.action.vmsnapshot.create": "Please confirm that you want to take a snapshot of this instance.
Please notice that the instance will be paused during the snapshoting, and resumed after snapshotting, if it runs on KVM.", "message.action.vmsnapshot.delete": "Please confirm that you want to delete this VM snapshot.
Please notice that the instance will be paused before the snapshot deletion, and resumed after deletion, if it runs on KVM.", "message.action.vmsnapshot.revert": "Revert VM snapshot", @@ -2606,15 +2678,15 @@ "message.confirm.destroy.kubernetes.cluster": "Please confirm that you want to destroy this Kubernetes cluster.", "message.confirm.destroy.router": "All services provided by this virtual router will be interrupted. Please confirm that you want to stop this router. Please confirm that you would like to destroy this router", "message.confirm.disable.host": "Please confirm that you want to disable the host", -"message.confirm.disable.storage": "Please confirm that you want to disable the storage pool", "message.confirm.disable.network.offering": "Are you sure you want to disable this network offering?", "message.confirm.disable.provider": "Please confirm that you would like to disable this provider", +"message.confirm.disable.storage": "Please confirm that you want to disable the storage pool", "message.confirm.disable.vnmc.provider": "Please confirm you would like to disable the VNMC provider.", "message.confirm.disable.vpc.offering": "Are you sure you want to disable this VPC offering?", "message.confirm.enable.host": "Please confirm that you want to enable the host", -"message.confirm.enable.storage": "Please confirm that you want to enable the storage pool", "message.confirm.enable.network.offering": "Are you sure you want to enable this network offering?", "message.confirm.enable.provider": "Please confirm that you would like to enable this provider", +"message.confirm.enable.storage": "Please confirm that you want to enable the storage pool", "message.confirm.enable.vnmc.provider": "Please confirm you would like to enable the VNMC provider.", "message.confirm.enable.vpc.offering": "Are you sure you want to enable this VPC offering?", "message.confirm.force.update": "Do you want to make a force update?", @@ -2673,10 +2745,10 @@ 
"message.creating.secondary.storage": "Creating secondary storage", "message.creating.systemvm": "Creating system VMs (this may take a while)", "message.creating.zone": "Creating zone", -"message.datacenter.description": "Name of the datacenter on vCenter", -"message.datastore.description": "Name of the datastore on vCenter", "message.data.migration": "Data Migration", "message.data.migration.progress": "Data Migration between image stores", +"message.datacenter.description": "Name of the datacenter on vCenter", +"message.datastore.description": "Name of the datastore on vCenter", "message.dedicate.zone": "Dedicating zone", "message.dedicated.zone.released": "Zone dedication released", "message.dedicating.cluster": "Dedicating Cluster...", @@ -2713,6 +2785,7 @@ "message.desc.create.ssh.key.pair": "Please fill in the following data to create or register a ssh key pair.

(1) If public key is set, CloudStack will register the public key. You can use it through your private key.

(2) If public key is not set, CloudStack will create a new SSH Key pair. In this case, please copy and save the private key. CloudStack will not keep it.
", "message.desc.created.ssh.key.pair": "Created a SSH Key Pair.", "message.desc.host": "Each cluster must contain at least one host (computer) for guest VMs to run on, and we will add the first host now. For a host to function in CloudStack, you must install hypervisor software on the host, assign an IP address to the host, and ensure the host is connected to the CloudStack management server.

Give the host's DNS or IP address, the user name (usually root) and password, and any labels you use to categorize hosts.", +"message.desc.importexportinstancewizard": "This feature only applies to CloudStack VMware clusters. By choosing to Manage an instance, CloudStack takes over the orchestration of that instance. The instance is left running and not physically moved. Unmanaging instances removes CloudStack's ability to manage them (but they are left running and not destroyed)", "message.desc.primary.storage": "Each cluster must contain one or more primary storage servers, and we will add the first one now. Primary storage contains the disk volumes for all the VMs running on hosts in the cluster. Use any standards-compliant protocol that is supported by the underlying hypervisor.", "message.desc.reset.ssh.key.pair": "Please specify a ssh key pair that you would like to add to this VM. Please note the root password will be changed by this operation if password is enabled.", "message.desc.secondary.storage": "Each zone must have at least one NFS or secondary storage server, and we will add the first one now. Secondary storage stores VM templates, ISO images, and VM disk volume snapshots. This server must be available to all hosts in the zone.

Provide the IP address and exported path.", @@ -2765,6 +2838,7 @@ "message.enabling.zone.dots": "Enabling zone...", "message.enter.seperated.list.multiple.cidrs": "Please enter a comma separated list of CIDRs if more than one", "message.enter.token": "Please enter the token that you were given in your invite e-mail.", +"message.enter.valid.nic.ip": "Please enter a valid IP address for NIC", "message.error.access.key": "Please enter Access Key", "message.error.add.guest.network": "Either IPv4 fields or IPv6 fields need to be filled when adding a guest network", "message.error.add.secondary.ipaddress": "There was an error adding the secondary IP Address", @@ -2931,10 +3005,13 @@ "message.installwizard.tooltip.configureguesttraffic.guestnetmask": "The netmask in use on the subnet that the guests should use", "message.installwizard.tooltip.configureguesttraffic.gueststartip": "The range of IP addresses that will be available for allocation to guests in this zone. If one NIC is used, these IPs should be in the same CIDR as the pod CIDR.", "message.installwizard.tooltip.configureguesttraffic.name": "A name for your network", -"message.instance.scaled.up.confirm": "Do you really want to scale Up your instance ?", +"message.instances.managed": "Instances or VMs controlled by CloudStack", +"message.instances.scaled.up.confirm": "Do you really want to scale Up your instance ?", +"message.instances.unmanaged": "Instances or VMs not controlled by CloudStack", "message.instancewizard.notemplates": "You do not have any templates available; please add a compatible template, and re-launch the instance wizard.", "message.interloadbalance.not.return.elementid": "error: listInternalLoadBalancerElements API doesn't return Internal LB Element Id", "message.ip.address.changed": "Your IP addresses may have changed; would you like to refresh the listing? 
Note that in this case the details pane will close.", +"message.ip.address.changes.effect.after.vm.restart": "IP address changes take effect only after VM restart.", "message.iso.desc": "Disc image containing data or bootable media for OS", "message.join.project": "You have now joined a project. Please switch to Project view to see the project.", "message.kubeconfig.cluster.not.available": "Kubernetes cluster kubeconfig not available currently", @@ -2960,11 +3037,11 @@ "message.migrate.instance.select.host": "Please select a host for migration", "message.migrate.instance.to.host": "Please confirm that you want to migrate instance to another host.", "message.migrate.instance.to.ps": "Please confirm that you want to migrate instance to another primary storage.", -"message.migrate.router.confirm": "Please confirm the host you wish to migrate the router to:", -"message.migrate.systemvm.confirm": "Please confirm the host you wish to migrate the system VM to:", "message.migrate.lb.vm.to.ps": "Please confirm that you want to migrate LB VM to another primary storage.", +"message.migrate.router.confirm": "Please confirm the host you wish to migrate the router to:", "message.migrate.router.to.ps": "Please confirm that you want to migrate router to another primary storage.", "message.migrate.system.vm.to.ps": "Please confirm that you want to migrate system VM to another primary storage.", +"message.migrate.systemvm.confirm": "Please confirm the host you wish to migrate the system VM to:", "message.migrate.volume": "Please confirm that you want to migrate volume to another primary storage.", "message.migrate.volume.failed": "Migrating volume failed", "message.migrate.volume.processing": "Migrating volume...", @@ -3016,6 +3093,7 @@ "message.pending.projects.2": "To view, please go to the projects section, then select invitations from the drop-down.", "message.please.add.at.lease.one.traffic.range": "Please add at least one traffic range.",
"message.please.confirm.remove.ssh.key.pair": "Please confirm that you want to remove this SSH Key Pair", +"message.please.enter.valid.value": "Please enter a valid value", "message.please.enter.value": "Please enter values", "message.please.proceed": "Please proceed to the next step.", "message.please.select.a.configuration.for.your.zone": "Please select a configuration for your zone.", @@ -3027,8 +3105,8 @@ "message.pod.dedication.released": "Pod dedication released", "message.portable.ip.delete.confirm": "Please confirm you want to delete Portable IP Range", "message.processing.complete": "Processing complete!", -"message.protocol.description": "For XenServer, choose NFS, iSCSI, or PreSetup. For KVM, choose NFS, SharedMountPoint, RDB, CLVM or Gluster. For vSphere, choose NFS, PreSetup (VMFS or iSCSI or FiberChannel or vSAN or vVols) or DatastoreCluster. For Hyper-V, choose SMB/CIFS. For LXC, choose NFS or SharedMountPoint. For OVM, choose NFS or ocfs2.", "message.project.invite.sent": "Invite sent to user; they will be added to the project once they accept the invitation", +"message.protocol.description": "For XenServer, choose NFS, iSCSI, or PreSetup. For KVM, choose NFS, SharedMountPoint, RDB, CLVM or Gluster. For vSphere, choose NFS, PreSetup (VMFS or iSCSI or FiberChannel or vSAN or vVols) or DatastoreCluster. For Hyper-V, choose SMB/CIFS. For LXC, choose NFS or SharedMountPoint. For OVM, choose NFS or ocfs2.", "message.public.traffic.in.advanced.zone": "Public traffic is generated when VMs in the cloud access the internet. Publicly-accessible IPs must be allocated for this purpose. End users can use the CloudStack UI to acquire these IPs to implement NAT between their guest network and their public network.

Provide at least one range of IP addresses for internet traffic.", "message.public.traffic.in.basic.zone": "Public traffic is generated when VMs in the cloud access the Internet or provide services to clients over the Internet. Publicly accessible IPs must be allocated for this purpose. When a instance is created, an IP from this set of Public IPs will be allocated to the instance in addition to the guest IP address. Static 1-1 NAT will be set up automatically between the public IP and the guest IP. End users can also use the CloudStack UI to acquire additional IPs to implement static NAT between their instances and the public IP.", "message.publicip.state.allocated": "The IP address is in used.", @@ -3089,10 +3167,12 @@ "message.select.a.zone": "A zone typically corresponds to a single datacenter. Multiple zones help make the cloud more reliable by providing physical isolation and redundancy.", "message.select.affinity.groups": "Please select any affinity groups you want this VM to belong to:", "message.select.destination.image.stores": "Please select Image Store(s) to which data is to be migrated to", +"message.select.disk.offering": "Please select a disk offering for disk", "message.select.instance": "Please select an instance.", "message.select.iso": "Please select an ISO for your new virtual instance.", "message.select.item": "Please select an item.", "message.select.migration.policy": "Please select a migration Policy", +"message.select.nic.network": "Please select a network for NIC", "message.select.security.groups": "Please select security group(s) for your new VM", "message.select.template": "Please select a template for your new virtual instance.", "message.select.tier": "Please select a tier", @@ -3163,6 +3243,7 @@ "message.success.edit.acl": "Successfully edited ACL rule", "message.success.edit.rule": "Successfully edited rule", "message.success.enable.saml.auth": "Successfully enabled SAML Authorization", +"message.success.import.instance": 
"Successfully imported instance", "message.success.migrate.volume": "Successfully migrated volume", "message.success.migrating": "Migration completed successfully for", "message.success.move.acl.order": "Successfully moved ACL rule", @@ -3183,6 +3264,7 @@ "message.success.remove.sticky.policy": "Successfully removed sticky policy", "message.success.resize.volume": "Successfully resized volume", "message.success.scale.kubernetes": "Successfully scaled Kubernetes cluster", +"message.success.unmanage.instance": "Successfully unmanaged instance", "message.success.update.ipaddress": "Successfully updated IP Address", "message.success.update.kubeversion": "Successfully updated Kubernetes supported version", "message.success.update.user": "Successfully updated user", @@ -3199,6 +3281,7 @@ "message.template.copy.select.zone": "Please select a zone to copy template.", "message.template.copying": "Template is being copied.", "message.template.desc": "OS image that can be used to boot VMs", +"message.template.import.vm.temporary": "If a temporary template is used, reset VM operation will not work after import.", "message.template.iso": "Please select a template or ISO to continue", "message.tier.required": "Tier is required", "message.tooltip.dns.1": "Name of a DNS server for use by VMs in the zone. 
The public IP addresses for the zone must have a route to this server.", @@ -3323,6 +3406,8 @@ "state.error": "Error", "state.expired": "Expired", "state.expunging": "Expunging", +"state.failed": "Failed", +"state.inprogress": "In Progress", "state.migrating": "Migrating", "state.pending": "Pending", "state.readonly": "Read-Only", diff --git a/ui/src/App.vue b/ui/src/App.vue index fa75a8e635c3..f7aa55892e53 100644 --- a/ui/src/App.vue +++ b/ui/src/App.vue @@ -36,7 +36,12 @@ export default { } }, created () { - window.less.modifyVars(this.$config.theme) + const userThemeSetting = this.$store.getters.themeSetting || {} + if (Object.keys(userThemeSetting).length === 0) { + window.less.modifyVars(this.$config.theme) + } else { + window.less.modifyVars(userThemeSetting) + } console.log('config and theme applied') } } diff --git a/ui/src/assets/icons/dark.svg b/ui/src/assets/icons/dark.svg new file mode 100644 index 000000000000..9190c1d3bf80 --- /dev/null +++ b/ui/src/assets/icons/dark.svg @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ui/src/assets/icons/light.svg b/ui/src/assets/icons/light.svg new file mode 100644 index 000000000000..fbb1000c1d69 --- /dev/null +++ b/ui/src/assets/icons/light.svg @@ -0,0 +1,40 @@ + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ui/src/components/CheckBoxInputPair.vue b/ui/src/components/CheckBoxInputPair.vue new file mode 100644 index 000000000000..fee6f4d52ea2 --- /dev/null +++ b/ui/src/components/CheckBoxInputPair.vue @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + + + + diff --git a/ui/src/components/CheckBoxSelectPair.vue b/ui/src/components/CheckBoxSelectPair.vue index ec6c6fa16c81..29035e6b7d76 100644 --- a/ui/src/components/CheckBoxSelectPair.vue +++ b/ui/src/components/CheckBoxSelectPair.vue @@ -16,26 +16,39 @@ // under the License. @@ -44,6 +57,10 @@ export default { name: 'CheckBoxSelectPair', props: { + layout: { + type: String, + default: 'horizontal' + }, resourceKey: { type: String, required: true @@ -56,6 +73,10 @@ export default { type: String, default: '' }, + defaultCheckBoxValue: { + type: Boolean, + default: false + }, selectOptions: { type: Array, required: true @@ -67,12 +88,30 @@ export default { selectDecorator: { type: String, default: '' + }, + reversed: { + type: Boolean, + default: false } }, data () { return { checked: false, - selectedOption: '' + selectedOption: null + } + }, + created () { + this.checked = this.defaultCheckBoxValue + }, + computed: { + selectSource () { + return this.selectOptions.map(item => { + var option = { ...item } + if (!('id' in option)) { + option.id = option.name + } + return option + }) } }, methods: { @@ -80,30 +119,18 @@ export default { return array !== null && array !== undefined && Array.isArray(array) && array.length > 0 }, getSelectInitialValue () { - const provider = this.selectOptions?.filter(x => x.enabled)?.[0]?.name || '' - this.handleSelectChange(provider) - return provider 
+ const initialValue = this.selectSource?.filter(x => x.enabled !== false)?.[0]?.id || '' + this.handleSelectChange(initialValue) + return initialValue }, handleCheckChange (e) { this.checked = e.target.checked - this.$emit('handle-checkpair-change', this.resourceKey, this.checked, '') + this.$emit('handle-checkselectpair-change', this.resourceKey, this.checked, this.selectedOption) }, handleSelectChange (val) { this.selectedOption = val - this.$emit('handle-checkpair-change', this.resourceKey, this.checked, val) + this.$emit('handle-checkselectpair-change', this.resourceKey, this.checked, this.selectedOption) } } } - - diff --git a/ui/src/components/header/HeaderNotice.vue b/ui/src/components/header/HeaderNotice.vue index 03a5acd2017a..fb6b3c51e5b7 100644 --- a/ui/src/components/header/HeaderNotice.vue +++ b/ui/src/components/header/HeaderNotice.vue @@ -32,16 +32,19 @@ {{ $t('label.clear.list') }} - - - + + + + {{ getResourceName(notice.description, "name") + ' - ' }} + {{ getResourceName(notice.description, "msg") }} + {{ notice.description }} - + @@ -49,7 +52,6 @@ diff --git a/ui/src/components/view/BulkActionView.vue b/ui/src/components/view/BulkActionView.vue new file mode 100644 index 000000000000..acdc79961515 --- /dev/null +++ b/ui/src/components/view/BulkActionView.vue @@ -0,0 +1,192 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + + + + diff --git a/ui/src/components/view/DedicateData.vue b/ui/src/components/view/DedicateData.vue index af50eac2b41b..0818c70f1515 100644 --- a/ui/src/components/view/DedicateData.vue +++ b/ui/src/components/view/DedicateData.vue @@ -175,20 +175,13 @@ export default { }).then(response => { this.$pollJob({ jobId: response.releasededicatedzoneresponse.jobid, + title: this.$t('label.release.dedicated.zone'), + description: this.resource.id, successMessage: this.$t('message.dedicated.zone.released'), successMethod: () => { - this.parentFetchData() this.dedicatedDomainId = null - this.$store.dispatch('AddAsyncJob', { - title: this.$t('message.dedicated.zone.released'), - jobid: response.releasededicatedzoneresponse.jobid, - status: 'progress' - }) }, errorMessage: this.$t('error.release.dedicate.zone'), - errorMethod: () => { - this.parentFetchData() - }, loadingMessage: this.$t('message.releasing.dedicated.zone'), catchMessage: this.$t('error.fetching.async.job.result'), catchMethod: () => { @@ -205,20 +198,13 @@ export default { }).then(response => { this.$pollJob({ jobId: response.releasededicatedpodresponse.jobid, + title: this.$t('label.release.dedicated.pod'), + description: this.resource.id, successMessage: this.$t('message.pod.dedication.released'), successMethod: () => { - this.parentFetchData() this.dedicatedDomainId = null - this.$store.dispatch('AddAsyncJob', { - title: this.$t('message.pod.dedication.released'), - jobid: response.releasededicatedpodresponse.jobid, - status: 'progress' - }) }, errorMessage: 
this.$t('error.release.dedicate.pod'), - errorMethod: () => { - this.parentFetchData() - }, loadingMessage: this.$t('message.releasing.dedicated.pod'), catchMessage: this.$t('error.fetching.async.job.result'), catchMethod: () => { @@ -235,20 +221,13 @@ export default { }).then(response => { this.$pollJob({ jobId: response.releasededicatedclusterresponse.jobid, + title: this.$t('label.release.dedicated.cluster'), + description: this.resource.id, successMessage: this.$t('message.cluster.dedication.released'), successMethod: () => { - this.parentFetchData() this.dedicatedDomainId = null - this.$store.dispatch('AddAsyncJob', { - title: this.$t('message.cluster.dedication.released'), - jobid: response.releasededicatedclusterresponse.jobid, - status: 'progress' - }) }, errorMessage: this.$t('error.release.dedicate.cluster'), - errorMethod: () => { - this.parentFetchData() - }, loadingMessage: this.$t('message.releasing.dedicated.cluster'), catchMessage: this.$t('error.fetching.async.job.result'), catchMethod: () => { @@ -265,20 +244,13 @@ export default { }).then(response => { this.$pollJob({ jobId: response.releasededicatedhostresponse.jobid, + title: this.$t('label.release.dedicated.host'), + description: this.resource.id, successMessage: this.$t('message.host.dedication.released'), successMethod: () => { - this.parentFetchData() this.dedicatedDomainId = null - this.$store.dispatch('AddAsyncJob', { - title: this.$t('message.host.dedication.released'), - jobid: response.releasededicatedhostresponse.jobid, - status: 'progress' - }) }, errorMessage: this.$t('error.release.dedicate.host'), - errorMethod: () => { - this.parentFetchData() - }, loadingMessage: this.$t('message.releasing.dedicated.host'), catchMessage: this.$t('error.fetching.async.job.result'), catchMethod: () => { diff --git a/ui/src/components/view/DedicateModal.vue b/ui/src/components/view/DedicateModal.vue index 9443e42cb1ac..3d234885c233 100644 --- a/ui/src/components/view/DedicateModal.vue +++ 
b/ui/src/components/view/DedicateModal.vue @@ -18,16 +18,20 @@ @@ -63,7 +67,8 @@ export default { dedicatedDomainModal: false, domainId: null, dedicatedAccount: null, - domainError: false + domainError: false, + isSubmitted: false } }, watch: { @@ -93,24 +98,20 @@ export default { }).then(response => { this.$pollJob({ jobId: response.dedicatezoneresponse.jobid, - successMessage: this.$t('label.zone.dedicated'), + title: this.$t('label.dedicate.zone'), + description: `${this.$t('label.domain.id')} : ${this.domainId}`, + successMessage: `${this.$t('label.zone.dedicated')}`, successMethod: () => { - this.parentFetchData() this.fetchParentData() this.dedicatedDomainId = this.domainId this.dedicatedDomainModal = false - this.$store.dispatch('AddAsyncJob', { - title: this.$t('label.zone.dedicated'), - jobid: response.dedicatezoneresponse.jobid, - description: `${this.$t('label.domain.id')} : ${this.dedicatedDomainId}`, - status: 'progress' - }) + this.isSubmitted = false }, errorMessage: this.$t('error.dedicate.zone.failed'), errorMethod: () => { - this.parentFetchData() this.fetchParentData() this.dedicatedDomainModal = false + this.isSubmitted = false }, loadingMessage: this.$t('message.dedicating.zone'), catchMessage: this.$t('error.fetching.async.job.result'), @@ -118,11 +119,13 @@ export default { this.parentFetchData() this.fetchParentData() this.dedicatedDomainModal = false + this.isSubmitted = false } }) }).catch(error => { this.$notifyError(error) this.dedicatedDomainModal = false + this.isSubmitted = false }) }, dedicatePod () { @@ -137,24 +140,20 @@ export default { }).then(response => { this.$pollJob({ jobId: response.dedicatepodresponse.jobid, + title: this.$t('label.dedicate.pod'), + description: `${this.$t('label.domain.id')} : ${this.domainId}`, successMessage: this.$t('label.pod.dedicated'), successMethod: () => { - this.parentFetchData() this.fetchParentData() this.dedicatedDomainId = this.domainId this.dedicatedDomainModal = false - 
this.$store.dispatch('AddAsyncJob', { - title: this.$t('label.pod.dedicated'), - jobid: response.dedicatepodresponse.jobid, - description: `${this.$t('label.domainid')}: ${this.dedicatedDomainId}`, - status: 'progress' - }) + this.isSubmitted = false }, errorMessage: this.$t('error.dedicate.pod.failed'), errorMethod: () => { - this.parentFetchData() this.fetchParentData() this.dedicatedDomainModal = false + this.isSubmitted = false }, loadingMessage: this.$t('message.dedicating.pod'), catchMessage: this.$t('error.fetching.async.job.result'), @@ -162,11 +161,13 @@ export default { this.parentFetchData() this.fetchParentData() this.dedicatedDomainModal = false + this.isSubmitted = false } }) }).catch(error => { this.$notifyError(error) this.dedicatedDomainModal = false + this.isSubmitted = false }) }, dedicateCluster () { @@ -181,24 +182,20 @@ export default { }).then(response => { this.$pollJob({ jobId: response.dedicateclusterresponse.jobid, + title: this.$t('label.dedicate.cluster'), + description: `${this.$t('label.domain.id')} : ${this.domainId}`, successMessage: this.$t('message.cluster.dedicated'), successMethod: () => { - this.parentFetchData() this.fetchParentData() this.dedicatedDomainId = this.domainId this.dedicatedDomainModal = false - this.$store.dispatch('AddAsyncJob', { - title: this.$t('message.cluster.dedicated'), - jobid: response.dedicateclusterresponse.jobid, - description: `${this.$t('label.domainid')}: ${this.dedicatedDomainId}`, - status: 'progress' - }) + this.isSubmitted = false }, errorMessage: this.$t('error.dedicate.cluster.failed'), errorMethod: () => { - this.parentFetchData() this.fetchParentData() this.dedicatedDomainModal = false + this.isSubmitted = false }, loadingMessage: this.$t('message.dedicating.cluster'), catchMessage: this.$t('error.fetching.async.job.result'), @@ -206,11 +203,13 @@ export default { this.parentFetchData() this.fetchParentData() this.dedicatedDomainModal = false + this.isSubmitted = false } }) }).catch(error 
=> { this.$notifyError(error) this.dedicatedDomainModal = false + this.isSubmitted = false }) }, dedicateHost () { @@ -225,24 +224,20 @@ export default { }).then(response => { this.$pollJob({ jobId: response.dedicatehostresponse.jobid, + title: this.$t('label.dedicate.host'), + description: `${this.$t('label.domain.id')} : ${this.domainId}`, successMessage: this.$t('message.host.dedicated'), successMethod: () => { - this.parentFetchData() this.fetchParentData() this.dedicatedDomainId = this.domainId this.dedicatedDomainModal = false - this.$store.dispatch('AddAsyncJob', { - title: this.$t('message.host.dedicated'), - jobid: response.dedicatehostresponse.jobid, - description: `${this.$t('label.domainid')}: ${this.dedicatedDomainId}`, - status: 'progress' - }) + this.isSubmitted = false }, errorMessage: this.$t('error.dedicate.host.failed'), errorMethod: () => { - this.parentFetchData() this.fetchParentData() this.dedicatedDomainModal = false + this.isSubmitted = false }, loadingMessage: this.$t('message.dedicating.host'), catchMessage: this.$t('error.fetching.async.job.result'), @@ -250,14 +245,20 @@ export default { this.parentFetchData() this.fetchParentData() this.dedicatedDomainModal = false + this.isSubmitted = false } }) }).catch(error => { this.$notifyError(error) this.dedicatedDomainModal = false + this.isSubmitted = false }) }, handleDedicateForm () { + if (this.isSubmitted) { + return + } + this.isSubmitted = true if (this.$route.meta.name === 'zone') { this.dedicateZone() } diff --git a/ui/src/components/view/DetailSettings.vue b/ui/src/components/view/DetailSettings.vue index 302fd63af780..2588c11fb2e4 100644 --- a/ui/src/components/view/DetailSettings.vue +++ b/ui/src/components/view/DetailSettings.vue @@ -44,7 +44,11 @@ :dataSource="Object.keys(detailOptions)" :placeholder="$t('label.name')" @change="e => onAddInputChange(e, 'newKey')" /> - + diff --git a/ui/src/components/view/InfoCard.vue b/ui/src/components/view/InfoCard.vue index 
b2a8dd3328c7..8c3083a1d2dd 100644 --- a/ui/src/components/view/InfoCard.vue +++ b/ui/src/components/view/InfoCard.vue @@ -280,7 +280,7 @@ :key="eth.id" style="margin-left: -24px; margin-top: 5px;"> eth{{ index }} {{ eth.ipaddress }} - ({{ eth.networkname }}) + ({{ eth.networkname }}) {{ $t('label.default') }} @@ -311,7 +311,7 @@ type="environment" @click="$message.success(`${$t('label.copied.clipboard')} : ${ ipaddress }`)" v-clipboard:copy="ipaddress" /> - {{ ipaddress }} + {{ ipaddress }} {{ ipaddress }} @@ -329,7 +329,7 @@
{{ $t('label.project') }}
- {{ resource.project || resource.projectname || resource.projectid }} + {{ resource.project || resource.projectname || resource.projectid }} {{ resource.projectname }}
@@ -412,10 +412,10 @@
- {{ resource.isoname || resource.isoid }} + {{ resource.isodisplaytext || resource.isoname || resource.isoid }}
- {{ resource.templatename || resource.templateid }} + {{ resource.templatedisplaytext || resource.templatename || resource.templateid }}
@@ -423,7 +423,7 @@
{{ $t('label.serviceofferingname') }}
- {{ resource.serviceofferingname || resource.serviceofferingid }} + {{ resource.serviceofferingname || resource.serviceofferingid }} {{ resource.serviceofferingname || resource.serviceofferingid }} {{ resource.serviceofferingname || resource.serviceofferingid }}
@@ -432,21 +432,21 @@
{{ $t('label.diskoffering') }}
- {{ resource.diskofferingname || resource.diskofferingid }} + {{ resource.diskofferingname || resource.diskofferingid }} {{ resource.diskofferingname || resource.diskofferingid }}
{{ $t('label.backupofferingid') }}
- {{ resource.backupofferingname || resource.backupofferingid }} + {{ resource.backupofferingname || resource.backupofferingid }} {{ resource.backupofferingname || resource.backupofferingid }}
{{ $t('label.networkofferingid') }}
- {{ resource.networkofferingname || resource.networkofferingid }} + {{ resource.networkofferingname || resource.networkofferingid }} {{ resource.networkofferingname || resource.networkofferingid }}
@@ -454,7 +454,7 @@
{{ $t('label.vpcoffering') }}
- {{ resource.vpcofferingname || resource.vpcofferingid }} + {{ resource.vpcofferingname || resource.vpcofferingid }} {{ resource.vpcofferingname || resource.vpcofferingid }}
@@ -462,7 +462,7 @@
{{ $t('label.storagepool') }}
- {{ resource.storage || resource.storageid }} + {{ resource.storage || resource.storageid }} {{ resource.storage || resource.storageid }} {{ resource.storagetype }} @@ -473,7 +473,7 @@
{{ $t('label.hostname') }}
- {{ resource.hostname || resource.hostid }} + {{ resource.hostname || resource.hostid }} {{ resource.hostname || resource.hostid }}
@@ -481,7 +481,7 @@
{{ $t('label.clusterid') }}
- {{ resource.clustername || resource.cluster || resource.clusterid }} + {{ resource.clustername || resource.cluster || resource.clusterid }} {{ resource.clustername || resource.cluster || resource.clusterid }}
@@ -489,7 +489,7 @@
{{ $t('label.podid') }}
- {{ resource.podname || resource.pod || resource.podid }} + {{ resource.podname || resource.pod || resource.podid }} {{ resource.podname || resource.pod || resource.podid }}
@@ -497,7 +497,7 @@
{{ $t('label.zone') }}
- {{ resource.zone || resource.zonename || resource.zoneid }} + {{ resource.zone || resource.zonename || resource.zoneid }} {{ resource.zone || resource.zonename || resource.zoneid }}
@@ -508,7 +508,7 @@