From d230582d482dd2880d1bd7dc7b4634266995e6ed Mon Sep 17 00:00:00 2001 From: Harikrishna Patnala Date: Wed, 12 Feb 2025 17:14:01 +0530 Subject: [PATCH 01/14] Introducing Storage Access Groups to define the host and storage pool connections --- .../configuration/ConfigurationService.java | 3 +- api/src/main/java/com/cloud/dc/Pod.java | 2 + .../main/java/com/cloud/event/EventTypes.java | 1 + api/src/main/java/com/cloud/host/Host.java | 2 + api/src/main/java/com/cloud/org/Cluster.java | 2 + .../com/cloud/resource/ResourceService.java | 7 + .../com/cloud/storage/StorageService.java | 3 + .../apache/cloudstack/api/ApiConstants.java | 1 + .../command/admin/cluster/AddClusterCmd.java | 10 + .../admin/cluster/UpdateClusterCmd.java | 2 +- .../api/command/admin/host/AddHostCmd.java | 10 + .../api/command/admin/pod/CreatePodCmd.java | 14 +- .../storage/ConfigureStorageAccessCmd.java | 134 +++ .../admin/storage/CreateStoragePoolCmd.java | 8 + .../admin/storage/ListStoragePoolsCmd.java | 2 +- .../api/command/admin/zone/CreateZoneCmd.java | 11 + .../api/response/ClusterResponse.java | 36 + .../cloudstack/api/response/HostResponse.java | 48 ++ .../cloudstack/api/response/PodResponse.java | 24 + .../api/response/ServiceOfferingResponse.java | 2 +- .../api/response/StoragePoolResponse.java | 12 + .../cloudstack/api/response/ZoneResponse.java | 12 + .../agent/test/CheckOnHostCommandTest.java | 5 + .../storage/PrimaryDataStoreParameters.java | 16 + .../api/storage/StoragePoolAllocator.java | 1 + .../configuration/ConfigurationManager.java | 9 +- .../com/cloud/resource/ResourceManager.java | 10 + .../com/cloud/storage/StorageManager.java | 5 + .../entity/api/db/EngineClusterVO.java | 8 + .../entity/api/db/EngineHostPodVO.java | 8 + .../entity/api/db/EngineHostVO.java | 12 + .../src/main/java/com/cloud/dc/ClusterVO.java | 12 + .../main/java/com/cloud/dc/DataCenterVO.java | 11 + .../src/main/java/com/cloud/dc/HostPodVO.java | 11 + .../src/main/java/com/cloud/host/HostVO.java | 12 + 
.../main/java/com/cloud/host/dao/HostDao.java | 8 + .../java/com/cloud/host/dao/HostDaoImpl.java | 62 +- .../StoragePoolAndAccessGroupMapVO.java | 64 ++ .../dao/StoragePoolAndAccessGroupMapDao.java | 30 + .../StoragePoolAndAccessGroupMapDaoImpl.java | 80 ++ .../datastore/db/PrimaryDataStoreDao.java | 18 +- .../datastore/db/PrimaryDataStoreDaoImpl.java | 207 ++++- ...s-between-management-and-usage-context.xml | 1 + .../META-INF/db/schema-42010to42100.sql | 16 + .../db/views/cloud.data_center_view.sql | 1 + .../META-INF/db/views/cloud.host_view.sql | 4 + .../db/views/cloud.storage_pool_view.sql | 2 + .../StorageSystemDataMotionStrategy.java | 74 +- .../AbstractStoragePoolAllocator.java | 50 +- .../ClusterScopeStoragePoolAllocator.java | 6 +- .../ZoneWideStoragePoolAllocator.java | 2 +- .../datastore/PrimaryDataStoreHelper.java | 19 +- .../management/ManagementServerMock.java | 2 +- .../allocator/RandomStoragePoolAllocator.java | 2 +- .../ElastistorPrimaryDataStoreLifeCycle.java | 26 +- .../DateraPrimaryDataStoreLifeCycle.java | 40 +- ...oudStackPrimaryDataStoreLifeCycleImpl.java | 29 +- ...tackPrimaryDataStoreLifeCycleImplTest.java | 12 +- .../LinstorPrimaryDataStoreLifeCycleImpl.java | 26 +- .../NexentaPrimaryDataStoreLifeCycle.java | 19 +- .../ScaleIOPrimaryDataStoreLifeCycle.java | 78 +- .../ScaleIOPrimaryDataStoreLifeCycleTest.java | 16 +- .../SolidFirePrimaryDataStoreLifeCycle.java | 26 +- ...idFireSharedPrimaryDataStoreLifeCycle.java | 20 +- .../StorPoolPrimaryDataStoreLifeCycle.java | 10 +- .../java/com/cloud/api/ApiResponseHelper.java | 10 +- .../com/cloud/api/query/QueryManagerImpl.java | 3 +- .../api/query/dao/DataCenterJoinDaoImpl.java | 1 + .../cloud/api/query/dao/HostJoinDaoImpl.java | 5 + .../api/query/dao/StoragePoolJoinDaoImpl.java | 18 + .../cloud/api/query/vo/DataCenterJoinVO.java | 9 +- .../com/cloud/api/query/vo/HostJoinVO.java | 28 + .../cloud/api/query/vo/StoragePoolJoinVO.java | 7 + .../ConfigurationManagerImpl.java | 23 +- 
.../deploy/DeploymentPlanningManagerImpl.java | 9 +- .../cloud/resource/ResourceManagerImpl.java | 791 +++++++++++++++++- .../cloud/server/ManagementServerImpl.java | 2 + .../com/cloud/storage/StorageManagerImpl.java | 384 ++++++++- .../cloud/storage/VolumeApiServiceImpl.java | 15 + .../storage/listener/StoragePoolMonitor.java | 68 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 49 +- .../ConfigurationManagerTest.java | 2 +- .../resource/MockResourceManagerImpl.java | 40 + .../resource/ResourceManagerImplTest.java | 371 ++++++++ .../cloud/storage/StorageManagerImplTest.java | 465 +++++++++- .../storage/VolumeApiServiceImplTest.java | 2 + .../listener/StoragePoolMonitorTest.java | 31 +- .../com/cloud/vm/UserVmManagerImplTest.java | 131 ++- .../vpc/MockConfigurationManagerImpl.java | 8 +- .../ChildTestConfiguration.java | 6 + .../test/resources/createNetworkOffering.xml | 1 + tools/apidoc/gen_toc.py | 1 + ui/public/locales/en.json | 10 + ui/src/config/section/infra/clusters.js | 10 +- ui/src/config/section/infra/hosts.js | 2 +- ui/src/config/section/infra/pods.js | 5 +- .../config/section/infra/primaryStorages.js | 2 +- ui/src/config/section/infra/zones.js | 6 +- ui/src/config/section/offering.js | 4 +- ui/src/views/AutogenView.vue | 4 +- ui/src/views/infra/ClusterUpdate.vue | 196 +++++ ui/src/views/infra/HostInfo.vue | 8 + ui/src/views/infra/HostUpdate.vue | 31 +- ui/src/views/infra/PodUpdate.vue | 175 ++++ ui/src/views/infra/UpdatePrimaryStorage.vue | 27 + ui/src/views/infra/ZoneUpdate.vue | 229 +++++ .../java/com/cloud/utils/StringUtils.java | 15 + 107 files changed, 4308 insertions(+), 312 deletions(-) create mode 100644 api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ConfigureStorageAccessCmd.java create mode 100644 engine/schema/src/main/java/com/cloud/storage/StoragePoolAndAccessGroupMapVO.java create mode 100644 engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDao.java create mode 100644 
engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDaoImpl.java create mode 100644 ui/src/views/infra/ClusterUpdate.vue create mode 100644 ui/src/views/infra/PodUpdate.vue create mode 100644 ui/src/views/infra/ZoneUpdate.vue diff --git a/api/src/main/java/com/cloud/configuration/ConfigurationService.java b/api/src/main/java/com/cloud/configuration/ConfigurationService.java index 97d4b42974b3..13a44ef05b05 100644 --- a/api/src/main/java/com/cloud/configuration/ConfigurationService.java +++ b/api/src/main/java/com/cloud/configuration/ConfigurationService.java @@ -201,11 +201,12 @@ public interface ConfigurationService { * TODO * @param allocationState * TODO + * @param storageAccessGroups * @return the new pod if successful, null otherwise * @throws * @throws */ - Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState); + Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState, List storageAccessGroups); /** * Creates a mutual exclusive IP range in the pod with same gateway, netmask. 
diff --git a/api/src/main/java/com/cloud/dc/Pod.java b/api/src/main/java/com/cloud/dc/Pod.java index 1cbab36f3bd4..17c5b615d4b6 100644 --- a/api/src/main/java/com/cloud/dc/Pod.java +++ b/api/src/main/java/com/cloud/dc/Pod.java @@ -43,4 +43,6 @@ public interface Pod extends InfrastructureEntity, Grouping, Identity, InternalI AllocationState getAllocationState(); boolean getExternalDhcp(); + + String getStorageAccessGroups(); } diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 815bd2363d5a..e68da0f51821 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -465,6 +465,7 @@ public class EventTypes { public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS"; public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS"; public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL"; + public static final String EVENT_CONFIGURE_STORAGE_ACCESS = "CONFIGURE.STORAGE.ACCESS"; public static final String EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE"; // VPN diff --git a/api/src/main/java/com/cloud/host/Host.java b/api/src/main/java/com/cloud/host/Host.java index afac6df56312..8b9aa4ed7917 100644 --- a/api/src/main/java/com/cloud/host/Host.java +++ b/api/src/main/java/com/cloud/host/Host.java @@ -213,4 +213,6 @@ public static String[] toStrings(Host.Type... 
types) { ResourceState getResourceState(); CPU.CPUArch getArch(); + + String getStorageAccessGroups(); } diff --git a/api/src/main/java/com/cloud/org/Cluster.java b/api/src/main/java/com/cloud/org/Cluster.java index 5124168084c6..b0aa6bb04cf2 100644 --- a/api/src/main/java/com/cloud/org/Cluster.java +++ b/api/src/main/java/com/cloud/org/Cluster.java @@ -41,4 +41,6 @@ public static enum ClusterType { ManagedState getManagedState(); CPU.CPUArch getArch(); + + String getStorageAccessGroups(); } diff --git a/api/src/main/java/com/cloud/resource/ResourceService.java b/api/src/main/java/com/cloud/resource/ResourceService.java index 562c3c418df1..3cdf8fc64e99 100644 --- a/api/src/main/java/com/cloud/resource/ResourceService.java +++ b/api/src/main/java/com/cloud/resource/ResourceService.java @@ -95,4 +95,11 @@ public interface ResourceService { boolean releaseHostReservation(Long hostId); + void updatePodStorageAccessGroups(long podId, List newStorageAccessGroups); + + void updateZoneStorageAccessGroups(long zoneId, List newStorageAccessGroups); + + void updateClusterStorageAccessGroups(Long clusterId, List newStorageAccessGroups); + + void updateHostStorageAccessGroups(Long hostId, List newStorageAccessGroups); } diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java index b8df75cd3e4c..6f7b62911b62 100644 --- a/api/src/main/java/com/cloud/storage/StorageService.java +++ b/api/src/main/java/com/cloud/storage/StorageService.java @@ -22,6 +22,7 @@ import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; +import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import 
org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; @@ -99,6 +100,8 @@ public interface StorageService { StoragePool disablePrimaryStoragePool(Long id); + boolean configureStorageAccess(ConfigureStorageAccessCmd cmd); + StoragePool getStoragePool(long id); boolean deleteImageStore(DeleteImageStoreCmd cmd); diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index acce2bc77264..c527706ac4e9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -496,6 +496,7 @@ public class ApiConstants { public static final String SYSTEM_VM_TYPE = "systemvmtype"; public static final String TAGS = "tags"; public static final String STORAGE_TAGS = "storagetags"; + public static final String STORAGE_ACCESS_GROUPS = "storageaccessgroups"; public static final String SUCCESS = "success"; public static final String SUITABLE_FOR_VM = "suitableforvirtualmachine"; public static final String SUPPORTS_STORAGE_SNAPSHOT = "supportsstoragesnapshot"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java index 69cb43ce40ec..a7bf19cb409d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java @@ -118,6 +118,12 @@ public class AddClusterCmd extends BaseCmd { private String ovm3cluster; @Parameter(name = ApiConstants.OVM3_VIP, type = CommandType.STRING, required = false, description = "Ovm3 vip to use for pool (and cluster)") private String ovm3vip; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, + type = CommandType.LIST, collectionType = CommandType.STRING, + description = "comma separated list of storage access groups for the hosts in the 
cluster", + since = "4.18.1") + private List storageAccessGroups; + public String getOvm3Pool() { return ovm3pool; } @@ -192,6 +198,10 @@ public void setClusterType(String type) { this.clusterType = type; } + public List getStorageAccessGroups() { + return storageAccessGroups; + } + @Override public long getEntityOwnerId() { return Account.ACCOUNT_ID_SYSTEM; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java index c4ee87380ed9..816285e34307 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java @@ -130,7 +130,7 @@ public void execute() { } Cluster result = _resourceService.updateCluster(this); if (result != null) { - ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(cluster, false); + ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(result, false); clusterResponse.setResponseName(getCommandName()); this.setResponseObject(clusterResponse); } else { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java index ca27837aa881..fda847f50141 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java @@ -75,6 +75,12 @@ public class AddHostCmd extends BaseCmd { @Parameter(name = ApiConstants.HOST_TAGS, type = CommandType.LIST, collectionType = CommandType.STRING, description = "list of tags to be added to the host") private List hostTags; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, + type = CommandType.LIST, collectionType = CommandType.STRING, + description = "comma separated list of storage access groups 
for the host", + since = "4.18.1") + private List storageAccessGroups; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -115,6 +121,10 @@ public List getHostTags() { return hostTags; } + public List getStorageAccessGroups() { + return storageAccessGroups; + } + public String getAllocationState() { return allocationState; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java index c1d9a6db4296..e178a771217f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java @@ -30,6 +30,8 @@ import com.cloud.dc.Pod; import com.cloud.user.Account; +import java.util.List; + @APICommand(name = "createPod", description = "Creates a new Pod.", responseObject = PodResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreatePodCmd extends BaseCmd { @@ -63,6 +65,12 @@ public class CreatePodCmd extends BaseCmd { @Parameter(name = ApiConstants.ALLOCATION_STATE, type = CommandType.STRING, description = "Allocation state of this Pod for allocation of new resources") private String allocationState; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, + type = CommandType.LIST, collectionType = CommandType.STRING, + description = "comma separated list of storage access groups for the hosts in the pod", + since = "4.18.1") + private List storageAccessGroups; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -95,6 +103,10 @@ public String getAllocationState() { return allocationState; } + public List getStorageAccessGroups() { + return storageAccessGroups; + } + 
///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -111,7 +123,7 @@ public ApiCommandResourceType getApiResourceType() { @Override public void execute() { - Pod result = _configService.createPod(getZoneId(), getPodName(), getStartIp(), getEndIp(), getGateway(), getNetmask(), getAllocationState()); + Pod result = _configService.createPod(getZoneId(), getPodName(), getStartIp(), getEndIp(), getGateway(), getNetmask(), getAllocationState(), getStorageAccessGroups()); if (result != null) { PodResponse response = _responseGenerator.createPodResponse(result, false); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ConfigureStorageAccessCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ConfigureStorageAccessCmd.java new file mode 100644 index 000000000000..dfafd1b87030 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ConfigureStorageAccessCmd.java @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.storage; + +import java.util.List; + +import com.cloud.event.EventTypes; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.PodResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.api.response.ZoneResponse; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; + +import com.cloud.user.Account; + +@APICommand(name = "configureStorageAccess", description = "Configure the storage access groups on a zone/pod/cluster/host and storage, and accordingly manage the connections to the storage pools", responseObject = SuccessResponse.class, since = "4.20.1", + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +public class ConfigureStorageAccessCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "UUID of the zone") + private Long zoneId; + + @Parameter(name = ApiConstants.POD_ID, type = CommandType.UUID, entityType = PodResponse.class, description = "UUID of the pod") + private Long podId; + + @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "UUID of the cluster") + private Long clusterId; + + @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class,
description = "UUID of the host") + private Long hostId; + + @Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, description = "UUID of the Storage Pool") + private Long storageId; + + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, type = CommandType.LIST, collectionType = CommandType.STRING, + description = "comma separated list of storage access groups for connecting the storage pools and the hosts") + private List storageAccessGroups; + + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getZoneId() { + return zoneId; + } + + public Long getPodId() { + return podId; + } + + public Long getClusterId() { + return clusterId; + } + + public Long getHostId() { + return hostId; + } + + public Long getStorageId() { + return storageId; + } + + public List getStorageAccessGroups() { + return storageAccessGroups; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.StoragePool; + } + + @Override + public void execute() { + try { + boolean result = _storageService.configureStorageAccess(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to configure storage access"); + } + } catch (Exception e) { + logger.debug("Failed to configure storage access ", e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to configure storage access, " + e.getMessage()); + } + } + + @Override + public String getEventType() { + 
return EventTypes.EVENT_CONFIGURE_STORAGE_ACCESS; + } + + @Override + public String getEventDescription() { + return "configuring storage access groups"; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java index 75813a7aabf5..61c9e526718c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java @@ -61,6 +61,10 @@ public class CreateStoragePoolCmd extends BaseCmd { @Parameter(name = ApiConstants.TAGS, type = CommandType.STRING, description = "the tags for the storage pool") private String tags; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, type = CommandType.STRING, + description = "comma separated list of storage access groups for connecting to hosts having those specific groups", since = "4.18.1") + private String storageAccessGroups; + @Parameter(name = ApiConstants.URL, type = CommandType.STRING, required = true, description = "the URL of the storage pool") private String url; @@ -115,6 +119,10 @@ public String getTags() { return tags; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + public String getUrl() { return url; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java index 57a87939b6bd..2aace9abdc94 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java @@ -41,7 +41,7 @@ public class ListStoragePoolsCmd extends BaseListCmd { @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, - 
description = "list storage pools belongig to the specific cluster") + description = "list storage pools belonging to the specific cluster") private Long clusterId; @Parameter(name = ApiConstants.IP_ADDRESS, type = CommandType.STRING, description = "the IP address for the storage pool") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java index 24660e41ed9b..f0cf6372dc33 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java @@ -31,6 +31,8 @@ import com.cloud.dc.DataCenter; import com.cloud.user.Account; +import java.util.List; + @APICommand(name = "createZone", description = "Creates a Zone.", responseObject = ZoneResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateZoneCmd extends BaseCmd { @@ -88,6 +90,11 @@ public class CreateZoneCmd extends BaseCmd { @Parameter(name = ApiConstants.IS_EDGE, type = CommandType.BOOLEAN, description = "true if the zone is an edge zone, false otherwise", since = "4.18.0") private Boolean isEdge; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, + type = CommandType.LIST, collectionType = CommandType.STRING, + description = "comma separated list of storage access groups for the hosts in the zone", + since = "4.18.1") + private List storageAccessGroups; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -162,6 +169,10 @@ public boolean isEdge() { return isEdge; } + public List getStorageAccessGroups() { + return storageAccessGroups; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java 
b/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java index 1c69849239f9..4a68e76a3bc5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java @@ -95,6 +95,18 @@ public class ClusterResponse extends BaseResponseWithAnnotations { @Param(description = "CPU Arch of the hosts in the cluster", since = "4.20") private String arch; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups for the host", since = "4.20.1") + private String storageAccessGroups; + + @SerializedName("podstorageaccessgroups") + @Param(description = "comma-separated list of storage access groups on the pod", since = "4.20.1") + private String podStorageAccessGroups; + + @SerializedName("zonestorageaccessgroups") + @Param(description = "comma-separated list of storage access groups on the zone", since = "4.20.1") + private String zoneStorageAccessGroups; + public String getId() { return id; } @@ -259,4 +271,28 @@ public void setArch(String arch) { public String getArch() { return arch; } + + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + + public String getPodStorageAccessGroups() { + return podStorageAccessGroups; + } + + public void setPodStorageAccessGroups(String podStorageAccessGroups) { + this.podStorageAccessGroups = podStorageAccessGroups; + } + + public String getZoneStorageAccessGroups() { + return zoneStorageAccessGroups; + } + + public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) { + this.zoneStorageAccessGroups = zoneStorageAccessGroups; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java index 
091d6391b313..8d4047ca6ba8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java @@ -302,6 +302,22 @@ public class HostResponse extends BaseResponseWithAnnotations { @Param(description = "CPU Arch of the host", since = "4.20") private String arch; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups for the host", since = "4.20.1") + private String storageAccessGroups; + + @SerializedName("clusterstorageaccessgroups") + @Param(description = "comma-separated list of storage access groups on the cluster", since = "4.20.1") + private String clusterStorageAccessGroups; + + @SerializedName("podstorageaccessgroups") + @Param(description = "comma-separated list of storage access groups on the pod", since = "4.20.1") + private String podStorageAccessGroups; + + @SerializedName("zonestorageaccessgroups") + @Param(description = "comma-separated list of storage access groups on the zone", since = "4.20.1") + private String zoneStorageAccessGroups; + @Override public String getObjectId() { return this.getId(); @@ -491,6 +507,38 @@ public void setHostTags(String hostTags) { this.hostTags = hostTags; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + + public String getClusterStorageAccessGroups() { + return clusterStorageAccessGroups; + } + + public void setClusterStorageAccessGroups(String clusterStorageAccessGroups) { + this.clusterStorageAccessGroups = clusterStorageAccessGroups; + } + + public String getPodStorageAccessGroups() { + return podStorageAccessGroups; + } + + public void setPodStorageAccessGroups(String podStorageAccessGroups) { + this.podStorageAccessGroups = podStorageAccessGroups; + } + + public String getZoneStorageAccessGroups() { + 
return zoneStorageAccessGroups; + } + + public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) { + this.zoneStorageAccessGroups = zoneStorageAccessGroups; + } + public String getExplicitHostTags() { return explicitHostTags; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/PodResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/PodResponse.java index 587fabfae8db..375795e4341f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/PodResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/PodResponse.java @@ -85,6 +85,14 @@ public class PodResponse extends BaseResponseWithAnnotations { @Param(description = "the capacity of the Pod", responseObject = CapacityResponse.class) private List capacities; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups for the pod", since = "4.18.1") + private String storageAccessGroups; + + @SerializedName("zonestorageaccessgroups") + @Param(description = "comma-separated list of storage access groups on the zone", since = "4.18.1") + private String zoneStorageAccessGroups; + public String getId() { return id; } @@ -184,4 +192,20 @@ public List getCapacities() { public void setCapacities(List capacities) { this.capacities = capacities; } + + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + + public String getZoneStorageAccessGroups() { + return zoneStorageAccessGroups; + } + + public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) { + this.zoneStorageAccessGroups = zoneStorageAccessGroups; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java index 0622b936f6e0..4e71d39cb8d9 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java @@ -80,7 +80,7 @@ public class ServiceOfferingResponse extends BaseResponseWithAnnotations { @Param(description = "true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk") private Boolean isVolatile; - @SerializedName("storagetags") + @SerializedName(ApiConstants.STORAGE_TAGS) @Param(description = "the tags for the service offering") private String tags; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java index 51efb6d42cb1..567db59a449f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java @@ -109,6 +109,10 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { @Param(description = "the tags for the storage pool") private String tags; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "the storage access groups for the storage pool") + private String storageAccessGroups; + @SerializedName(ApiConstants.NFS_MOUNT_OPTIONS) @Param(description = "the nfs mount options for the storage pool", since = "4.19.1") private String nfsMountOpts; @@ -344,6 +348,14 @@ public void setTags(String tags) { this.tags = tags; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + public Boolean getIsTagARule() { return isTagARule; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java index 4a5279753a10..679918e37f16 
100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java @@ -161,6 +161,10 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso @Param(description = "true, if routed network/vpc is enabled", since = "4.20.1") private boolean routedModeEnabled = false; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups for the zone", since = "4.20.1") + private String storageAccessGroups; + public ZoneResponse() { tags = new LinkedHashSet(); @@ -402,6 +406,14 @@ public String getType() { return type; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + public void setNsxEnabled(boolean nsxEnabled) { this.nsxEnabled = nsxEnabled; } diff --git a/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java b/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java index be7563be045a..a696049608e5 100644 --- a/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java +++ b/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java @@ -284,6 +284,11 @@ public ResourceState getResourceState() { public CPU.CPUArch getArch() { return CPU.CPUArch.amd64; } + + @Override + public String getStorageAccessGroups() { + return null; + } }; CheckOnHostCommand cohc = new CheckOnHostCommand(host); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java index 1b18264df15d..adb77e69e90e 100644 --- 
a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java @@ -30,6 +30,7 @@ public class PrimaryDataStoreParameters { private String providerName; private Map details; private String tags; + private String storageAccessGroups; private StoragePoolType type; private HypervisorType hypervisorType; private String host; @@ -165,6 +166,21 @@ public void setTags(String tags) { this.tags = tags; } + /** + * @return the storageAccessGroups + */ + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + /** + * @param storageAccessGroups + * the storageAccessGroups to set + */ + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + /** * @return the details */ diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java index 6a78f6fe253f..9a2dc7346859 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java @@ -64,4 +64,5 @@ public interface StoragePoolAllocator extends Adapter { static int RETURN_UPTO_ALL = -1; List reorderPools(List pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh); + } diff --git a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java index 1694b19c33fd..f172bead7aab 100644 --- a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java +++ 
b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java @@ -147,12 +147,12 @@ public interface ConfigurationManager { * @param startIp * @param endIp * @param allocationState - * @param skipGatewayOverlapCheck - * (true if it is ok to not validate that gateway IP address overlap with Start/End IP of the POD) + * @param skipGatewayOverlapCheck (true if it is ok to not validate that gateway IP address overlap with Start/End IP of the POD) + * @param storageAccessGroups * @return Pod */ HostPodVO createPod(long userId, String podName, DataCenter zone, String gateway, String cidr, String startIp, String endIp, String allocationState, - boolean skipGatewayOverlapCheck); + boolean skipGatewayOverlapCheck, List storageAccessGroups); /** * Creates a new zone @@ -170,13 +170,14 @@ HostPodVO createPod(long userId, String podName, DataCenter zone, String gateway * @param isSecurityGroupEnabled * @param ip6Dns1 * @param ip6Dns2 + * @param storageAccessGroups * @return * @throws * @throws */ DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, String guestCidr, String domain, Long domainId, NetworkType zoneType, String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, - String ip6Dns2, boolean isEdge); + String ip6Dns2, boolean isEdge, List storageAccessGroups); /** * Deletes a VLAN from the database, along with all of its IP addresses. 
Will not delete VLANs that have allocated diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index 34309e942d3e..83f9768a62ac 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -21,6 +21,8 @@ import java.util.List; import java.util.Map; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; @@ -236,4 +238,12 @@ public interface ResourceManager extends ResourceService, Configurable { HostVO findOneRandomRunningHostByHypervisor(HypervisorType type, Long dcId); boolean cancelMaintenance(final long hostId); + + void updateStoragePoolConnectionsOnHosts(Long poolId, List storageAccessGroups); + + List getEligibleUpHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore); + + List getEligibleUpAndEnabledHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore); + + List getEligibleUpAndEnabledHostsInZoneForStorageConnection(DataStore dataStore, long zoneId, HypervisorType hypervisorType); } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 58db613c2539..3fc6d80befee 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -410,4 +410,9 @@ void connectHostsToPool(DataStore primaryStore, List hostIds, Scope scope, void validateChildDatastoresToBeAddedInUpState(StoragePoolVO datastoreClusterPool, List childDatastoreAnswerList); + boolean 
checkIfHostAndStoragePoolHasCommonStorageAccessGroups(Host host, StoragePool pool); + + Pair checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(StoragePool destPool, Volume volume); + + String[] getStorageAccessGroups(Long zoneId, Long podId, Long clusterId, Long hostId); } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java index 19b0e773cd01..39ab83fab600 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java @@ -114,6 +114,9 @@ public class EngineClusterVO implements EngineCluster, Identity { @Column(name = "engine_state", updatable = true, nullable = false, length = 32) protected State state = null; + @Column(name = "storage_access_groups") + String storageAccessGroups; + public EngineClusterVO() { clusterType = Cluster.ClusterType.CloudManaged; allocationState = Grouping.AllocationState.Enabled; @@ -176,6 +179,11 @@ public ManagedState getManagedState() { return managedState; } + @Override + public String getStorageAccessGroups() { + return storageAccessGroups; + } + public void setManagedState(ManagedState managedState) { this.managedState = managedState; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java index 95931d5b72d5..cd3f6b857a29 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java @@ -89,6 +89,9 @@ public class 
EngineHostPodVO implements EnginePod, Identity { @Temporal(value = TemporalType.TIMESTAMP) protected Date lastUpdated; + @Column(name = "storage_access_groups") + String storageAccessGroups; + /** * Note that state is intentionally missing the setter. Any updates to * the state machine needs to go through the DAO object because someone @@ -202,6 +205,11 @@ public boolean getExternalDhcp() { return externalDhcp; } + @Override + public String getStorageAccessGroups() { + return storageAccessGroups; + } + public void setExternalDhcp(boolean use) { externalDhcp = use; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java index 8ef2de3f74da..eec2b011b3e8 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java @@ -405,6 +405,9 @@ public void setHostTags(List hostTags) { @Column(name = "engine_state", updatable = true, nullable = false, length = 32) protected State orchestrationState = null; + @Column(name = "storage_access_groups") + private String storageAccessGroups = null; + public EngineHostVO(String guid) { this.guid = guid; this.status = Status.Creating; @@ -807,4 +810,13 @@ public State getOrchestrationState() { public PartitionType partitionType() { return PartitionType.Host; } + + @Override + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } } diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java b/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java index 434901ef5b3b..a18097db6d6c 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java 
+++ b/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java @@ -85,6 +85,10 @@ public class ClusterVO implements Cluster { @Column(name = "uuid") String uuid; + @Column(name = "storage_access_groups") + String storageAccessGroups; + + public ClusterVO() { clusterType = Cluster.ClusterType.CloudManaged; allocationState = Grouping.AllocationState.Enabled; @@ -215,6 +219,14 @@ public void setArch(String arch) { this.arch = arch; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + @Override public String toString() { return String.format("Cluster {id: \"%s\", name: \"%s\", uuid: \"%s\"}", id, name, uuid); diff --git a/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java b/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java index 827b72b58b05..9b24e51a1a8b 100644 --- a/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java @@ -142,6 +142,9 @@ public class DataCenterVO implements DataCenter { @Enumerated(value = EnumType.STRING) private DataCenter.Type type; + @Column(name = "storage_access_groups") + String storageAccessGroups; + @Override public String getDnsProvider() { return dnsProvider; @@ -485,6 +488,14 @@ public void setType(Type type) { this.type = type; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + @Override public String toString() { return String.format("Zone {\"id\": \"%s\", \"name\": \"%s\", \"uuid\": \"%s\"}", id, name, uuid); diff --git a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java index fdda38fbc393..99ebcf2346c5 100644 --- a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java +++ 
b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java @@ -71,6 +71,9 @@ public class HostPodVO implements Pod { @Column(name = "uuid") private String uuid; + @Column(name = "storage_access_groups") + String storageAccessGroups; + public HostPodVO(String name, long dcId, String gateway, String cidrAddress, int cidrSize, String description) { this.name = name; this.dataCenterId = dcId; @@ -199,6 +202,14 @@ public void setUuid(String uuid) { this.uuid = uuid; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + @Override public String toString() { return String.format("HostPod %s", diff --git a/engine/schema/src/main/java/com/cloud/host/HostVO.java b/engine/schema/src/main/java/com/cloud/host/HostVO.java index bd6768fa0ddb..d51b4eca0577 100644 --- a/engine/schema/src/main/java/com/cloud/host/HostVO.java +++ b/engine/schema/src/main/java/com/cloud/host/HostVO.java @@ -165,6 +165,9 @@ public class HostVO implements Host { @Column(name = "uuid") private String uuid; + @Column(name = "storage_access_groups") + String storageAccessGroups; + // This is a delayed load value. If the value is null, // then this field has not been loaded yet. // Call host dao to load it. 
@@ -357,6 +360,15 @@ public Boolean getIsTagARule() { return isTagARule; } + @Override + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + public HashMap> getGpuGroupDetails() { return groupDetails; } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index 4e07e6f5c370..db70125fe674 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -84,6 +84,10 @@ public interface HostDao extends GenericDao, StateDao findHypervisorHostInCluster(long clusterId); + List findHypervisorHostInPod(long podId); + + List findHypervisorHostInZone(long zoneId); + HostVO findAnyStateHypervisorHostInCluster(long clusterId); HostVO findOldestExistentHypervisorHostInCluster(long clusterId); @@ -96,10 +100,14 @@ public interface HostDao extends GenericDao, StateDao findByPodId(Long podId); + List findByPodId(Long podId, Type type); + List listIdsByPodId(Long podId); List findByClusterId(Long clusterId); + List findByClusterId(Long clusterId, Type type); + List listIdsByClusterId(Long clusterId); List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId); diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index 7cda0a367aa2..9c4f421ad627 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -107,7 +107,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder IdStatusSearch; protected SearchBuilder TypeDcSearch; protected SearchBuilder TypeDcStatusSearch; - protected SearchBuilder TypeClusterStatusSearch; + protected SearchBuilder 
TypeStatusStateSearch; protected SearchBuilder MsStatusSearch; protected SearchBuilder DcPrivateIpAddressSearch; protected SearchBuilder DcStorageIpAddressSearch; @@ -266,12 +266,14 @@ public void init() { TypeDcStatusSearch.and("resourceState", TypeDcStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeDcStatusSearch.done(); - TypeClusterStatusSearch = createSearchBuilder(); - TypeClusterStatusSearch.and("type", TypeClusterStatusSearch.entity().getType(), SearchCriteria.Op.EQ); - TypeClusterStatusSearch.and("cluster", TypeClusterStatusSearch.entity().getClusterId(), SearchCriteria.Op.EQ); - TypeClusterStatusSearch.and("status", TypeClusterStatusSearch.entity().getStatus(), SearchCriteria.Op.EQ); - TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); - TypeClusterStatusSearch.done(); + TypeStatusStateSearch = createSearchBuilder(); + TypeStatusStateSearch.and("type", TypeStatusStateSearch.entity().getType(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("cluster", TypeStatusStateSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("pod", TypeStatusStateSearch.entity().getPodId(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("zone", TypeStatusStateSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("status", TypeStatusStateSearch.entity().getStatus(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("resourceState", TypeStatusStateSearch.entity().getResourceState(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.done(); IdsSearch = createSearchBuilder(); IdsSearch.and("id", IdsSearch.entity().getId(), SearchCriteria.Op.IN); @@ -328,10 +330,12 @@ public void init() { PodSearch = createSearchBuilder(); PodSearch.and("podId", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ); + PodSearch.and("type", PodSearch.entity().getType(), Op.EQ); PodSearch.done(); ClusterSearch = createSearchBuilder(); 
ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + ClusterSearch.and("type", ClusterSearch.entity().getType(), Op.EQ); ClusterSearch.done(); TypeSearch = createSearchBuilder(); @@ -1238,8 +1242,16 @@ public List listIdsByDataCenterId(Long zoneId) { @Override public List findByPodId(Long podId) { + return findByPodId(podId, null); + } + + @Override + public List findByPodId(Long podId, Type type) { SearchCriteria sc = PodSearch.create(); sc.setParameters("podId", podId); + if (type != null) { + sc.setParameters("type", type); + } return listBy(sc); } @@ -1250,8 +1262,16 @@ public List listIdsByPodId(Long podId) { @Override public List findByClusterId(Long clusterId) { + return findByClusterId(clusterId, null); + } + + @Override + public List findByClusterId(Long clusterId, Type type) { SearchCriteria sc = ClusterSearch.create(); sc.setParameters("clusterId", clusterId); + if (type != null) { + sc.setParameters("type", type); + } return listBy(sc); } @@ -1355,7 +1375,7 @@ public HostVO findByIp(final String ipAddress) { @Override public List findHypervisorHostInCluster(long clusterId) { - SearchCriteria sc = TypeClusterStatusSearch.create(); + SearchCriteria sc = TypeStatusStateSearch.create(); sc.setParameters("type", Host.Type.Routing); sc.setParameters("cluster", clusterId); sc.setParameters("status", Status.Up); @@ -1364,9 +1384,31 @@ public List findHypervisorHostInCluster(long clusterId) { return listBy(sc); } + @Override + public List findHypervisorHostInZone(long zoneId) { + SearchCriteria sc = TypeStatusStateSearch.create(); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("zone", zoneId); + sc.setParameters("status", Status.Up); + sc.setParameters("resourceState", ResourceState.Enabled); + + return listBy(sc); + } + + @Override + public List findHypervisorHostInPod(long podId) { + SearchCriteria sc = TypeStatusStateSearch.create(); + sc.setParameters("type", Host.Type.Routing); + 
sc.setParameters("pod", podId); + sc.setParameters("status", Status.Up); + sc.setParameters("resourceState", ResourceState.Enabled); + + return listBy(sc); + } + @Override public HostVO findAnyStateHypervisorHostInCluster(long clusterId) { - SearchCriteria sc = TypeClusterStatusSearch.create(); + SearchCriteria sc = TypeStatusStateSearch.create(); sc.setParameters("type", Host.Type.Routing); sc.setParameters("cluster", clusterId); List list = listBy(sc, new Filter(1)); @@ -1375,7 +1417,7 @@ public HostVO findAnyStateHypervisorHostInCluster(long clusterId) { @Override public HostVO findOldestExistentHypervisorHostInCluster(long clusterId) { - SearchCriteria sc = TypeClusterStatusSearch.create(); + SearchCriteria sc = TypeStatusStateSearch.create(); sc.setParameters("type", Host.Type.Routing); sc.setParameters("cluster", clusterId); sc.setParameters("status", Status.Up); diff --git a/engine/schema/src/main/java/com/cloud/storage/StoragePoolAndAccessGroupMapVO.java b/engine/schema/src/main/java/com/cloud/storage/StoragePoolAndAccessGroupMapVO.java new file mode 100644 index 000000000000..5690324340c4 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/storage/StoragePoolAndAccessGroupMapVO.java @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.storage; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.InternalIdentity; + +@Entity +@Table(name = "storage_pool_and_access_group_map") +public class StoragePoolAndAccessGroupMapVO implements InternalIdentity { + + protected StoragePoolAndAccessGroupMapVO() { + } + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "pool_id") + private long poolId; + + @Column(name = "storage_access_group") + private String storageAccessGroup; + + public StoragePoolAndAccessGroupMapVO(long poolId, String storageAccessGroup) { + this.poolId = poolId; + this.storageAccessGroup = storageAccessGroup; + } + + @Override + public long getId() { + return this.id; + } + + public long getPoolId() { + return poolId; + } + + public String getStorageAccessGroup() { + return storageAccessGroup; + } + +} diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDao.java new file mode 100644 index 000000000000..2edadaacbd5a --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDao.java @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.storage.dao; + +import java.util.List; + +import com.cloud.storage.StoragePoolAndAccessGroupMapVO; + +import com.cloud.utils.db.GenericDao; + +public interface StoragePoolAndAccessGroupMapDao extends GenericDao { + + void persist(long poolId, List storageAccessGroups); + List getStorageAccessGroups(long poolId); + void deleteStorageAccessGroups(long poolId); +} diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDaoImpl.java new file mode 100644 index 000000000000..5e2bea508795 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDaoImpl.java @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.storage.dao; + +import java.util.ArrayList; +import java.util.List; + +import com.cloud.storage.StoragePoolAndAccessGroupMapVO; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionLegacy; + +public class StoragePoolAndAccessGroupMapDaoImpl extends GenericDaoBase implements StoragePoolAndAccessGroupMapDao { + + protected final SearchBuilder StoragePoolAccessGroupSearch; + public StoragePoolAndAccessGroupMapDaoImpl() { + StoragePoolAccessGroupSearch = createSearchBuilder(); + StoragePoolAccessGroupSearch.and("poolId", StoragePoolAccessGroupSearch.entity().getPoolId(), SearchCriteria.Op.EQ); + StoragePoolAccessGroupSearch.done(); + } + + @Override + public void persist(long poolId, List storageAccessGroups) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + + txn.start(); + SearchCriteria sc = StoragePoolAccessGroupSearch.create(); + sc.setParameters("poolId", poolId); + expunge(sc); + + for (String sag : storageAccessGroups) { + sag = sag.trim(); + if (sag.length() > 0) { + StoragePoolAndAccessGroupMapVO vo = new StoragePoolAndAccessGroupMapVO(poolId, sag); + persist(vo); + } + } + txn.commit(); + } + + @Override + public List getStorageAccessGroups(long poolId) { + SearchCriteria sc = StoragePoolAccessGroupSearch.create(); + sc.setParameters("poolId", poolId); + + List results = search(sc, null); + List storagePoolAccessGroups = new ArrayList(results.size()); + for (StoragePoolAndAccessGroupMapVO result : results) { + storagePoolAccessGroups.add(result.getStorageAccessGroup()); + } + + return storagePoolAccessGroups; + } + + @Override + public void deleteStorageAccessGroups(long poolId) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + txn.start(); + SearchCriteria sc = StoragePoolAccessGroupSearch.create(); + sc.setParameters("poolId", poolId); + expunge(sc); + txn.commit(); + } +} diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java index d205379cdb24..27d4b0ae7692 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java @@ -58,9 +58,9 @@ public interface PrimaryDataStoreDao extends GenericDao { */ void updateCapacityIops(long id, long capacityIops); - StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule); + StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, List storageAccessGroups); - StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, boolean displayDetails); + StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, boolean displayDetails, List storageAccessGroups); /** * Find pool by name. 
@@ -84,7 +84,9 @@ public interface PrimaryDataStoreDao extends GenericDao { */ List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details, ScopeType scope); - List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, boolean validateTagRule, long ruleExecuteTimeout); + List findPoolsByTags(long dcId, long podId, Long clusterId, ScopeType scope, String[] tags, boolean validateTagRule, long ruleExecuteTimeout); + + List findPoolsByAccessGroupsForHostConnection(Long dcId, Long podId, Long clusterId, ScopeType scope, String[] storageAccessGroups); List findDisabledPoolsByScope(long dcId, Long podId, Long clusterId, ScopeType scope); @@ -127,6 +129,10 @@ public interface PrimaryDataStoreDao extends GenericDao { List findZoneWideStoragePoolsByTags(long dcId, String[] tags, boolean validateTagRule); + List findZoneWideStoragePoolsByAccessGroupsForHostConnection(long dcId, String[] storageAccessGroups); + + List findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(long dcId, String[] storageAccessGroups, HypervisorType type); + List findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType); List findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType, String keyword); @@ -143,6 +149,8 @@ public interface PrimaryDataStoreDao extends GenericDao { void deletePoolTags(long poolId); + void deleteStoragePoolAccessGroups(long poolId); + List listChildStoragePoolsInDatastoreCluster(long poolId); Integer countAll(); @@ -154,8 +162,10 @@ public interface PrimaryDataStoreDao extends GenericDao { List listStoragePoolsWithActiveVolumesByOfferingId(long offeringid); Pair, Integer> searchForIdsAndCount(Long storagePoolId, String storagePoolName, Long zoneId, - String path, Long podId, Long clusterId, String address, ScopeType scopeType, StoragePoolStatus status, + String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, StoragePoolStatus 
status, String keyword, Filter searchFilter); List listByIds(List ids); + + List findStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId, ScopeType scope, HypervisorType hypervisorType); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index ef29ddcde867..957b7e37e86a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -28,6 +28,8 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.StoragePoolAndAccessGroupMapVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.commons.collections.CollectionUtils; import com.cloud.host.Status; @@ -70,15 +72,25 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase private StoragePoolHostDao _hostDao; @Inject private StoragePoolTagsDao _tagsDao; + @Inject + StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao; protected final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? 
and ("; protected final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?"; + protected final String DetailsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_details.pool_id"; private final String ZoneWideTagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' AND storage_pool_tags.is_tag_a_rule = 0 and storage_pool.data_center_id = ? and storage_pool.scope = ? and ("; private final String ZoneWideTagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?"; + private final String ZoneWideStorageAccessGroupsForHostConnectionSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and ("; + private final String ZoneWideStorageAccessGroupsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id"; + private final String ZoneWideStorageAccessGroupsWithHypervisorTypeSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.hypervisor = ? and storage_pool.data_center_id = ? and storage_pool.scope = ? 
and ("; + private final String ZoneWideStorageAccessGroupsWithHypervisorTypeSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id"; // Storage tags are now separate from storage_pool_details, leaving only details on that table protected final String TagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' AND storage_pool_tags.is_tag_a_rule = 0 and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and ("; protected final String TagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?"; + protected final String SAGsForHostConnectionSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? 
and ("; + + protected final String SAGsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id"; private static final String GET_STORAGE_POOLS_OF_VOLUMES_WITHOUT_OR_NOT_HAVING_TAGS = "SELECT s.* " + "FROM volumes vol " + @@ -296,13 +308,13 @@ public StoragePoolVO listById(Integer id) { } @Override - public StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule) { - return persist(pool, details, tags, isTagARule, true); + public StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, List storageAccessGroups) { + return persist(pool, details, tags, isTagARule, true, storageAccessGroups); } @Override @DB - public StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, boolean displayDetails) { + public StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, boolean displayDetails, List storageAccessGroups) { TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); pool = super.persist(pool); @@ -315,6 +327,9 @@ public StoragePoolVO persist(StoragePoolVO pool, Map details, Li if (CollectionUtils.isNotEmpty(tags)) { _tagsDao.persist(pool.getId(), tags, isTagARule); } + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + _storagePoolAccessGroupMapDao.persist(pool.getId(), storageAccessGroups); + } txn.commit(); return pool; } @@ -338,6 +353,56 @@ protected List findPoolsByDetailsOrTagsInternal(long dcId, long p return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, valuesLength); } + protected List findPoolsByDetailsOrTagsForHostConnectionInternal(long dcId, long podId, Long clusterId, ScopeType scope, String sqlValues, ValueType valuesType) { + String sqlPrefix = valuesType.equals(ValueType.DETAILS) ? DetailsSqlPrefix : SAGsForHostConnectionSqlPrefix; + String sqlSuffix = valuesType.equals(ValueType.DETAILS) ? 
DetailsForHostConnectionSqlSuffix : SAGsForHostConnectionSqlSuffix; + String sql = getSqlPreparedStatement(sqlPrefix, sqlSuffix, sqlValues, clusterId); + return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, null); + } + + /** + * Search storage pools in a transaction + * @param sql prepared statement sql + * @param dcId data center id + * @param podId pod id + * @param clusterId cluster id + * @param scope scope + * @param valuesLength values length + * @return storage pools matching criteria + */ + @DB + protected List searchStoragePoolsWithHypervisorTypesPreparedStatement(String sql, HypervisorType type, long dcId, Long podId, Long clusterId, ScopeType scope, Integer valuesLength) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + List pools = new ArrayList(); + try (PreparedStatement pstmt = txn.prepareStatement(sql);) { + if (pstmt != null) { + int i = 1; + pstmt.setString(i++, type.toString()); + pstmt.setLong(i++, dcId); + if (podId != null) { + pstmt.setLong(i++, podId); + } + pstmt.setString(i++, scope.toString()); + if (clusterId != null) { + pstmt.setLong(i++, clusterId); + } + if (valuesLength != null) { + pstmt.setInt(i++, valuesLength); + } + try (ResultSet rs = pstmt.executeQuery();) { + while (rs.next()) { + pools.add(toEntityBean(rs, false)); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e); + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e); + } + return pools; + } + /** * Search storage pools in a transaction * @param sql prepared statement sql @@ -349,7 +414,7 @@ protected List findPoolsByDetailsOrTagsInternal(long dcId, long p * @return storage pools matching criteria */ @DB - protected List searchStoragePoolsPreparedStatement(String sql, long dcId, Long podId, Long clusterId, ScopeType scope, int valuesLength) { + protected List searchStoragePoolsPreparedStatement(String sql, long dcId, 
Long podId, Long clusterId, ScopeType scope, Integer valuesLength) { TransactionLegacy txn = TransactionLegacy.currentTxn(); List pools = new ArrayList(); try (PreparedStatement pstmt = txn.prepareStatement(sql);) { @@ -363,7 +428,9 @@ protected List searchStoragePoolsPreparedStatement(String sql, lo if (clusterId != null) { pstmt.setLong(i++, clusterId); } - pstmt.setInt(i++, valuesLength); + if (valuesLength != null) { + pstmt.setInt(i++, valuesLength); + } try (ResultSet rs = pstmt.executeQuery();) { while (rs.next()) { pools.add(toEntityBean(rs, false)); @@ -420,6 +487,22 @@ protected String getSqlValuesFromStorageTags(String[] tags) throws NullPointerEx return sqlValues.toString(); } + /** + * Return SQL string from storage pool access group map, to be placed between SQL Prefix and SQL Suffix when creating storage tags PreparedStatement. + * @param storageAccessGroups storage tags array + * @return SQL string containing storage tag values to be placed between Prefix and Suffix when creating PreparedStatement. 
+ * @throws NullPointerException if tags is null + * @throws IndexOutOfBoundsException if tags is not null, but empty + */ + protected String getSqlValuesFromStorageAccessGroups(String[] storageAccessGroups) throws NullPointerException, IndexOutOfBoundsException { + StringBuilder sqlValues = new StringBuilder(); + for (String tag : storageAccessGroups) { + sqlValues.append("(storage_pool_and_access_group_map.storage_access_group='").append(tag).append("') OR "); + } + sqlValues.delete(sqlValues.length() - 4, sqlValues.length()); + return sqlValues.toString(); + } + @DB @Override public List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details, ScopeType scope) { @@ -428,10 +511,10 @@ public List findPoolsByDetails(long dcId, long podId, Long cluste } @Override - public List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, boolean validateTagRule, long ruleExecuteTimeout) { + public List findPoolsByTags(long dcId, long podId, Long clusterId, ScopeType scope, String[] tags, boolean validateTagRule, long ruleExecuteTimeout) { List storagePools = null; if (tags == null || tags.length == 0) { - storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER); + storagePools = listBy(dcId, podId, clusterId, scope); if (validateTagRule) { storagePools = getPoolsWithoutTagRule(storagePools); @@ -439,7 +522,20 @@ public List findPoolsByTags(long dcId, long podId, Long clusterId } else { String sqlValues = getSqlValuesFromStorageTags(tags); - storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, ScopeType.CLUSTER, sqlValues, ValueType.TAGS, tags.length); + storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, scope, sqlValues, ValueType.TAGS, tags.length); + } + + return storagePools; + } + + @Override + public List findPoolsByAccessGroupsForHostConnection(Long dcId, Long podId, Long clusterId, ScopeType scope, String[] storageAccessGroups) { + List storagePools = null; + if (storageAccessGroups == 
null || storageAccessGroups.length == 0) { + storagePools = listBy(dcId, podId, clusterId, scope); + } else { + String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups); + storagePools = findPoolsByDetailsOrTagsForHostConnectionInternal(dcId, podId, clusterId, scope, sqlValues, ValueType.TAGS); } return storagePools; @@ -556,6 +652,77 @@ protected List getPoolsWithoutTagRule(List storage return storagePoolsToReturn; } + @Override + public List findZoneWideStoragePoolsByAccessGroupsForHostConnection(long dcId, String[] storageAccessGroups) { + if (storageAccessGroups == null || storageAccessGroups.length == 0) { + QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); + sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE); + return sc.list(); + } else { + String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups); + String sql = getSqlPreparedStatement(ZoneWideStorageAccessGroupsForHostConnectionSqlPrefix, ZoneWideStorageAccessGroupsForHostConnectionSqlSuffix, sqlValues, null); + return searchStoragePoolsPreparedStatement(sql, dcId, null, null, ScopeType.ZONE, null); + } + } + + @Override + public List findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(long dcId, String[] storageAccessGroups, HypervisorType type) { + if (storageAccessGroups == null || storageAccessGroups.length == 0) { + QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); + sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE); + sc.and(sc.entity().getHypervisor(), Op.EQ, type); + return sc.list(); + } else { + String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups); + String sql = getSqlPreparedStatement(ZoneWideStorageAccessGroupsWithHypervisorTypeSqlPrefix, 
ZoneWideStorageAccessGroupsWithHypervisorTypeSqlSuffix, sqlValues, null); + return searchStoragePoolsWithHypervisorTypesPreparedStatement(sql, type, dcId, null, null, ScopeType.ZONE, null); + } + } + + @Override + public List findStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId, ScopeType scope, HypervisorType hypervisorType) { + SearchBuilder poolSearch = createSearchBuilder(); + SearchBuilder storageAccessGroupsPoolSearch = _storagePoolAccessGroupMapDao.createSearchBuilder(); + // Set criteria for pools + poolSearch.and("scope", poolSearch.entity().getScope(), Op.EQ); + poolSearch.and("removed", poolSearch.entity().getRemoved(), Op.NULL); + poolSearch.and("status", poolSearch.entity().getStatus(), Op.EQ); + poolSearch.and("datacenterid", poolSearch.entity().getDataCenterId(), Op.EQ); + poolSearch.and("podid", poolSearch.entity().getPodId(), Op.EQ); + poolSearch.and("clusterid", poolSearch.entity().getClusterId(), Op.EQ); + poolSearch.and("hypervisortype", poolSearch.entity().getHypervisor(), Op.EQ); + + // Set StoragePoolAccessGroupMapVO.pool_id IS NULL. 
This ensures only pools without tags are returned + storageAccessGroupsPoolSearch.and("poolid", storageAccessGroupsPoolSearch.entity().getPoolId(), Op.NULL); + poolSearch.join("tagJoin", storageAccessGroupsPoolSearch, poolSearch.entity().getId(), storageAccessGroupsPoolSearch.entity().getPoolId(), JoinBuilder.JoinType.LEFT); + + SearchCriteria sc = poolSearch.create(); + sc.setParameters("scope", scope.toString()); + sc.setParameters("status", Status.Up.toString()); + + if (dcId != null) { + sc.setParameters("datacenterid", dcId); + } + + if (podId != null) { + sc.setParameters("podid", podId); + } + + if (clusterId != null) { + sc.setParameters("clusterid", clusterId); + } + + if (hypervisorType != null) { + sc.setParameters("hypervisortype", hypervisorType); + } + + return listBy(sc); + } + @Override public List searchForStoragePoolTags(long poolId) { return _tagsDao.getStoragePoolTags(poolId); @@ -659,6 +826,11 @@ public void deletePoolTags(long poolId) { _tagsDao.deleteTags(poolId); } + @Override + public void deleteStoragePoolAccessGroups(long poolId) { + _storagePoolAccessGroupMapDao.deleteStorageAccessGroups(poolId); + } + @Override public List listChildStoragePoolsInDatastoreCluster(long poolId) { QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); @@ -725,9 +897,9 @@ public List listStoragePoolsWithActiveVolumesByOfferingId(long of @Override public Pair, Integer> searchForIdsAndCount(Long storagePoolId, String storagePoolName, Long zoneId, - String path, Long podId, Long clusterId, String address, ScopeType scopeType, StoragePoolStatus status, + String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, StoragePoolStatus status, String keyword, Filter searchFilter) { - SearchCriteria sc = createStoragePoolSearchCriteria(storagePoolId, storagePoolName, zoneId, path, podId, clusterId, address, scopeType, status, keyword); + SearchCriteria sc = createStoragePoolSearchCriteria(storagePoolId, storagePoolName, zoneId, 
path, podId, clusterId, hostId, address, scopeType, status, keyword); Pair, Integer> uniquePair = searchAndCount(sc, searchFilter); List idList = uniquePair.first().stream().map(StoragePoolVO::getId).collect(Collectors.toList()); return new Pair<>(idList, uniquePair.second()); @@ -744,8 +916,8 @@ public List listByIds(List ids) { } private SearchCriteria createStoragePoolSearchCriteria(Long storagePoolId, String storagePoolName, - Long zoneId, String path, Long podId, Long clusterId, String address, ScopeType scopeType, - StoragePoolStatus status, String keyword) { + Long zoneId, String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, + StoragePoolStatus status, String keyword) { SearchBuilder sb = createSearchBuilder(); sb.select(null, SearchCriteria.Func.DISTINCT, sb.entity().getId()); // select distinct // ids @@ -760,6 +932,12 @@ private SearchCriteria createStoragePoolSearchCriteria(Long stora sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ); sb.and("parent", sb.entity().getParent(), SearchCriteria.Op.EQ); + if (hostId != null) { + SearchBuilder hostJoin = _hostDao.createSearchBuilder(); + hostJoin.and("hostId", hostJoin.entity().getHostId(), SearchCriteria.Op.EQ); + sb.join("poolHostJoin", hostJoin, sb.entity().getId(), hostJoin.entity().getPoolId(), JoinBuilder.JoinType.INNER); + } + SearchCriteria sc = sb.create(); if (keyword != null) { @@ -808,6 +986,11 @@ private SearchCriteria createStoragePoolSearchCriteria(Long stora sc.setParameters("status", status.toString()); } sc.setParameters("parent", 0); + + if (hostId != null) { + sc.setJoinParameters("poolHostJoin", "hostId", hostId); + } + return sc; } } diff --git a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml index 
d6d72f9228e1..96579b265163 100644 --- a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml +++ b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml @@ -62,6 +62,7 @@ + diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql index 292da4a466bd..896b8ca9084f 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql @@ -65,3 +65,19 @@ CREATE TABLE IF NOT EXISTS `cloud`.`reconcile_commands` ( CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'kvm_checkpoint_path', 'varchar(255)'); CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'end_of_chain', 'int(1) unsigned'); + +-- Create table storage_pool_and_access_group_map +CREATE TABLE IF NOT EXISTS `cloud`.`storage_pool_and_access_group_map` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `pool_id` bigint(20) unsigned NOT NULL COMMENT "pool id", + `storage_access_group` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + KEY `fk_storage_pool_and_access_group_map__pool_id` (`pool_id`), + CONSTRAINT `fk_storage_pool_and_access_group_map__pool_id` FOREIGN KEY (`pool_id`) REFERENCES `storage_pool` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; + +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the host"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.cluster', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the cluster"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host_pod_ref', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access 
groups for the hosts in the pod"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.data_center', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the zone"'); + diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.data_center_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.data_center_view.sql index c34df4f1cbf5..46aea863fc5e 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.data_center_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.data_center_view.sql @@ -42,6 +42,7 @@ select data_center.type, data_center.removed, data_center.sort_key, + data_center.storage_access_groups, domain.id domain_id, domain.uuid domain_uuid, domain.name domain_name, diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql index 6fc8fb803862..d9f4e2671595 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql @@ -42,17 +42,21 @@ SELECT host.speed, host.ram, host.arch, + host.storage_access_groups, cluster.id cluster_id, cluster.uuid cluster_uuid, cluster.name cluster_name, cluster.cluster_type, + cluster.storage_access_groups AS cluster_storage_access_groups, data_center.id data_center_id, data_center.uuid data_center_uuid, data_center.name data_center_name, + data_center.storage_access_groups AS zone_storage_access_groups, data_center.networktype data_center_type, host_pod_ref.id pod_id, host_pod_ref.uuid pod_uuid, host_pod_ref.name pod_name, + host_pod_ref.storage_access_groups AS pod_storage_access_groups, GROUP_CONCAT(DISTINCT(host_tags.tag)) AS tag, GROUP_CONCAT(DISTINCT(explicit_host_tags.tag)) AS explicit_tag, GROUP_CONCAT(DISTINCT(implicit_host_tags.tag)) AS implicit_tag, diff --git 
a/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql index 5d7585baa3b4..699537a7a694 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql @@ -51,6 +51,7 @@ SELECT `host_pod_ref`.`name` AS `pod_name`, `storage_pool_tags`.`tag` AS `tag`, `storage_pool_tags`.`is_tag_a_rule` AS `is_tag_a_rule`, + `storage_pool_and_access_group_map`.`storage_access_group` AS `storage_access_group`, `op_host_capacity`.`used_capacity` AS `disk_used_capacity`, `op_host_capacity`.`reserved_capacity` AS `disk_reserved_capacity`, `async_job`.`id` AS `job_id`, @@ -63,6 +64,7 @@ FROM LEFT JOIN `cloud`.`data_center` ON ((`storage_pool`.`data_center_id` = `data_center`.`id`))) LEFT JOIN `cloud`.`host_pod_ref` ON ((`storage_pool`.`pod_id` = `host_pod_ref`.`id`))) LEFT JOIN `cloud`.`storage_pool_tags` ON (((`storage_pool_tags`.`pool_id` = `storage_pool`.`id`)))) + LEFT JOIN `cloud`.`storage_pool_and_access_group_map` ON (((`storage_pool_and_access_group_map`.`pool_id` = `storage_pool`.`id`)))) LEFT JOIN `cloud`.`op_host_capacity` ON (((`storage_pool`.`id` = `op_host_capacity`.`host_id`) AND (`op_host_capacity`.`capacity_type` IN (3 , 9))))) LEFT JOIN `cloud`.`async_job` ON (((`async_job`.`instance_id` = `storage_pool`.`id`) diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 2e13080494f6..0a211ab1934d 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -35,6 +35,7 @@ import 
com.cloud.agent.api.CheckVirtualMachineAnswer; import com.cloud.agent.api.CheckVirtualMachineCommand; import com.cloud.agent.api.PrepareForMigrationAnswer; +import com.cloud.resource.ResourceManager; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -51,6 +52,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.StorageAction; @@ -199,6 +201,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { VMTemplatePoolDao templatePoolDao; @Inject private VolumeDataFactory _volFactory; + @Inject + ResourceManager resourceManager; @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { @@ -485,10 +489,10 @@ private void handleVolumeCopyFromManagedStorageToSecondaryStorage(VolumeInfo src HostVO hostVO; if (srcStoragePoolVO.getClusterId() != null) { - hostVO = getHostInCluster(srcStoragePoolVO.getClusterId()); + hostVO = getHostInCluster(srcStoragePoolVO); } else { - hostVO = getHost(srcVolumeInfo.getDataCenterId(), hypervisorType, false); + hostVO = getHost(srcVolumeInfo, hypervisorType, false); } volumePath = copyManagedVolumeToSecondaryStorage(srcVolumeInfo, destVolumeInfo, hostVO, @@ -556,10 +560,10 @@ private void handleVolumeMigrationFromManagedStorageToNonManagedStorage(VolumeIn HostVO hostVO; if (destStoragePoolVO.getClusterId() != null) { - hostVO = 
getHostInCluster(destStoragePoolVO.getClusterId()); + hostVO = getHostInCluster(destStoragePoolVO); } else { - hostVO = getHost(destVolumeInfo.getDataCenterId(), hypervisorType, false); + hostVO = getHost(destVolumeInfo, hypervisorType, false); } setCertainVolumeValuesNull(destVolumeInfo.getId()); @@ -933,9 +937,9 @@ private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, hostVO = _hostDao.findById(destVolumeInfo.getDataStore().getScope().getScopeId()); } else { if (srcStoragePoolVO.getClusterId() != null) { - hostVO = getHostInCluster(srcStoragePoolVO.getClusterId()); + hostVO = getHostInCluster(srcStoragePoolVO); } else { - hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false); + hostVO = getHost(destVolumeInfo, HypervisorType.KVM, false); } } @@ -1337,7 +1341,7 @@ private void handleCreateNonManagedVolumeFromManagedSnapshot(SnapshotInfo snapsh createVolumeFromSnapshot(snapshotInfo); - HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), HypervisorType.XenServer, true); + HostVO hostVO = getHost(snapshotInfo, HypervisorType.XenServer, true); copyCmdAnswer = performResignature(snapshotInfo, hostVO, null, true); @@ -1349,7 +1353,7 @@ private void handleCreateNonManagedVolumeFromManagedSnapshot(SnapshotInfo snapsh CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); - HostVO hostVO = getHostInCluster(volumeStoragePoolVO.getClusterId()); + HostVO hostVO = getHostInCluster(volumeStoragePoolVO); if (!usingBackendSnapshot) { long snapshotStoragePoolId = snapshotInfo.getDataStore().getId(); @@ -1379,7 +1383,7 @@ private void handleCreateNonManagedVolumeFromManagedSnapshot(SnapshotInfo snapsh } finally { try { - HostVO hostVO = getHostInCluster(volumeStoragePoolVO.getClusterId()); + HostVO hostVO = getHostInCluster(volumeStoragePoolVO); long snapshotStoragePoolId = snapshotInfo.getDataStore().getId(); DataStore 
snapshotDataStore = dataStoreMgr.getDataStore(snapshotStoragePoolId, DataStoreRole.Primary); @@ -1473,7 +1477,7 @@ private void handleCreateManagedVolumeFromNonManagedSnapshot(SnapshotInfo snapsh handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); - hostVO = getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false); + hostVO = getHost(snapshotInfo, snapshotInfo.getHypervisorType(), false); // copy the volume from secondary via the hypervisor if (HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType())) { @@ -1554,7 +1558,7 @@ private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo temp // only XenServer, VMware, and KVM are currently supported // Leave host equal to null for KVM since we don't need to perform a resignature when using that hypervisor type. if (volumeInfo.getFormat() == ImageFormat.VHD) { - hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.XenServer, true); + hostVO = getHost(volumeInfo, HypervisorType.XenServer, true); if (hostVO == null) { throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " + @@ -1574,7 +1578,7 @@ private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo temp } else if (volumeInfo.getFormat() == ImageFormat.OVA) { // all VMware hosts support resigning - hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.VMware, false); + hostVO = getHost(volumeInfo, HypervisorType.VMware, false); if (hostVO == null) { throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " + @@ -1757,7 +1761,7 @@ private void handleCreateManagedVolumeFromManagedSnapshot(SnapshotInfo snapshotI } else { // asking for a XenServer host here so we don't always prefer to use XenServer hosts that support resigning // even when we don't need those hosts to do this kind of copy work - hostVO = 
getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false); + hostVO = getHost(snapshotInfo, snapshotInfo.getHypervisorType(), false); handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); @@ -1814,7 +1818,7 @@ private void handleCreateVolumeFromVolumeOnSecondaryStorage(VolumeInfo srcVolume destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); - HostVO hostVO = getHost(dataCenterId, hypervisorType, false); + HostVO hostVO = getHost(destVolumeInfo, hypervisorType, false); handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); @@ -2606,7 +2610,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa volumeInfo.processEvent(Event.MigrationRequested); - HostVO hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.KVM, false); + HostVO hostVO = getHost(volumeInfo, HypervisorType.KVM, false); DataStore srcDataStore = volumeInfo.getDataStore(); int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); @@ -2764,10 +2768,10 @@ private HostVO getHost(SnapshotInfo snapshotInfo) { HypervisorType hypervisorType = snapshotInfo.getHypervisorType(); if (HypervisorType.XenServer.equals(hypervisorType)) { - HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, true); + HostVO hostVO = getHost(snapshotInfo, hypervisorType, true); if (hostVO == null) { - hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, false); + hostVO = getHost(snapshotInfo, hypervisorType, false); if (hostVO == null) { throw new CloudRuntimeException("Unable to locate an applicable host in data center with ID = " + snapshotInfo.getDataCenterId()); @@ -2778,14 +2782,15 @@ private HostVO getHost(SnapshotInfo snapshotInfo) { } if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) { - return 
getHost(snapshotInfo.getDataCenterId(), hypervisorType, false); + return getHost(snapshotInfo, hypervisorType, false); } throw new CloudRuntimeException("Unsupported hypervisor type"); } - private HostVO getHostInCluster(long clusterId) { - List hosts = _hostDao.findByClusterId(clusterId); + private HostVO getHostInCluster(StoragePoolVO storagePool) { + DataStore store = dataStoreMgr.getDataStore(storagePool.getId(), DataStoreRole.Primary); + List hosts = resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection((PrimaryDataStoreInfo) store); if (hosts != null && hosts.size() > 0) { Collections.shuffle(hosts, RANDOM); @@ -2800,12 +2805,37 @@ private HostVO getHostInCluster(long clusterId) { throw new CloudRuntimeException("Unable to locate a host"); } - private HostVO getHost(Long zoneId, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) { + private HostVO getHost(SnapshotInfo snapshotInfo, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) { + Long zoneId = snapshotInfo.getDataCenterId(); Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null."); Preconditions.checkArgument(hypervisorType != null, "Hypervisor type cannot be null."); - List hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType); + List hosts; + if (DataStoreRole.Primary.equals(snapshotInfo.getDataStore().getRole())) { + hosts = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(snapshotInfo.getDataStore(), zoneId, hypervisorType); + } else { + hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType); + } + + return getHost(hosts, computeClusterMustSupportResign); + } + + private HostVO getHost(VolumeInfo volumeInfo, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) { + Long zoneId = volumeInfo.getDataCenterId(); + Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null."); + Preconditions.checkArgument(hypervisorType != null, 
"Hypervisor type cannot be null."); + + List hosts; + if (DataStoreRole.Primary.equals(volumeInfo.getDataStore().getRole())) { + hosts = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(volumeInfo.getDataStore(), zoneId, hypervisorType); + } else { + hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType); + } + + return getHost(hosts, computeClusterMustSupportResign); + } + private HostVO getHost(List hosts, boolean computeClusterMustSupportResign) { if (hosts == null) { return null; } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index cde635b80499..4b259760915d 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -17,41 +17,45 @@ package org.apache.cloudstack.storage.allocator; import com.cloud.api.query.dao.StoragePoolJoinDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.ScopeType; +import com.cloud.storage.StoragePoolStatus; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.lang3.StringUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.collections.CollectionUtils; + +import com.cloud.utils.Pair; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import 
org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; + import com.cloud.capacity.Capacity; import com.cloud.capacity.dao.CapacityDao; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.exception.StorageUnavailableException; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StorageUtil; import com.cloud.storage.Volume; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.Pair; -import com.cloud.utils.StringUtils; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachineProfile; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; - -import org.apache.commons.collections.CollectionUtils; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -77,11 +81,15 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement @Inject protected 
PrimaryDataStoreDao storagePoolDao; @Inject protected VolumeDao volumeDao; @Inject protected ConfigurationDao configDao; + @Inject protected ClusterDao clusterDao; @Inject protected CapacityDao capacityDao; - @Inject private ClusterDao clusterDao; @Inject private StorageManager storageMgr; @Inject private StorageUtil storageUtil; @Inject private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + protected HostDao hostDao; + @Inject + protected HostPodDao podDao; /** * make sure shuffled lists of Pools are really shuffled @@ -320,6 +328,16 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, return false; } + if (plan.getHostId() != null) { + HostVO plannedHost = hostDao.findById(plan.getHostId()); + if (!storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(plannedHost, pool)) { + if (logger.isDebugEnabled()) { + logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, plannedHost)); + } + return false; + } + } + Volume volume = null; boolean isTempVolume = dskCh.getVolumeId() == Volume.DISK_OFFERING_SUITABILITY_CHECK_VOLUME_ID; if (!isTempVolume) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java index a80e003a139e..25e4608e58f8 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java @@ -77,12 +77,12 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr logDisabledStoragePools(dcId, podId, clusterId, ScopeType.CLUSTER); } - List pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags(), true, VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value()); + List pools = 
storagePoolDao.findPoolsByTags(dcId, podId, clusterId, ScopeType.CLUSTER, dskCh.getTags(), true, VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value()); pools.addAll(storagePoolJoinDao.findStoragePoolByScopeAndRuleTags(dcId, podId, clusterId, ScopeType.CLUSTER, List.of(dskCh.getTags()))); logger.debug(String.format("Found pools [%s] that match with tags [%s].", pools, Arrays.toString(dskCh.getTags()))); // add remaining pools in cluster, that did not match tags, to avoid set - List allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null, false, 0); + List allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, ScopeType.CLUSTER, null, false, 0); allPools.removeAll(pools); for (StoragePoolVO pool : allPools) { logger.trace(String.format("Adding pool [%s] to the 'avoid' set since it did not match any tags.", pool)); @@ -100,7 +100,7 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr } StoragePool storagePool = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); + logger.debug(String.format("Found suitable cluster storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); suitablePools.add(storagePool); } else { logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index f6712ce46b1f..13b5f8e48143 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -96,7 +96,7 @@ 
protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr } StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh)); + logger.debug(String.format("Found suitable zone wide storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh)); suitablePools.add(storagePool); } else { if (canAddStoragePoolToAvoidSet(storage)) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index 66adce76172e..5e9891ef9895 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -159,7 +159,23 @@ public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) { } } - dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule(), displayDetails); + String storageAccessGroupsParams = params.getStorageAccessGroups(); + List storageAccessGroupsList = new ArrayList(); + + if (storageAccessGroupsParams != null) { + String[] storageAccessGroups = storageAccessGroupsParams.split(","); + + for (String storageAccessGroup : storageAccessGroups) { + storageAccessGroup = storageAccessGroup.trim(); + if (storageAccessGroup.length() == 0) { + continue; + } + storageAccessGroupsList.add(storageAccessGroup); + } + } + + dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule(), displayDetails, storageAccessGroupsList); + return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary); } @@ -278,6 +294,7 @@ public boolean 
deletePrimaryDataStore(DataStore store) { this.dataStoreDao.update(poolVO.getId(), poolVO); dataStoreDao.remove(poolVO.getId()); dataStoreDao.deletePoolTags(poolVO.getId()); + dataStoreDao.deleteStoragePoolAccessGroups(poolVO.getId()); annotationDao.removeByEntityType(AnnotationService.EntityType.PRIMARY_STORAGE.name(), poolVO.getUuid()); deletePoolStats(poolVO.getId()); // Delete op_host_capacity entries diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java index c630f0bf6b97..15f546db0f08 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java @@ -373,7 +373,7 @@ private void locateZone() { ConfigurationManager mgr = (ConfigurationManager)_configService; _zone = mgr.createZone(User.UID_SYSTEM, "default", "8.8.8.8", null, "8.8.4.4", null, null /* cidr */, "ROOT", Domain.ROOT_DOMAIN, NetworkType.Advanced, null, - null /* networkDomain */, false, false, null, null, false); + null /* networkDomain */, false, false, null, null, false, null); } } diff --git a/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java b/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java index dd8f2e78b736..831e5d2a2607 100644 --- a/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java +++ b/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java @@ -66,7 +66,7 @@ public List 
select(DiskProfile dskCh, VirtualMachineProfile vmProfi StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, pol, dskCh, plan)) { - logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool)); + logger.trace(String.format("Found suitable storage pool [%s], adding to list.", pool)); suitablePools.add(pol); } } diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java index 3ad08428e9dc..7f3a36d65384 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java @@ -26,6 +26,8 @@ import javax.inject.Inject; +import com.cloud.host.Host; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -50,7 +52,6 @@ import com.cloud.agent.api.StoragePoolInfo; import com.cloud.capacity.CapacityManager; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -90,6 +91,8 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif DataCenterDao _zoneDao; @Inject CapacityManager _capacityMgr; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @Override public DataStore initialize(Map dsInfos) { @@ -356,17 +359,13 @@ public boolean 
attachCluster(DataStore store, ClusterScope scope) { StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId()); PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store; - // Check if there is host up in this cluster - List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); - if (allHosts.isEmpty()) { - primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId()); - } + List hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primarystore); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId())); if (!dataStoreVO.isManaged()) { boolean success = false; - for (HostVO host : allHosts) { - success = createStoragePool(host, primarystore); + for (HostVO h : hostsToConnect) { + success = createStoragePool(h, primarystore); if (success) { break; } @@ -375,7 +374,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { logger.debug("In createPool Adding the pool to each of the hosts"); List poolHosts = new ArrayList(); - for (HostVO h : allHosts) { + for (HostVO h : hostsToConnect) { try { storageMgr.connectHostToSharedPool(h, primarystore.getId()); poolHosts.add(h); @@ -428,10 +427,11 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); - logger.debug("In createPool. 
Attaching the pool to each of the hosts."); + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType); + + logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect)); List poolHosts = new ArrayList(); - for (HostVO host : hosts) { + for (HostVO host : hostsToConnect) { try { storageMgr.connectHostToSharedPool(host, dataStore.getId()); poolHosts.add(host); diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java index 04ea3141423d..537243dd3214 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java @@ -25,7 +25,6 @@ import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -38,8 +37,10 @@ import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -84,6 +85,8 @@ public class 
DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc private StoragePoolHostDao _storagePoolHostDao; @Inject private StoragePoolAutomation storagePoolAutomation; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @Override public DataStore initialize(Map dsInfos) { @@ -97,6 +100,7 @@ public DataStore initialize(Map dsInfos) { Long capacityBytes = (Long) dsInfos.get("capacityBytes"); Long capacityIops = (Long) dsInfos.get("capacityIops"); String tags = (String) dsInfos.get("tags"); + String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS); boolean isTagARule = (Boolean)dsInfos.get("isTagARule"); @SuppressWarnings("unchecked") Map details = (Map) dsInfos.get("details"); @@ -179,6 +183,7 @@ public DataStore initialize(Map dsInfos) { parameters.setCapacityIops(capacityIops); parameters.setHypervisorType(HypervisorType.Any); parameters.setTags(tags); + parameters.setStorageAccessGroups(storageAccessGroups); parameters.setIsTagARule(isTagARule); parameters.setDetails(details); @@ -243,22 +248,13 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis @Override public boolean attachCluster(DataStore datastore, ClusterScope scope) { PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) datastore; + List hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryDataStoreInfo); - // check if there is at least one host up in this cluster - List allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, - primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(), - primaryDataStoreInfo.getDataCenterId()); - - if (allHosts.isEmpty()) { - storagePoolDao.expunge(primaryDataStoreInfo.getId()); - - throw new CloudRuntimeException( - "No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId()); - } + logger.debug(String.format("Attaching the pool to each of the hosts %s in the 
cluster: %s", hostsToConnect, primaryDataStoreInfo.getClusterId())); List poolHosts = new ArrayList(); - for (HostVO host : allHosts) { + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); @@ -288,19 +284,15 @@ public boolean attachCluster(DataStore datastore, ClusterScope scope) { public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { dataStoreHelper.attachZone(dataStore); - List xenServerHosts = _resourceMgr - .listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId()); - List vmWareServerHosts = _resourceMgr - .listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId()); - List kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, - scope.getScopeId()); - List hosts = new ArrayList(); + List hostsToConnect = new ArrayList<>(); + HypervisorType[] hypervisorTypes = {HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM}; - hosts.addAll(xenServerHosts); - hosts.addAll(vmWareServerHosts); - hosts.addAll(kvmHosts); + for (HypervisorType type : hypervisorTypes) { + hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type)); + } - for (HostVO host : hosts) { + logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", hostsToConnect)); + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index 351d59f6b038..fbcc51e2e8c4 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -18,7 +18,6 @@ */ package org.apache.cloudstack.storage.datastore.lifecycle; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.UUID; @@ -26,6 +25,7 @@ import javax.inject.Inject; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -51,7 +51,6 @@ import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.StorageConflictException; import com.cloud.exception.StorageUnavailableException; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -63,6 +62,7 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolAutomation; import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolWorkDao; import com.cloud.storage.dao.VolumeDao; 
@@ -129,6 +129,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor StoragePoolAutomation storagePoolAutmation; @Inject protected HostDao _hostDao; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @SuppressWarnings("unchecked") @Override @@ -146,9 +148,11 @@ public DataStore initialize(Map dsInfos) { PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); String tags = (String)dsInfos.get("tags"); + String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS); Map details = (Map)dsInfos.get("details"); parameters.setTags(tags); + parameters.setStorageAccessGroups(storageAccessGroups); parameters.setIsTagARule((Boolean)dsInfos.get("isTagARule")); parameters.setDetails(details); @@ -386,17 +390,15 @@ protected boolean createStoragePool(HostVO host, StoragePool pool) { } private Pair, Boolean> prepareOcfs2NodesIfNeeded(PrimaryDataStoreInfo primaryStore) { + List hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryStore); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId())); + List hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList()); + if (!StoragePoolType.OCFS2.equals(primaryStore.getPoolType())) { - return new Pair<>(_hostDao.listIdsForUpRouting(primaryStore.getDataCenterId(), - primaryStore.getPodId(), primaryStore.getClusterId()), true); + return new Pair<>(hostIds, true); } - List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primaryStore.getClusterId(), - primaryStore.getPodId(), primaryStore.getDataCenterId()); - if (allHosts.isEmpty()) { - return new Pair<>(Collections.emptyList(), true); - } - List hostIds = allHosts.stream().map(HostVO::getId).collect(Collectors.toList()); - if (!_ocfs2Mgr.prepareNodes(allHosts, primaryStore)) { + + if (!_ocfs2Mgr.prepareNodes(hostsToConnect, primaryStore)) 
{ return new Pair<>(hostIds, false); } return new Pair<>(hostIds, true); @@ -432,8 +434,9 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { @Override public boolean attachZone(DataStore store, ZoneScope scope, HypervisorType hypervisorType) { - List hostIds = _hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType); - logger.debug("In createPool. Attaching the pool to each of the hosts."); + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(store, scope.getScopeId(), hypervisorType); + logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect)); + List hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList()); storageMgr.connectHostsToPool(store, hostIds, scope, true, true); dataStoreHelper.attachZone(store, hypervisorType); return true; diff --git a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java index 24c036d443d7..3b533d588d38 100644 --- a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java +++ b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java @@ -25,7 +25,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.util.List; +import java.util.Arrays; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -143,9 +143,15 @@ public void initMocks() throws StorageConflictException { storageMgr.registerHostListener("default", hostListener); + HostVO host1 = 
Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.when(host2.getId()).thenReturn(2L); + + when(_resourceMgr.getEligibleUpHostsInClusterForStorageConnection(store)) + .thenReturn(Arrays.asList(host1, host2)); - when(hostDao.listIdsForUpRouting(anyLong(), anyLong(), anyLong())) - .thenReturn(List.of(1L, 2L)); when(hostDao.findById(anyLong())).thenReturn(mock(HostVO.class)); when(agentMgr.easySend(anyLong(), Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer); when(answer.getResult()).thenReturn(true); diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java index e36eacf24c2c..fa9c1b71ff33 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java @@ -39,6 +39,7 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -68,6 +69,8 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi @Inject private CapacityManager _capacityMgr; @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; + @Inject AgentManager _agentMgr; public LinstorPrimaryDataStoreLifeCycleImpl() @@ -204,20 +207,12 @@ public boolean attachCluster(DataStore dataStore, 
ClusterScope scope) { throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type."); } - // check if there is at least one host up in this cluster - List allHosts = resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, - primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(), - primaryDataStoreInfo.getDataCenterId()); - - if (allHosts.isEmpty()) { - _primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - - throw new CloudRuntimeException( - "No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId()); - } + PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) dataStore; + List hostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId())); List poolHosts = new ArrayList<>(); - for (HostVO host : allHosts) { + for (HostVO host : hostsToConnect) { try { createStoragePool(host, primaryDataStoreInfo); @@ -249,10 +244,11 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type."); } - List hosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, - scope.getScopeId()); + List hostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType); + + logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", hostsToConnect)); - for (HostVO host : hosts) { + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java index 79f771721f5a..431346105524 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java @@ -24,6 +24,7 @@ import javax.inject.Inject; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -58,6 +59,8 @@ public class NexentaPrimaryDataStoreLifeCycle StorageManager _storageMgr; @Inject private StoragePoolAutomation storagePoolAutomation; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @Override public DataStore initialize(Map dsInfos) { @@ -130,16 +133,14 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) { dataStoreHelper.attachZone(dataStore); - List xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.XenServer, scope.getScopeId()); - List vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.VMware, scope.getScopeId()); - List kvmHosts = 
_resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, scope.getScopeId()); - List hosts = new ArrayList(); - - hosts.addAll(xenServerHosts); - hosts.addAll(vmWareServerHosts); - hosts.addAll(kvmHosts); + List hostsToConnect = new ArrayList<>(); + Hypervisor.HypervisorType[] hypervisorTypes = {Hypervisor.HypervisorType.XenServer, Hypervisor.HypervisorType.VMware, Hypervisor.HypervisorType.KVM}; - for (HostVO host : hosts) { + for (Hypervisor.HypervisorType type : hypervisorTypes) { + hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type)); + } + logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect)); + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index a538cdb49e4a..461992be1022 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -18,6 +18,39 @@ */ package org.apache.cloudstack.storage.datastore.lifecycle; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLDecoder; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import com.cloud.host.HostVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; 
+import org.apache.cloudstack.api.ApiConstants; +import com.cloud.utils.StringUtils; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.commons.collections.CollectionUtils; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.capacity.CapacityManager; @@ -34,41 +67,15 @@ import com.cloud.storage.StoragePoolAutomation; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.template.TemplateManager; -import com.cloud.utils.StringUtils; import com.cloud.utils.UriUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import 
org.apache.cloudstack.engine.subsystem.api.storage.HostScope; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; -import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; -import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManager; import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManagerImpl; -import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; -import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; -import org.apache.commons.collections.CollectionUtils; -import javax.inject.Inject; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URLDecoder; -import java.security.KeyManagementException; -import java.security.NoSuchAlgorithmException; import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { @Inject @@ -98,6 +105,8 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy @Inject private AgentManager agentMgr; private ScaleIOSDCManager sdcManager; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; public 
ScaleIOPrimaryDataStoreLifeCycle() { sdcManager = new ScaleIOSDCManagerImpl(); @@ -141,6 +150,7 @@ public DataStore initialize(Map dsInfos) { Long capacityBytes = (Long)dsInfos.get("capacityBytes"); Long capacityIops = (Long)dsInfos.get("capacityIops"); String tags = (String)dsInfos.get("tags"); + String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS); Boolean isTagARule = (Boolean) dsInfos.get("isTagARule"); Map details = (Map) dsInfos.get("details"); @@ -223,6 +233,7 @@ public DataStore initialize(Map dsInfos) { parameters.setHypervisorType(Hypervisor.HypervisorType.KVM); parameters.setUuid(UUID.randomUUID().toString()); parameters.setTags(tags); + parameters.setStorageAccessGroups(storageAccessGroups); parameters.setIsTagARule(isTagARule); StoragePoolStatistics poolStatistics = scaleIOPool.getStatistics(); @@ -260,14 +271,10 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { } PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore; - List hostIds = hostDao.listIdsForUpRouting(primaryDataStoreInfo.getDataCenterId(), - primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getClusterId()); - if (hostIds.isEmpty()) { - primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + cluster); - } + List hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryDataStoreInfo); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, cluster)); + List hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList()); - logger.debug("Attaching the pool to each of the hosts in the {}", cluster); storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false); dataStoreHelper.attachCluster(dataStore); @@ -287,7 +294,10 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, 
Hypervisor.Hyper logger.debug("Attaching the pool to each of the hosts in the {}", dataCenterDao.findById(scope.getScopeId())); - List hostIds = hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType); + List hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the zone: %s", hostsToConnect, scope.getScopeId())); + List hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList()); + storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false); dataStoreHelper.attachZone(dataStore); diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java index bf0b443c18ad..324f3c08cb8f 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -30,8 +30,11 @@ import static org.mockito.Mockito.when; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import com.cloud.host.HostVO; +import com.cloud.resource.ResourceManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; @@ -106,6 +109,9 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { @Mock private HypervisorHostListener hostListener; + @Mock + private ResourceManager resourceManager; + @InjectMocks private ScaleIOPrimaryDataStoreLifeCycle 
scaleIOPrimaryDataStoreLifeCycleTest; private AutoCloseable closeable; @@ -137,8 +143,14 @@ public void testAttachZone() throws Exception { final ZoneScope scope = new ZoneScope(1L); - when(hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), Hypervisor.HypervisorType.KVM)) - .thenReturn(List.of(1L, 2L)); + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.when(host2.getId()).thenReturn(2L); + + when(resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM)) + .thenReturn(Arrays.asList(host1, host2)); when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); when(store.isShared()).thenReturn(true); diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java index 1dbbf458b489..f23698bc97ba 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java @@ -25,6 +25,7 @@ import javax.inject.Inject; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -43,7 +44,6 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; 
import com.cloud.resource.ResourceManager; @@ -74,6 +74,8 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife @Inject private StoragePoolAutomation _storagePoolAutomation; @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; @Inject private VMTemplatePoolDao _tmpltPoolDao; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; // invoked to add primary storage that is based on the SolidFire plug-in @Override @@ -235,11 +237,10 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis @Override public boolean attachCluster(DataStore dataStore, ClusterScope scope) { PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore; + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore); - List hosts = - _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); - - for (HostVO host : hosts) { + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId())); + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { @@ -254,16 +255,15 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - List xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId()); - List vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId()); - List kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); - List hosts = new ArrayList<>(); + List hostsToConnect = new ArrayList<>(); + HypervisorType[] 
hypervisorTypes = {HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM}; - hosts.addAll(xenServerHosts); - hosts.addAll(vmWareServerHosts); - hosts.addAll(kvmHosts); + for (HypervisorType type : hypervisorTypes) { + hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type)); + } - for (HostVO host : hosts) { + logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect)); + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java index 482fa23096a8..b05046cf4969 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java @@ -26,6 +26,8 @@ import javax.inject.Inject; +import com.cloud.host.Host; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -50,7 +52,6 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -85,6 +86,8 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto @Inject private 
StoragePoolDetailsDao storagePoolDetailsDao; @Inject private StoragePoolHostDao storagePoolHostDao; @Inject private TemplateManager tmpltMgr; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; // invoked to add primary storage that is based on the SolidFire plug-in @Override @@ -382,19 +385,12 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis public boolean attachCluster(DataStore store, ClusterScope scope) { PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo)store; - // check if there is at least one host up in this cluster - List allHosts = resourceMgr.listAllUpHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(), - primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId()); - - if (allHosts.isEmpty()) { - primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - - throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", clusterDao.findById(primaryDataStoreInfo.getClusterId()))); - } + List hostsToConnect = resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryDataStoreInfo); boolean success = false; + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, clusterDao.findById(primaryDataStoreInfo.getClusterId()))); - for (HostVO host : allHosts) { + for (HostVO host : hostsToConnect) { success = createStoragePool(host, primaryDataStoreInfo); if (success) { @@ -408,7 +404,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { List poolHosts = new ArrayList<>(); - for (HostVO host : allHosts) { + for (HostVO host : hostsToConnect) { try { storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java 
b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java index 60427e65ea61..d299fe34ffc2 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java @@ -24,6 +24,7 @@ import javax.inject.Inject; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -80,6 +81,8 @@ public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeC private VMTemplateDetailsDao vmTemplateDetailsDao; @Inject private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @Override public DataStore initialize(Map dsInfos) { @@ -208,8 +211,11 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h if (hypervisorType != HypervisorType.KVM) { throw new UnsupportedOperationException("Only KVM hypervisors supported!"); } - List kvmHosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); - for (HostVO host : kvmHosts) { + List kvmHostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), HypervisorType.KVM); + + logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", kvmHostsToConnect)); + + for (HostVO host : kvmHostsToConnect) { try { storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 474dfc096264..5049a9a8b701 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -1344,7 +1344,7 @@ public PodResponse createPodResponse(Pod pod, Boolean showCapacities) { PodResponse podResponse = new PodResponse(); podResponse.setId(pod.getUuid()); podResponse.setName(pod.getName()); - DataCenter zone = ApiDBUtils.findZoneById(pod.getDataCenterId()); + DataCenterVO zone = ApiDBUtils.findZoneById(pod.getDataCenterId()); if (zone != null) { podResponse.setZoneId(zone.getUuid()); podResponse.setZoneName(zone.getName()); @@ -1357,6 +1357,8 @@ public PodResponse createPodResponse(Pod pod, Boolean showCapacities) { podResponse.setVlanId(vlanIds); podResponse.setGateway(pod.getGateway()); podResponse.setAllocationState(pod.getAllocationState().toString()); + podResponse.setStorageAccessGroups(pod.getStorageAccessGroups()); + podResponse.setZoneStorageAccessGroups(zone.getStorageAccessGroups()); if (showCapacities != null && showCapacities) { List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, pod.getId(), null); Set capacityResponses = new HashSet(); @@ -1516,7 +1518,7 @@ public ClusterResponse createClusterResponse(Cluster cluster, Boolean showCapaci clusterResponse.setPodId(pod.getUuid()); clusterResponse.setPodName(pod.getName()); } - DataCenter dc = ApiDBUtils.findZoneById(cluster.getDataCenterId()); + DataCenterVO dc = ApiDBUtils.findZoneById(cluster.getDataCenterId()); if (dc != null) { clusterResponse.setZoneId(dc.getUuid()); clusterResponse.setZoneName(dc.getName()); @@ -1534,6 +1536,10 @@ public ClusterResponse 
createClusterResponse(Cluster cluster, Boolean showCapaci clusterResponse.setArch(cluster.getArch().getType()); } + clusterResponse.setStorageAccessGroups(cluster.getStorageAccessGroups()); + clusterResponse.setPodStorageAccessGroups(pod.getStorageAccessGroups()); + clusterResponse.setZoneStorageAccessGroups(dc.getStorageAccessGroups()); + if (showCapacities != null && showCapacities) { List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, null, cluster.getId()); Set capacityResponses = new HashSet(); diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 9290d2aa7014..1928f0b7fa59 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -3216,6 +3216,7 @@ private Pair, Integer> searchForStoragePoolsInternal(Lis String path = cmd.getPath(); Long pod = cmd.getPodId(); Long cluster = cmd.getClusterId(); + Long host = cmd.getHostId(); String address = cmd.getIpAddress(); String keyword = cmd.getKeyword(); @@ -3225,7 +3226,7 @@ private Pair, Integer> searchForStoragePoolsInternal(Lis Filter searchFilter = new Filter(StoragePoolVO.class, "id", Boolean.TRUE, startIndex, pageSize); Pair, Integer> uniquePoolPair = storagePoolDao.searchForIdsAndCount(id, name, zoneId, path, pod, - cluster, address, scopeType, status, keyword, searchFilter); + cluster, host, address, scopeType, status, keyword, searchFilter); List storagePools = _poolJoinDao.searchByIds(uniquePoolPair.first().toArray(new Long[0])); diff --git a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java index d457f8f7931e..b63ea532575f 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java @@ -82,6 +82,7 @@ public ZoneResponse 
newDataCenterResponse(ResponseView view, DataCenterJoinVO da zoneResponse.setSecurityGroupsEnabled(ApiDBUtils.isSecurityGroupEnabledInZone(dataCenter.getId())); zoneResponse.setLocalStorageEnabled(dataCenter.isLocalStorageEnabled()); zoneResponse.setType(ObjectUtils.defaultIfNull(dataCenter.getType(), DataCenter.Type.Core).toString()); + zoneResponse.setStorageAccessGroups(dataCenter.getStorageAccessGroups()); if ((dataCenter.getDescription() != null) && !dataCenter.getDescription().equalsIgnoreCase("null")) { zoneResponse.setDescription(dataCenter.getDescription()); diff --git a/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java index feee12dcb205..032eb72f1d3c 100644 --- a/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java @@ -221,6 +221,11 @@ private void setNewHostResponseBase(HostJoinVO host, EnumSet detail hostResponse.setArch(host.getArch().getType()); } + hostResponse.setStorageAccessGroups(host.getStorageAccessGroups()); + hostResponse.setClusterStorageAccessGroups(host.getClusterStorageAccessGroups()); + hostResponse.setPodStorageAccessGroups(host.getPodStorageAccessGroups()); + hostResponse.setZoneStorageAccessGroups(host.getZoneStorageAccessGroups()); + float cpuWithOverprovisioning = host.getCpus() * host.getSpeed() * cpuOverprovisioningFactor; hostResponse.setCpuAllocatedValue(cpu); String cpuAllocated = calculateResourceAllocatedPercentage(cpu, cpuWithOverprovisioning); diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java index 89bfaf247661..1af51307fd00 100644 --- a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java @@ -165,6 +165,7 @@ public StoragePoolResponse 
newStoragePoolResponse(StoragePoolJoinVO pool, boolea poolResponse.setClusterName(pool.getClusterName()); poolResponse.setProvider(pool.getStorageProviderName()); poolResponse.setTags(pool.getTag()); + poolResponse.setStorageAccessGroups(pool.getStorageAccessGroup()); poolResponse.setIsTagARule(pool.getIsTagARule()); poolResponse.setOverProvisionFactor(Double.toString(CapacityManager.StorageOverprovisioningFactor.valueIn(pool.getId()))); poolResponse.setManaged(storagePool.isManaged()); @@ -191,6 +192,14 @@ public StoragePoolResponse setStoragePoolResponse(StoragePoolResponse response, response.setTags(tag); } } + String storageAccessGroup = sp.getStorageAccessGroup(); + if (storageAccessGroup != null) { + if (response.getStorageAccessGroups() != null && response.getStorageAccessGroups().length() > 0) { + response.setStorageAccessGroups(response.getStorageAccessGroups() + "," + storageAccessGroup); + } else { + response.setStorageAccessGroups(storageAccessGroup); + } + } if (response.hasAnnotation() == null) { response.setHasAnnotation(annotationDao.hasAnnotations(sp.getUuid(), AnnotationService.EntityType.PRIMARY_STORAGE.name(), accountManager.isRootAdmin(CallContext.current().getCallingAccount().getId()))); @@ -251,6 +260,7 @@ public StoragePoolResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO poolResponse.setClusterName(pool.getClusterName()); poolResponse.setProvider(pool.getStorageProviderName()); poolResponse.setTags(pool.getTag()); + poolResponse.setStorageAccessGroups(pool.getStorageAccessGroup()); poolResponse.setIsTagARule(pool.getIsTagARule()); // set async job @@ -271,6 +281,14 @@ public StoragePoolResponse setStoragePoolForMigrationResponse(StoragePoolRespons response.setTags(tag); } } + String storageAccessGroup = sp.getStorageAccessGroup(); + if (storageAccessGroup != null) { + if (response.getStorageAccessGroups() != null && response.getStorageAccessGroups().length() > 0) { + 
response.setStorageAccessGroups(response.getStorageAccessGroups() + "," + storageAccessGroup); + } else { + response.setStorageAccessGroups(storageAccessGroup); + } + } return response; } diff --git a/server/src/main/java/com/cloud/api/query/vo/DataCenterJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/DataCenterJoinVO.java index 23e8766e6779..e04577e5eb6c 100644 --- a/server/src/main/java/com/cloud/api/query/vo/DataCenterJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/DataCenterJoinVO.java @@ -125,6 +125,9 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id @Enumerated(value = EnumType.STRING) private DataCenter.Type type; + @Column(name = "storage_access_groups") + private String storageAccessGroups; + public DataCenterJoinVO() { } @@ -234,7 +237,11 @@ public int getSortKey() { return sortKey; } - public DataCenter.Type getType() { + public DataCenter.Type getType() { return type; } + + public String getStorageAccessGroups() { + return storageAccessGroups; + } } diff --git a/server/src/main/java/com/cloud/api/query/vo/HostJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/HostJoinVO.java index 72918c3fa274..83cfcc8375f3 100644 --- a/server/src/main/java/com/cloud/api/query/vo/HostJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/HostJoinVO.java @@ -185,6 +185,18 @@ public class HostJoinVO extends BaseViewVO implements InternalIdentity, Identity @Column(name = "is_tag_a_rule") private Boolean isTagARule; + @Column(name = "storage_access_groups") + private String storageAccessGroups; + + @Column(name = "cluster_storage_access_groups") + private String clusterStorageAccessGroups; + + @Column(name = "pod_storage_access_groups") + private String podStorageAccessGroups; + + @Column(name = "zone_storage_access_groups") + private String zoneStorageAccessGroups; + @Column(name = "memory_used_capacity") private long memUsedCapacity; @@ -417,6 +429,22 @@ public Boolean getIsTagARule() { return 
isTagARule; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public String getClusterStorageAccessGroups() { + return clusterStorageAccessGroups; + } + + public String getPodStorageAccessGroups() { + return podStorageAccessGroups; + } + + public String getZoneStorageAccessGroups() { + return zoneStorageAccessGroups; + } + public String getAnnotation() { return annotation; } diff --git a/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java index 41a30fd40d3c..0767e468f73e 100644 --- a/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java @@ -119,6 +119,9 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I @Column(name = "is_tag_a_rule") private boolean isTagARule; + @Column(name = "storage_access_group") + private String storageAccessGroup; + @Column(name = "disk_used_capacity") private long usedCapacity; @@ -271,6 +274,10 @@ public long getUsedCapacity() { return usedCapacity; } + public String getStorageAccessGroup() { + return storageAccessGroup; + } + public long getReservedCapacity() { return reservedCapacity; } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 56a86e65da02..9fc7fc589e56 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -50,6 +50,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.resource.ResourceManager; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; @@ -473,6 +474,8 @@ public class ConfigurationManagerImpl extends 
ManagerBase implements Configurati Ipv6Service ipv6Service; @Inject NsxProviderDao nsxProviderDao; + @Inject + ResourceManager resourceManager; // FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao? @Inject @@ -2430,7 +2433,7 @@ private void checkPodRangeParametersBasicsForNonEdgeZone(final String startIp, f @Override @ActionEvent(eventType = EventTypes.EVENT_POD_CREATE, eventDescription = "creating pod", async = false) - public Pod createPod(final long zoneId, final String name, final String startIp, final String endIp, final String gateway, final String netmask, String allocationState) { + public Pod createPod(final long zoneId, final String name, final String startIp, final String endIp, final String gateway, final String netmask, String allocationState, List storageAccessGroups) { final DataCenterVO zone = _zoneDao.findById(zoneId); if (zone == null) { throw new InvalidParameterValueException("Please specify a valid zone."); @@ -2456,13 +2459,13 @@ public Pod createPod(final long zoneId, final String name, final String startIp, if (allocationState == null) { allocationState = Grouping.AllocationState.Enabled.toString(); } - return createPod(userId.longValue(), name, zone, gateway, cidr, startIp, endIp, allocationState, false); + return createPod(userId.longValue(), name, zone, gateway, cidr, startIp, endIp, allocationState, false, storageAccessGroups); } @Override @DB public HostPodVO createPod(final long userId, final String podName, final DataCenter zone, final String gateway, final String cidr, String startIp, String endIp, final String allocationStateStr, - final boolean skipGatewayOverlapCheck) { + final boolean skipGatewayOverlapCheck, List storageAccessGroups) { final String cidrAddress = DataCenter.Type.Edge.equals(zone.getType()) ? "" : getCidrAddress(cidr); final int cidrSize = DataCenter.Type.Edge.equals(zone.getType()) ? 
0 : getCidrSize(cidr); if (DataCenter.Type.Edge.equals(zone.getType())) { @@ -2495,6 +2498,10 @@ public HostPodVO createPod(final long userId, final String podName, final DataCe podFinal.setAllocationState(allocationState); } + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + podFinal.setStorageAccessGroups(String.join(",", storageAccessGroups)); + } + final String startIpFinal = startIp; final String endIpFinal = endIp; HostPodVO hostPodVO = Transaction.execute((TransactionCallback) status -> { @@ -2955,8 +2962,8 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { @Override @DB public DataCenterVO createZone(final long userId, final String zoneName, final String dns1, final String dns2, final String internalDns1, final String internalDns2, final String guestCidr, final String domain, - final Long domainId, final NetworkType zoneType, final String allocationStateStr, final String networkDomain, final boolean isSecurityGroupEnabled, final boolean isLocalStorageEnabled, - final String ip6Dns1, final String ip6Dns2, final boolean isEdge) { + final Long domainId, final NetworkType zoneType, final String allocationStateStr, final String networkDomain, final boolean isSecurityGroupEnabled, final boolean isLocalStorageEnabled, + final String ip6Dns1, final String ip6Dns2, final boolean isEdge, List storageAccessGroups) { // checking the following params outside checkzoneparams method as we do // not use these params for updatezone @@ -2991,6 +2998,9 @@ public DataCenterVO createZone(final long userId, final String zoneName, final S zoneFinal.setAllocationState(Grouping.AllocationState.Disabled); } zoneFinal.setType(isEdge ? 
DataCenter.Type.Edge : DataCenter.Type.Core); + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + zoneFinal.setStorageAccessGroups(String.join(",", storageAccessGroups)); + } return Transaction.execute(new TransactionCallback() { @Override @@ -3102,6 +3112,7 @@ public DataCenter createZone(final CreateZoneCmd cmd) { boolean isSecurityGroupEnabled = cmd.getSecuritygroupenabled(); final boolean isLocalStorageEnabled = cmd.getLocalStorageEnabled(); final boolean isEdge = cmd.isEdge(); + final List storageAccessGroups = cmd.getStorageAccessGroups(); if (allocationState == null) { allocationState = Grouping.AllocationState.Disabled.toString(); @@ -3135,7 +3146,7 @@ public DataCenter createZone(final CreateZoneCmd cmd) { } return createZone(userId, zoneName, dns1, dns2, internalDns1, internalDns2, guestCidr, domainVO != null ? domainVO.getName() : null, domainId, zoneType, allocationState, - networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2, isEdge); + networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2, isEdge, storageAccessGroups); } @Override diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index e7b926eb4e44..feb7e66159d9 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -1411,7 +1411,7 @@ private boolean canAvoidCluster(Cluster clusterVO, ExcludeList avoids, ExcludeLi if (vmRequiresSharedStorage) { // check shared pools - List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, false, 0); + List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), ScopeType.CLUSTER, null, false, 0); for (StoragePoolVO pool : allPoolsInCluster) { if 
(!allocatorAvoidOutput.shouldAvoid(pool)) { // there's some pool in the cluster that is not yet in avoid set @@ -1658,6 +1658,13 @@ public boolean checkAffinity(Host potentialHost, List preferredHosts) { } protected boolean hostCanAccessSPool(Host host, StoragePool pool) { + if (!_storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, pool)) { + if (logger.isDebugEnabled()) { + logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, host)); + } + return false; + } + boolean hostCanAccessSPool = false; StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index 7c997cc49bc3..9fc93ddf237b 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -32,11 +32,16 @@ import java.util.Locale; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.ScopeType; +import com.cloud.storage.StoragePoolAndAccessGroupMapVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; +import com.cloud.storage.dao.StoragePoolTagsDao; import org.apache.cloudstack.alert.AlertService; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; @@ -54,6 +59,8 @@ import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import 
org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -61,6 +68,8 @@ import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.ObjectUtils; +import org.apache.commons.lang3.ArrayUtils; + import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -172,7 +181,6 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; -import com.cloud.utils.StringUtils; import com.cloud.utils.Ternary; import com.cloud.utils.UriUtils; import com.cloud.utils.component.Manager; @@ -199,6 +207,7 @@ import com.cloud.utils.ssh.SSHCmdHelper; import com.cloud.utils.ssh.SshException; import com.cloud.vm.UserVmManager; +import com.cloud.utils.StringUtils; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; @@ -250,6 +259,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Inject private PrimaryDataStoreDao _storagePoolDao; @Inject + private StoragePoolTagsDao _storagePoolTagsDao; + @Inject + private StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao; + @Inject private DataCenterIpAddressDao _privateIPAddressDao; @Inject private IPAddressDao _publicIPAddressDao; @@ -513,6 +526,11 @@ public List discoverCluster(final AddClusterCmd cmd) throws I cluster.setClusterType(clusterType); cluster.setAllocationState(allocationState); cluster.setArch(arch.getType()); + List storageAccessGroups = cmd.getStorageAccessGroups(); + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + cluster.setStorageAccessGroups(String.join(",", storageAccessGroups)); + } + try { cluster = _clusterDao.persist(cluster); } catch (final Exception e) { @@ -572,7 +590,7 @@ public List discoverCluster(final 
AddClusterCmd cmd) throws I for (final Map.Entry> entry : resources.entrySet()) { final ServerResource resource = entry.getKey(); - final HostVO host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, null, false); + final HostVO host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, null, null, false); if (host != null) { hosts.add(host); } @@ -614,6 +632,7 @@ public List discoverHosts(final AddHostCmd cmd) throws IllegalAr final String username = cmd.getUsername(); final String password = cmd.getPassword(); final List hostTags = cmd.getHostTags(); + final List storageAccessGroups = cmd.getStorageAccessGroups(); dcId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), dcId); @@ -643,18 +662,18 @@ public List discoverHosts(final AddHostCmd cmd) throws IllegalAr String hypervisorType = cmd.getHypervisor().equalsIgnoreCase(HypervisorGuru.HypervisorCustomDisplayName.value()) ? "Custom" : cmd.getHypervisor(); - return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, hypervisorType, hostTags, cmd.getFullUrlParams(), false); + return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, hypervisorType, hostTags, storageAccessGroups, cmd.getFullUrlParams(), false); } @Override public List discoverHosts(final AddSecondaryStorageCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException { final Long dcId = cmd.getZoneId(); final String url = cmd.getUrl(); - return discoverHostsFull(dcId, null, null, null, url, null, null, "SecondaryStorage", null, null, false); + return discoverHostsFull(dcId, null, null, null, url, null, null, "SecondaryStorage", null, null, null, false); } private List discoverHostsFull(final Long dcId, final Long podId, Long clusterId, final String clusterName, String url, String username, String password, - final String hypervisorType, final List hostTags, final Map params, final boolean 
deferAgentCreation) throws IllegalArgumentException, DiscoveryException, + final String hypervisorType, final List hostTags, List storageAccessGroups, final Map params, final boolean deferAgentCreation) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException { URI uri; @@ -860,9 +879,9 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c HostVO host; if (deferAgentCreation) { - host = (HostVO)createHostAndAgentDeferred(resource, entry.getValue(), true, hostTags, false); + host = (HostVO)createHostAndAgentDeferred(resource, entry.getValue(), true, hostTags, storageAccessGroups, false); } else { - host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, hostTags, false); + host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, hostTags, storageAccessGroups, false); } if (host != null) { hosts.add(host); @@ -1270,7 +1289,7 @@ public Cluster updateCluster(UpdateClusterCmd cmd) { } - return cluster; + return _clusterDao.findById(cluster.getId()); } @Override @@ -1917,6 +1936,739 @@ private void updateHostGuestOSCategory(Long hostId, Long guestOSCategoryId) { } } + private void removeStorageAccessGroupsOnPodsInZone(long zoneId, List newStoragePoolTags, List tagsToDeleteOnZone) { + List pods = _podDao.listByDataCenterId(zoneId); + for (HostPodVO pod : pods) { + removeStorageAccessGroupsOnClustersInPod(pod.getId(), newStoragePoolTags, tagsToDeleteOnZone); + updateStorageAccessGroupsToBeAddedOnPodInZone(pod.getId(), newStoragePoolTags); + } + } + + private void removeStorageAccessGroupsOnClustersInPod(long podId, List newStoragePoolTags, List tagsToDeleteOnPod) { + List clusters = _clusterDao.listByPodId(podId); + for (ClusterVO cluster : clusters) { + updateStorageAccessGroupsToBeDeletedOnHostsInCluster(cluster.getId(), tagsToDeleteOnPod); + updateStorageAccessGroupsToBeAddedOnHostsInCluster(cluster.getId(), newStoragePoolTags); + updateStorageAccessGroupsToBeAddedOnClustersInPod(cluster.getId(), 
newStoragePoolTags); + } + } + + private void updateStorageAccessGroupsToBeDeletedOnHostsInCluster(long clusterId, List storageAccessGroupsToDeleteOnCluster) { + if (CollectionUtils.isEmpty(storageAccessGroupsToDeleteOnCluster)) { + return; + } + + List hosts = _hostDao.findByClusterId(clusterId); + List hostIdsUsingStorageAccessGroups = listOfHostIdsUsingTheStorageAccessGroups(storageAccessGroupsToDeleteOnCluster, clusterId, null, null); + for (HostVO host : hosts) { + String hostStorageAccessGroups = host.getStorageAccessGroups(); + if (hostIdsUsingStorageAccessGroups != null && hostIdsUsingStorageAccessGroups.contains(host.getId())) { + Set mergedSet = hostStorageAccessGroups != null + ? new HashSet<>(Arrays.asList(hostStorageAccessGroups.split(","))) + : new HashSet<>(); + mergedSet.addAll(storageAccessGroupsToDeleteOnCluster); + host.setStorageAccessGroups(String.join(",", mergedSet)); + _hostDao.update(host.getId(), host); + } else { + if (hostStorageAccessGroups != null) { + List hostTagsList = new ArrayList<>(Arrays.asList(hostStorageAccessGroups.split(","))); + hostTagsList.removeAll(storageAccessGroupsToDeleteOnCluster); + String updatedClusterStoragePoolTags = hostTagsList.isEmpty() ? null : String.join(",", hostTagsList); + host.setStorageAccessGroups(updatedClusterStoragePoolTags); + _hostDao.update(host.getId(), host); + } + } + } + } + + private void updateStorageAccessGroupsToBeAddedOnHostsInCluster(long clusterId, List tagsAddedOnCluster) { + if (CollectionUtils.isEmpty(tagsAddedOnCluster)) { + return; + } + + List hosts = _hostDao.findByClusterId(clusterId); + for (HostVO host : hosts) { + String hostStoragePoolTags = host.getStorageAccessGroups(); + Set hostStoragePoolTagsSet = hostStoragePoolTags != null + ? new HashSet<>(Arrays.asList(hostStoragePoolTags.split(","))) + : new HashSet<>(); + + hostStoragePoolTagsSet.removeIf(tagsAddedOnCluster::contains); + host.setStorageAccessGroups(hostStoragePoolTagsSet.isEmpty() ? 
null : String.join(",", hostStoragePoolTagsSet)); + _hostDao.update(host.getId(), host); + } + } + + private void updateStorageAccessGroupsToBeAddedOnClustersInPod(long clusterId, List tagsAddedOnPod) { + if (CollectionUtils.isEmpty(tagsAddedOnPod)) { + return; + } + + ClusterVO cluster = _clusterDao.findById(clusterId); + String clusterStoragePoolTags = cluster.getStorageAccessGroups(); + if (clusterStoragePoolTags != null) { + List clusterTagsList = new ArrayList<>(Arrays.asList(clusterStoragePoolTags.split(","))); + clusterTagsList.removeAll(tagsAddedOnPod); + String updatedClusterStoragePoolTags = clusterTagsList.isEmpty() ? null : String.join(",", clusterTagsList); + cluster.setStorageAccessGroups(updatedClusterStoragePoolTags); + _clusterDao.update(cluster.getId(), cluster); + } + } + + private void updateStorageAccessGroupsToBeAddedOnPodInZone(long podId, List tagsAddedOnZone) { + if (CollectionUtils.isEmpty(tagsAddedOnZone)) { + return; + } + + HostPodVO pod = _podDao.findById(podId); + String podStoragePoolTags = pod.getStorageAccessGroups(); + if (podStoragePoolTags != null) { + List podTagsList = new ArrayList<>(Arrays.asList(podStoragePoolTags.split(","))); + podTagsList.removeAll(tagsAddedOnZone); + String updatedClusterStoragePoolTags = podTagsList.isEmpty() ? 
null : String.join(",", podTagsList); + pod.setStorageAccessGroups(updatedClusterStoragePoolTags); + _podDao.update(pod.getId(), pod); + } + } + + public List listOfHostIdsUsingTheStorageAccessGroups(List storageAccessGroups, Long clusterId, Long podId, Long datacenterId) { + GenericSearchBuilder vmInstanceSearch = _vmDao.createSearchBuilder(Long.class); + vmInstanceSearch.select(null, Func.DISTINCT, vmInstanceSearch.entity().getHostId()); + vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.NNULL); + vmInstanceSearch.and("removed", vmInstanceSearch.entity().getRemoved(), Op.NULL); + + GenericSearchBuilder volumeSearch = volumeDao.createSearchBuilder(Long.class); + volumeSearch.selectFields(volumeSearch.entity().getInstanceId()); + volumeSearch.and("state", volumeSearch.entity().getState(), Op.NIN); + + GenericSearchBuilder storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class); + storagePoolSearch.and("clusterId", storagePoolSearch.entity().getClusterId(), Op.EQ); + storagePoolSearch.and("podId", storagePoolSearch.entity().getPodId(), Op.EQ); + storagePoolSearch.and("datacenterId", storagePoolSearch.entity().getDataCenterId(), Op.EQ); + storagePoolSearch.selectFields(storagePoolSearch.entity().getId()); + + GenericSearchBuilder storageAccessGroupSearch = _storagePoolAccessGroupMapDao.createSearchBuilder(Long.class); + storageAccessGroupSearch.and("sag", storageAccessGroupSearch.entity().getStorageAccessGroup(), Op.IN); + + storagePoolSearch.join("storageAccessGroupSearch", storageAccessGroupSearch, storagePoolSearch.entity().getId(), storageAccessGroupSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER); + storageAccessGroupSearch.done(); + + volumeSearch.join("storagePoolSearch", storagePoolSearch, volumeSearch.entity().getPoolId(), storagePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER); + storagePoolSearch.done(); + + vmInstanceSearch.join("volumeSearch", volumeSearch, vmInstanceSearch.entity().getId(), 
volumeSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); + volumeSearch.done(); + + vmInstanceSearch.done(); + + SearchCriteria sc = vmInstanceSearch.create(); + sc.setJoinParameters("storageAccessGroupSearch", "sag", storageAccessGroups.toArray()); + sc.setJoinParameters("volumeSearch", "state", new String[]{"Destroy", "Error", "Expunging", "Expunged"}); + if (clusterId != null) { + sc.setParameters("storagePoolSearch", "clusterId", clusterId); + } + if (podId != null) { + sc.setParameters("storagePoolSearch", "podId", podId); + } + if (datacenterId != null) { + sc.setParameters("storagePoolSearch", "datacenterId", datacenterId); + } + + return _vmDao.customSearch(sc, null); + } + + public List listOfHostIdsUsingTheStoragePool(Long storagePoolId) { + GenericSearchBuilder vmInstanceSearch = _vmDao.createSearchBuilder(Long.class); + vmInstanceSearch.select(null, Func.DISTINCT, vmInstanceSearch.entity().getHostId()); + vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.NNULL); + vmInstanceSearch.and("removed", vmInstanceSearch.entity().getRemoved(), Op.NULL); + + GenericSearchBuilder volumeSearch = volumeDao.createSearchBuilder(Long.class); + volumeSearch.selectFields(volumeSearch.entity().getInstanceId()); + volumeSearch.and("state", volumeSearch.entity().getState(), Op.NIN); + + GenericSearchBuilder storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class); + storagePoolSearch.selectFields(storagePoolSearch.entity().getId()); + storagePoolSearch.and("poolId", storagePoolSearch.entity().getId(), Op.EQ); + + volumeSearch.join("storagePoolSearch", storagePoolSearch, volumeSearch.entity().getPoolId(), storagePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER); + storagePoolSearch.done(); + + vmInstanceSearch.join("volumeSearch", volumeSearch, vmInstanceSearch.entity().getId(), volumeSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); + volumeSearch.done(); + + vmInstanceSearch.done(); + + SearchCriteria sc = 
vmInstanceSearch.create(); + sc.setJoinParameters("storagePoolSearch", "poolId", storagePoolId); + sc.setJoinParameters("volumeSearch", "state", new String[]{"Destroy", "Error", "Expunging", "Expunged"}); + + return _vmDao.customSearch(sc, null); + } + + public List listOfVolumesUsingTheStorageAccessGroups(List storageAccessGroups, Long hostId, Long clusterId, Long podId, Long datacenterId) { + SearchBuilder volumeSearch = volumeDao.createSearchBuilder(); + volumeSearch.and("state", volumeSearch.entity().getState(), Op.NIN); + + GenericSearchBuilder vmInstanceSearch = _vmDao.createSearchBuilder(Long.class); + vmInstanceSearch.selectFields(vmInstanceSearch.entity().getId()); + vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.EQ); + vmInstanceSearch.and("removed", vmInstanceSearch.entity().getRemoved(), Op.NULL); + + GenericSearchBuilder storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class); + storagePoolSearch.and("clusterId", storagePoolSearch.entity().getClusterId(), Op.EQ); + storagePoolSearch.and("podId", storagePoolSearch.entity().getPodId(), Op.EQ); + storagePoolSearch.and("datacenterId", storagePoolSearch.entity().getDataCenterId(), Op.EQ); + storagePoolSearch.selectFields(storagePoolSearch.entity().getId()); + + GenericSearchBuilder storageAccessGroupSearch = _storagePoolAccessGroupMapDao.createSearchBuilder(Long.class); + storageAccessGroupSearch.and("sag", storageAccessGroupSearch.entity().getStorageAccessGroup(), Op.IN); + + storagePoolSearch.join("storageAccessGroupSearch", storageAccessGroupSearch, storagePoolSearch.entity().getId(), storageAccessGroupSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER); + + volumeSearch.join("storagePoolSearch", storagePoolSearch, volumeSearch.entity().getPoolId(), storagePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER); + + volumeSearch.join("vmInstanceSearch", vmInstanceSearch, volumeSearch.entity().getInstanceId(), vmInstanceSearch.entity().getId(), 
JoinBuilder.JoinType.INNER); + + storageAccessGroupSearch.done(); + storagePoolSearch.done(); + vmInstanceSearch.done(); + volumeSearch.done(); + + SearchCriteria sc = volumeSearch.create(); + sc.setParameters( "state", new String[]{"Destroy", "Error", "Expunging", "Expunged"}); + sc.setJoinParameters("storageAccessGroupSearch", "sag", storageAccessGroups.toArray()); + if (hostId != null) { + sc.setJoinParameters("vmInstanceSearch", "hostId", hostId); + } + if (clusterId != null) { + sc.setJoinParameters("storagePoolSearch", "clusterId", clusterId); + } + if (podId != null) { + sc.setJoinParameters("storagePoolSearch", "podId", podId); + } + if (datacenterId != null) { + sc.setJoinParameters("storagePoolSearch", "datacenterId", datacenterId); + } + + return volumeDao.customSearch(sc, null); + } + + private List listOfStoragePoolIDsUsedByHost(long hostId) { + GenericSearchBuilder vmInstanceSearch = _vmDao.createSearchBuilder(Long.class); + vmInstanceSearch.selectFields(vmInstanceSearch.entity().getId()); + vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.EQ); + + GenericSearchBuilder volumeSearch = volumeDao.createSearchBuilder(Long.class); + volumeSearch.selectFields(volumeSearch.entity().getPoolId()); + volumeSearch.and("state", volumeSearch.entity().getState(), Op.EQ); + + volumeSearch.join("vmInstanceSearch", vmInstanceSearch, volumeSearch.entity().getInstanceId(), vmInstanceSearch.entity().getId(), JoinBuilder.JoinType.INNER); + vmInstanceSearch.done(); + + GenericSearchBuilder storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class); + storagePoolSearch.select(null, Func.DISTINCT, storagePoolSearch.entity().getId()); + + storagePoolSearch.join("volumeSearch", volumeSearch, storagePoolSearch.entity().getId(), volumeSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER); + volumeSearch.done(); + + storagePoolSearch.done(); + + SearchCriteria sc = storagePoolSearch.create(); + sc.setJoinParameters("vmInstanceSearch", "hostId", 
hostId); + sc.setJoinParameters("volumeSearch", "state", "Ready"); + + List storagePoolsInUse = _storagePoolDao.customSearch(sc, null); + return storagePoolsInUse; + } + + @Override + public void updateStoragePoolConnectionsOnHosts(Long poolId, List storageAccessGroups) { + StoragePoolVO storagePool = _storagePoolDao.findById(poolId); + List hosts = new ArrayList<>(); + + if (storagePool.getScope().equals(ScopeType.CLUSTER)) { + List hostsInCluster = listAllUpHosts(Host.Type.Routing, storagePool.getClusterId(), storagePool.getPodId(), storagePool.getDataCenterId()); + hosts.addAll(hostsInCluster); + } else if (storagePool.getScope().equals(ScopeType.ZONE)) { + List hostsInZone = listAllUpHosts(Host.Type.Routing, null, null, storagePool.getDataCenterId()); + hosts.addAll(hostsInZone); + } + + List hostsToConnect = new ArrayList<>(); + List hostsToDisconnect = new ArrayList<>(); + boolean storagePoolHasAccessGroups = CollectionUtils.isNotEmpty(storageAccessGroups); + + for (HostVO host : hosts) { + String[] storageAccessGroupsOnHost = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + List listOfStorageAccessGroupsOnHost = Arrays.asList(storageAccessGroupsOnHost); + StoragePoolHostVO hostPoolRecord = _storagePoolHostDao.findByPoolHost(storagePool.getId(), host.getId()); + + if (storagePoolHasAccessGroups) { + List intersection = new ArrayList<>(listOfStorageAccessGroupsOnHost); + intersection.retainAll(storageAccessGroups); + if (CollectionUtils.isNotEmpty(intersection)) { + if (hostPoolRecord == null) { + hostsToConnect.add(host); + } + } else { + hostsToDisconnect.add(host); + } + } else { + if (hostPoolRecord == null) { + hostsToConnect.add(host); + } + } + } + + if (CollectionUtils.isNotEmpty(hostsToDisconnect)) { + List hostIdsUsingTheStoragePool = listOfHostIdsUsingTheStoragePool(poolId); + List hostIdsToDisconnect = hostsToDisconnect.stream() + .map(HostVO::getId) + .collect(Collectors.toList()); + List conflictingHostIds = new 
ArrayList<>(CollectionUtils.intersection(hostIdsToDisconnect, hostIdsUsingTheStoragePool)); + if (CollectionUtils.isNotEmpty(conflictingHostIds)) { + Map> hostVolumeMap = new HashMap<>(); + List volumesInPool = volumeDao.findByPoolId(poolId); + Map vmInstanceCache = new HashMap<>(); + + for (Long hostId : conflictingHostIds) { + HostVO host = _hostDao.findById(hostId); + List matchingVolumes = volumesInPool.stream() + .filter(volume -> { + Long vmId = volume.getInstanceId(); + if (vmId == null) return false; + + VMInstanceVO vmInstance = vmInstanceCache.computeIfAbsent(vmId, _vmDao::findById); + return vmInstance != null && hostId.equals(vmInstance.getHostId()); + }) + .collect(Collectors.toList()); + if (!matchingVolumes.isEmpty()) { + hostVolumeMap.put(host, matchingVolumes); + } + } + + logger.error(String.format("Conflict detected: Hosts using the storage pool that need to be disconnected or " + + "connected to the pool: Host IDs and volumes: %s", hostVolumeMap)); + throw new CloudRuntimeException("Storage access groups cannot be updated as they are currently in use by some hosts. 
Please check the logs."); + } + } + + if (!hostsToConnect.isEmpty()) { + logger.debug(String.format("Hosts to connect to storage pool [%s]: %s", storagePool.getUuid(), hostsToConnect)); + for (HostVO host : hostsToConnect) { + connectHostToStoragePool(host, storagePool); + } + } + + if (!hostsToDisconnect.isEmpty()) { + logger.debug(String.format("Hosts to disconnect from storage pool [%s]: %s", storagePool.getUuid(), hostsToDisconnect)); + for (HostVO host : hostsToDisconnect) { + disconnectHostFromStoragePool(host, storagePool); + } + } + } + + protected List filterHostsBasedOnStorageAccessGroups(List allHosts, List storageAccessGroups) { + List hostsToConnect = new ArrayList<>(); + for (HostVO host : allHosts) { + String[] storageAccessGroupsOnHost = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + List listOfStorageAccessGroupsOnHost = Arrays.asList(storageAccessGroupsOnHost); + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + List intersection = new ArrayList<>(listOfStorageAccessGroupsOnHost); + intersection.retainAll(storageAccessGroups); + if (CollectionUtils.isNotEmpty(intersection)) { + hostsToConnect.add(host); + } + } else { + hostsToConnect.add(host); + } + } + return hostsToConnect; + } + + @Override + public List getEligibleUpHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore) { + List allHosts = listAllUpHosts(Host.Type.Routing, primaryStore.getClusterId(), primaryStore.getPodId(), primaryStore.getDataCenterId()); + if (CollectionUtils.isEmpty(allHosts)) { + _storagePoolDao.expunge(primaryStore.getId()); + throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primaryStore.getClusterId()); + } + + List storageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(primaryStore.getId()); + return filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + } + + @Override + public List 
getEligibleUpAndEnabledHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore) { + List allHosts = listAllUpAndEnabledHosts(Host.Type.Routing, primaryStore.getClusterId(), primaryStore.getPodId(), primaryStore.getDataCenterId()); + if (CollectionUtils.isEmpty(allHosts)) { + _storagePoolDao.expunge(primaryStore.getId()); + throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primaryStore.getClusterId()); + } + + List storageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(primaryStore.getId()); + return filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + } + + @Override + public List getEligibleUpAndEnabledHostsInZoneForStorageConnection(DataStore dataStore, long zoneId, HypervisorType hypervisorType) { + List allHosts = listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, zoneId); + + List storageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(dataStore.getId()); + return filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + } + + protected void checkIfAllHostsInUse(List sagsToDelete, Long clusterId, Long podId, Long zoneId) { + if (CollectionUtils.isEmpty(sagsToDelete)) { + return; + } + + List hostIdsUsingStorageTags = listOfHostIdsUsingTheStorageAccessGroups(sagsToDelete, clusterId, podId, zoneId); + + // Check for zone level hosts + if (zoneId != null) { + List hostsInZone = _hostDao.findByDataCenterId(zoneId); + Set hostIdsInUseSet = hostIdsUsingStorageTags.stream().collect(Collectors.toSet()); + + boolean allInUseZone = hostsInZone.stream() + .map(HostVO::getId) + .allMatch(hostIdsInUseSet::contains); + + if (allInUseZone) { + throw new CloudRuntimeException("All hosts in the zone are using the storage access groups"); + } + } + + // Check for cluster level hosts + if (clusterId != null) { + List hostsInCluster = _hostDao.findByClusterId(clusterId, Type.Routing); + Set hostIdsInUseSet = 
hostIdsUsingStorageTags.stream().collect(Collectors.toSet()); + + boolean allInUseCluster = hostsInCluster.stream() + .map(HostVO::getId) + .allMatch(hostIdsInUseSet::contains); + + if (allInUseCluster) { + throw new CloudRuntimeException("All hosts in the cluster are using the storage access groups"); + } + } + + // Check for pod level hosts + if (podId != null) { + List hostsInPod = _hostDao.findByPodId(podId, Type.Routing); + Set hostIdsInUseSet = hostIdsUsingStorageTags.stream().collect(Collectors.toSet()); + + boolean allInUsePod = hostsInPod.stream() + .map(HostVO::getId) + .allMatch(hostIdsInUseSet::contains); + + if (allInUsePod) { + throw new CloudRuntimeException("All hosts in the pod are using the storage access groups"); + } + } + } + + @Override + public void updateZoneStorageAccessGroups(long zoneId, List newStorageAccessGroups) { + DataCenterVO zoneVO = _dcDao.findById(zoneId); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Updating storage access groups %s to the zone %s", newStorageAccessGroups, zoneVO)); + } + + List sagsToAdd = new ArrayList<>(newStorageAccessGroups); + String sagsOnPod = zoneVO.getStorageAccessGroups(); + List sagsToDelete; + if (sagsOnPod == null || sagsOnPod.trim().isEmpty()) { + sagsToDelete = new ArrayList<>(); + } else { + sagsToDelete = new ArrayList<>(Arrays.asList(sagsOnPod.split(","))); + } + sagsToDelete.removeAll(newStorageAccessGroups); + checkIfAllHostsInUse(sagsToDelete, null, null, zoneId); + + Map> hostsAndStorageAccessGroupsMap = new HashMap<>(); + List pods = _podDao.listByDataCenterId(zoneId); + for (HostPodVO pod : pods) { + List hostsInPod = _hostDao.findHypervisorHostInPod(pod.getId()); + for (HostVO host : hostsInPod) { + String[] existingSAGs = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + List existingSAGsList = new ArrayList<>(Arrays.asList(existingSAGs)); + existingSAGsList.removeAll(sagsToDelete); + List combinedSAGs = new ArrayList<>(sagsToAdd); + 
combinedSAGs.addAll(existingSAGsList); + hostsAndStorageAccessGroupsMap.put(host, combinedSAGs); + } + updateConnectionsBetweenHostsAndStoragePools(hostsAndStorageAccessGroupsMap); + } + + removeStorageAccessGroupsOnPodsInZone(zoneVO.getId(), newStorageAccessGroups, sagsToDelete); + } + + @Override + public void updatePodStorageAccessGroups(long podId, List newStorageAccessGroups) { + HostPodVO podVO = _podDao.findById(podId); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Updating storage access groups %s to the pod %s", newStorageAccessGroups, podVO)); + } + + List sagsToAdd = new ArrayList<>(newStorageAccessGroups); + + String sagsOnPod = podVO.getStorageAccessGroups(); + List sagsToDelete; + if (sagsOnPod == null || sagsOnPod.trim().isEmpty()) { + sagsToDelete = new ArrayList<>(); + } else { + sagsToDelete = new ArrayList<>(Arrays.asList(sagsOnPod.split(","))); + } + sagsToDelete.removeAll(newStorageAccessGroups); + + checkIfAllHostsInUse(sagsToDelete, null, podId, null); + + Map> hostsAndStorageAccessGroupsMap = new HashMap<>(); + List hostsInPod = _hostDao.findHypervisorHostInPod(podId); + for (HostVO host : hostsInPod) { + String[] existingSAGs = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + List existingSAGsList = new ArrayList<>(Arrays.asList(existingSAGs)); + existingSAGsList.removeAll(sagsToDelete); + List combinedSAGs = new ArrayList<>(sagsToAdd); + combinedSAGs.addAll(existingSAGsList); + hostsAndStorageAccessGroupsMap.put(host, combinedSAGs); + } + + updateConnectionsBetweenHostsAndStoragePools(hostsAndStorageAccessGroupsMap); + removeStorageAccessGroupsOnClustersInPod(podId, newStorageAccessGroups, sagsToDelete); + } + + @Override + public void updateClusterStorageAccessGroups(Long clusterId, List newStorageAccessGroups) { + ClusterVO cluster = (ClusterVO) getCluster(clusterId); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Updating storage access groups %s to the cluster %s", 
newStorageAccessGroups, cluster)); + } + + List sagsToAdd = new ArrayList<>(newStorageAccessGroups); + + String existingClusterStorageAccessGroups = cluster.getStorageAccessGroups(); + List sagsToDelete; + if (existingClusterStorageAccessGroups == null || existingClusterStorageAccessGroups.trim().isEmpty()) { + sagsToDelete = new ArrayList<>(); + } else { + sagsToDelete = new ArrayList<>(Arrays.asList(existingClusterStorageAccessGroups.split(","))); + } + sagsToDelete.removeAll(newStorageAccessGroups); + + checkIfAllHostsInUse(sagsToDelete, clusterId, null, null); + + List hostsInCluster = _hostDao.findHypervisorHostInCluster(cluster.getId()); + Map> hostsAndStorageAccessGroupsMap = new HashMap<>(); + for (HostVO host : hostsInCluster) { + String[] existingSAGs = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + List existingSAGsList = new ArrayList<>(Arrays.asList(existingSAGs)); + existingSAGsList.removeAll(sagsToDelete); + List combinedSAGs = new ArrayList<>(sagsToAdd); + combinedSAGs.addAll(existingSAGsList); + hostsAndStorageAccessGroupsMap.put(host, combinedSAGs); + } + + updateConnectionsBetweenHostsAndStoragePools(hostsAndStorageAccessGroupsMap); + + updateStorageAccessGroupsToBeDeletedOnHostsInCluster(cluster.getId(), sagsToDelete); + updateStorageAccessGroupsToBeAddedOnHostsInCluster(cluster.getId(), newStorageAccessGroups); + } + + @Override + public void updateHostStorageAccessGroups(Long hostId, List newStorageAccessGroups) { + HostVO host = _hostDao.findById(hostId); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Updating storage access groups %s to the host %s", newStorageAccessGroups, host)); + } + + List sagsToAdd = new ArrayList<>(newStorageAccessGroups); + String[] sagsOnCluster = _storageMgr.getStorageAccessGroups(null, null, host.getClusterId(), null); + if (ArrayUtils.isNotEmpty(sagsOnCluster)) { + sagsToAdd.addAll(Arrays.asList(sagsOnCluster)); + } + + String sagsOnHost = host.getStorageAccessGroups(); + 
List sagsToDelete; + if (sagsOnHost == null || sagsOnHost.trim().isEmpty()) { + sagsToDelete = new ArrayList<>(); + } else { + sagsToDelete = new ArrayList<>(Arrays.asList(sagsOnHost.split(","))); + } + sagsToDelete.removeAll(newStorageAccessGroups); + + checkIfAnyVolumesInUse(sagsToAdd, sagsToDelete, host); + + updateConnectionsBetweenHostsAndStoragePools(Collections.singletonMap(host, sagsToAdd)); + + host.setStorageAccessGroups(CollectionUtils.isEmpty(newStorageAccessGroups) ? null : String.join(",", newStorageAccessGroups)); + _hostDao.update(host.getId(), host); + } + + protected void checkIfAnyVolumesInUse(List sagsToAdd, List sagsToDelete, HostVO host) { + if (CollectionUtils.isNotEmpty(sagsToDelete)) { + List volumesUsingTheStoragePoolAccessGroups = listOfVolumesUsingTheStorageAccessGroups(sagsToDelete, host.getId(), null, null, null); + if (CollectionUtils.isNotEmpty(volumesUsingTheStoragePoolAccessGroups)) { + List poolsToAdd; + if (CollectionUtils.isNotEmpty(sagsToAdd)) { + poolsToAdd = getStoragePoolsByAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), sagsToAdd.toArray(new String[0]), true); + } else { + poolsToAdd = getStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId()); + } + if (CollectionUtils.isNotEmpty(poolsToAdd)) { + Set poolIdsToAdd = poolsToAdd.stream() + .map(StoragePoolVO::getId) + .collect(Collectors.toSet()); + volumesUsingTheStoragePoolAccessGroups.removeIf(volume -> poolIdsToAdd.contains(volume.getPoolId())); + } + if (CollectionUtils.isNotEmpty(volumesUsingTheStoragePoolAccessGroups)) { + logger.error(String.format("There are volumes in storage pools with the Storage Access Groups that need to be deleted or " + + "in the storage pools which are already connected to the host. 
Those volume IDs are %s", volumesUsingTheStoragePoolAccessGroups)); + throw new CloudRuntimeException("There are volumes in storage pools with the Storage Access Groups that need to be deleted or " + + "in the storage pools which are already connected to the host"); + } + } + } + } + + private void updateConnectionsBetweenHostsAndStoragePools(Map> hostsAndStorageAccessGroupsMap) { + List hostsList = new ArrayList<>(hostsAndStorageAccessGroupsMap.keySet()); + Map> hostStoragePoolsMapBefore = getHostStoragePoolsBefore(hostsList); + + Map> hostPoolsToAddMapAfter = getHostPoolsToAddAfter(hostsAndStorageAccessGroupsMap); + + disconnectPoolsNotInAccessGroups(hostStoragePoolsMapBefore, hostPoolsToAddMapAfter); + } + + private Map> getHostStoragePoolsBefore(List hostsList) { + Map> hostStoragePoolsMapBefore = new HashMap<>(); + for (HostVO host : hostsList) { + List storagePoolsConnectedToHost = _storageMgr.findStoragePoolsConnectedToHost(host.getId()); + List storagePoolsConnectedBefore = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(storagePoolsConnectedToHost)) { + for (StoragePoolHostVO poolHost : storagePoolsConnectedToHost) { + StoragePoolVO pool = _storagePoolDao.findById(poolHost.getPoolId()); + if (pool != null) { + storagePoolsConnectedBefore.add(pool); + } + } + } + hostStoragePoolsMapBefore.put(host, storagePoolsConnectedBefore); + } + return hostStoragePoolsMapBefore; + } + + private Map> getHostPoolsToAddAfter(Map> hostsAndStorageAccessGroupsMap) { + Map> hostPoolsToAddMapAfter = new HashMap<>(); + for (Map.Entry> entry : hostsAndStorageAccessGroupsMap.entrySet()) { + HostVO host = entry.getKey(); + List sagsToAdd = entry.getValue(); + List poolsToAdd; + if (CollectionUtils.isNotEmpty(sagsToAdd)) { + poolsToAdd = getStoragePoolsByAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), sagsToAdd.toArray(new String[0]), true); + } else { + poolsToAdd = getStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), 
host.getClusterId()); + } + hostPoolsToAddMapAfter.put(host, poolsToAdd); + connectHostToStoragePools(host, poolsToAdd); + } + return hostPoolsToAddMapAfter; + } + + private void disconnectPoolsNotInAccessGroups(Map> hostStoragePoolsMapBefore, Map> hostPoolsToAddMapAfter) { + for (Map.Entry> entry : hostStoragePoolsMapBefore.entrySet()) { + HostVO host = entry.getKey(); + List storagePoolsConnectedBefore = entry.getValue(); + List poolsToAdd = hostPoolsToAddMapAfter.get(host); + List poolsToDelete = new ArrayList<>(); + + for (StoragePoolVO pool : storagePoolsConnectedBefore) { + if (poolsToAdd == null || !poolsToAdd.contains(pool)) { + poolsToDelete.add(pool); + } + } + + if (CollectionUtils.isNotEmpty(poolsToDelete)) { + disconnectHostFromStoragePools(host, poolsToDelete); + } + } + } + + protected List getStoragePoolsByAccessGroups(Long dcId, Long podId, Long clusterId, String[] storageAccessGroups, boolean includeEmptyTags) { + List allPoolsByTags = new ArrayList<>(); + allPoolsByTags.addAll(_storagePoolDao.findPoolsByAccessGroupsForHostConnection(dcId, podId, clusterId, ScopeType.CLUSTER, storageAccessGroups)); + allPoolsByTags.addAll(_storagePoolDao.findZoneWideStoragePoolsByAccessGroupsForHostConnection(dcId, storageAccessGroups)); + if (includeEmptyTags) { + allPoolsByTags.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, podId, clusterId, ScopeType.CLUSTER, null)); + allPoolsByTags.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, null, null, ScopeType.ZONE, null)); + } + + return allPoolsByTags; + } + + private List getStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId) { + List allPoolsByTags = new ArrayList<>(); + allPoolsByTags.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, podId, clusterId, ScopeType.CLUSTER, null)); + allPoolsByTags.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, null, null, ScopeType.ZONE, null)); + + return 
allPoolsByTags; + } + + private void connectHostToStoragePools(HostVO host, List poolsToAdd) { + List storagePoolsConnectedToHost = _storageMgr.findStoragePoolsConnectedToHost(host.getId()); + for (StoragePoolVO storagePool : poolsToAdd) { + if (CollectionUtils.isNotEmpty(storagePoolsConnectedToHost)) { + boolean isPresent = storagePoolsConnectedToHost.stream() + .anyMatch(poolHost -> poolHost.getPoolId() == storagePool.getId()); + if (isPresent) { + continue; + } + } + try { + _storageMgr.connectHostToSharedPool(host, storagePool.getId()); + } catch (StorageConflictException se) { + throw new CloudRuntimeException(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host)); + } catch (Exception e) { + logger.warn(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host), e); + } + } + } + + protected void connectHostToStoragePool(HostVO host, StoragePoolVO storagePool) { + try { + _storageMgr.connectHostToSharedPool(host, storagePool.getId()); + } catch (StorageConflictException se) { + throw new CloudRuntimeException(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host)); + } catch (Exception e) { + logger.warn(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host), e); + } + } + + private void disconnectHostFromStoragePools(HostVO host, List poolsToDelete) { + List usedStoragePoolIDs = listOfStoragePoolIDsUsedByHost(host.getId()); + if (usedStoragePoolIDs != null) { + poolsToDelete.removeIf(poolToDelete -> + usedStoragePoolIDs.stream().anyMatch(usedPoolId -> usedPoolId == poolToDelete.getId()) + ); + } + for (StoragePoolVO storagePool : poolsToDelete) { + disconnectHostFromStoragePool(host, storagePool); + } + } + + protected void disconnectHostFromStoragePool(HostVO host, StoragePoolVO storagePool) { + try { + _storageMgr.disconnectHostFromSharedPool(host, storagePool); + 
_storagePoolHostDao.deleteStoragePoolHostDetails(host.getId(), storagePool.getId()); + } catch (StorageConflictException se) { + throw new CloudRuntimeException(String.format("Unable to disconnect the pool %s and the host %s", storagePool, host)); + } catch (Exception e) { + logger.warn(String.format("Unable to disconnect the pool %s and the host %s", storagePool, host), e); + } + } + private void updateHostTags(HostVO host, Long hostId, List hostTags, Boolean isTagARule) { List activeVMs = _vmDao.listByHostId(hostId); logger.warn(String.format("The following active VMs [%s] are using the host [%s]. " + @@ -2261,7 +3013,7 @@ private HostVO getNewHost(StartupCommand[] startupCommands) { } protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource resource, final Map details, List hostTags, - final ResourceStateAdapter.Event stateEvent) { + List storageAccessGroups, final ResourceStateAdapter.Event stateEvent) { boolean newHost = false; StartupCommand startup = cmds[0]; @@ -2353,6 +3105,9 @@ protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource host.setStorageUrl(startup.getIqn()); host.setLastPinged(System.currentTimeMillis() >> 10); host.setHostTags(hostTags, false); + if ((CollectionUtils.isNotEmpty(storageAccessGroups))) { + host.setStorageAccessGroups(String.join(",", storageAccessGroups)); + } host.setDetails(details); host.setArch(CPU.CPUArch.fromType(startup.getArch())); if (startup.getStorageIpAddressDeux() != null) { @@ -2495,11 +3250,11 @@ private void markHostAsDisconnected(HostVO host, final StartupCommand[] cmds) { } } - private Host createHostAndAgent(final ServerResource resource, final Map details, final boolean old, final List hostTags, final boolean forRebalance) { - return createHostAndAgent(resource, details, old, hostTags, forRebalance, false); + private Host createHostAndAgent(final ServerResource resource, final Map details, final boolean old, final List hostTags, List storageAccessGroups, final 
boolean forRebalance) { + return createHostAndAgent(resource, details, old, hostTags, storageAccessGroups, forRebalance, false); } - private Host createHostAndAgent(final ServerResource resource, final Map details, final boolean old, final List hostTags, final boolean forRebalance, final boolean isTransferredConnection) { + private Host createHostAndAgent(final ServerResource resource, final Map details, final boolean old, final List hostTags, List storageAccessGroups, final boolean forRebalance, final boolean isTransferredConnection) { HostVO host = null; StartupCommand[] cmds = null; boolean hostExists = false; @@ -2541,7 +3296,7 @@ private Host createHostAndAgent(final ServerResource resource, final Map details, final boolean old, final List hostTags, final boolean forRebalance) { + private Host createHostAndAgentDeferred(final ServerResource resource, final Map details, final boolean old, final List hostTags, List storageAccessGroups, final boolean forRebalance) { HostVO host = null; StartupCommand[] cmds = null; boolean hostExists = false; @@ -2625,7 +3380,7 @@ private Host createHostAndAgentDeferred(final ServerResource resource, final Map // find out if the host we want to connect to is new (so we can send an event) newHost = getNewHost(cmds) == null; - host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT); + host = createHostVO(cmds, resource, details, hostTags, storageAccessGroups, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT); if (host != null) { // if first host in cluster no need to defer agent creation @@ -2682,7 +3437,7 @@ public Host createHostAndAgent(final Long hostId, final ServerResource resource, @Override public Host createHostAndAgent(final Long hostId, final ServerResource resource, final Map details, final boolean old, final List hostTags, final boolean forRebalance, boolean isTransferredConnection) { - final Host host = createHostAndAgent(resource, details, old, 
hostTags, forRebalance, isTransferredConnection); + final Host host = createHostAndAgent(resource, details, old, hostTags, null, forRebalance, isTransferredConnection); return host; } @@ -2701,12 +3456,12 @@ public Host addHost(final long zoneId, final ServerResource resource, final Type } } - return createHostAndAgent(resource, hostDetails, true, null, false); + return createHostAndAgent(resource, hostDetails, true, null, null, false); } @Override public HostVO createHostVOForConnectedAgent(final StartupCommand[] cmds) { - return createHostVO(cmds, null, null, null, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED); + return createHostVO(cmds, null, null, null, null, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED); } private void checkIPConflicts(final HostPodVO pod, final DataCenterVO dc, final String serverPrivateIP, final String serverPublicIP) { diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index d2ddbddcb484..eefc0be31794 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -213,6 +213,7 @@ import org.apache.cloudstack.api.command.admin.storage.AddObjectStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; +import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; @@ -3593,6 +3594,7 @@ public List> getCommands() { cmdList.add(SyncStoragePoolCmd.class); cmdList.add(UpdateStorageCapabilitiesCmd.class); cmdList.add(UpdateImageStoreCmd.class); + 
cmdList.add(ConfigureStorageAccessCmd.class); cmdList.add(DestroySystemVmCmd.class); cmdList.add(ListSystemVMsCmd.class); cmdList.add(MigrateSystemVMCmd.class); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 54299f55d02d..de27ddbafbe4 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -39,6 +39,7 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.UUID; @@ -51,14 +52,20 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.inject.Inject; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; +import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; @@ -144,6 +151,7 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.time.DateUtils; +import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.EnumUtils; import 
org.springframework.stereotype.Component; @@ -368,6 +376,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject StoragePoolTagsDao _storagePoolTagsDao; @Inject + StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao; + @Inject PrimaryDataStoreDao primaryStoreDao; @Inject DiskOfferingDetailsDao _diskOfferingDetailsDao; @@ -397,6 +407,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C ConfigurationDao configurationDao; @Inject private ImageStoreDetailsUtil imageStoreDetailsUtil; + @Inject + protected HostPodDao _podDao; + @Inject + ResourceManager _resourceMgr; + @Inject + StorageManager storageManager; protected List _discoverers; @@ -673,7 +689,7 @@ public boolean configure(String name, Map params) { _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800); logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); - _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _dataStoreProviderMgr), true, false, true); + _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _storagePoolHostDao, _dataStoreProviderMgr), true, false, true); logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value() + ", template cleanup enabled: " + TemplateCleanupEnabled.value()); @@ -1021,6 +1037,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource params.put("hypervisorType", hypervisorType); params.put("url", cmd.getUrl()); params.put("tags", cmd.getTags()); + params.put(ApiConstants.STORAGE_ACCESS_GROUPS, cmd.getStorageAccessGroups()); params.put("isTagARule", cmd.isTagARule()); params.put("name", cmd.getStoragePoolName()); params.put("details", details); @@ -1388,6 +1405,228 @@ public void 
changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws Illegal } } + @Override + @ActionEvent(eventType = EventTypes.EVENT_CONFIGURE_STORAGE_ACCESS, eventDescription = "configuring storage groups", async = true) + public boolean configureStorageAccess(ConfigureStorageAccessCmd cmd) { + Long zoneId = cmd.getZoneId(); + Long podId = cmd.getPodId(); + Long clusterId = cmd.getClusterId(); + Long hostId = cmd.getHostId(); + Long storagePoolId = cmd.getStorageId(); + + long nonNullCount = Stream.of(zoneId, podId, clusterId, hostId, storagePoolId) + .filter(Objects::nonNull) + .count(); + + if (nonNullCount != 1) { + throw new IllegalArgumentException("Exactly one of zoneid, podid, clusterid, hostid or storagepoolid is required"); + } + + // SAG -> Storage Access Group + List storageAccessGroups = cmd.getStorageAccessGroups(); + if (storageAccessGroups == null) { + throw new InvalidParameterValueException("storageaccessgroups parameter is required"); + } + + if (zoneId != null) { + DataCenterVO zone = _dcDao.findById(zoneId); + Set existingSAGsSet = (zone.getStorageAccessGroups() == null || zone.getStorageAccessGroups().isEmpty()) + ? Collections.emptySet() + : new HashSet<>(Arrays.asList(zone.getStorageAccessGroups().split(","))); + + Set storagePoolSAGsSet = new HashSet<>(storageAccessGroups); + if (!existingSAGsSet.equals(storagePoolSAGsSet)) { + _resourceMgr.updateZoneStorageAccessGroups(zone.getId(), storageAccessGroups); + String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups); + zone.setStorageAccessGroups(preparedStoragePoolTags); + + if (!_dcDao.update(zoneId, zone)) { + throw new CloudRuntimeException("Failed to update zone with the storage access groups."); + } + } + } + + if (podId != null) { + HostPodVO pod = _podDao.findById(podId); + Set existingTagsSet = (pod.getStorageAccessGroups() == null || pod.getStorageAccessGroups().isEmpty()) + ? 
Collections.emptySet() + : new HashSet<>(Arrays.asList(pod.getStorageAccessGroups().split(","))); + + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + checkIfStorageAccessGroupsExistsOnZone(pod.getDataCenterId(), storageAccessGroups); + } + + Set storagePoolTagsSet = new HashSet<>(storageAccessGroups); + if (!existingTagsSet.equals(storagePoolTagsSet)) { + _resourceMgr.updatePodStorageAccessGroups(podId, storageAccessGroups); + String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups); + pod.setStorageAccessGroups(preparedStoragePoolTags); + + if (!_podDao.update(podId, pod)) { + throw new CloudRuntimeException("Failed to update pod with the storage access groups."); + } + } + } + + if (clusterId != null) { + ClusterVO cluster = _clusterDao.findById(clusterId); + Set existingTagsSet = (cluster.getStorageAccessGroups() == null || cluster.getStorageAccessGroups().isEmpty()) + ? Collections.emptySet() + : new HashSet<>(Arrays.asList(cluster.getStorageAccessGroups().split(","))); + + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + checkIfStorageAccessGroupsExistsOnPod(cluster.getPodId(), storageAccessGroups); + } + + Set storagePoolTagsSet = new HashSet<>(storageAccessGroups); + if (!existingTagsSet.equals(storagePoolTagsSet)) { + _resourceMgr.updateClusterStorageAccessGroups(cluster.getId(), storageAccessGroups); + String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups); + cluster.setStorageAccessGroups(preparedStoragePoolTags); + + if (!_clusterDao.update(clusterId, cluster)) { + throw new CloudRuntimeException("Failed to update cluster with the storage access groups."); + } + } + } + + if (hostId != null) { + HostVO host = _hostDao.findById(hostId); + Set existingTagsSet = (host.getStorageAccessGroups() == null || host.getStorageAccessGroups().isEmpty()) + ? 
Collections.emptySet() + : new HashSet<>(Arrays.asList(host.getStorageAccessGroups().split(","))); + + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + checkIfStorageAccessGroupsExistsOnCluster(host.getClusterId(), storageAccessGroups); + } + + Set storageAccessGroupsSet = new HashSet<>(storageAccessGroups); + if (!existingTagsSet.equals(storageAccessGroupsSet)) { + _resourceMgr.updateHostStorageAccessGroups(hostId, storageAccessGroups); + String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups); + host.setStorageAccessGroups(preparedStoragePoolTags); + + if (!_hostDao.update(hostId, host)) { + throw new CloudRuntimeException("Failed to update host with the storage access groups."); + } + } + } + + if (storagePoolId != null) { + StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); + if (logger.isDebugEnabled()) { + logger.debug("Updating Storage Pool Access Group Maps to :" + storageAccessGroups); + } + + if (storagePool.getPoolType() == StoragePoolType.DatastoreCluster) { + List childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(storagePool.getId()); + for (StoragePoolVO childPool : childStoragePools) { + _resourceMgr.updateStoragePoolConnectionsOnHosts(childPool.getId(), storageAccessGroups); + _storagePoolAccessGroupMapDao.persist(childPool.getId(), storageAccessGroups); + } + } else { + _resourceMgr.updateStoragePoolConnectionsOnHosts(storagePool.getId(), storageAccessGroups); + } + + _storagePoolAccessGroupMapDao.persist(storagePool.getId(), storageAccessGroups); + } + + return true; + } + + protected void checkIfStorageAccessGroupsExistsOnZone(long zoneId, List storageAccessGroups) { + DataCenterVO zoneVO = _dcDao.findById(zoneId); + + String storageAccessGroupsOnZone = zoneVO.getStorageAccessGroups(); + List zoneTagsList = parseTags(storageAccessGroupsOnZone); + List newTags = storageAccessGroups; + + List existingTagsOnZone = (List) 
CollectionUtils.intersection(newTags, zoneTagsList); + + if (CollectionUtils.isNotEmpty(existingTagsOnZone)) { + throw new CloudRuntimeException(String.format("access groups already exist on the zone: %s", existingTagsOnZone)); + } + } + + protected void checkIfStorageAccessGroupsExistsOnPod(long podId, List storageAccessGroups) { + HostPodVO podVO = _podDao.findById(podId); + DataCenterVO zoneVO = _dcDao.findById(podVO.getDataCenterId()); + + String storageAccessGroupsOnPod = podVO.getStorageAccessGroups(); + String storageAccessGroupsOnZone = zoneVO.getStorageAccessGroups(); + + List podTagsList = parseTags(storageAccessGroupsOnPod); + List zoneTagsList = parseTags(storageAccessGroupsOnZone); + List newTags = storageAccessGroups; + + List existingTagsOnPod = (List) CollectionUtils.intersection(newTags, podTagsList); + List existingTagsOnZone = (List) CollectionUtils.intersection(newTags, zoneTagsList); + + if (CollectionUtils.isNotEmpty(existingTagsOnPod) || CollectionUtils.isNotEmpty(existingTagsOnZone)) { + String message = "access groups already exist "; + + if (CollectionUtils.isNotEmpty(existingTagsOnPod)) { + message += String.format("on the pod: %s", existingTagsOnPod); + } + if (CollectionUtils.isNotEmpty(existingTagsOnZone)) { + if (CollectionUtils.isNotEmpty(existingTagsOnPod)) { + message += ", "; + } + message += String.format("on the zone: %s", existingTagsOnZone); + } + + throw new CloudRuntimeException(message); + } + } + + protected void checkIfStorageAccessGroupsExistsOnCluster(long clusterId, List storageAccessGroups) { + ClusterVO clusterVO = _clusterDao.findById(clusterId); + HostPodVO podVO = _podDao.findById(clusterVO.getPodId()); + DataCenterVO zoneVO = _dcDao.findById(podVO.getDataCenterId()); + + String storageAccessGroupsOnCluster = clusterVO.getStorageAccessGroups(); + String storageAccessGroupsOnPod = podVO.getStorageAccessGroups(); + String storageAccessGroupsOnZone = zoneVO.getStorageAccessGroups(); + + List podTagsList = 
parseTags(storageAccessGroupsOnPod); + List zoneTagsList = parseTags(storageAccessGroupsOnZone); + List clusterTagsList = parseTags(storageAccessGroupsOnCluster); + List newTags = storageAccessGroups; + + List existingTagsOnCluster = (List) CollectionUtils.intersection(newTags, clusterTagsList); + List existingTagsOnPod = (List) CollectionUtils.intersection(newTags, podTagsList); + List existingTagsOnZone = (List) CollectionUtils.intersection(newTags, zoneTagsList); + + if (CollectionUtils.isNotEmpty(existingTagsOnCluster) || CollectionUtils.isNotEmpty(existingTagsOnPod) || CollectionUtils.isNotEmpty(existingTagsOnZone)) { + String message = "access groups already exist "; + + if (CollectionUtils.isNotEmpty(existingTagsOnCluster)) { + message += String.format("on the cluster: %s", existingTagsOnCluster); + } + if (CollectionUtils.isNotEmpty(existingTagsOnPod)) { + if (CollectionUtils.isNotEmpty(existingTagsOnCluster)) { + message += ", "; + } + message += String.format("on the pod: %s", existingTagsOnPod); + } + if (CollectionUtils.isNotEmpty(existingTagsOnZone)) { + if (CollectionUtils.isNotEmpty(existingTagsOnCluster) || CollectionUtils.isNotEmpty(existingTagsOnPod)) { + message += ", "; + } + message += String.format("on the zone: %s", existingTagsOnZone); + } + + throw new CloudRuntimeException(message); + } + } + + private List parseTags(String tags) { + if (tags == null || tags.trim().isEmpty()) { + return Collections.emptyList(); + } + return Arrays.asList(tags.split(",")); + } + @Override public void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool) { final Map details = new HashMap<>(); @@ -2609,11 +2848,152 @@ private StoragePoolVO createChildDatastoreVO(StoragePoolVO datastoreClusterPool, storagePoolTags = storagePoolTagVOList.parallelStream().map(StoragePoolTagVO::getTag).collect(Collectors.toList()); isTagARule = storagePoolTagVOList.get(0).isTagARule(); } + List storageAccessGroups = 
_storagePoolAccessGroupMapDao.getStorageAccessGroups(datastoreClusterPool.getId()); - _storagePoolDao.persist(dataStoreVO, details, storagePoolTags, isTagARule); + _storagePoolDao.persist(dataStoreVO, details, storagePoolTags, isTagARule, storageAccessGroups); return dataStoreVO; } + @Override + public boolean checkIfHostAndStoragePoolHasCommonStorageAccessGroups(Host host, StoragePool pool) { + String[] hostStorageAccessGroups = getStorageAccessGroups(null, null, null, host.getId()); + List storagePoolAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(pool.getId()); + + if (CollectionUtils.isEmpty(storagePoolAccessGroups)) { + return true; + } + + if (ArrayUtils.isEmpty(hostStorageAccessGroups)) { + return false; + } + + if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) { + logger.debug(String.format("Storage access groups on the host %s are %s", host, hostStorageAccessGroups)); + } + + if (CollectionUtils.isNotEmpty(storagePoolAccessGroups)) { + logger.debug(String.format("Storage access groups on the storage pool %s are %s", host, storagePoolAccessGroups)); + } + + List hostTagList = Arrays.asList(hostStorageAccessGroups); + return CollectionUtils.containsAny(hostTagList, storagePoolAccessGroups); + } + + @Override + public Pair checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(StoragePool destPool, Volume volume) { + if (Volume.State.Ready.equals(volume.getState())) { + Long vmId = volume.getInstanceId(); + VMInstanceVO vm = null; + if (vmId != null) { + vm = _vmInstanceDao.findById(vmId); + } + + if (vm == null || State.Stopped.equals(vm.getState())) { + Long srcPoolId = volume.getPoolId(); + StoragePoolVO srcPool = _storagePoolDao.findById(srcPoolId); + List srcStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(srcPoolId); + List destStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(destPool.getId()); + + if (CollectionUtils.isNotEmpty(srcStorageAccessGroups) && 
CollectionUtils.isNotEmpty(destStorageAccessGroups)) { + logger.debug(String.format("Storage access groups on source storage %s are %s and destination storage %s are %s", + srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups)); + List intersection = new ArrayList<>(srcStorageAccessGroups); + intersection.retainAll(destStorageAccessGroups); + if (CollectionUtils.isNotEmpty(intersection)) { + return new Pair<>(true, "Success"); + } else { + List poolIds = new ArrayList<>(); + poolIds.add(srcPool.getId()); + poolIds.add(destPool.getId()); + Host hostWithPoolsAccess = findUpAndEnabledHostWithAccessToStoragePools(poolIds); + if (hostWithPoolsAccess == null) { + logger.debug("Storage access groups on source and destination storages do not match, and there is no common host connected to these storages"); + return new Pair<>(false, "No common host connected to source and destination storages"); + } + } + } + return new Pair<>(true, "Success"); + } else { + if (State.Running.equals(vm.getState())) { + Long hostId = vm.getHostId(); + String[] hostStorageAccessGroups = getStorageAccessGroups(null, null, null, hostId); + Long srcPoolId = volume.getPoolId(); + StoragePoolVO srcPool = _storagePoolDao.findById(srcPoolId); + List srcStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(srcPoolId); + List destStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(destPool.getId()); + + logger.debug(String.format("Storage access groups on source storage %s are %s and destination storage %s are %s", + srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups)); + + if (CollectionUtils.isEmpty(srcStorageAccessGroups) && CollectionUtils.isEmpty(destStorageAccessGroups)) { + return new Pair<>(true, "Success"); + } + + if (CollectionUtils.isNotEmpty(srcStorageAccessGroups) && CollectionUtils.isNotEmpty(destStorageAccessGroups)) { + List intersection = new ArrayList<>(srcStorageAccessGroups); + 
intersection.retainAll(destStorageAccessGroups); + + if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) { + boolean hasSrcCommon = srcStorageAccessGroups.stream() + .anyMatch(group -> Arrays.asList(hostStorageAccessGroups).contains(group)); + boolean hasDestCommon = destStorageAccessGroups.stream() + .anyMatch(group -> Arrays.asList(hostStorageAccessGroups).contains(group)); + if (hasSrcCommon && hasDestCommon) { + return new Pair<>(true, "Success"); + } + } + + return new Pair<>(false, "No common storage access groups between source, destination pools and host"); + } + + if (CollectionUtils.isEmpty(srcStorageAccessGroups)) { + if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) { + List hostAccessGroupList = Arrays.asList(hostStorageAccessGroups); + hostAccessGroupList.retainAll(destStorageAccessGroups); + if (CollectionUtils.isNotEmpty(hostAccessGroupList)) { + return new Pair<>(true, "Success"); + } + } + return new Pair<>(false, "Host lacks access to destination storage groups"); + } + + return new Pair<>(true, "Success"); + } + } + } + return new Pair<>(true, "Success"); + } + + @Override + public String[] getStorageAccessGroups(Long zoneId, Long podId, Long clusterId, Long hostId) { + List storageAccessGroups = new ArrayList<>(); + if (hostId != null) { + HostVO host = _hostDao.findById(hostId); + ClusterVO cluster = _clusterDao.findById(host.getClusterId()); + HostPodVO pod = _podDao.findById(cluster.getPodId()); + DataCenterVO zone = _dcDao.findById(pod.getDataCenterId()); + storageAccessGroups.addAll(List.of(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(host.getStorageAccessGroups(), cluster.getStorageAccessGroups(), pod.getStorageAccessGroups(), zone.getStorageAccessGroups()))); + } else if (clusterId != null) { + ClusterVO cluster = _clusterDao.findById(clusterId); + HostPodVO pod = _podDao.findById(cluster.getPodId()); + DataCenterVO zone = _dcDao.findById(pod.getDataCenterId()); + 
storageAccessGroups.addAll(List.of(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(cluster.getStorageAccessGroups(), pod.getStorageAccessGroups(), zone.getStorageAccessGroups()))); + } else if (podId != null) { + HostPodVO pod = _podDao.findById(podId); + DataCenterVO zone = _dcDao.findById(pod.getDataCenterId()); + storageAccessGroups.addAll(List.of(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(pod.getStorageAccessGroups(), zone.getStorageAccessGroups()))); + } else if (zoneId != null) { + DataCenterVO zone = _dcDao.findById(zoneId); + storageAccessGroups.addAll(List.of(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(zone.getStorageAccessGroups()))); + } + + storageAccessGroups.removeIf(tag -> tag == null || tag.trim().isEmpty()); + + return storageAccessGroups.isEmpty() + ? new String[0] + : storageAccessGroups.toArray(org.apache.commons.lang.ArrayUtils.EMPTY_STRING_ARRAY); + } + private void handleRemoveChildStoragePoolFromDatastoreCluster(Set childDatastoreUUIDs) { for (String childDatastoreUUID : childDatastoreUUIDs) { diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 64fed4ab4c6d..2048ee4cfc9f 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -63,6 +63,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; @@ -358,6 +359,8 @@ public class VolumeApiServiceImpl 
extends ManagerBase implements VolumeApiServic private StatsCollector statsCollector; @Inject HostPodDao podDao; + @Inject + EndPointSelector _epSelector; protected Gson _gson; @@ -3408,6 +3411,18 @@ public Volume migrateVolume(MigrateVolumeCmd cmd) { destPool = _volumeMgr.findChildDataStoreInDataStoreCluster(dc, destPoolPod, destPool.getClusterId(), null, null, destPool.getId()); } + Pair checkResult = storageMgr.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, vol); + if (!checkResult.first()) { + throw new CloudRuntimeException(checkResult.second()); + } + + if (!liveMigrateVolume && vm != null) { + DataStore primaryStore = dataStoreMgr.getPrimaryDataStore(destPool.getId()); + if (_epSelector.select(primaryStore) == null) { + throw new CloudRuntimeException("Unable to find accessible host for volume migration"); + } + } + if (!storageMgr.storagePoolCompatibleWithVolumePool(destPool, (Volume) vol)) { throw new CloudRuntimeException("Storage pool " + destPool.getName() + " is not suitable to migrate volume " + vol.getName()); } diff --git a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java index 6f484870e724..01fcb43c4c4b 100644 --- a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java @@ -16,17 +16,24 @@ // under the License. 
package com.cloud.storage.listener; +import java.util.ArrayList; import java.util.List; import javax.inject.Inject; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.exception.StorageConflictException; import com.cloud.storage.StorageManager; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.lang3.ArrayUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -52,12 +59,18 @@ public class StoragePoolMonitor implements Listener { private final StorageManagerImpl _storageManager; private final PrimaryDataStoreDao _poolDao; private DataStoreProviderManager _dataStoreProviderMgr; + private final StoragePoolHostDao _storagePoolHostDao; + @Inject + ClusterDao _clusterDao; + @Inject + HostPodDao _podDao; @Inject OCFS2Manager _ocfs2Mgr; - public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao, DataStoreProviderManager dataStoreProviderMgr) { + public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao, StoragePoolHostDao storagePoolHostDao, DataStoreProviderManager dataStoreProviderMgr) { _storageManager = mgr; _poolDao = poolDao; + _storagePoolHostDao = storagePoolHostDao; _dataStoreProviderMgr = dataStoreProviderMgr; } @@ -104,13 +117,34 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) scCmd.getHypervisorType() == HypervisorType.VMware || scCmd.getHypervisorType() == 
HypervisorType.Simulator || scCmd.getHypervisorType() == HypervisorType.Ovm || scCmd.getHypervisorType() == HypervisorType.Hyperv || scCmd.getHypervisorType() == HypervisorType.LXC || scCmd.getHypervisorType() == HypervisorType.Ovm3) { - List pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER); - List zoneStoragePoolsByTags = _poolDao.findZoneWideStoragePoolsByTags(host.getDataCenterId(), null, false); - List zoneStoragePoolsByHypervisor = _poolDao.findZoneWideStoragePoolsByHypervisor(host.getDataCenterId(), scCmd.getHypervisorType()); - zoneStoragePoolsByTags.retainAll(zoneStoragePoolsByHypervisor); - pools.addAll(zoneStoragePoolsByTags); - List zoneStoragePoolsByAnyHypervisor = _poolDao.findZoneWideStoragePoolsByHypervisor(host.getDataCenterId(), HypervisorType.Any); - pools.addAll(zoneStoragePoolsByAnyHypervisor); + String sags[] = _storageManager.getStorageAccessGroups(null, null, null, host.getId()); + + List pools = new ArrayList<>(); + // SAG -> Storage Access Group + if (ArrayUtils.isEmpty(sags)) { + List clusterStoragePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER, null); + List storagePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, null); + List zoneStoragePoolsByHypervisor = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, scCmd.getHypervisorType()); + storagePoolsByEmptySAGs.retainAll(zoneStoragePoolsByHypervisor); + pools.addAll(storagePoolsByEmptySAGs); + pools.addAll(clusterStoragePoolsByEmptySAGs); + List zoneStoragePoolsByAnyHypervisor = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, HypervisorType.Any); + pools.addAll(zoneStoragePoolsByAnyHypervisor); + } else { + List storagePoolsBySAGs = new ArrayList<>(); + 
List clusterStoragePoolsBySAGs = _poolDao.findPoolsByAccessGroupsForHostConnection(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER, sags); + List clusterStoragePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER, null); + List zoneStoragePoolsBySAGs = _poolDao.findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(host.getDataCenterId(), sags, scCmd.getHypervisorType()); + List zoneStoragePoolsByHypervisorTypeAny = _poolDao.findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(host.getDataCenterId(), sags, HypervisorType.Any); + List zoneStoragePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, null); + + storagePoolsBySAGs.addAll(zoneStoragePoolsBySAGs); + storagePoolsBySAGs.addAll(zoneStoragePoolsByEmptySAGs); + storagePoolsBySAGs.addAll(zoneStoragePoolsByHypervisorTypeAny); + storagePoolsBySAGs.addAll(clusterStoragePoolsBySAGs); + storagePoolsBySAGs.addAll(clusterStoragePoolsByEmptySAGs); + pools.addAll(storagePoolsBySAGs); + } // get the zone wide disabled pools list if global setting is true. 
if (StorageManager.MountDisabledStoragePool.value()) { @@ -122,6 +156,9 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) pools.addAll(_poolDao.findDisabledPoolsByScope(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER)); } + List previouslyConnectedPools = new ArrayList<>(); + previouslyConnectedPools.addAll(_storageManager.findStoragePoolsConnectedToHost(host.getId())); + for (StoragePoolVO pool : pools) { if (!pool.isShared()) { continue; @@ -141,6 +178,21 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) } catch (Exception e) { throw new ConnectionException(true, String.format("Unable to connect host %s to storage pool %s due to %s", host, pool, e.toString()), e); } + + previouslyConnectedPools.removeIf(sp -> sp.getPoolId() == pool.getId()); + } + + // Disconnect any pools which are not expected to be connected + for (StoragePoolHostVO poolToDisconnect: previouslyConnectedPools) { + StoragePoolVO pool = _poolDao.findById(poolToDisconnect.getPoolId()); + try { + _storageManager.disconnectHostFromSharedPool(host, pool); + _storagePoolHostDao.deleteStoragePoolHostDetails(host.getId(), pool.getId()); + } catch (StorageConflictException se) { + throw new CloudRuntimeException(String.format("Unable to disconnect the pool %s and the host %s", pool, host)); + } catch (Exception e) { + logger.warn(String.format("Unable to disconnect the pool %s and the host %s", pool, host), e); + } } } } diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index f90f612f3306..8c14903e60dd 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -6647,6 +6647,7 @@ public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool) { VMInstanceVO vm = preVmStorageMigrationCheck(vmId); Map volumeToPoolIds = new HashMap<>(); 
checkDestinationHypervisorType(destPool, vm); + checkIfDestinationPoolHasSameStoragePool(destPool, vm); List volumes = _volsDao.findByInstance(vm.getId()); StoragePoolVO destinationPoolVo = _storagePoolDao.findById(destPool.getId()); Long destPoolPodId = ScopeType.CLUSTER.equals(destinationPoolVo.getScope()) || ScopeType.HOST.equals(destinationPoolVo.getScope()) ? @@ -6662,6 +6663,10 @@ public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool) { throw new InvalidParameterValueException("Storage migration of non-user VMs cannot be done between storage pools of different pods"); } } + Pair checkResult = storageManager.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + if (!checkResult.first()) { + throw new CloudRuntimeException(String.format("Storage suitability check failed for volume %s with error, %s", volume, checkResult.second())); + } volumeToPoolIds.put(volume.getId(), destPool.getId()); } _itMgr.storageMigration(vm.getUuid(), volumeToPoolIds); @@ -6686,12 +6691,27 @@ public VirtualMachine vmStorageMigration(Long vmId, Map volumeTo poolClusterId = pool.getClusterId(); } checkDestinationHypervisorType(pool, vm); + Pair checkResult = storageManager.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(pool, volume); + if (!checkResult.first()) { + throw new CloudRuntimeException(String.format("Storage suitability check failed for volume %s with error %s", volume, checkResult.second())); + } + volumeToPoolIds.put(volume.getId(), pool.getId()); } _itMgr.storageMigration(vm.getUuid(), volumeToPoolIds); return findMigratedVm(vm.getId(), vm.getType()); } + private void checkIfDestinationPoolHasSameStoragePool(StoragePool destPool, VMInstanceVO vm) { + Long hostId = vm.getHostId(); + if (hostId != null) { + Host host = _hostDao.findById(hostId); + if (!storageManager.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, destPool)) { + throw new InvalidParameterValueException(String.format("Destination pool %s 
does not have matching storage tags as host %s", destPool.getName(), host.getName())); + } + } + } + private void checkDestinationHypervisorType(StoragePool destPool, VMInstanceVO vm) { HypervisorType destHypervisorType = destPool.getHypervisor(); if (destHypervisorType == null) { @@ -6811,6 +6831,26 @@ private boolean checkEnforceStrictHostTagCheck(HostVO host, ServiceOffering serv return host.checkHostServiceOfferingAndTemplateTags(serviceOffering, template, strictHostTags); } + protected void validateStorageAccessGroupsOnHosts(Host srcHost, Host destinationHost) { + String[] storageAccessGroupsOnSrcHost = storageManager.getStorageAccessGroups(null, null, null, srcHost.getId()); + String[] storageAccessGroupsOnDestHost = storageManager.getStorageAccessGroups(null, null, null, destinationHost.getId()); + + List srcHostStorageAccessGroupsList = storageAccessGroupsOnSrcHost != null ? Arrays.asList(storageAccessGroupsOnSrcHost) : Collections.emptyList(); + List destHostStorageAccessGroupsList = storageAccessGroupsOnDestHost != null ? 
Arrays.asList(storageAccessGroupsOnDestHost) : Collections.emptyList(); + + if (CollectionUtils.isEmpty(srcHostStorageAccessGroupsList)) { + return; + } + + if (CollectionUtils.isEmpty(destHostStorageAccessGroupsList)) { + throw new CloudRuntimeException("Source host has storage access groups, but destination host has none."); + } + + if (!destHostStorageAccessGroupsList.containsAll(srcHostStorageAccessGroupsList)) { + throw new CloudRuntimeException("Storage access groups on the source and destination hosts did not match."); + } + } + protected void validateStrictHostTagCheck(VMInstanceVO vm, HostVO host) { ServiceOffering serviceOffering = serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); VirtualMachineTemplate template = _templateDao.findByIdIncludingRemoved(vm.getTemplateId()); @@ -6853,6 +6893,7 @@ private DeployDestination checkVmMigrationDestination(VMInstanceVO vm, Host srcH HostVO destinationHostVO = _hostDao.findById(destinationHost.getId()); _hostDao.loadHostTags(destinationHostVO); validateStrictHostTagCheck(vm, destinationHostVO); + validateStorageAccessGroupsOnHosts(srcHost, destinationHost); checkHostsDedication(vm, srcHost.getId(), destinationHost.getId()); @@ -7213,6 +7254,8 @@ private Pair getHostsForMigrateVmWithStorage(VMInstanceVO vm, Host d destinationHost.getName(), destinationHost.getUuid())); } + validateStorageAccessGroupsOnHosts(srcHost, destinationHost); + return new Pair<>(srcHost, destinationHost); } @@ -7250,8 +7293,12 @@ private Map getVolumePoolMappingForMigrateVmWithStorage(VMInstanceVO } volToPoolObjectMap.put(volume.getId(), pool.getId()); } - HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId()); + HostVO host = _hostDao.findById(vm.getHostId()); + if (!storageManager.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, pool)) { + throw new InvalidParameterValueException(String.format("Destination pool %s for the volume %s does not have matching storage tags as host %s", 
pool.getName(), volume.getName(), host.getName())); + } + HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId()); try { snapshotHelper.checkKvmVolumeSnapshotsOnlyInPrimaryStorage(volume, hypervisorType); } catch (CloudRuntimeException ex) { diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index c2c78402aa1e..772b5590411a 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -1427,7 +1427,7 @@ public void testEdgeZoneCreatePod() { return pod; }); Mockito.doNothing().when(messageBus).publish(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); - configurationMgr.createPod(zoneId, "TestPod", null, null, null, null, null); + configurationMgr.createPod(zoneId, "TestPod", null, null, null, null, null, null); } @Test diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index 32acdcd4a779..587aafa1587c 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -52,6 +52,8 @@ import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.framework.config.ConfigKey; import javax.naming.ConfigurationException; @@ -568,6 +570,7 @@ public List listAllUpAndEnabledNonHAHosts(final Type type, final Long cl return null; } + /* (non-Javadoc) * @see 
com.cloud.utils.component.Manager#configure(java.lang.String, java.util.Map) */ @@ -628,6 +631,24 @@ public boolean releaseHostReservation(final Long hostId) { return false; } + @Override + public void updatePodStorageAccessGroups(long podId, List newStorageAccessGroups) { + } + + @Override + public void updateZoneStorageAccessGroups(long zoneId, List newStorageAccessGroups) { + } + + @Override + public void updateClusterStorageAccessGroups(Long clusterId, List newStorageAccessGroups) { + + } + + @Override + public void updateHostStorageAccessGroups(Long hostId, List newStorageAccessGroups) { + + } + @Override public boolean isGPUDeviceAvailable(final Host host, final String groupName, final String vgpuType) { // TODO Auto-generated method stub @@ -668,6 +689,25 @@ public boolean cancelMaintenance(long hostId) { return false; } + @Override + public void updateStoragePoolConnectionsOnHosts(Long poolId, List storageAccessGroups) { + } + + @Override + public List getEligibleUpHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryDataStoreInfo) { + return null; + } + + @Override + public List getEligibleUpAndEnabledHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore) { + return null; + } + + @Override + public List getEligibleUpAndEnabledHostsInZoneForStorageConnection(DataStore dataStore, long zoneId, HypervisorType hypervisorType) { + return null; + } + @Override public boolean isHostGpuEnabled(final long hostId) { // TODO Auto-generated method stub diff --git a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java index 414d41145f7b..adb212e8dfb8 100644 --- a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java +++ b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java @@ -29,9 +29,12 @@ import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; +import 
com.cloud.storage.ScopeType; import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; @@ -46,6 +49,8 @@ import org.apache.cloudstack.api.command.admin.host.CancelHostAsDegradedCmd; import org.apache.cloudstack.api.command.admin.host.DeclareHostAsDegradedCmd; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -102,6 +107,10 @@ public class ResourceManagerImplTest { private ConfigurationDao configurationDao; @Mock private VolumeDao volumeDao; + @Mock + private PrimaryDataStoreDao storagePoolDao; + @Mock + private StoragePoolHostDao storagePoolHostDao; @Spy @InjectMocks @@ -583,4 +592,366 @@ public void testDestroyLocalStoragePoolVolumesNoDisks() { resourceManager.destroyLocalStoragePoolVolumes(poolId); verify(volumeDao, never()).updateAndRemoveVolume(any(VolumeVO.class)); } + + @Test + public void testEmptyHostList() { + List allHosts = new ArrayList<>(); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("No hosts should be returned when the host list is empty.", hostsToConnect.isEmpty()); + } + + @Test + public void testEmptyStorageAccessGroups() { + List allHosts = Arrays.asList(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + List storageAccessGroups = new ArrayList<>(); + + for (HostVO host : allHosts) { + Mockito.when(host.getId()).thenReturn(1L); + Mockito.doReturn(new String[]{"group1", "group2"}) + 
.when(storageManager).getStorageAccessGroups(null, null, null, 1L); + } + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("All hosts should be returned when storage access groups are empty.", hostsToConnect.containsAll(allHosts)); + Assert.assertEquals("The number of returned hosts should match the total number of hosts.", allHosts.size(), hostsToConnect.size()); + } + + @Test + public void testHostWithMatchingStorageAccessGroups() { + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + List allHosts = Arrays.asList(host1, host2); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.doReturn(new String[]{"group1"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + + Mockito.when(host2.getId()).thenReturn(2L); + Mockito.doReturn(new String[]{"group3"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 2L); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("Only hosts with matching storage access groups should be included.", hostsToConnect.contains(host1)); + Assert.assertFalse("Hosts without matching storage access groups should not be included.", hostsToConnect.contains(host2)); + Assert.assertEquals("Only one host should match the storage access groups.", 1, hostsToConnect.size()); + } + + @Test + public void testHostWithoutMatchingStorageAccessGroups() { + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + List allHosts = Arrays.asList(host1, host2); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.doReturn(new String[]{"group3"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + + Mockito.when(host2.getId()).thenReturn(2L); 
+ Mockito.doReturn(new String[]{"group4"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 2L); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("No hosts should match the storage access groups.", hostsToConnect.isEmpty()); + } + + @Test + public void testMixedMatchingAndNonMatchingHosts() { + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + HostVO host3 = Mockito.mock(HostVO.class); + List allHosts = Arrays.asList(host1, host2, host3); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.doReturn(new String[]{"group1"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + + Mockito.when(host2.getId()).thenReturn(2L); + Mockito.doReturn(new String[]{"group3"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 2L); + + Mockito.when(host3.getId()).thenReturn(3L); + Mockito.doReturn(new String[]{"group2"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 3L); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("Host1 should be included as it matches 'group1'.", hostsToConnect.contains(host1)); + Assert.assertFalse("Host2 should not be included as it doesn't match any group.", hostsToConnect.contains(host2)); + Assert.assertTrue("Host3 should be included as it matches 'group2'.", hostsToConnect.contains(host3)); + } + + @Test + public void testHostsWithEmptyStorageAccessGroups() { + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + List allHosts = Arrays.asList(host1, host2); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.doReturn(new String[0]) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + + 
Mockito.when(host2.getId()).thenReturn(2L); + Mockito.doReturn(new String[0]) + .when(storageManager).getStorageAccessGroups(null, null, null, 2L); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("No hosts should be included if storage access groups are empty.", hostsToConnect.isEmpty()); + } + + @Test + public void testZoneLevelWithAllHostsUsingTags() { + List sagsToDelete = Arrays.asList("tag1", "tag2"); + Long clusterId = null; + Long podId = null; + Long zoneId = 3L; + + List hostIdsUsingStorageTags = Arrays.asList(1L, 2L); + Mockito.doReturn(hostIdsUsingStorageTags).when(resourceManager).listOfHostIdsUsingTheStorageAccessGroups(sagsToDelete, clusterId, podId, zoneId); + + List hostsInZone = Arrays.asList(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + Mockito.doReturn(hostsInZone).when(hostDao).findByDataCenterId(zoneId); + + Mockito.doReturn(1L).when(hostsInZone.get(0)).getId(); + Mockito.doReturn(2L).when(hostsInZone.get(1)).getId(); + + try { + resourceManager.checkIfAllHostsInUse(sagsToDelete, clusterId, podId, zoneId); + Assert.fail("Exception should be thrown when all hosts in the zone are using the storage access groups."); + } catch (CloudRuntimeException e) { + Assert.assertEquals("All hosts in the zone are using the storage access groups", e.getMessage()); + } + } + + @Test + public void testClusterLevelWithAllHostsUsingTags() { + List sagsToDelete = Arrays.asList("tag1", "tag2"); + Long clusterId = 1L; + Long podId = null; + Long zoneId = null; + + List hostIdsUsingStorageTags = Arrays.asList(1L, 2L); + Mockito.doReturn(hostIdsUsingStorageTags).when(resourceManager).listOfHostIdsUsingTheStorageAccessGroups(sagsToDelete, clusterId, podId, zoneId); + + List hostsInCluster = Arrays.asList(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + Mockito.doReturn(hostsInCluster).when(hostDao).findByClusterId(clusterId, Host.Type.Routing); + + 
Mockito.doReturn(1L).when(hostsInCluster.get(0)).getId(); + Mockito.doReturn(2L).when(hostsInCluster.get(1)).getId(); + + try { + resourceManager.checkIfAllHostsInUse(sagsToDelete, clusterId, podId, zoneId); + Assert.fail("Exception should be thrown when all hosts in the cluster are using the storage access groups."); + } catch (CloudRuntimeException e) { + Assert.assertEquals("All hosts in the cluster are using the storage access groups", e.getMessage()); + } + } + + @Test + public void testPodLevelWithAllHostsUsingTags() { + List sagsToDelete = Arrays.asList("tag1", "tag2"); + Long clusterId = null; + Long podId = 2L; + Long zoneId = null; + + List hostIdsUsingStorageTags = Arrays.asList(1L, 2L); + Mockito.doReturn(hostIdsUsingStorageTags).when(resourceManager).listOfHostIdsUsingTheStorageAccessGroups(sagsToDelete, clusterId, podId, zoneId); + + List hostsInPod = Arrays.asList(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + Mockito.doReturn(hostsInPod).when(hostDao).findByPodId(podId, Host.Type.Routing); + + Mockito.doReturn(1L).when(hostsInPod.get(0)).getId(); + Mockito.doReturn(2L).when(hostsInPod.get(1)).getId(); + + try { + resourceManager.checkIfAllHostsInUse(sagsToDelete, clusterId, podId, zoneId); + Assert.fail("Exception should be thrown when all hosts in the pod are using the storage access groups."); + } catch (CloudRuntimeException e) { + Assert.assertEquals("All hosts in the pod are using the storage access groups", e.getMessage()); + } + } + + @Test + public void testCheckIfAnyVolumesInUseWithPoolsToAdd() { + List sagsToAdd = Arrays.asList("sag1", "sag2"); + List sagsToDelete = Arrays.asList("sag3", "sag4"); + + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getId()).thenReturn(1L); + Mockito.when(host.getDataCenterId()).thenReturn(2L); + Mockito.when(host.getPodId()).thenReturn(3L); + Mockito.when(host.getClusterId()).thenReturn(4L); + + VolumeVO volume1 = Mockito.mock(VolumeVO.class); + VolumeVO volume2 = 
Mockito.mock(VolumeVO.class); + Mockito.when(volume1.getPoolId()).thenReturn(10L); + Mockito.when(volume2.getPoolId()).thenReturn(11L); + List volumesUsingTheStoragePoolAccessGroups = new ArrayList<>(Arrays.asList(volume1, volume2)); + Mockito.doReturn(volumesUsingTheStoragePoolAccessGroups).when(resourceManager).listOfVolumesUsingTheStorageAccessGroups(sagsToDelete, 1L, null, null, null); + + StoragePoolVO pool1 = Mockito.mock(StoragePoolVO.class); + StoragePoolVO pool2 = Mockito.mock(StoragePoolVO.class); + Mockito.when(pool1.getId()).thenReturn(10L); + Mockito.when(pool2.getId()).thenReturn(12L); + List poolsToAdd = Arrays.asList(pool1, pool2); + + Mockito.doReturn(poolsToAdd) + .when(resourceManager).getStoragePoolsByAccessGroups(2L, 3L, 4L, sagsToAdd.toArray(new String[0]), true); + + try { + resourceManager.checkIfAnyVolumesInUse(sagsToAdd, sagsToDelete, host); + Assert.fail("Expected a CloudRuntimeException to be thrown."); + } catch (CloudRuntimeException e) { + Assert.assertTrue("Exception message should mention volumes in use.", + e.getMessage().contains("There are volumes in storage pools with the Storage Access Groups that need to be deleted")); + } + } + + @Test + public void testUpdateStoragePoolConnectionsOnHostsConnect1AndDisconnect2() { + Long poolId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + Mockito.when(storagePool.getId()).thenReturn(poolId); + Mockito.when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER); + Mockito.when(storagePool.getClusterId()).thenReturn(1L); + Mockito.when(storagePool.getPodId()).thenReturn(1L); + Mockito.when(storagePool.getDataCenterId()).thenReturn(1L); + + Mockito.when(storagePoolDao.findById(poolId)).thenReturn(storagePool); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(2L); + Mockito.when(host2.getId()).thenReturn(3L); + + List 
clusterHosts = Arrays.asList(host1, host2); + Mockito.doReturn(clusterHosts).when(resourceManager).listAllUpHosts(Host.Type.Routing, 1L, 1L, 1L); + + StoragePoolHostVO hostPoolRecord = Mockito.mock(StoragePoolHostVO.class); + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 2L)).thenReturn(null); + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 3L)).thenReturn(hostPoolRecord); + + Mockito.doReturn(new String[]{"sag1", "sag2"}).when(storageManager).getStorageAccessGroups(null, null, null, 2L); + Mockito.doReturn(new String[]{"sag3"}).when(storageManager).getStorageAccessGroups(null, null, null, 3L); + + Mockito.doReturn(new ArrayList()).when(resourceManager).listOfHostIdsUsingTheStoragePool(poolId); + + try { + resourceManager.updateStoragePoolConnectionsOnHosts(poolId, storageAccessGroups); + + Mockito.verify(resourceManager, Mockito.times(1)).connectHostToStoragePool(host1, storagePool); + Mockito.verify(resourceManager, Mockito.never()).connectHostToStoragePool(host2, storagePool); + Mockito.verify(resourceManager, Mockito.times(1)).disconnectHostFromStoragePool(host2, storagePool); + Mockito.verify(resourceManager, Mockito.never()).disconnectHostFromStoragePool(host1, storagePool); + } catch (CloudRuntimeException e) { + Assert.fail("No exception should be thrown."); + } + } + + @Test + public void testUpdateStoragePoolConnectionsOnHosts_ZoneScope_NoAccessGroups() { + Long poolId = 1L; + List storageAccessGroups = new ArrayList<>(); + + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + Mockito.when(storagePool.getId()).thenReturn(poolId); + Mockito.when(storagePool.getScope()).thenReturn(ScopeType.ZONE); + Mockito.when(storagePool.getDataCenterId()).thenReturn(1L); + + Mockito.when(storagePoolDao.findById(poolId)).thenReturn(storagePool); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(2L); + Mockito.when(host2.getId()).thenReturn(3L); + + List 
zoneHosts = Arrays.asList(host1, host2); + Mockito.doReturn(zoneHosts).when(resourceManager).listAllUpHosts(Host.Type.Routing, null, null, 1L); + + Mockito.doReturn(new String[]{"sag1", "sag2"}).when(storageManager).getStorageAccessGroups(null, null, null, 2L); + Mockito.doReturn(new String[]{""}).when(storageManager).getStorageAccessGroups(null, null, null, 3L); + + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 2L)).thenReturn(null); + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 3L)).thenReturn(null); + + try { + resourceManager.updateStoragePoolConnectionsOnHosts(poolId, storageAccessGroups); + + Mockito.verify(resourceManager, Mockito.times(1)).connectHostToStoragePool(host1, storagePool); + Mockito.verify(resourceManager, Mockito.times(1)).connectHostToStoragePool(host2, storagePool); + Mockito.verify(resourceManager, Mockito.never()).disconnectHostFromStoragePool(Mockito.any(), Mockito.eq(storagePool)); + } catch (CloudRuntimeException e) { + Assert.fail("No exception should be thrown."); + } + } + + @Test + public void testUpdateStoragePoolConnectionsOnHosts_ConflictWithHostIdsAndVolumes() { + Long poolId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + Mockito.when(storagePool.getId()).thenReturn(poolId); + Mockito.when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER); + Mockito.when(storagePool.getClusterId()).thenReturn(1L); + Mockito.when(storagePool.getPodId()).thenReturn(1L); + Mockito.when(storagePool.getDataCenterId()).thenReturn(1L); + + Mockito.when(storagePoolDao.findById(poolId)).thenReturn(storagePool); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(2L); + Mockito.when(host2.getId()).thenReturn(3L); + + List clusterHosts = Arrays.asList(host1, host2); + Mockito.doReturn(clusterHosts).when(resourceManager).listAllUpHosts(Host.Type.Routing, 1L, 1L, 1L); + 
+ VolumeVO volume1 = Mockito.mock(VolumeVO.class); + VolumeVO volume2 = Mockito.mock(VolumeVO.class); + + Mockito.when(volume1.getInstanceId()).thenReturn(100L); + Mockito.when(volume2.getInstanceId()).thenReturn(101L); + + List volumesInPool = Arrays.asList(volume1, volume2); + Mockito.doReturn(volumesInPool).when(volumeDao).findByPoolId(poolId); + + VMInstanceVO vmInstance1 = Mockito.mock(VMInstanceVO.class); + VMInstanceVO vmInstance2 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vmInstance1.getHostId()).thenReturn(2L); + Mockito.when(vmInstance2.getHostId()).thenReturn(3L); + + Mockito.doReturn(vmInstance1).when(vmInstanceDao).findById(100L); + Mockito.doReturn(vmInstance2).when(vmInstanceDao).findById(101L); + + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 2L)).thenReturn(null); + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 3L)).thenReturn(null); + + Mockito.doReturn(new String[]{"sag1"}).when(storageManager).getStorageAccessGroups(null, null, null, 2L); + Mockito.doReturn(new String[]{"sag3"}).when(storageManager).getStorageAccessGroups(null, null, null, 3L); + + Mockito.doReturn(Arrays.asList(2L, 3L)).when(resourceManager).listOfHostIdsUsingTheStoragePool(poolId); + + try { + resourceManager.updateStoragePoolConnectionsOnHosts(poolId, storageAccessGroups); + Assert.fail("Expected a CloudRuntimeException to be thrown."); + } catch (CloudRuntimeException e) { + Assert.assertTrue(e.getMessage().contains("Storage access groups cannot be updated as they are currently in use by some hosts.")); + Mockito.verify(resourceManager, Mockito.never()).connectHostToStoragePool(Mockito.any(), Mockito.eq(storagePool)); + Mockito.verify(resourceManager, Mockito.never()).disconnectHostFromStoragePool(Mockito.any(), Mockito.eq(storagePool)); + } + } + } diff --git a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java index 999bf85907be..da00f5b3a7f9 100644 --- 
a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java +++ b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java @@ -23,8 +23,15 @@ import java.util.List; import java.util.Map; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; +import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; @@ -75,6 +82,10 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.doReturn; + @RunWith(MockitoJUnitRunner.class) public class StorageManagerImplTest { @@ -125,6 +136,18 @@ public class StorageManagerImplTest { @Mock private VMInstanceVO vmInstanceVOMock; + @Mock + private HostDao hostDao; + @Mock + private HostPodDao podDao; + + @Mock + private StoragePoolAndAccessGroupMapDao storagePoolAccessGroupMapDao; + + @Mock + private ResourceManager resourceMgr; + + @Test public void createLocalStoragePoolName() { String hostMockName = "host1"; @@ -149,7 +172,7 @@ private void executeCreateLocalStoragePoolNameForHostName(String hostMockName) { Mockito.when(storagePoolInfoMock.getUuid()).thenReturn(firstBlockUuid + "-213151-df21ef333d-2d33f1"); String localStoragePoolName = storageManagerImpl.createLocalStoragePoolName(hostMock, storagePoolInfoMock); - Assert.assertEquals(expectedLocalStorageName, localStoragePoolName); + assertEquals(expectedLocalStorageName, 
localStoragePoolName); } private VolumeVO mockVolumeForIsVolumeSuspectedDestroyDuplicateTest() { @@ -836,6 +859,66 @@ public void testCheckPoolforSpaceForResize4() throws NoSuchFieldException, Illeg Assert.assertTrue(result); } + @Test + public void testGetStorageAccessGroupsOnHostAllSAGsPresent() { + long hostId = 1L; + + HostVO host = Mockito.mock(HostVO.class); + ClusterVO cluster = Mockito.mock(ClusterVO.class); + HostPodVO pod = Mockito.mock(HostPodVO.class); + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.when(host.getClusterId()).thenReturn(2L); + Mockito.when(clusterDao.findById(2L)).thenReturn(cluster); + Mockito.when(cluster.getPodId()).thenReturn(3L); + Mockito.when(podDao.findById(3L)).thenReturn(pod); + Mockito.when(pod.getDataCenterId()).thenReturn(4L); + Mockito.when(dataCenterDao.findById(4L)).thenReturn(zone); + + Mockito.when(host.getStorageAccessGroups()).thenReturn("sag1"); + Mockito.when(cluster.getStorageAccessGroups()).thenReturn("sag2"); + Mockito.when(pod.getStorageAccessGroups()).thenReturn("sag3"); + Mockito.when(zone.getStorageAccessGroups()).thenReturn("sag4"); + + String[] sags = storageManagerImpl.getStorageAccessGroups(null, null, null, hostId); + + assertNotNull(sags); + assertEquals(4, sags.length); + assertEquals("sag1", sags[0]); + assertEquals("sag2", sags[1]); + assertEquals("sag3", sags[2]); + assertEquals("sag4", sags[3]); + } + + @Test + public void testGetSingleStorageAccessGroupOnHost() { + long hostId = 1L; + + HostVO host = Mockito.mock(HostVO.class); + ClusterVO cluster = Mockito.mock(ClusterVO.class); + HostPodVO pod = Mockito.mock(HostPodVO.class); + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.when(host.getClusterId()).thenReturn(2L); + Mockito.when(clusterDao.findById(2L)).thenReturn(cluster); + Mockito.when(cluster.getPodId()).thenReturn(3L); + 
Mockito.when(podDao.findById(3L)).thenReturn(pod); + Mockito.when(pod.getDataCenterId()).thenReturn(4L); + Mockito.when(dataCenterDao.findById(4L)).thenReturn(zone); + + Mockito.when(host.getStorageAccessGroups()).thenReturn(""); + Mockito.when(cluster.getStorageAccessGroups()).thenReturn("sag2"); + Mockito.when(pod.getStorageAccessGroups()).thenReturn(null); + + String[] sags = storageManagerImpl.getStorageAccessGroups(null, null, null, hostId); + + assertNotNull(sags); + assertEquals(1, sags.length); + assertEquals("sag2", sags[0]); + } + @Test public void testGetStoragePoolIopsStats_ReturnsDriverResultWhenNotNull() { StoragePool pool = Mockito.mock(StoragePool.class); @@ -894,4 +977,384 @@ public void testGetStoragePoolIopsStats_UsedIopsNegative() { Assert.assertEquals("Capacity IOPS should match pool's capacity IOPS", 1000L, result.first().longValue()); Assert.assertNull("Used IOPS should be null when usedIops <= 0", result.second()); } + + + @Test + public void testNoStorageAccessGroupsOnHostAndStoragePool() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + + Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[0]).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + .thenReturn(new ArrayList<>()); + + boolean result = storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + Assert.assertTrue("Host without storage access groups should connect to a storage pool without storage access groups.", result); + } + + @Test + public void testHostWithStorageAccessGroupsAndStoragePoolWithoutStorageAccessGroups() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO 
storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + + Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[]{"StorageAccessGroup1"}).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + .thenReturn(new ArrayList<>()); + + boolean result = storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + Assert.assertTrue("Host with storage access groups should connect to a storage pool without storage access groups.", result); + } + + @Test + public void testHostWithStorageAccessGroupsAndStoragePoolWithDifferentStorageAccessGroups() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + + Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[]{"StorageAccessGroup1"}).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + .thenReturn(Arrays.asList("StorageAccessGroup2", "StorageAccessGroup3")); + + boolean result = storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + Assert.assertFalse("Host with storage access groups should not connect to a storage pool with different storage access groups.", result); + } + + @Test + public void testHostWithStorageAccessGroupsAndStoragePoolWithMatchingStorageAccessGroups() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + 
+ Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[]{"StorageAccessGroup1"}).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + .thenReturn(Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2")); + + boolean result = storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + Assert.assertTrue("Host with matching storage access groups should connect to a storage pool with matching storage access groups.", result); + } + + @Test + public void testHostWithEmptySAGsOnHost() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + + Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[0]).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + .thenReturn(Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2")); + + boolean result = storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + Assert.assertFalse("Host with matching storage access groups should connect to a storage pool with matching storage access groups.", result); + } + + @Test + public void testVolumeReadyNoVMOrVMStoppedAndPoolsWithMatchingStorageAccessGroups() { + StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + long srcPoolId = 2L; + long destPoolId = 3L; + + Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + 
Mockito.when(volume.getInstanceId()).thenReturn(null); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + + List srcStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + List destStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + + doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + + Pair result = storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + Assert.assertTrue("Volume in Ready state and no VM or VM stopped should migrate if both pools have matching storage access groups.", result.first()); + } + + @Test + public void testVolumeReadyNoVMOrVMStoppedAndPoolsWithEmptyStorageAccessGroups() { + StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + long srcPoolId = 2L; + long destPoolId = 3L; + + Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume.getInstanceId()).thenReturn(null); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + + List srcStorageAccessGroups = new ArrayList<>(); + List destStorageAccessGroups = new ArrayList<>(); + + doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + + Pair result = storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + Assert.assertTrue("Volume with empty storage access groups should be able to fit in the destination pool.", result.first()); + } + + @Test + public void testVolumeReadyVMRunningAndHostHasCommonSAGsForBothPools() { + 
StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + long vmId = 10L; + long srcPoolId = 2L; + long destPoolId = 3L; + + Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume.getInstanceId()).thenReturn(vmId); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + + List srcStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + List destStorageAccessGroups = Arrays.asList("StorageAccessGroup2", "StorageAccessGroup3"); + + doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + + Pair result = storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + Assert.assertTrue("Volume with host having common storage access groups should fit in both source and destination pools.", result.first()); + } + + @Test + public void testVolumeReadyVMRunningAndHostHasCommonSAGForSourcePoolButNotDestinationPool() { + StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + StoragePoolVO srcPool = Mockito.mock(StoragePoolVO.class); + long vmId = 10L; + long srcPoolId = 2L; + long destPoolId = 3L; + + Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume.getInstanceId()).thenReturn(vmId); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + Mockito.when(srcPool.getId()).thenReturn(destPoolId); + Mockito.doReturn(srcPool).when(storagePoolDao).findById(srcPoolId); + + List srcStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + List destStorageAccessGroups = Arrays.asList("StorageAccessGroup3", "StorageAccessGroup4"); + + 
doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + List poolIds = new ArrayList<>(); + poolIds.add(srcPool.getId()); + poolIds.add(destPool.getId()); + Mockito.doReturn(null).when(storageManagerImpl).findUpAndEnabledHostWithAccessToStoragePools(poolIds); + + Pair result = storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + Assert.assertFalse("Volume with host having common storage access group for source pool but not destination pool should not fit.", result.first()); + } + + @Test + public void testNoCommonHostConnected() { + StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + StoragePoolVO srcPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + long vmId = 10L; + long srcPoolId = 2L; + long destPoolId = 3L; + + Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume.getInstanceId()).thenReturn(vmId); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + Mockito.when(srcPool.getId()).thenReturn(destPoolId); + Mockito.doReturn(srcPool).when(storagePoolDao).findById(srcPoolId); + List srcStorageAccessGroups = Arrays.asList("StorageAccessGroup3", "StorageAccessGroup4"); + List destStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + + Mockito.doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + Mockito.doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + List poolIds = new ArrayList<>(); + poolIds.add(srcPool.getId()); + poolIds.add(destPool.getId()); + Mockito.doReturn(null).when(storageManagerImpl).findUpAndEnabledHostWithAccessToStoragePools(poolIds); + Pair result = 
storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + Assert.assertFalse("Volume with host having common storage access group for destination pool but not source pool should not fit.", result.first()); + Assert.assertEquals("No common host connected to source and destination storages", result.second()); + } + + @Test + public void testConfigureStorageAccess_SkipUpdateForZone() { + Long zoneId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + Mockito.when(cmd.getZoneId()).thenReturn(zoneId); + Mockito.when(cmd.getPodId()).thenReturn(null); + Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + Mockito.when(cmd.getStorageAccessGroups()).thenReturn(storageAccessGroups); + + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getStorageAccessGroups()).thenReturn("sag2,sag1"); + Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(zone); + + boolean result = storageManagerImpl.configureStorageAccess(cmd); + + Mockito.verify(resourceMgr, Mockito.never()).updateZoneStorageAccessGroups(Mockito.anyLong(), Mockito.anyList()); + Mockito.verify(dataCenterDao, Mockito.never()).update(Mockito.eq(zoneId), Mockito.any(DataCenterVO.class)); + + Assert.assertTrue(result); + } + + @Test + public void testConfigureStorageAccess_SkipUpdateForPod() { + Long podId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + Mockito.when(cmd.getZoneId()).thenReturn(null); + Mockito.when(cmd.getPodId()).thenReturn(podId); + Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + 
Mockito.when(cmd.getStorageAccessGroups()).thenReturn(storageAccessGroups); + + HostPodVO pod = Mockito.mock(HostPodVO.class); + Mockito.when(pod.getDataCenterId()).thenReturn(1L); + Mockito.when(pod.getStorageAccessGroups()).thenReturn("sag1,sag2"); + Mockito.when(podDao.findById(podId)).thenReturn(pod); + Mockito.doNothing().when(storageManagerImpl).checkIfStorageAccessGroupsExistsOnZone(1L, storageAccessGroups); + + boolean result = storageManagerImpl.configureStorageAccess(cmd); + + Mockito.verify(resourceMgr, Mockito.never()).updatePodStorageAccessGroups(Mockito.anyLong(), Mockito.anyList()); + Mockito.verify(podDao, Mockito.never()).update(Mockito.eq(podId), Mockito.any(HostPodVO.class)); + + Assert.assertTrue(result); + } + + @Test + public void testConfigureStorageAccess_SkipUpdateForCluster() { + Long clusterId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + Mockito.when(cmd.getZoneId()).thenReturn(null); + Mockito.when(cmd.getPodId()).thenReturn(null); + Mockito.when(cmd.getClusterId()).thenReturn(clusterId); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + Mockito.when(cmd.getStorageAccessGroups()).thenReturn(storageAccessGroups); + + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getPodId()).thenReturn(1L); + Mockito.when(cluster.getStorageAccessGroups()).thenReturn("sag1,sag2"); + Mockito.when(clusterDao.findById(clusterId)).thenReturn(cluster); + Mockito.doNothing().when(storageManagerImpl).checkIfStorageAccessGroupsExistsOnPod(1L, storageAccessGroups); + + boolean result = storageManagerImpl.configureStorageAccess(cmd); + + Mockito.verify(resourceMgr, Mockito.never()).updateClusterStorageAccessGroups(Mockito.anyLong(), Mockito.anyList()); + Mockito.verify(clusterDao, Mockito.never()).update(Mockito.eq(clusterId), Mockito.any(ClusterVO.class)); + + 
Assert.assertTrue(result); + } + + @Test + public void testConfigureStorageAccess_SkipUpdateForHost() { + Long hostId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + Mockito.when(cmd.getZoneId()).thenReturn(null); + Mockito.when(cmd.getPodId()).thenReturn(null); + Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(hostId); + Mockito.when(cmd.getStorageId()).thenReturn(null); + Mockito.when(cmd.getStorageAccessGroups()).thenReturn(storageAccessGroups); + + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1L); + Mockito.when(host.getStorageAccessGroups()).thenReturn("sag1,sag2"); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.doNothing().when(storageManagerImpl).checkIfStorageAccessGroupsExistsOnCluster(1L, storageAccessGroups); + + boolean result = storageManagerImpl.configureStorageAccess(cmd); + + Mockito.verify(resourceMgr, Mockito.never()).updateHostStorageAccessGroups(Mockito.anyLong(), Mockito.anyList()); + Mockito.verify(hostDao, Mockito.never()).update(Mockito.eq(hostId), Mockito.any(HostVO.class)); + + Assert.assertTrue(result); + } + + @Test + public void testConfigureStorageAccess_InvalidNonNullCount() { + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + + Mockito.when(cmd.getZoneId()).thenReturn(1L); + Mockito.when(cmd.getPodId()).thenReturn(1L); + Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + + try { + storageManagerImpl.configureStorageAccess(cmd); + Assert.fail("Expected IllegalArgumentException to be thrown due to nonNullCount validation"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(e.getMessage().contains("Exactly one of zoneid, podid, clusterid, hostid or storagepoolid is required")); + 
} + } + + @Test + public void testConfigureStorageAccess_MissingStorageAccessGroups() { + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + + Mockito.when(cmd.getZoneId()).thenReturn(1L); + Mockito.when(cmd.getPodId()).thenReturn(null); + Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + Mockito.when(cmd.getStorageAccessGroups()).thenReturn(null); + + try { + storageManagerImpl.configureStorageAccess(cmd); + Assert.fail("Expected InvalidParameterValueException to be thrown due to missing storageAccessGroups"); + } catch (InvalidParameterValueException e) { + Assert.assertTrue(e.getMessage().contains("storageaccessgroups parameter is required")); + } + } } diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 7dcf30c55e40..7b24451f066a 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -1679,6 +1679,8 @@ public void testStoragePoolCompatibilityAndAllowEncryptedVolumeMigrationForPower Mockito.when(primaryDataStoreDaoMock.findById(1L)).thenReturn(srcStoragePoolVOMock); Mockito.when(srcStoragePoolVOMock.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex); Mockito.when(dataStoreMgr.getDataStore(2L, DataStoreRole.Primary)).thenReturn( dataStore); + Pair checkResult = new Pair<>(true, "success"); + Mockito.doReturn(checkResult).when(storageMgr).checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(any(), any()); volumeApiServiceImpl.migrateVolume(migrateVolumeCmd); } catch (InvalidParameterValueException e) { diff --git a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java index c700188a5999..7421eb7ae2d9 100644 
--- a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java +++ b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java @@ -24,6 +24,7 @@ import com.cloud.storage.Storage; import com.cloud.storage.StorageManagerImpl; import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.dao.StoragePoolHostDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.junit.Before; @@ -32,12 +33,11 @@ import java.util.Collections; -import static org.mockito.ArgumentMatchers.nullable; - public class StoragePoolMonitorTest { private StorageManagerImpl storageManager; private PrimaryDataStoreDao poolDao; + private StoragePoolHostDao storagePoolHostDao; private StoragePoolMonitor storagePoolMonitor; private HostVO host; private StoragePoolVO pool; @@ -47,8 +47,9 @@ public class StoragePoolMonitorTest { public void setUp() throws Exception { storageManager = Mockito.mock(StorageManagerImpl.class); poolDao = Mockito.mock(PrimaryDataStoreDao.class); + storagePoolHostDao = Mockito.mock(StoragePoolHostDao.class); - storagePoolMonitor = new StoragePoolMonitor(storageManager, poolDao, null); + storagePoolMonitor = new StoragePoolMonitor(storageManager, poolDao, storagePoolHostDao, null); host = new HostVO("some-uuid"); pool = new StoragePoolVO(); pool.setScope(ScopeType.CLUSTER); @@ -61,14 +62,26 @@ public void setUp() throws Exception { @Test public void testProcessConnectStoragePoolNormal() throws Exception { - Mockito.when(poolDao.listBy(nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.any(ScopeType.class))).thenReturn(Collections.singletonList(pool)); - Mockito.when(poolDao.findZoneWideStoragePoolsByTags(Mockito.anyLong(), Mockito.any(String[].class), Mockito.anyBoolean())).thenReturn(Collections.emptyList()); - Mockito.when(poolDao.findZoneWideStoragePoolsByHypervisor(Mockito.anyLong(), 
Mockito.any(Hypervisor.HypervisorType.class))).thenReturn(Collections.emptyList()); - Mockito.doReturn(true).when(storageManager).connectHostToSharedPool(host, pool.getId()); + HostVO hostMock = Mockito.mock(HostVO.class); + StartupRoutingCommand startupRoutingCommand = Mockito.mock(StartupRoutingCommand.class); + StoragePoolVO poolMock = Mockito.mock(StoragePoolVO.class); + Mockito.when(poolMock.getScope()).thenReturn(ScopeType.CLUSTER); + Mockito.when(poolMock.getStatus()).thenReturn(StoragePoolStatus.Up); + Mockito.when(poolMock.getId()).thenReturn(123L); + Mockito.when(poolMock.getPoolType()).thenReturn(Storage.StoragePoolType.Filesystem); + Mockito.when(hostMock.getDataCenterId()).thenReturn(1L); + Mockito.when(hostMock.getPodId()).thenReturn(1L); + Mockito.when(hostMock.getClusterId()).thenReturn(1L); + Mockito.when(startupRoutingCommand.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + Mockito.when(poolDao.findStoragePoolsByEmptyStorageAccessGroups(1L, 1L, 1L, ScopeType.CLUSTER, null)).thenReturn(Collections.singletonList(pool)); + Mockito.when(poolDao.findStoragePoolsByEmptyStorageAccessGroups(1L, null, null, ScopeType.ZONE, null)).thenReturn(Collections.emptyList()); + Mockito.when(poolDao.findStoragePoolsByEmptyStorageAccessGroups(1L, null, null, ScopeType.ZONE, Hypervisor.HypervisorType.KVM)).thenReturn(Collections.emptyList()); + Mockito.when(poolDao.findStoragePoolsByEmptyStorageAccessGroups(1L, null, null, ScopeType.ZONE, Hypervisor.HypervisorType.Any)).thenReturn(Collections.emptyList()); + Mockito.doReturn(true).when(storageManager).connectHostToSharedPool(hostMock, 123L); - storagePoolMonitor.processConnect(host, cmd, false); + storagePoolMonitor.processConnect(hostMock, startupRoutingCommand, false); - Mockito.verify(storageManager, Mockito.times(1)).connectHostToSharedPool(Mockito.eq(host), Mockito.eq(pool.getId())); + Mockito.verify(storageManager, Mockito.times(1)).connectHostToSharedPool(Mockito.eq(hostMock), 
Mockito.eq(pool.getId())); Mockito.verify(storageManager, Mockito.times(1)).createCapacityEntry(Mockito.eq(pool.getId())); } diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index f07d2af21af2..7c61fa04a727 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -33,6 +33,7 @@ import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; import java.util.ArrayList; @@ -40,6 +41,7 @@ import java.util.List; import java.util.Map; +import com.cloud.storage.StorageManager; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; @@ -365,6 +367,9 @@ public class UserVmManagerImplTest { @Mock private VMInstanceVO vmInstanceMock; + @Mock + StorageManager storageManager; + private static final long vmId = 1l; private static final long zoneId = 2L; private static final long accountId = 3L; @@ -483,7 +488,7 @@ public void updateVirtualMachineTestDisplayChanged() throws ResourceUnavailableE verifyMethodsThatAreAlwaysExecuted(); Mockito.verify(userVmManagerImpl).updateDisplayVmFlag(false, vmId, userVmVoMock); - Mockito.verify(userVmDetailsDao, Mockito.times(0)).removeDetail(anyLong(), anyString()); + Mockito.verify(userVmDetailsDao, times(0)).removeDetail(anyLong(), anyString()); } @Test @@ -501,8 +506,8 @@ public void updateVirtualMachineTestCleanUpTrue() throws ResourceUnavailableExce userVmManagerImpl.updateVirtualMachine(updateVmCommand); verifyMethodsThatAreAlwaysExecuted(); Mockito.verify(userVmDetailsDao).removeDetail(vmId, "userdetail"); - Mockito.verify(userVmDetailsDao, Mockito.times(0)).removeDetail(vmId, "systemdetail"); - Mockito.verify(userVmManagerImpl, 
Mockito.times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock); + Mockito.verify(userVmDetailsDao, times(0)).removeDetail(vmId, "systemdetail"); + Mockito.verify(userVmManagerImpl, times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock); } @Test @@ -566,11 +571,11 @@ private void prepareAndExecuteMethodDealingWithDetails(boolean cleanUpDetails, b userVmManagerImpl.updateVirtualMachine(updateVmCommand); verifyMethodsThatAreAlwaysExecuted(); - Mockito.verify(userVmVoMock, Mockito.times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).setDetails(details); - Mockito.verify(userVmDetailsDao, Mockito.times(cleanUpDetails ? 1 : 0)).removeDetail(vmId, "existingdetail"); - Mockito.verify(userVmDetailsDao, Mockito.times(0)).removeDetail(vmId, "systemdetail"); - Mockito.verify(userVmDao, Mockito.times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).saveDetails(userVmVoMock); - Mockito.verify(userVmManagerImpl, Mockito.times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock); + Mockito.verify(userVmVoMock, times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).setDetails(details); + Mockito.verify(userVmDetailsDao, times(cleanUpDetails ? 1 : 0)).removeDetail(vmId, "existingdetail"); + Mockito.verify(userVmDetailsDao, times(0)).removeDetail(vmId, "systemdetail"); + Mockito.verify(userVmDao, times(cleanUpDetails || isDetailsEmpty ? 
0 : 1)).saveDetails(userVmVoMock); + Mockito.verify(userVmManagerImpl, times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock); } private void configureDoNothingForDetailsMethod() { @@ -659,7 +664,7 @@ private void configureValidateOrReplaceMacAddressTest(int times, String macAddre String returnedMacAddress = userVmManagerImpl.validateOrReplaceMacAddress(macAddress, networkMock); - Mockito.verify(networkModel, Mockito.times(times)).getNextAvailableMacAddressInNetwork(Mockito.anyLong()); + Mockito.verify(networkModel, times(times)).getNextAvailableMacAddressInNetwork(Mockito.anyLong()); assertEquals(expectedMacAddress, returnedMacAddress); } @@ -736,7 +741,7 @@ private void prepareAndRunConfigureCustomRootDiskSizeTest(Map cu long rootDiskSize = userVmManagerImpl.configureCustomRootDiskSize(customParameters, template, Hypervisor.HypervisorType.KVM, diskfferingVo); Assert.assertEquals(expectedRootDiskSize, rootDiskSize); - Mockito.verify(userVmManagerImpl, Mockito.times(timesVerifyIfHypervisorSupports)).verifyIfHypervisorSupportsRootdiskSizeOverride(Mockito.any()); + Mockito.verify(userVmManagerImpl, times(timesVerifyIfHypervisorSupports)).verifyIfHypervisorSupportsRootdiskSizeOverride(Mockito.any()); } @Test @@ -1613,18 +1618,18 @@ public void testCheckVolumesLimits() { Long size = volumes.stream().filter(VolumeVO::isDisplay).mapToLong(VolumeVO::getSize).sum(); try { userVmManagerImpl.checkVolumesLimits(account, volumes); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimit(account, Resource.ResourceType.volume, 4); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimit(account, Resource.ResourceType.primary_storage, size); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimitWithTag(account, Resource.ResourceType.volume, "tag1", 2); - Mockito.verify(resourceLimitMgr, 
Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimitWithTag(account, Resource.ResourceType.volume, "tag2", 3); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimitWithTag(account, Resource.ResourceType.primary_storage, "tag1", vol1.getSize() + vol5.getSize()); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimitWithTag(account, Resource.ResourceType.primary_storage, "tag2", vol1.getSize() + vol3.getSize() + vol5.getSize()); } catch (ResourceAllocationException e) { @@ -1651,7 +1656,7 @@ public void testValidateStrictHostTagCheckPass() { userVmManagerImpl.validateStrictHostTagCheck(vm, destinationHostVO); Mockito.verify( - destinationHostVO, Mockito.times(1) + destinationHostVO, times(1) ).checkHostServiceOfferingAndTemplateTags(Mockito.any(ServiceOffering.class), Mockito.any(VirtualMachineTemplate.class), Mockito.anySet()); } @@ -2857,7 +2862,7 @@ public void addNicsToApplicableNetworksAndReturnDefaultNetworkTestApplicableNetw NetworkVO defaultNetwork = userVmManagerImpl.addNicsToApplicableNetworksAndReturnDefaultNetwork(applicableNetworks, requestedIPv4ForNics, requestedIPv6ForNics, networks); - Mockito.verify(networks, Mockito.times(2)).put(Mockito.any(), Mockito.any()); + Mockito.verify(networks, times(2)).put(Mockito.any(), Mockito.any()); Assert.assertEquals(defaultNetwork, networkMock); } @@ -3125,4 +3130,94 @@ public void executeStepsToChangeOwnershipOfVmTestResourceCountRunningVmsOnlyEnab Mockito.verify(userVmManagerImpl, Mockito.never()).resourceCountIncrement(Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.any()); } } + + @Test + public void validateStorageAccessGroupsOnHostsMatchingSAGsNoException() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + 
when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(new String[]{"sag1", "sag2"}); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(new String[]{"sag1", "sag2", "sag3"}); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, srcHost.getId()); + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, destHost.getId()); + } + + @Test(expected = CloudRuntimeException.class) + public void validateSAGsOnHostsNonMatchingSAGsThrowsException() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(new String[]{"sag1", "sag2"}); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(new String[]{"sag1", "sag3"}); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + } + + @Test + public void validateEmptyStorageAccessGroupOnHosts() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(new String[]{}); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(new String[]{}); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, srcHost.getId()); + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, destHost.getId()); + } + + @Test + public void 
validateSAGsOnHostsNullStorageAccessGroups() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(null); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(null); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, srcHost.getId()); + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, destHost.getId()); + } + + @Test(expected = CloudRuntimeException.class) + public void validateSAGsOnDestHostNullStorageAccessGroups() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(new String[]{"sag1", "sag2"}); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(null); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + } + + @Test + public void validateNullStorageAccessGroupsOnSrcHost() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(null); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(new String[]{"sag1", "sag2"}); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, srcHost.getId()); + 
Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, destHost.getId()); + } } diff --git a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java index cdd23b0ccc2c..d4f3569cb577 100644 --- a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -189,7 +189,7 @@ public List getDiskOfferingZones(Long diskOfferingId) { * @see com.cloud.configuration.ConfigurationService#createPod(long, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String) */ @Override - public Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState) { + public Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState, List storageAccessGroups) { // TODO Auto-generated method stub return null; } @@ -507,7 +507,7 @@ public String updateConfiguration(long userId, String name, String category, Str */ @Override public HostPodVO createPod(long userId, String podName, DataCenter zone, String gateway, String cidr, String startIp, String endIp, String allocationState, - boolean skipGatewayOverlapCheck) { + boolean skipGatewayOverlapCheck, List storageAccessGroups) { // TODO Auto-generated method stub return null; } @@ -632,8 +632,8 @@ public AllocationState findClusterAllocationState(ClusterVO cluster) { */ @Override public DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, String guestCidr, String domain, - Long domainId, NetworkType zoneType, String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, - String ip6Dns2, boolean isEdge) { + Long domainId, NetworkType zoneType, String 
allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, + String ip6Dns2, boolean isEdge, List storageAccessGroups) { // TODO Auto-generated method stub return null; } diff --git a/server/src/test/java/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java b/server/src/test/java/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java index dac6674109a6..d2f2cc1d1847 100644 --- a/server/src/test/java/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java +++ b/server/src/test/java/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java @@ -75,6 +75,7 @@ import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.projects.ProjectManager; +import com.cloud.resource.ResourceManager; import com.cloud.server.ConfigurationServer; import com.cloud.server.ManagementService; import com.cloud.service.dao.ServiceOfferingDaoImpl; @@ -254,6 +255,11 @@ public Ipv6AddressManager ipv6Mgr() { return Mockito.mock(Ipv6AddressManager.class); } + @Bean + public ResourceManager resourceManager() { + return Mockito.mock(ResourceManager.class); + } + @Bean public ConfigurationDao configDao() { return Mockito.mock(ConfigurationDao.class); diff --git a/server/src/test/resources/createNetworkOffering.xml b/server/src/test/resources/createNetworkOffering.xml index a3f43407c61c..e62c4c0660fd 100644 --- a/server/src/test/resources/createNetworkOffering.xml +++ b/server/src/test/resources/createNetworkOffering.xml @@ -57,6 +57,7 @@ + diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index c05b8fe27987..d9c900555b46 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -257,6 +257,7 @@ 'deleteASNRange': 'AS Number Range', 'listASNumbers': 'AS Number', 'releaseASNumber': 'AS Number', + 'configureStorageAccess': 'Primary Storage Access' } diff --git a/ui/public/locales/en.json 
b/ui/public/locales/en.json index ebfe6bda1b2c..0c7c64b31838 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -72,6 +72,7 @@ "label.action.delete.webhook.deliveries": "Delete deliveries", "label.action.change.primary.storage.scope": "Change primary storage scope", "label.action.configure.stickiness": "Stickiness", +"label.action.configure.storage.access.group": "Update storage access group", "label.action.copy.iso": "Copy ISO", "label.action.copy.snapshot": "Copy Snapshot", "label.action.copy.template": "Copy Template", @@ -220,6 +221,10 @@ "label.action.unmanage.instance": "Unmanage Instance", "label.action.unmanage.instances": "Unmanage Instances", "label.action.unmanage.virtualmachine": "Unmanage Instance", +"label.action.update.cluster": "Update cluster", +"label.action.update.pod": "Update pod", +"label.action.update.zone": "Update zone", +"label.action.update.storage.pool": "Update storage pool", "label.action.unmanage.volume": "Unmanage Volume", "label.action.unmanage.volumes": "Unmanage Volumes", "label.action.update.host": "Update host", @@ -2165,6 +2170,10 @@ "label.srx": "SRX", "label.srx.firewall": "Juniper SRX firewall", "label.ssh.key.pairs": "SSH key pairs", +"label.storageaccessgroups": "Storage Access Groups", +"label.clusterstorageaccessgroups": "Cluster Storage Access Groups", +"label.podstorageaccessgroups": "Pod Storage Access Groups", +"label.zonestorageaccessgroups": "Zone Storage Access Groups", "label.uefi.supported": "UEFI supported", "label.usediops": "IOPS used", "label.userdataid": "Userdata ID", @@ -2883,6 +2892,7 @@ "message.configuring.guest.traffic": "Configuring guest traffic", "message.configuring.physical.networks": "Configuring physical Networks", "message.configuring.public.traffic": "Configuring public traffic", +"message.configuring.storage.access.failed": "Configuring storage access failed", "message.configuring.nsx.public.traffic": "Configuring NSX public traffic", 
"message.configuring.storage.traffic": "Configuring storage traffic", "message.confirm.action.force.reconnect": "Please confirm that you want to force reconnect this host.", diff --git a/ui/src/config/section/infra/clusters.js b/ui/src/config/section/infra/clusters.js index 883efd463c38..c03a1716a8d4 100644 --- a/ui/src/config/section/infra/clusters.js +++ b/ui/src/config/section/infra/clusters.js @@ -35,7 +35,7 @@ export default { fields.push('zonename') return fields }, - details: ['name', 'id', 'allocationstate', 'clustertype', 'managedstate', 'arch', 'hypervisortype', 'podname', 'zonename', 'drsimbalance'], + details: ['name', 'id', 'allocationstate', 'clustertype', 'managedstate', 'arch', 'hypervisortype', 'podname', 'zonename', 'drsimbalance', 'storageaccessgroups', 'podstorageaccessgroups', 'zonestorageaccessgroups'], related: [{ name: 'host', title: 'label.hosts', @@ -83,12 +83,8 @@ export default { icon: 'edit-outlined', label: 'label.edit', dataView: true, - args: ['clustername', 'arch'], - mapping: { - arch: { - options: ['x86_64', 'aarch64'] - } - } + popup: true, + component: shallowRef(defineAsyncComponent(() => import('@/views/infra/ClusterUpdate.vue'))) }, { api: 'updateCluster', diff --git a/ui/src/config/section/infra/hosts.js b/ui/src/config/section/infra/hosts.js index 501283984b82..474177918e4d 100644 --- a/ui/src/config/section/infra/hosts.js +++ b/ui/src/config/section/infra/hosts.js @@ -45,7 +45,7 @@ export default { fields.push('managementservername') return fields }, - details: ['name', 'id', 'resourcestate', 'ipaddress', 'hypervisor', 'arch', 'type', 'clustername', 'podname', 'zonename', 'managementservername', 'disconnected', 'created'], + details: ['name', 'id', 'resourcestate', 'ipaddress', 'hypervisor', 'arch', 'type', 'clustername', 'podname', 'zonename', 'storageaccessgroups', 'clusterstorageaccessgroups', 'podstorageaccessgroups', 'zonestorageaccessgroups', 'managementservername', 'disconnected', 'created'], tabs: [{ name: 
'details', component: shallowRef(defineAsyncComponent(() => import('@/components/view/DetailsTab.vue'))) diff --git a/ui/src/config/section/infra/pods.js b/ui/src/config/section/infra/pods.js index 595b35f4fb99..66d38c088964 100644 --- a/ui/src/config/section/infra/pods.js +++ b/ui/src/config/section/infra/pods.js @@ -26,7 +26,7 @@ export default { permission: ['listPods'], searchFilters: ['name', 'zoneid'], columns: ['name', 'allocationstate', 'gateway', 'netmask', 'zonename'], - details: ['name', 'id', 'allocationstate', 'netmask', 'gateway', 'zonename'], + details: ['name', 'id', 'allocationstate', 'netmask', 'gateway', 'zonename', 'storageaccessgroups', 'zonestorageaccessgroups'], related: [{ name: 'cluster', title: 'label.clusters', @@ -71,7 +71,8 @@ export default { icon: 'edit-outlined', label: 'label.edit', dataView: true, - args: ['name', 'netmask', 'gateway'] + popup: true, + component: shallowRef(defineAsyncComponent(() => import('@/views/infra/PodUpdate.vue'))) }, { api: 'updatePod', diff --git a/ui/src/config/section/infra/primaryStorages.js b/ui/src/config/section/infra/primaryStorages.js index 1b0e5ef1634d..f127a0853b9e 100644 --- a/ui/src/config/section/infra/primaryStorages.js +++ b/ui/src/config/section/infra/primaryStorages.js @@ -35,7 +35,7 @@ export default { fields.push('zonename') return fields }, - details: ['name', 'id', 'ipaddress', 'type', 'details', 'nfsmountopts', 'scope', 'tags', 'path', 'provider', 'hypervisor', 'overprovisionfactor', 'disksizetotal', 'disksizeallocated', 'disksizeused', 'capacityiops', 'usediops', 'clustername', 'podname', 'zonename', 'created'], + details: ['name', 'id', 'ipaddress', 'type', 'details', 'nfsmountopts', 'scope', 'tags', 'storageaccessgroups', 'path', 'provider', 'hypervisor', 'overprovisionfactor', 'disksizetotal', 'disksizeallocated', 'disksizeused', 'capacityiops', 'usediops', 'clustername', 'podname', 'zonename', 'created'], related: [{ name: 'volume', title: 'label.volumes', diff --git 
a/ui/src/config/section/infra/zones.js b/ui/src/config/section/infra/zones.js index cb95bce8f75e..de971858ab4d 100644 --- a/ui/src/config/section/infra/zones.js +++ b/ui/src/config/section/infra/zones.js @@ -34,7 +34,7 @@ export default { fields.push('order') return fields }, - details: ['name', 'id', 'allocationstate', 'type', 'networktype', 'guestcidraddress', 'localstorageenabled', 'securitygroupsenabled', 'dns1', 'dns2', 'internaldns1', 'internaldns2', 'asnrange'], + details: ['name', 'id', 'allocationstate', 'type', 'networktype', 'guestcidraddress', 'localstorageenabled', 'securitygroupsenabled', 'dns1', 'dns2', 'internaldns1', 'internaldns2', 'asnrange', 'storageaccessgroups'], related: [{ name: 'pod', title: 'label.pods', @@ -118,8 +118,8 @@ export default { icon: 'edit-outlined', label: 'label.action.edit.zone', dataView: true, - args: ['name', 'dns1', 'dns2', 'ip6dns1', 'ip6dns2', 'internaldns1', 'internaldns2', 'guestcidraddress', 'domain', 'localstorageenabled'], - show: (record) => { return record.networktype === 'Advanced' } + popup: true, + component: shallowRef(defineAsyncComponent(() => import('@/views/infra/ZoneUpdate.vue'))) }, { api: 'updateZone', diff --git a/ui/src/config/section/offering.js b/ui/src/config/section/offering.js index f83daaea7638..8da45a63699b 100644 --- a/ui/src/config/section/offering.js +++ b/ui/src/config/section/offering.js @@ -40,7 +40,7 @@ export default { filters: ['active', 'inactive'], columns: ['name', 'displaytext', 'state', 'cpunumber', 'cpuspeed', 'memory', 'domain', 'zone', 'order'], details: () => { - var fields = ['name', 'id', 'displaytext', 'offerha', 'provisioningtype', 'storagetype', 'iscustomized', 'iscustomizediops', 'limitcpuuse', 'cpunumber', 'cpuspeed', 'memory', 'hosttags', 'tags', 'storagetags', 'domain', 'zone', 'created', 'dynamicscalingenabled', 'diskofferingstrictness', 'encryptroot', 'purgeresources'] + var fields = ['name', 'id', 'displaytext', 'offerha', 'provisioningtype', 'storagetype', 
'iscustomized', 'iscustomizediops', 'limitcpuuse', 'cpunumber', 'cpuspeed', 'memory', 'hosttags', 'tags', 'storageaccessgroups', 'storagetags', 'domain', 'zone', 'created', 'dynamicscalingenabled', 'diskofferingstrictness', 'encryptroot', 'purgeresources'] if (store.getters.apis.createServiceOffering && store.getters.apis.createServiceOffering.params.filter(x => x.name === 'storagepolicy').length > 0) { fields.splice(6, 0, 'vspherestoragepolicy') @@ -95,7 +95,7 @@ export default { label: 'label.edit', docHelp: 'adminguide/service_offerings.html#modifying-or-deleting-a-service-offering', dataView: true, - args: ['name', 'displaytext', 'storagetags', 'hosttags'] + args: ['name', 'displaytext', 'storageaccessgroups', 'hosttags'] }, { api: 'updateServiceOffering', icon: 'lock-outlined', diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index 17f3460614cd..7d7d5ddb6b7a 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -1191,7 +1191,7 @@ export default { this.showAction = true const listIconForFillValues = ['copy-outlined', 'CopyOutlined', 'edit-outlined', 'EditOutlined', 'share-alt-outlined', 'ShareAltOutlined'] for (const param of this.currentAction.paramFields) { - if (param.type === 'list' && ['tags', 'hosttags', 'storagetags', 'files'].includes(param.name)) { + if (param.type === 'list' && ['tags', 'hosttags', 'storagetags', 'storageaccessgroups', 'files'].includes(param.name)) { param.type = 'string' } this.setRules(param) @@ -1586,7 +1586,7 @@ export default { } break } - if (input === '' && !['tags', 'hosttags', 'storagetags', 'dns2', 'ip6dns1', + if (input === '' && !['tags', 'hosttags', 'storagetags', 'storageaccessgroups', 'dns2', 'ip6dns1', 'ip6dns2', 'internaldns2', 'networkdomain', 'secretkey'].includes(key)) { break } diff --git a/ui/src/views/infra/ClusterUpdate.vue b/ui/src/views/infra/ClusterUpdate.vue new file mode 100644 index 000000000000..ebb11f8a01c6 --- /dev/null +++ 
b/ui/src/views/infra/ClusterUpdate.vue @@ -0,0 +1,196 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + + + + + + diff --git a/ui/src/views/infra/HostInfo.vue b/ui/src/views/infra/HostInfo.vue index 259445154a01..994c6f18b598 100644 --- a/ui/src/views/infra/HostInfo.vue +++ b/ui/src/views/infra/HostInfo.vue @@ -70,6 +70,14 @@ + +
+ {{ $t('label.storageaccessgroups') }} +
+ {{ host.storageaccessgroups }} +
+
+
{{ $t('label.oscategoryid') }} diff --git a/ui/src/views/infra/HostUpdate.vue b/ui/src/views/infra/HostUpdate.vue index aeb2a3c92a64..eb1c757263f0 100644 --- a/ui/src/views/infra/HostUpdate.vue +++ b/ui/src/views/infra/HostUpdate.vue @@ -45,6 +45,12 @@ + + + +