From 58d46030bfaa16cab49087110a41234a1561341d Mon Sep 17 00:00:00 2001 From: saketa Date: Tue, 6 May 2025 01:49:13 -0700 Subject: [PATCH 1/8] HDDS-10979. Support STANDARD_IA S3 storage class to accept EC replication config. --- .../ozone/s3/endpoint/EndpointBase.java | 3 +- .../ozone/s3/endpoint/ObjectEndpoint.java | 33 ++-- .../ozone/s3/exception/S3ErrorTable.java | 6 + .../apache/hadoop/ozone/s3/util/S3Consts.java | 1 + .../hadoop/ozone/s3/util/S3StorageType.java | 38 ++-- .../apache/hadoop/ozone/s3/util/S3Utils.java | 71 +++----- .../hadoop/ozone/s3/util/TestS3Utils.java | 164 ++++++++---------- 7 files changed, 139 insertions(+), 177 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 69d2ed1a79c2..d0bde53b3262 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.AWS_TAG_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; +import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CONFIG_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_KEY_LENGTH_LIMIT; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_NUM_LIMIT; @@ -97,7 +98,7 @@ public abstract class EndpointBase implements Auditor { private ContainerRequestContext context; private Set excludeMetadataFields = - new HashSet<>(Arrays.asList(OzoneConsts.GDPR_FLAG)); + new HashSet<>(Arrays.asList(OzoneConsts.GDPR_FLAG, STORAGE_CONFIG_HEADER)); private static final Logger LOG = LoggerFactory.getLogger(EndpointBase.class); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 1b11d47a166e..5373d114f54e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -29,7 +29,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT; import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_KEY; @@ -49,12 +48,14 @@ import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_MODIFIED_SINCE; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_UNMODIFIED_SINCE; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.CopyDirective; import static 
org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.MP_PARTS_COUNT; import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CONFIG_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_COUNT_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_DIRECTIVE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Utils.urlDecode; @@ -230,7 +231,7 @@ public Response put( boolean auditSuccess = true; PerformanceStringBuilder perf = new PerformanceStringBuilder(); - String copyHeader = null, storageType = null; + String copyHeader = null, storageType = null, storageConfig = null; DigestInputStream digestInputStream = null; try { if (aclMarker != null) { @@ -256,12 +257,13 @@ public Response put( copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); storageType = headers.getHeaderString(STORAGE_CLASS_HEADER); + storageConfig = headers.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER); boolean storageTypeDefault = StringUtils.isEmpty(storageType); // Normal put object OzoneBucket bucket = volume.getBucket(bucketName); ReplicationConfig replicationConfig = - getReplicationConfig(bucket, storageType); + getReplicationConfig(bucket, storageType, storageConfig); boolean enableEC = false; if ((replicationConfig != null && @@ -812,6 +814,7 @@ public Response initializeMultipartUpload( try { OzoneBucket ozoneBucket = getBucket(bucket); String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER); + String storageConfig = headers.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER); Map customMetadata = getCustomMetadataFromHeaders(headers.getRequestHeaders()); @@ -819,7 +822,7 @@ public Response initializeMultipartUpload( Map tags = getTaggingFromHeaders(headers); ReplicationConfig replicationConfig = - getReplicationConfig(ozoneBucket, storageType); + getReplicationConfig(ozoneBucket, storageType, storageConfig); OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(key, replicationConfig, customMetadata, tags); @@ -852,21 +855,20 @@ public Response initializeMultipartUpload( } private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, - String storageType) throws OS3Exception { - if (StringUtils.isEmpty(storageType)) { - S3StorageType defaultStorageType = S3StorageType.getDefault(ozoneConfiguration); - storageType = (defaultStorageType != null ? defaultStorageType.toString() : null); - } + String storageType, String storageConfig) throws OS3Exception { ReplicationConfig clientConfiguredReplicationConfig = null; + ReplicationType replicationType = ReplicationType.valueOf( + ozoneConfiguration.get(OZONE_REPLICATION_TYPE)); String replication = ozoneConfiguration.get(OZONE_REPLICATION); + if (replication != null) { - clientConfiguredReplicationConfig = ReplicationConfig.parse( - ReplicationType.valueOf(ozoneConfiguration - .get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT)), - replication, ozoneConfiguration); + clientConfiguredReplicationConfig = + (replicationType == ReplicationType.EC) ? 
+ new ECReplicationConfig(replication) : ReplicationConfig.parse( + replicationType, replication, ozoneConfiguration); } - return S3Utils.resolveS3ClientSideReplicationConfig(storageType, + return S3Utils.resolveS3ClientSideReplicationConfig(storageType, storageConfig, clientConfiguredReplicationConfig, ozoneBucket.getReplicationConfig()); } @@ -969,9 +971,10 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER); + String storageConfig = headers.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER); final OzoneBucket ozoneBucket = volume.getBucket(bucket); ReplicationConfig replicationConfig = - getReplicationConfig(ozoneBucket, storageType); + getReplicationConfig(ozoneBucket, storageType, storageConfig); boolean enableEC = false; if ((replicationConfig != null && diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java index b09ba5c9547f..5160c19dbb42 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java @@ -150,6 +150,12 @@ public final class S3ErrorTable { HTTP_FORBIDDEN ); + public static final OS3Exception INVALID_STORAGE_CLASS = new OS3Exception( + "InvalidStorageClass", "The storage class that you specified is not valid. " + + "Check that storage class is supported and if using STANDARD_IA check that " + + "storage config is a valid EC replication string.", + HTTP_BAD_REQUEST); + private static Function generateInternalError = e -> new OS3Exception("InternalError", e.getMessage(), HTTP_INTERNAL_ERROR); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index 1060f2568c80..f963fe9996d8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -69,6 +69,7 @@ public final class S3Consts { // Constants related to custom metadata public static final String CUSTOM_METADATA_HEADER_PREFIX = "x-amz-meta-"; public static final String CUSTOM_METADATA_COPY_DIRECTIVE_HEADER = "x-amz-metadata-directive"; + public static final String STORAGE_CONFIG_HEADER = "storage-config"; public static final String DECODED_CONTENT_LENGTH_HEADER = "x-amz-decoded-content-length"; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java index 9f2b5f777268..71f8330e799c 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java @@ -21,9 +21,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneConfigKeys; /** * Maps S3 storage class values to Ozone replication values. 
@@ -31,17 +29,21 @@ public enum S3StorageType { - REDUCED_REDUNDANCY(ReplicationType.RATIS, ReplicationFactor.ONE), - STANDARD(ReplicationType.RATIS, ReplicationFactor.THREE); + REDUCED_REDUNDANCY(ReplicationType.RATIS, ReplicationFactor.ONE, null), + STANDARD(ReplicationType.RATIS, ReplicationFactor.THREE, null), + STANDARD_IA(ReplicationType.EC, null, ECReplicationConfig.EcCodec.RS + "-3-2-1024k"); private final ReplicationType type; private final ReplicationFactor factor; + private final String ecReplicationString; S3StorageType( ReplicationType type, - ReplicationFactor factor) { + ReplicationFactor factor, + String ecReplicationString) { this.type = type; this.factor = factor; + this.ecReplicationString = ecReplicationString; } public ReplicationFactor getFactor() { @@ -52,33 +54,13 @@ public ReplicationType getType() { return type; } - /** - * Get default S3StorageType for a new key to be uploaded. - * This should align to the ozone cluster configuration. - * @param config OzoneConfiguration - * @return S3StorageType which wraps ozone replication type and factor - */ - public static S3StorageType getDefault(ConfigurationSource config) { - String replicationString = config.get(OzoneConfigKeys.OZONE_REPLICATION); - ReplicationFactor configFactor; - if (replicationString == null) { - // if no config is set then let server take decision - return null; - } - try { - configFactor = ReplicationFactor.valueOf( - Integer.parseInt(replicationString)); - } catch (NumberFormatException ex) { - // conservatively defaults to STANDARD on wrong config value - return STANDARD; - } - return configFactor == ReplicationFactor.ONE - ? REDUCED_REDUNDANCY : STANDARD; + public String getEcReplicationString() { + return ecReplicationString; } public static S3StorageType fromReplicationConfig(ReplicationConfig config) { if (config instanceof ECReplicationConfig) { - return S3StorageType.STANDARD; + return STANDARD_IA; } if (config.getReplicationType() == HddsProtos.ReplicationType.STAND_ALONE || config.getRequiredNodes() == 1) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java index a99bfca73721..29547d4c96e7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.s3.util; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_STORAGE_CLASS; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.STREAMING_AWS4_ECDSA_P256_SHA256_PAYLOAD; import static org.apache.hadoop.ozone.s3.util.S3Consts.STREAMING_AWS4_ECDSA_P256_SHA256_PAYLOAD_TRAILER; @@ -32,10 +32,11 @@ import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.s3.exception.OS3Exception; /** @@ -63,64 +64,44 @@ private 
S3Utils() {
 
    *
    * @param s3StorageTypeHeader - s3 user passed storage type
    *                              header.
+   * @param s3StorageConfigHeader - s3 user passed storage config
+   *                                header (an EC replication string,
+   *                                used with STANDARD_IA).
    * @param clientConfiguredReplConfig - Client side configured replication
    *                                     config.
    * @param bucketReplConfig - server side bucket default replication
    *                           config.
    * @return client resolved replication config.
    */
   public static ReplicationConfig resolveS3ClientSideReplicationConfig(
-      String s3StorageTypeHeader, ReplicationConfig clientConfiguredReplConfig,
+      String s3StorageTypeHeader, String s3StorageConfigHeader,
+      ReplicationConfig clientConfiguredReplConfig,
       ReplicationConfig bucketReplConfig) throws OS3Exception {
-    ReplicationConfig clientDeterminedReplConfig = null;
-    // Let's map the user provided s3 storage type header to ozone s3 storage
-    // type.
-    S3StorageType s3StorageType = null;
-    if (s3StorageTypeHeader != null && !s3StorageTypeHeader.equals("")) {
-      s3StorageType = toS3StorageType(s3StorageTypeHeader);
+    // If the user provided an s3 storage type header, map it directly
+    // to an ozone replication config.
+    if (!StringUtils.isEmpty(s3StorageTypeHeader)) {
+      return toReplicationConfig(s3StorageTypeHeader, s3StorageConfigHeader);
     }
-    boolean isECBucket = bucketReplConfig != null && bucketReplConfig
-        .getReplicationType() == HddsProtos.ReplicationType.EC;
-
-    // if bucket replication config configured with EC, we will give high
-    // preference to server side bucket defaults.
-    // Why we give high preference to EC is, there is no way for file system
-    // interfaces to pass EC replication. So, if one configures EC at bucket,
-    // we consider EC to take preference. in short, keys created from file
-    // system under EC bucket will always be EC'd.
-    if (isECBucket) {
-      // if bucket is EC, don't bother client provided configs, let's pass
-      // bucket config.
-      clientDeterminedReplConfig = bucketReplConfig;
-    } else {
-      // Let's validate the client side available replication configs.
-      boolean isUserPassedReplicationInSupportedList =
-          s3StorageType != null && (s3StorageType.getFactor()
-              .getValue() == ReplicationFactor.ONE.getValue() || s3StorageType
-              .getFactor().getValue() == ReplicationFactor.THREE.getValue());
-      if (isUserPassedReplicationInSupportedList) {
-        clientDeterminedReplConfig = ReplicationConfig.fromProtoTypeAndFactor(
-            ReplicationType.toProto(s3StorageType.getType()),
-            ReplicationFactor.toProto(s3StorageType.getFactor()));
-      } else {
-        // API passed replication number is not in supported replication list.
-        // So, let's use whatever available in client side configured.
-        // By default it will be null, so server will use server defaults.
-        clientDeterminedReplConfig = clientConfiguredReplConfig;
-      }
-    }
-    return clientDeterminedReplConfig;
+    // Prefer the client side configured replication config when present;
+    // otherwise fall back to the bucket default (which may be null, in
+    // which case the server applies its own default).
+    return (clientConfiguredReplConfig != null) ?
+        clientConfiguredReplConfig : bucketReplConfig;
   }
 
-  public static S3StorageType toS3StorageType(String storageType)
+  public static ReplicationConfig toReplicationConfig(String storageType, String storageConfig)
       throws OS3Exception {
     try {
-      return S3StorageType.valueOf(storageType);
+      if (S3StorageType.STANDARD_IA.name().equals(storageType)) {
+        return (!StringUtils.isEmpty(storageConfig)) ?
new ECReplicationConfig(storageConfig) : + new ECReplicationConfig(S3StorageType.STANDARD_IA.getEcReplicationString()); + } else { + S3StorageType s3StorageType = S3StorageType.valueOf(storageType); + return ReplicationConfig.fromProtoTypeAndFactor( + ReplicationType.toProto(s3StorageType.getType()), + ReplicationFactor.toProto(s3StorageType.getFactor())); + } } catch (IllegalArgumentException ex) { - throw newError(INVALID_ARGUMENT, storageType, ex); + throw newError(INVALID_STORAGE_CLASS, storageType, ex); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java index bc61df82e7cf..fa9a09301123 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java @@ -21,116 +21,101 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; /** * Tests the S3Utils APIs. */ public class TestS3Utils { - private ReplicationConfig ecReplicationConfig = - new ECReplicationConfig("rs-3-2-1024K"); - private ReplicationConfig ratis3ReplicationConfig = + private static final ReplicationConfig ECREPLICATIONCONFIG = + new ECReplicationConfig(S3StorageType.STANDARD_IA.getEcReplicationString()); + private static final ReplicationConfig RATIS3REPLICATIONCONFIG = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE); - private ReplicationConfig ratis1ReplicationConfig = + private static final ReplicationConfig RATIS1REPLICATIONCONFIG = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE); - @Test - public void testResolveClientSideRepConfigWhenBucketHasEC() - throws OS3Exception { - ReplicationConfig replicationConfig = S3Utils - .resolveS3ClientSideReplicationConfig(S3StorageType.STANDARD.name(), - null, ecReplicationConfig); - // Bucket default is EC. - assertEquals(ecReplicationConfig, replicationConfig); - } + private static final List REPLICATIONS = Arrays.asList( + null, + RATIS1REPLICATIONCONFIG, + RATIS3REPLICATIONCONFIG, + ECREPLICATIONCONFIG + ); - /** - * When bucket replication is null and it should respect user passed value. 
- */ - @Test - public void testResolveClientSideRepConfigWhenBucketHasNull() - throws OS3Exception { - ReplicationConfig replicationConfig = S3Utils - .resolveS3ClientSideReplicationConfig(S3StorageType.STANDARD.name(), - null, null); - // Passed replication is 3 - Ozone mapped replication is ratis THREE - assertEquals(ratis3ReplicationConfig, replicationConfig); - } + private static final List S3STORAGETYPES = Arrays.asList( + null, + "", + S3StorageType.STANDARD.name(), + S3StorageType.REDUCED_REDUNDANCY.name(), + S3StorageType.STANDARD_IA.name() + ); - /** - * When bucket replication is null and it should return null if user passed - * value is invalid. - */ - @Test - public void testResolveClientSideRepConfigWhenUserPassedReplicationIsEmpty() - throws OS3Exception { - ReplicationConfig replicationConfig = - S3Utils.resolveS3ClientSideReplicationConfig("", null, null); - // client configured value also null. - // This API caller should leave the decision to server. - assertNull(replicationConfig); - } + private static final List S3STORAGECONFIG = Arrays.asList( + null, + "", + "rs-6-3-1024k" + ); - /** - * When bucket default is non-EC and client side values are not valid, we - * would just return null, so servers can make decision in this case. - */ - @Test - public void testResolveRepConfWhenUserPassedIsInvalidButBucketDefaultNonEC() - throws OS3Exception { - ReplicationConfig replicationConfig = S3Utils - .resolveS3ClientSideReplicationConfig(null, null, - ratis3ReplicationConfig); - // Configured client config also null. - assertNull(replicationConfig); + public static List validS3ReplicationConfigs() { + List args = new ArrayList<>(); + for (String s3StorageType : S3STORAGETYPES) { + for (String s3StorageConfig : S3STORAGECONFIG) { + for (ReplicationConfig clientReplConfig : REPLICATIONS) { + for (ReplicationConfig bucketReplConfig: REPLICATIONS) { + args.add(Arguments.of(s3StorageType, s3StorageConfig, clientReplConfig, bucketReplConfig)); + } + } + } + } + return args; } - /** - * When bucket default is non-EC and client side value is valid, we - * would should return client side valid value. - */ - @Test - public void testResolveRepConfWhenUserPassedIsValidButBucketDefaultNonEC() + @ParameterizedTest + @MethodSource("validS3ReplicationConfigs") + public void testValidResolveS3ClientSideReplicationConfig(String s3StorageType, String s3StorageConfig, + ReplicationConfig clientConfiguredReplConfig, ReplicationConfig bucketReplConfig) throws OS3Exception { ReplicationConfig replicationConfig = S3Utils - .resolveS3ClientSideReplicationConfig( - S3StorageType.REDUCED_REDUNDANCY.name(), null, - ratis3ReplicationConfig); - // Passed value is replication one - Ozone mapped value is ratis ONE - assertEquals(ratis1ReplicationConfig, replicationConfig); - } + .resolveS3ClientSideReplicationConfig(s3StorageType, s3StorageConfig, + clientConfiguredReplConfig, bucketReplConfig); - /** - * When bucket default is EC and client side value also valid, we would just - * return bucket default EC. 
- */ - @Test - public void testResolveRepConfWhenUserPassedIsValidButBucketDefaultEC() - throws OS3Exception { - ReplicationConfig replicationConfig = S3Utils - .resolveS3ClientSideReplicationConfig(S3StorageType.STANDARD.name(), - ratis3ReplicationConfig, ecReplicationConfig); - // Bucket default is EC - assertEquals(ecReplicationConfig, replicationConfig); - } + final ReplicationConfig expectedReplConfig; + if (!StringUtils.isEmpty(s3StorageType)) { + if (S3StorageType.STANDARD_IA.name().equals(s3StorageType)) { + if (!StringUtils.isEmpty(s3StorageConfig)) { + expectedReplConfig = new ECReplicationConfig(s3StorageConfig); + } else { + expectedReplConfig = ECREPLICATIONCONFIG; + } + } else if (S3StorageType.STANDARD.name().equals(s3StorageType)) { + expectedReplConfig = RATIS3REPLICATIONCONFIG; + } else { + expectedReplConfig = RATIS1REPLICATIONCONFIG; + } + } else if (clientConfiguredReplConfig != null) { + expectedReplConfig = clientConfiguredReplConfig; + } else if (bucketReplConfig != null) { + expectedReplConfig = bucketReplConfig; + } else { + expectedReplConfig = null; + } - /** - * When bucket default is non-EC and client side passed value also not valid - * but configured value is valid, we would just return configured value. - */ - @Test - public void testResolveRepConfWhenUserPassedIsInvalidAndBucketDefaultNonEC() - throws OS3Exception { - ReplicationConfig replicationConfig = S3Utils - .resolveS3ClientSideReplicationConfig(null, ratis3ReplicationConfig, - ratis1ReplicationConfig); - // Configured value is ratis THREE - assertEquals(ratis3ReplicationConfig, replicationConfig); + if (expectedReplConfig == null) { + assertNull(replicationConfig); + } else { + assertEquals(expectedReplConfig, replicationConfig); + } } /** @@ -141,7 +126,10 @@ public void testResolveRepConfWhenUserPassedIsInvalidAndBucketDefaultNonEC() public void testResolveRepConfWhenUserPassedIsInvalid() throws OS3Exception { assertThrows(OS3Exception.class, () -> S3Utils. resolveS3ClientSideReplicationConfig( - "INVALID", ratis3ReplicationConfig, ratis1ReplicationConfig)); + "INVALID", null, RATIS3REPLICATIONCONFIG, RATIS1REPLICATIONCONFIG)); + assertThrows(OS3Exception.class, () -> S3Utils. + resolveS3ClientSideReplicationConfig(S3StorageType.STANDARD_IA.name(), + "INVALID", RATIS3REPLICATIONCONFIG, RATIS1REPLICATIONCONFIG)); } } From 718bea2f3211c11325787d0e8d38e74176dae775 Mon Sep 17 00:00:00 2001 From: saketa Date: Tue, 13 May 2025 10:42:07 -0700 Subject: [PATCH 2/8] HDDS-10979. Added acceptance test. 
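
For reviewers, a minimal sketch (not part of the patch) of the resolution
order this series introduces; clientConfigured and bucketDefault are
stand-in ReplicationConfig values, and the method names are those added in
PATCH 1:

    // STANDARD_IA plus an x-amz-meta-storage-config value maps straight to
    // the requested EC scheme, ignoring client and bucket defaults.
    ReplicationConfig ec = S3Utils.resolveS3ClientSideReplicationConfig(
        S3StorageType.STANDARD_IA.name(), "rs-6-3-1024k",
        clientConfigured, bucketDefault);
    // ec equals new ECReplicationConfig("rs-6-3-1024k")

    // With no storage-class header, the client configured replication config
    // wins; if that is null, the bucket default is used; if both are null,
    // null is returned and the server decides.
    ReplicationConfig fallback = S3Utils.resolveS3ClientSideReplicationConfig(
        null, null, null, bucketDefault);
    // fallback == bucketDefault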
--- .../dist/src/main/compose/common/ec-test.sh | 4 +- .../main/compose/ozone-ha/test-haproxy-s3g.sh | 4 +- .../dist/src/main/compose/ozone-ha/test.sh | 4 +- .../ozonesecure-ha/test-haproxy-s3g.sh | 4 +- .../src/main/compose/ozonesecure-ha/test.sh | 4 +- .../main/compose/ozonesecure/test-vault.sh | 2 +- .../dist/src/main/compose/ozonesecure/test.sh | 4 +- .../main/smoketest/s3/awss3ecstorage.robot | 97 +++++++++++++++++++ .../ozone/s3/endpoint/ObjectEndpoint.java | 5 +- .../ozone/s3/exception/S3ErrorTable.java | 4 +- .../hadoop/ozone/s3/util/S3StorageType.java | 2 +- .../apache/hadoop/ozone/s3/util/S3Utils.java | 14 +-- .../hadoop/ozone/s3/util/TestS3Utils.java | 27 ++++-- 13 files changed, 142 insertions(+), 33 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot diff --git a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh index 04df2b2787d3..3ef1a1aa9ea4 100755 --- a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh @@ -18,7 +18,7 @@ start_docker_env 5 ## Exclude virtual-host tests. This is tested separately as it requires additional config. -execute_robot_test scm -v BUCKET:erasure --exclude virtual-host s3 +execute_robot_test scm -v BUCKET:erasure --exclude virtual-host --exclude ec-storage-class s3 execute_robot_test scm ec/rewrite.robot @@ -30,3 +30,5 @@ docker-compose up -d --no-recreate --scale datanode=3 execute_robot_test scm -v PREFIX:${prefix} -N read-3-datanodes ec/read.robot docker-compose up -d --no-recreate --scale datanode=5 execute_robot_test scm -v container:1 -v count:5 -N EC-recovery replication/wait.robot +docker-compose up -d --no-recreate --scale datanode=9 +execute_robot_test scm s3/awss3ecstorage.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh index af67a7099dde..2bb91413f8fc 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh @@ -31,10 +31,10 @@ source "$COMPOSE_DIR/../testlib.sh" start_docker_env ## Exclude virtual-host tests. This is tested separately as it requires additional config. -exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude ec-storage-class" for bucket in generated; do execute_robot_test ${SCM} -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once ## Exclude awss3virtualhost.robot - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude ec-storage-class --exclude no-bucket-type" done diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh index 6c09e7b76158..fd4d389931a8 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh @@ -38,12 +38,12 @@ execute_robot_test ${SCM} basic/links.robot execute_robot_test ${SCM} -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-ofs-link ozonefs/ozonefs.robot ## Exclude virtual-host tests. This is tested separately as it requires additional config. 
-exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude ec-storage-class" for bucket in generated; do for layout in OBJECT_STORE LEGACY FILE_SYSTEM_OPTIMIZED; do execute_robot_test ${SCM} -v BUCKET:${bucket} -v BUCKET_LAYOUT:${layout} -N s3-${layout}-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude no-bucket-type --exclude ec-storage-class" done done diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh index a2b11418a88c..5cde07f1e0d5 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh @@ -36,10 +36,10 @@ start_docker_env execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME} ## Exclude virtual-host tests. This is tested separately as it requires additional config. -exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude ec-storage-class" for bucket in encrypted; do execute_robot_test recon -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once ## Exclude virtual-host.robot - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude ec-storage-class --exclude no-bucket-type" done diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh index 1c134350c784..0244991e05d7 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh @@ -44,10 +44,10 @@ execute_robot_test s3g -v SCHEME:o3fs -v BUCKET_TYPE:link -N ozonefs-o3fs-link o execute_robot_test s3g basic/links.robot ## Exclude virtual-host tests. This is tested separately as it requires additional config. -exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude ec-storage-class" for bucket in link; do execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once ## Exclude virtual-host.robot - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude ec-storage-class --exclude no-bucket-type" done diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh index 0d1fa16a927f..00efbae0dbce 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh @@ -31,4 +31,4 @@ export COMPOSE_FILE=docker-compose.yaml:vault.yaml start_docker_env ## Exclude virtual-host tests. This is tested separately as it requires additional config. 
-execute_robot_test scm --exclude virtual-host s3 +execute_robot_test scm --exclude virtual-host --exclude ec-storage-class s3 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh index 426537955224..04128fd46d13 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh @@ -43,12 +43,12 @@ execute_robot_test scm repair/bucket-encryption.robot execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:bucket -N ozonefs-ofs-bucket ozonefs/ozonefs.robot ## Exclude virtual-host tests. This is tested separately as it requires additional config. -exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude ec-storage-class" for bucket in encrypted; do execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once ## Exclude virtual-host.robot - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude ec-storage-class --exclude no-bucket-type" done #expects 4 pipelines, should be run before diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot new file mode 100644 index 000000000000..ee6585467bf1 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot @@ -0,0 +1,97 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation S3 gateway test with aws cli +Library OperatingSystem +Library String +Library DateTime +Resource ../commonlib.robot +Resource commonawslib.robot +Resource mpu_lib.robot +Test Timeout 5 minutes +Suite Setup Setup Multipart Tests +Suite Teardown Teardown Multipart Tests +Test Setup Generate random prefix +Documentation S3 gateway test with aws cli with STANDARD_IA storage class +Library OperatingSystem +Library String +Library DateTime +Resource ../commonlib.robot +Resource commonawslib.robot +Resource mpu_lib.robot +Test Timeout 5 minutes +Suite Setup Setup EC Multipart Tests +Suite Teardown Teardown EC Multipart Tests +Test Setup Generate random prefix +Default Tags ec-storage-class + +*** Keywords *** +Setup EC Multipart Tests + Setup s3 tests + Create Random File KB 1023 /tmp/1mb + +Teardown EC Multipart Tests + Remove Files /tmp/1mb + +*** Variables *** +${ENDPOINT_URL} http://s3g:9878 +${BUCKET} generated + +*** Test Cases *** + +Put Object with STANDARD_IA storage class + ${file_checksum} = Execute md5sum /tmp/1mb | awk '{print $1}' + + ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey32 --body /tmp/1mb --storage-class STANDARD_IA + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + + ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey63 --body /tmp/1mb --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k" + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + +Test multipart upload with STANDARD_IA storage + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/ecmultipartKey32 --storage-class STANDARD_IA + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/ecmultipartKey32 ${uploadID} 1 /tmp/1mb + ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey32 --upload-id ${uploadID} + ${part1} = Execute and checkrc echo '${result}' | jq -r '.Parts[0].ETag' 0 + Should Be equal ${part1} ${eTag1} + Should contain ${result} STANDARD_IA + ${result} = Abort MPU ${BUCKET} ${PREFIX}/ecmultipartKey32 ${uploadID} 0 + + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/ecmultipartKey63 --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k" + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/ecmultipartKey63 ${uploadID} 1 /tmp/part1 + ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey63 --upload-id ${uploadID} + ${part1} = Execute and checkrc echo '${result}' | jq -r '.Parts[0].ETag' 0 + Should Be equal ${part1} ${eTag1} + Should contain ${result} STANDARD_IA + ${result} = Abort MPU ${BUCKET} ${PREFIX}/ecmultipartKey63 ${uploadID} 0 + +Copy Object change storage class to STANDARD_IA + ${file_checksum} = Execute md5sum /tmp/1mb | awk '{print $1}' + ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --body /tmp/1mb + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + + ${result} = Execute AWSS3APICli copy-object --storage-class STANDARD_IA --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --copy-source ${BUCKET}/${PREFIX}/copyobject/Key1 + Should contain ${result} ETag + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + + ${result} = Execute AWSS3APICli copy-object --storage-class STANDARD_IA 
--metadata="storage-config=rs-6-3-1024k" --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --copy-source ${BUCKET}/${PREFIX}/copyobject/Key1 + Should contain ${result} ETag + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 5373d114f54e..70ef9451f7f7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -29,6 +29,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT; import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_KEY; @@ -858,11 +859,11 @@ private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, String storageType, String storageConfig) throws OS3Exception { ReplicationConfig clientConfiguredReplicationConfig = null; - ReplicationType replicationType = ReplicationType.valueOf( - ozoneConfiguration.get(OZONE_REPLICATION_TYPE)); String replication = ozoneConfiguration.get(OZONE_REPLICATION); if (replication != null) { + ReplicationType replicationType = ReplicationType.valueOf( + ozoneConfiguration.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT)); clientConfiguredReplicationConfig = (replicationType == ReplicationType.EC) ? new ECReplicationConfig(replication) : ReplicationConfig.parse( diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java index 5160c19dbb42..200d9e8acb74 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java @@ -152,8 +152,8 @@ public final class S3ErrorTable { public static final OS3Exception INVALID_STORAGE_CLASS = new OS3Exception( "InvalidStorageClass", "The storage class that you specified is not valid. 
" + - "Check that storage class is supported and if using STANDARD_IA check that " + - "storage config is a valid EC replication string.", + "Provide a supported storage class[STANDARD|REDUCED_REDUNDANCY|STANDARD_IA] or " + + "a valid custom EC storage config for if using STANDARD_IA.", HTTP_BAD_REQUEST); private static Function generateInternalError = diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java index 71f8330e799c..9e584d823633 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java @@ -59,7 +59,7 @@ public String getEcReplicationString() { } public static S3StorageType fromReplicationConfig(ReplicationConfig config) { - if (config instanceof ECReplicationConfig) { + if (config.getReplicationType() == HddsProtos.ReplicationType.EC) { return STANDARD_IA; } if (config.getReplicationType() == HddsProtos.ReplicationType.STAND_ALONE || diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java index 29547d4c96e7..aaeb860ff898 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java @@ -88,20 +88,20 @@ public static ReplicationConfig resolveS3ClientSideReplicationConfig( clientConfiguredReplConfig : bucketReplConfig; } - public static ReplicationConfig toReplicationConfig(String storageType, String storageConfig) + public static ReplicationConfig toReplicationConfig(String s3StorageType, String s3StorageConfig) throws OS3Exception { try { - if (S3StorageType.STANDARD_IA.name().equals(storageType)) { - return (!StringUtils.isEmpty(storageConfig)) ? new ECReplicationConfig(storageConfig) : + if (S3StorageType.STANDARD_IA.name().equals(s3StorageType)) { + return (!StringUtils.isEmpty(s3StorageConfig)) ? 
new ECReplicationConfig(s3StorageConfig) :
+            new ECReplicationConfig(S3StorageType.STANDARD_IA.getEcReplicationString());
+      } else {
-        S3StorageType s3StorageType = S3StorageType.valueOf(storageType);
+        S3StorageType storageType = S3StorageType.valueOf(s3StorageType);
         return ReplicationConfig.fromProtoTypeAndFactor(
-            ReplicationType.toProto(s3StorageType.getType()),
-            ReplicationFactor.toProto(s3StorageType.getFactor()));
+            ReplicationType.toProto(storageType.getType()),
+            ReplicationFactor.toProto(storageType.getFactor()));
       }
     } catch (IllegalArgumentException ex) {
-      throw newError(INVALID_STORAGE_CLASS, storageType, ex);
+      throw newError(INVALID_STORAGE_CLASS, s3StorageType, ex);
     }
   }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
index fa9a09301123..33f1ca27a113 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;
@@ -118,18 +119,26 @@ public void testValidResolveS3ClientSideReplicationConfig(String s3StorageType,
     }
   }
+  public static List invalidS3ReplicationConfigs() {
+    List args = new ArrayList<>();
+    args.add(Arguments.of("GLACIER", null, RATIS3REPLICATIONCONFIG, RATIS1REPLICATIONCONFIG));
+    args.add(Arguments.of(S3StorageType.STANDARD_IA.name(), "INVALID",
+        RATIS3REPLICATIONCONFIG, RATIS1REPLICATIONCONFIG));
+    return args;
+  }
   /**
-   * When bucket default is non-EC and client side passed value also not valid
-   * but configured value is valid, we would just return configured value.
+   * When the client side passed value is not valid,
+   * an OS3Exception is thrown.
    */
-  @Test
-  public void testResolveRepConfWhenUserPassedIsInvalid() throws OS3Exception {
-    assertThrows(OS3Exception.class, () -> S3Utils.
+  @ParameterizedTest
+  @MethodSource("invalidS3ReplicationConfigs")
+  public void testResolveRepConfWhenUserPassedIsInvalid(String s3StorageType, String s3StorageConfig,
+      ReplicationConfig clientConfiguredReplConfig, ReplicationConfig bucketReplConfig)
+      throws OS3Exception {
+    OS3Exception exception = assertThrows(OS3Exception.class, () -> S3Utils.
         resolveS3ClientSideReplicationConfig(
-            "INVALID", null, RATIS3REPLICATIONCONFIG, RATIS1REPLICATIONCONFIG));
-    assertThrows(OS3Exception.class, () -> S3Utils.
-        resolveS3ClientSideReplicationConfig(S3StorageType.STANDARD_IA.name(),
-            "INVALID", RATIS3REPLICATIONCONFIG, RATIS1REPLICATIONCONFIG));
+            s3StorageType, s3StorageConfig, clientConfiguredReplConfig, bucketReplConfig));
+    assertEquals(S3ErrorTable.INVALID_STORAGE_CLASS.getCode(), exception.getCode());
   }
 }

From b0f7a6ed9629829fd2810f730a6141c8e469cfa3 Mon Sep 17 00:00:00 2001
From: saketa
Date: Tue, 13 May 2025 17:00:37 -0700
Subject: [PATCH 3/8] HDDS-10979. Fixed test and checkstyle.
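
Both invalid inputs covered by the parameterized test are expected to surface
as the new InvalidStorageClass error. A short sketch (not part of the patch;
the input values are those from invalidS3ReplicationConfigs()):

    // An unsupported storage class name ("GLACIER") fails the S3StorageType
    // lookup; a malformed EC string ("INVALID") fails ECReplicationConfig
    // parsing. Both are rethrown as INVALID_STORAGE_CLASS.
    OS3Exception e1 = assertThrows(OS3Exception.class, () ->
        S3Utils.resolveS3ClientSideReplicationConfig(
            "GLACIER", null, null, null));
    OS3Exception e2 = assertThrows(OS3Exception.class, () ->
        S3Utils.resolveS3ClientSideReplicationConfig(
            S3StorageType.STANDARD_IA.name(), "INVALID", null, null));
    // e1.getCode() and e2.getCode() both equal "InvalidStorageClass"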
--- .../src/main/smoketest/s3/awss3ecstorage.robot   | 16 ++--------------
 .../apache/hadoop/ozone/s3/util/TestS3Utils.java  |  2 +-
 2 files changed, 3 insertions(+), 15 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot
index ee6585467bf1..48fed4a3663d 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot
@@ -14,21 +14,9 @@
 # limitations under the License.
 
 *** Settings ***
-Documentation S3 gateway test with aws cli
-Library OperatingSystem
-Library String
-Library DateTime
-Resource ../commonlib.robot
-Resource commonawslib.robot
-Resource mpu_lib.robot
-Test Timeout 5 minutes
-Suite Setup Setup Multipart Tests
-Suite Teardown Teardown Multipart Tests
-Test Setup Generate random prefix
 Documentation S3 gateway test with aws cli with STANDARD_IA storage class
 Library OperatingSystem
 Library String
-Library DateTime
 Resource ../commonlib.robot
 Resource commonawslib.robot
 Resource mpu_lib.robot
@@ -64,7 +52,7 @@ Put Object with STANDARD_IA storage class
     Should Be Equal ${eTag} \"${file_checksum}\"
 
 Test multipart upload with STANDARD_IA storage
-    ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/ecmultipartKey32 --storage-class STANDARD_IA
+    ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/ecmultipartKey32 0 --storage-class STANDARD_IA
     ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/ecmultipartKey32 ${uploadID} 1 /tmp/1mb
     ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey32 --upload-id ${uploadID}
     ${part1} = Execute and checkrc echo '${result}' | jq -r '.Parts[0].ETag' 0
@@ -72,7 +60,7 @@ Test multipart upload with STANDARD_IA storage
     Should contain ${result} STANDARD_IA
     ${result} = Abort MPU ${BUCKET} ${PREFIX}/ecmultipartKey32 ${uploadID} 0
 
-    ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/ecmultipartKey63 --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
+    ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/ecmultipartKey63 0 --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
     ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/ecmultipartKey63 ${uploadID} 1 /tmp/part1
     ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey63 --upload-id ${uploadID}
     ${part1} = Execute and checkrc echo '${result}' | jq -r '.Parts[0].ETag' 0
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
index 33f1ca27a113..75ef7c2820a3 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
@@ -31,7 +31,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;
@@ -126,6 +125,7 @@ public static List invalidS3ReplicationConfigs() {
         RATIS3REPLICATIONCONFIG, RATIS1REPLICATIONCONFIG));
     return args;
   }
+
   /**
    * When the client side passed value is not valid,
    * an OS3Exception is thrown.
    */
From 57edbeb8a286e0a84ac2eb4b93d298cd050f0dc0 Mon Sep 17 00:00:00 2001 From: saketa Date: Wed, 14 May 2025 10:15:56 -0700 Subject: [PATCH 4/8] HDDS-10979. Fixed error message in integration test. --- .../java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index 4f22bd418447..5f1fd24dbe33 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -676,7 +676,7 @@ void testInvalidStorageType() { OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body)); - assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), + assertEquals(S3ErrorTable.INVALID_STORAGE_CLASS.getErrorMessage(), e.getErrorMessage()); assertEquals("random", e.getResource()); } From f9187da95ad045ab5770c36bd11dbc310e552886 Mon Sep 17 00:00:00 2001 From: saketa Date: Mon, 19 May 2025 17:07:54 -0700 Subject: [PATCH 5/8] HDDS-10979. Optimized S3StorageType. Handled bucket HEAD op. Addressed comments. --- .../dist/src/main/compose/common/ec-test.sh | 4 +-- .../main/compose/ozone-ha/test-haproxy-s3g.sh | 4 +-- .../dist/src/main/compose/ozone-ha/test.sh | 4 +-- .../ozonesecure-ha/test-haproxy-s3g.sh | 4 +-- .../src/main/compose/ozonesecure-ha/test.sh | 4 +-- .../main/compose/ozonesecure/test-vault.sh | 2 +- .../dist/src/main/compose/ozonesecure/test.sh | 4 +-- .../smoketest/{s3 => ec}/awss3ecstorage.robot | 15 +++++--- .../dist/src/main/smoketest/s3/awss3.robot | 4 +++ .../ozone/s3/endpoint/BucketEndpoint.java | 9 ++--- .../hadoop/ozone/s3/util/S3StorageType.java | 35 ++++++------------- .../apache/hadoop/ozone/s3/util/S3Utils.java | 15 +++----- .../hadoop/ozone/s3/util/TestS3Utils.java | 8 ++--- 13 files changed, 48 insertions(+), 64 deletions(-) rename hadoop-ozone/dist/src/main/smoketest/{s3 => ec}/awss3ecstorage.robot (83%) diff --git a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh index 3ef1a1aa9ea4..556590a14a29 100755 --- a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh @@ -18,7 +18,7 @@ start_docker_env 5 ## Exclude virtual-host tests. This is tested separately as it requires additional config. 
-execute_robot_test scm -v BUCKET:erasure --exclude virtual-host --exclude ec-storage-class s3
+execute_robot_test scm -v BUCKET:erasure --exclude virtual-host s3
 execute_robot_test scm ec/rewrite.robot
@@ -31,4 +31,4 @@ execute_robot_test scm -v PREFIX:${prefix} -N read-3-datanodes ec/read.robot
 docker-compose up -d --no-recreate --scale datanode=5
 execute_robot_test scm -v container:1 -v count:5 -N EC-recovery replication/wait.robot
 docker-compose up -d --no-recreate --scale datanode=9
-execute_robot_test scm s3/awss3ecstorage.robot
+execute_robot_test scm -N S3-EC-Storage ec/awss3ecstorage.robot
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh
index 2bb91413f8fc..af67a7099dde 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh
@@ -31,10 +31,10 @@ source "$COMPOSE_DIR/../testlib.sh"
 start_docker_env
 ## Exclude virtual-host tests. This is tested separately as it requires additional config.
-exclude="--exclude virtual-host --exclude ec-storage-class"
+exclude="--exclude virtual-host"
 for bucket in generated; do
   execute_robot_test ${SCM} -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3
   # some tests are independent of the bucket type, only need to be run once
   ## Exclude awss3virtualhost.robot
-  exclude="--exclude virtual-host --exclude ec-storage-class --exclude no-bucket-type"
+  exclude="--exclude virtual-host --exclude no-bucket-type"
 done
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
index fd4d389931a8..6c09e7b76158 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
@@ -38,12 +38,12 @@ execute_robot_test ${SCM} basic/links.robot
 execute_robot_test ${SCM} -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-ofs-link ozonefs/ozonefs.robot
 ## Exclude virtual-host tests. This is tested separately as it requires additional config.
-exclude="--exclude virtual-host --exclude ec-storage-class"
+exclude="--exclude virtual-host"
 for bucket in generated; do
   for layout in OBJECT_STORE LEGACY FILE_SYSTEM_OPTIMIZED; do
     execute_robot_test ${SCM} -v BUCKET:${bucket} -v BUCKET_LAYOUT:${layout} -N s3-${layout}-${bucket} ${exclude} s3
     # some tests are independent of the bucket type, only need to be run once
-    exclude="--exclude virtual-host --exclude no-bucket-type --exclude ec-storage-class"
+    exclude="--exclude virtual-host --exclude no-bucket-type"
   done
 done
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh
index 5cde07f1e0d5..a2b11418a88c 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh
@@ -36,10 +36,10 @@ start_docker_env
 execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME}
 ## Exclude virtual-host tests. This is tested separately as it requires additional config.
-exclude="--exclude virtual-host --exclude ec-storage-class"
+exclude="--exclude virtual-host"
 for bucket in encrypted; do
   execute_robot_test recon -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3
   # some tests are independent of the bucket type, only need to be run once
   ## Exclude virtual-host.robot
-  exclude="--exclude virtual-host --exclude ec-storage-class --exclude no-bucket-type"
+  exclude="--exclude virtual-host --exclude no-bucket-type"
 done
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
index 0244991e05d7..1c134350c784 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
@@ -44,10 +44,10 @@ execute_robot_test s3g -v SCHEME:o3fs -v BUCKET_TYPE:link -N ozonefs-o3fs-link ozonefs/ozonefs.robot
 execute_robot_test s3g basic/links.robot
 ## Exclude virtual-host tests. This is tested separately as it requires additional config.
-exclude="--exclude virtual-host --exclude ec-storage-class"
+exclude="--exclude virtual-host"
 for bucket in link; do
   execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3
   # some tests are independent of the bucket type, only need to be run once
   ## Exclude virtual-host.robot
-  exclude="--exclude virtual-host --exclude ec-storage-class --exclude no-bucket-type"
+  exclude="--exclude virtual-host --exclude no-bucket-type"
 done
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh
index 00efbae0dbce..0d1fa16a927f 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh
@@ -31,4 +31,4 @@ export COMPOSE_FILE=docker-compose.yaml:vault.yaml
 start_docker_env
 ## Exclude virtual-host tests. This is tested separately as it requires additional config.
-execute_robot_test scm --exclude virtual-host --exclude ec-storage-class s3
+execute_robot_test scm --exclude virtual-host s3
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index 04128fd46d13..426537955224 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -43,12 +43,12 @@ execute_robot_test scm repair/bucket-encryption.robot
 execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:bucket -N ozonefs-ofs-bucket ozonefs/ozonefs.robot
 ## Exclude virtual-host tests. This is tested separately as it requires additional config.
-exclude="--exclude virtual-host --exclude ec-storage-class"
+exclude="--exclude virtual-host"
 for bucket in encrypted; do
   execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3
   # some tests are independent of the bucket type, only need to be run once
   ## Exclude virtual-host.robot
-  exclude="--exclude virtual-host --exclude ec-storage-class --exclude no-bucket-type"
+  exclude="--exclude virtual-host --exclude no-bucket-type"
 done

 #expects 4 pipelines, should be run before
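[Reviewer note] The compose scripts above drop the ec-storage-class exclusion because the suite moves under smoketest/ec (rename below) and now only runs against EC-enabled clusters. For context while reading the robot assertions that follow, here is a minimal Java sketch of the storage-class-to-replication mapping the suite exercises; it mirrors the S3Utils#toReplicationConfig change later in this patch and is not the patch itself.

// Sketch only, assuming the S3StorageType/ECReplicationConfig APIs introduced in this series.
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.ozone.s3.util.S3StorageType;

final class StorageClassMappingSketch {
  // STANDARD -> Ratis/THREE, REDUCED_REDUNDANCY -> Ratis/ONE,
  // STANDARD_IA -> EC rs-3-2-1024k unless the "storage-config" metadata overrides it.
  static ReplicationConfig resolve(String storageClass, String storageConfig) {
    S3StorageType type = S3StorageType.valueOf(storageClass); // throws on unknown class
    if (type == S3StorageType.STANDARD_IA && !StringUtils.isEmpty(storageConfig)) {
      return new ECReplicationConfig(storageConfig); // e.g. "rs-6-3-1024k"
    }
    return type.getReplicationConfig();
  }
}

The suite's Verify Key EC Replication Config keyword then asserts the resulting key layout (codec, data, parity, chunk size: RS 3 2 1048576 or RS 6 3 1048576) through the Ozone shell.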
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot b/hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot
similarity index 83%
rename from hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot
rename to hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot
index 48fed4a3663d..2893048a6ea8 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/awss3ecstorage.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot
@@ -18,13 +18,13 @@ Documentation       S3 gateway test with aws cli with STANDARD_IA storage class
 Library             OperatingSystem
 Library             String
 Resource            ../commonlib.robot
-Resource            commonawslib.robot
-Resource            mpu_lib.robot
+Resource            ../s3/commonawslib.robot
+Resource            ../s3/mpu_lib.robot
+Resource            ../ozone-lib/shell.robot
 Test Timeout        5 minutes
 Suite Setup         Setup EC Multipart Tests
 Suite Teardown      Teardown EC Multipart Tests
 Test Setup          Generate random prefix
-Default Tags        ec-storage-class

 *** Keywords ***
 Setup EC Multipart Tests
@@ -46,10 +46,12 @@ Put Object with STANDARD_IA storage class
     ${result} =    Execute AWSS3ApiCli    put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey32 --body /tmp/1mb --storage-class STANDARD_IA
     ${eTag} =    Execute and checkrc    echo '${result}' | jq -r '.ETag'    0
     Should Be Equal    ${eTag}    \"${file_checksum}\"
+    Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecKey32    RS    3    2    1048576

     ${result} =    Execute AWSS3ApiCli    put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey63 --body /tmp/1mb --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
     ${eTag} =    Execute and checkrc    echo '${result}' | jq -r '.ETag'    0
     Should Be Equal    ${eTag}    \"${file_checksum}\"
+    Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecKey63    RS    6    3    1048576

 Test multipart upload with STANDARD_IA storage
     ${uploadID} =    Initiate MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey32    0    --storage-class STANDARD_IA
@@ -58,7 +60,8 @@ Test multipart upload with STANDARD_IA storage
     ${eTag1} =    Upload MPU part    ${BUCKET}    ${PREFIX}/ecmultipartKey32    ${uploadID}    1    /tmp/1mb
     ${result} =    Execute AWSS3APICli    list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey32 --upload-id ${uploadID}
     ${part1} =    Execute and checkrc    echo '${result}' | jq -r '.Parts[0].ETag'    0
     Should Be equal    ${part1}    ${eTag1}
     Should contain    ${result}    STANDARD_IA
-    ${result} =    Abort MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey32    ${uploadID}    0
+    Complete MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey32    ${uploadID}    {ETag=${eTag1},PartNumber=1}
+    Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecmultipartKey32    RS    3    2    1048576

     ${uploadID} =    Initiate MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey63    0    --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
     ${eTag1} =    Upload MPU part    ${BUCKET}    ${PREFIX}/ecmultipartKey63    ${uploadID}    1    /tmp/part1
@@ -66,7 +69,8 @@ Test multipart upload with STANDARD_IA storage
     ${result} =    Execute AWSS3APICli    list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey63 --upload-id ${uploadID}
     ${part1} =    Execute and checkrc    echo '${result}' | jq -r '.Parts[0].ETag'    0
     Should Be equal    ${part1}    ${eTag1}
     Should contain    ${result}    STANDARD_IA
-    ${result} =    Abort MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey63    ${uploadID}    0
+    Complete MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey63    ${uploadID}    {ETag=${eTag1},PartNumber=1}
+    Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecmultipartKey63    RS    6    3    1048576

 Copy Object change storage class to STANDARD_IA
     ${file_checksum} =    Execute    md5sum /tmp/1mb | awk '{print $1}'
@@ -83,3 +87,4 @@ Copy Object change storage class to STANDARD_IA
     Should contain    ${result}    ETag
     ${eTag} =    Execute and checkrc    echo '${result}' | jq -r '.CopyObjectResult.ETag'    0
     Should Be Equal    ${eTag}    \"${file_checksum}\"
+    ## TODO: Verify Key EC Replication Config when we support changing storage class
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
index be2e24b6e4c9..b7613722a5ca 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
@@ -46,6 +46,10 @@ File upload and directory list
     Should not contain    ${result}    testfile
     Should not contain    ${result}    dir1
     Should contain    ${result}    file
+    # Verify the S3 storage class, depending on whether the file is replicated or erasure coded.
+    ${result} =    Execute AWSS3CliDebug    ls s3://${BUCKET}/dir1/dir2/file
+    Run Keyword If    '${BUCKET}' == 'generated'    Should contain    ${result}    STANDARD
+    Run Keyword If    '${BUCKET}' == 'erasure'    Should contain    ${result}    STANDARD_IA

 File upload with special chars
     Execute    date > /tmp/testfile
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 118900aa53d0..172cc240aa3a 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -53,7 +53,6 @@
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.audit.S3GAction;
@@ -738,12 +737,8 @@ private void addKey(ListObjectResponse response, OzoneKey next) {
     if (eTag != null) {
       keyMetadata.setETag(ObjectEndpoint.wrapInQuotes(eTag));
     }
-    if (next.getReplicationType().toString().equals(ReplicationType
-        .STAND_ALONE.toString())) {
-      keyMetadata.setStorageClass(S3StorageType.REDUCED_REDUNDANCY.toString());
-    } else {
-      keyMetadata.setStorageClass(S3StorageType.STANDARD.toString());
-    }
+    keyMetadata.setStorageClass(S3StorageType.fromReplicationConfig(
+        next.getReplicationConfig()).toString());
     keyMetadata.setLastModified(next.getModificationTime());
     String ownerName = next.getOwner();
     String displayName = ownerName;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
index 9e584d823633..164595031a12 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
@@ -18,9 +18,8 @@
 package org.apache.hadoop.ozone.s3.util;

 import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

 /**
@@ -29,33 +28,19 @@
 public enum S3StorageType {

-  REDUCED_REDUNDANCY(ReplicationType.RATIS, ReplicationFactor.ONE, null),
-  STANDARD(ReplicationType.RATIS, ReplicationFactor.THREE, null),
-  STANDARD_IA(ReplicationType.EC, null, ECReplicationConfig.EcCodec.RS + "-3-2-1024k");
+  REDUCED_REDUNDANCY(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)),
+  STANDARD(
+      RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)),
+  STANDARD_IA(new ECReplicationConfig(ECReplicationConfig.EcCodec.RS + "-3-2-1024k"));

-  private final ReplicationType type;
-  private final ReplicationFactor factor;
-  private final String ecReplicationString;
+  private final ReplicationConfig replicationConfig;

-  S3StorageType(
-      ReplicationType type,
-      ReplicationFactor factor,
-      String ecReplicationString) {
-    this.type = type;
-    this.factor = factor;
-    this.ecReplicationString = ecReplicationString;
+  S3StorageType(ReplicationConfig replicationConfig) {
+    this.replicationConfig = replicationConfig;
   }

-  public ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public ReplicationType getType() {
-    return type;
-  }
-
-  public String getEcReplicationString() {
-    return ecReplicationString;
+  public ReplicationConfig getReplicationConfig() {
+    return replicationConfig;
   }

   public static S3StorageType fromReplicationConfig(ReplicationConfig config) {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
index aaeb860ff898..1f05f17558c8 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
@@ -35,8 +35,6 @@
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;

 /**
@@ -91,15 +89,12 @@ public static ReplicationConfig resolveS3ClientSideReplicationConfig(
   public static ReplicationConfig toReplicationConfig(String s3StorageType,
       String s3StorageConfig) throws OS3Exception {
     try {
-      if (S3StorageType.STANDARD_IA.name().equals(s3StorageType)) {
-        return (!StringUtils.isEmpty(s3StorageConfig)) ? new ECReplicationConfig(s3StorageConfig) :
-            new ECReplicationConfig(S3StorageType.STANDARD_IA.getEcReplicationString());
-      } else {
-        S3StorageType storageType = S3StorageType.valueOf(s3StorageType);
-        return ReplicationConfig.fromProtoTypeAndFactor(
-            ReplicationType.toProto(storageType.getType()),
-            ReplicationFactor.toProto(storageType.getFactor()));
+      S3StorageType storageType = S3StorageType.valueOf(s3StorageType);
+      if (S3StorageType.STANDARD_IA.equals(storageType) &&
+          !StringUtils.isEmpty(s3StorageConfig)) {
+        return new ECReplicationConfig(s3StorageConfig);
       }
+      return storageType.getReplicationConfig();
     } catch (IllegalArgumentException ex) {
       throw newError(INVALID_STORAGE_CLASS, s3StorageType, ex);
     }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
index 75ef7c2820a3..7976b3952722 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
@@ -39,8 +39,8 @@
  * Tests the S3Utils APIs.
  */
 public class TestS3Utils {
-  private static final ReplicationConfig ECREPLICATIONCONFIG =
-      new ECReplicationConfig(S3StorageType.STANDARD_IA.getEcReplicationString());
+  private static final ReplicationConfig EC32REPLICATIONCONFIG =
+      new ECReplicationConfig(ECReplicationConfig.EcCodec.RS + "-3-2-1024k");
   private static final ReplicationConfig RATIS3REPLICATIONCONFIG =
       RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
   private static final ReplicationConfig RATIS1REPLICATIONCONFIG =
@@ -50,7 +50,7 @@ public class TestS3Utils {
       null,
       RATIS1REPLICATIONCONFIG,
       RATIS3REPLICATIONCONFIG,
-      ECREPLICATIONCONFIG
+      EC32REPLICATIONCONFIG
   );
   private static final List S3STORAGETYPES = Arrays.asList(
@@ -96,7 +96,7 @@ public void testValidResolveS3ClientSideReplicationConfig(String s3StorageType,
       if (!StringUtils.isEmpty(s3StorageConfig)) {
         expectedReplConfig = new ECReplicationConfig(s3StorageConfig);
       } else {
-        expectedReplConfig = ECREPLICATIONCONFIG;
+        expectedReplConfig = EC32REPLICATIONCONFIG;
       }
     } else if (S3StorageType.STANDARD.name().equals(s3StorageType)) {
       expectedReplConfig = RATIS3REPLICATIONCONFIG;

From b273851aaad53d8bcd9fc687a3707aa96f161e84 Mon Sep 17 00:00:00 2001
From: saketa
Date: Mon, 19 May 2025 17:13:41 -0700
Subject: [PATCH 6/8] HDDS-10979. Changed 'Execute and checkrc' to 'Execute' in acceptance test.

---
 .../src/main/smoketest/ec/awss3ecstorage.robot | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot b/hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot
index 2893048a6ea8..27ddffb0ebfc 100644
--- a/hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot
@@ -44,12 +44,12 @@ Put Object with STANDARD_IA storage class
     ${file_checksum} =    Execute    md5sum /tmp/1mb | awk '{print $1}'

     ${result} =    Execute AWSS3ApiCli    put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey32 --body /tmp/1mb --storage-class STANDARD_IA
-    ${eTag} =    Execute and checkrc    echo '${result}' | jq -r '.ETag'    0
+    ${eTag} =    Execute    echo '${result}' | jq -r '.ETag'
     Should Be Equal    ${eTag}    \"${file_checksum}\"
     Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecKey32    RS    3    2    1048576

     ${result} =    Execute AWSS3ApiCli    put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey63 --body /tmp/1mb --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
-    ${eTag} =    Execute and checkrc    echo '${result}' | jq -r '.ETag'    0
+    ${eTag} =    Execute    echo '${result}' | jq -r '.ETag'
     Should Be Equal    ${eTag}    \"${file_checksum}\"
     Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecKey63    RS    6    3    1048576

@@ -57,7 +57,7 @@ Test multipart upload with STANDARD_IA storage
     ${uploadID} =    Initiate MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey32    0    --storage-class STANDARD_IA
     ${eTag1} =    Upload MPU part    ${BUCKET}    ${PREFIX}/ecmultipartKey32    ${uploadID}    1    /tmp/1mb
     ${result} =    Execute AWSS3APICli    list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey32 --upload-id ${uploadID}
-    ${part1} =    Execute and checkrc    echo '${result}' | jq -r '.Parts[0].ETag'    0
+    ${part1} =    Execute    echo '${result}' | jq -r '.Parts[0].ETag'
     Should Be equal    ${part1}    ${eTag1}
     Should contain    ${result}    STANDARD_IA
     Complete MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey32    ${uploadID}    {ETag=${eTag1},PartNumber=1}
@@ -66,7 +66,7 @@ Test multipart upload with STANDARD_IA storage
     ${uploadID} =    Initiate MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey63    0    --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
     ${eTag1} =    Upload MPU part    ${BUCKET}    ${PREFIX}/ecmultipartKey63    ${uploadID}    1    /tmp/part1
     ${result} =    Execute AWSS3APICli    list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey63 --upload-id ${uploadID}
-    ${part1} =    Execute and checkrc    echo '${result}' | jq -r '.Parts[0].ETag'    0
+    ${part1} =    Execute    echo '${result}' | jq -r '.Parts[0].ETag'
     Should Be equal    ${part1}    ${eTag1}
     Should contain    ${result}    STANDARD_IA
     Complete MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey63    ${uploadID}    {ETag=${eTag1},PartNumber=1}
@@ -75,16 +75,16 @@ Test multipart upload with STANDARD_IA storage

 Copy Object change storage class to STANDARD_IA
     ${file_checksum} =    Execute    md5sum /tmp/1mb | awk '{print $1}'
     ${result} =    Execute AWSS3ApiCli    put-object --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --body /tmp/1mb
-    ${eTag} =    Execute and checkrc    echo '${result}' | jq -r '.ETag'    0
+    ${eTag} =    Execute    echo '${result}' | jq -r '.ETag'
     Should Be Equal    ${eTag}    \"${file_checksum}\"

     ${result} =    Execute AWSS3APICli    copy-object --storage-class STANDARD_IA --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --copy-source ${BUCKET}/${PREFIX}/copyobject/Key1
     Should contain    ${result}    ETag
-    ${eTag} =    Execute and checkrc    echo '${result}' | jq -r '.CopyObjectResult.ETag'    0
+    ${eTag} =    Execute    echo '${result}' | jq -r '.CopyObjectResult.ETag'
     Should Be Equal    ${eTag}    \"${file_checksum}\"

     ${result} =    Execute AWSS3APICli    copy-object --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k" --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --copy-source ${BUCKET}/${PREFIX}/copyobject/Key1
     Should contain    ${result}    ETag
-    ${eTag} =    Execute and checkrc    echo '${result}' | jq -r '.CopyObjectResult.ETag'    0
+    ${eTag} =    Execute    echo '${result}' | jq -r '.CopyObjectResult.ETag'
     Should Be Equal    ${eTag}    \"${file_checksum}\"
     ## TODO: Verify Key EC Replication Config when we support changing storage class
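[Reviewer note] As commonlib.robot defines it, the Execute keyword already fails on a non-zero exit code, so the 'Execute and checkrc ... 0' calls replaced in PATCH 6 were redundant rather than wrong. The next patch also swaps the parsed "rs-3-2-1024k" default for ECReplicationConfig's (data, parity) constructor; that is only equivalent under the assumption that RS and a 1 MiB chunk are that constructor's defaults and that the class defines value equality. A hypothetical guard test pinning the assumption down:

// Sketch only; EcDefaultsSketchTest is mine, not part of the series.
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.junit.jupiter.api.Test;

class EcDefaultsSketchTest {
  @Test
  void dataParityConstructorMatchesParsedDefault() {
    // Assumption: RS codec and 1024k chunk size are the defaults of the
    // (data, parity) constructor, and ECReplicationConfig implements equals().
    assertEquals(
        new ECReplicationConfig(ECReplicationConfig.EcCodec.RS + "-3-2-1024k"),
        new ECReplicationConfig(3, 2));
  }
}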
From 5773d583b059247ce2a2d8282f833521ea3d45a7 Mon Sep 17 00:00:00 2001
From: saketa
Date: Tue, 20 May 2025 12:05:22 -0700
Subject: [PATCH 7/8] HDDS-10979. Fixed awss3 acceptance test. Addressed comments.

---
 .../dist/src/main/smoketest/s3/awss3.robot    |  8 ++++++--
 hadoop-ozone/s3gateway/pom.xml                |  4 ++++
 .../ozone/s3/endpoint/ObjectEndpoint.java     | 17 +++--------------
 .../hadoop/ozone/s3/util/S3StorageType.java   |  2 +-
 .../hadoop/ozone/s3/util/TestS3Utils.java     |  2 +-
 5 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
index b7613722a5ca..f805f6ed4dce 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
@@ -48,8 +48,12 @@ File upload and directory list
     Should contain    ${result}    file
     # Verify the S3 storage class, depending on whether the file is replicated or erasure coded.
     ${result} =    Execute AWSS3CliDebug    ls s3://${BUCKET}/dir1/dir2/file
-    Run Keyword If    '${BUCKET}' == 'generated'    Should contain    ${result}    STANDARD
-    Run Keyword If    '${BUCKET}' == 'erasure'    Should contain    ${result}    STANDARD_IA
+    IF    '${BUCKET}' == 'erasure'
+        Should contain    ${result}    STANDARD_IA
+    ELSE
+        Should contain    ${result}    STANDARD
+        Should not contain    ${result}    STANDARD_IA
+    END

 File upload with special chars
     Execute    date > /tmp/testfile
diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml
index 365010e7b702..c2d1c978f961 100644
--- a/hadoop-ozone/s3gateway/pom.xml
+++ b/hadoop-ozone/s3gateway/pom.xml
@@ -166,6 +166,10 @@
       <groupId>org.apache.ozone</groupId>
       <artifactId>ozone-common</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>ozone-filesystem-common</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
       <artifactId>ozone-interface-client</artifactId>
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 2c0ffaebd159..6bd7b77060c1 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -27,9 +27,6 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
 import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
 import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_KEY;
@@ -105,9 +102,9 @@
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.fs.ozone.OzoneClientUtils;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -863,17 +860,9 @@ public Response initializeMultipartUpload(

   private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket,
       String storageType, String storageConfig) throws OS3Exception {
-    ReplicationConfig clientConfiguredReplicationConfig = null;
-    String replication = ozoneConfiguration.get(OZONE_REPLICATION);
+    ReplicationConfig clientConfiguredReplicationConfig =
+        OzoneClientUtils.getClientConfiguredReplicationConfig(ozoneConfiguration);

-    if (replication != null) {
-      ReplicationType replicationType = ReplicationType.valueOf(
-          ozoneConfiguration.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
-      clientConfiguredReplicationConfig =
-          (replicationType == ReplicationType.EC) ?
-              new ECReplicationConfig(replication) : ReplicationConfig.parse(
-                  replicationType, replication, ozoneConfiguration);
-    }
     return S3Utils.resolveS3ClientSideReplicationConfig(storageType, storageConfig,
         clientConfiguredReplicationConfig, ozoneBucket.getReplicationConfig());
   }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
index 164595031a12..c400bade4170 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
@@ -31,7 +31,7 @@ public enum S3StorageType {
   REDUCED_REDUNDANCY(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)),
   STANDARD(
       RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)),
-  STANDARD_IA(new ECReplicationConfig(ECReplicationConfig.EcCodec.RS + "-3-2-1024k"));
+  STANDARD_IA(new ECReplicationConfig(3, 2));

   private final ReplicationConfig replicationConfig;

diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
index 7976b3952722..369c1d2278c9 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
@@ -40,7 +40,7 @@
  */
 public class TestS3Utils {
   private static final ReplicationConfig EC32REPLICATIONCONFIG =
-      new ECReplicationConfig(ECReplicationConfig.EcCodec.RS + "-3-2-1024k");
+      new ECReplicationConfig(3, 2);
   private static final ReplicationConfig RATIS3REPLICATIONCONFIG =
       RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
   private static final ReplicationConfig RATIS1REPLICATIONCONFIG =
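[Reviewer note] With PATCH 7 applied, ObjectEndpoint#getReplicationConfig only gathers the client-side config via OzoneClientUtils and leaves the final choice to S3Utils#resolveS3ClientSideReplicationConfig. As I read the series, the effective precedence is roughly: explicit storage-class header first, then the client-configured ozone.replication settings, then the bucket's own replication config (with null falling through to the OM default). A sketch of that assumed order; the helper name 'pick' is mine and the authoritative logic lives in S3Utils:

// Sketch only, assumed precedence.
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.util.S3Utils;

final class ResolutionOrderSketch {
  static ReplicationConfig pick(String storageClass, String storageConfig,
      ReplicationConfig clientConfigured, ReplicationConfig bucketDefault)
      throws OS3Exception {
    if (!StringUtils.isEmpty(storageClass)) {
      // Header wins; STANDARD_IA may carry an EC override via "storage-config".
      return S3Utils.toReplicationConfig(storageClass, storageConfig);
    }
    // No header: client-side config if set, else the bucket (or OM) default.
    return clientConfigured != null ? clientConfigured : bucketDefault;
  }
}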
From 7f66cd6366929100cd8fdf96c4f7c197dc288ab3 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Wed, 21 May 2025 15:27:23 +0200
Subject: [PATCH 8/8] move OzoneClientUtils to ozone-client

---
 .../org/apache/hadoop/ozone/shell/ReplicationOptions.java       | 2 +-
 .../apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java      | 2 +-
 .../org/apache/hadoop/ozone/client}/OzoneClientUtils.java       | 5 +----
 .../apache/hadoop/ozone/client}/TestOzoneClientUtils.java       | 4 +---
 .../apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java     | 1 +
 .../org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java        | 1 +
 .../hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java      | 1 +
 .../apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java      | 1 +
 hadoop-ozone/s3gateway/pom.xml                                  | 4 ----
 .../org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java     | 2 +-
 10 files changed, 9 insertions(+), 14 deletions(-)
 rename hadoop-ozone/{ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone => client/src/main/java/org/apache/hadoop/ozone/client}/OzoneClientUtils.java (98%)
 rename hadoop-ozone/{ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone => client/src/test/java/org/apache/hadoop/ozone/client}/TestOzoneClientUtils.java (98%)

diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java
index c9dad3df8800..ad13b3b1ae1a 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java
@@ -22,10 +22,10 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;

 import java.util.Optional;
-import org.apache.hadoop.fs.ozone.OzoneClientUtils;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;

 /**
  * Common options for specifying replication config: specialized for
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java
index 489abed03958..c36c1af99102 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java
@@ -17,7 +17,7 @@

 package org.apache.hadoop.ozone.shell.keys;

-import static org.apache.hadoop.fs.ozone.OzoneClientUtils.getFileChecksumWithCombineMode;
+import static org.apache.hadoop.ozone.client.OzoneClientUtils.getFileChecksumWithCombineMode;

 import com.fasterxml.jackson.annotation.JsonAutoDetect;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
similarity index 98%
rename from hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
rename to hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
index a885116902b4..0d5504bb167b 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */

-package org.apache.hadoop.fs.ozone;
+package org.apache.hadoop.ozone.client;

 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
@@ -36,9 +36,6 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.checksum.BaseFileChecksumHelper;
 import org.apache.hadoop.ozone.client.checksum.ChecksumHelperFactory;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
diff --git a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientUtils.java
similarity index 98%
rename from hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneClientUtils.java
rename to hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientUtils.java
index baff7bb67db3..c2154fe880b9 100644
--- a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneClientUtils.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientUtils.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */

-package org.apache.hadoop.fs.ozone;
+package org.apache.hadoop.ozone.client;

 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
@@ -32,8 +32,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.junit.jupiter.api.Test;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 1abddfc8afc6..8abe5147b56f 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -67,6 +67,7 @@
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 8e1588889010..67a252e69568 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -74,6 +74,7 @@
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneFsServerDefaults;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.io.SelectorOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index edbd7b905634..cd05b9de5ee4 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -81,6 +81,7 @@
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneSnapshot;
 import org.apache.hadoop.ozone.client.OzoneVolume;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index 2e05b8e401d1..d355f59899d6 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -83,6 +83,7 @@
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneFsServerDefaults;
 import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.SelectorOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml
index c2d1c978f961..365010e7b702 100644
--- a/hadoop-ozone/s3gateway/pom.xml
+++ b/hadoop-ozone/s3gateway/pom.xml
@@ -166,10 +166,6 @@
       <groupId>org.apache.ozone</groupId>
       <artifactId>ozone-common</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.ozone</groupId>
-      <artifactId>ozone-filesystem-common</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
       <artifactId>ozone-interface-client</artifactId>
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index d6e393b9d5e1..7ebed80c2878 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -107,7 +107,6 @@
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.fs.ozone.OzoneClientUtils;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -115,6 +114,7 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.S3GAction;
 import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
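[Reviewer note] End-to-end wiring after the whole series, using only calls that appear in these patches; the wrapper class and the 'forPut' name are mine, and 'conf' stands in for the gateway's configuration instance.

// Sketch of the post-series call path in ObjectEndpoint for PUT and MPU-create.
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClientUtils; // new home after PATCH 8
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.util.S3Utils;

final class ReplicationWiringSketch {
  static ReplicationConfig forPut(OzoneConfiguration conf, OzoneBucket bucket,
      String storageClassHeader, String storageConfigHeader) throws OS3Exception {
    // Client-side config (ozone.replication / ozone.replication.type), if any.
    ReplicationConfig clientConfigured =
        OzoneClientUtils.getClientConfiguredReplicationConfig(conf);
    // Resolve against the S3 headers and the bucket's own replication config.
    return S3Utils.resolveS3ClientSideReplicationConfig(
        storageClassHeader, storageConfigHeader,
        clientConfigured, bucket.getReplicationConfig());
  }
}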