diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
new file mode 100644
index 000000000000..546b1762ada1
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
@@ -0,0 +1,230 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.UUID;
+
+/**
+ * Test verifies object store with OZONE_OM_ENABLE_FILESYSTEM_PATHS enabled.
+ */
+public class TestObjectStoreWithLegacyFS {
+
+  @Rule
+  public Timeout timeout = Timeout.seconds(200);
+
+  private static MiniOzoneCluster cluster = null;
+
+  private String volumeName;
+
+  private String bucketName;
+
+  private OzoneVolume volume;
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestObjectStoreWithLegacyFS.class);
+
+  @BeforeClass
+  public static void initClass() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
+    conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
+        BucketLayout.LEGACY.name());
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
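+   * Runs once after all tests in this class have finished.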
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Before
+  public void init() throws Exception {
+    volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName,
+        BucketLayout.OBJECT_STORE);
+    volume =
+        cluster.getRpcClient().getObjectStore().getVolume(volumeName);
+  }
+
+  /**
+   * Test verifies that OBS bucket keys form a flat key-value
+   * structure and that intermediate directories aren't created even
+   * if the OZONE_OM_ENABLE_FILESYSTEM_PATHS flag is TRUE.
+   */
+  @Test
+  public void testFlatKeyStructureWithOBS() throws Exception {
+    OzoneBucket ozoneBucket = volume.getBucket(bucketName);
+    OzoneOutputStream stream = ozoneBucket
+        .createKey("dir1/dir2/dir3/key-1", 0);
+    stream.close();
+    Table<String, OmKeyInfo> keyTable =
+        cluster.getOzoneManager().getMetadataManager()
+            .getKeyTable(BucketLayout.OBJECT_STORE);
+
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+        iterator = keyTable.iterator();
+
+    String seekKey = "dir";
+    String dbKey = cluster.getOzoneManager().getMetadataManager()
+        .getOzoneKey(volumeName, bucketName, seekKey);
+    iterator.seek(dbKey);
+
+    int countKeys = 0;
+    while (iterator.hasNext()) {
+      Table.KeyValue<String, OmKeyInfo> keyValue = iterator.next();
+      if (!keyValue.getKey().startsWith(dbKey)) {
+        break;
+      }
+      countKeys++;
+      Assert.assertTrue(keyValue.getKey().endsWith("dir1/dir2/dir3/key-1"));
+    }
+    Assert.assertEquals(1, countKeys);
+  }
+
+  @Test
+  public void testMultiPartCompleteUpload() throws Exception {
+    // Test-1: Upload an MPU key to an OBS layout bucket with a
+    // directory in its path
+    String legacyBuckName = UUID.randomUUID().toString();
+    BucketArgs.Builder builder = BucketArgs.newBuilder();
+    builder.setStorageType(StorageType.DISK);
+    builder.setBucketLayout(BucketLayout.OBJECT_STORE);
+    BucketArgs omBucketArgs = builder.build();
+    volume.createBucket(legacyBuckName, omBucketArgs);
+    OzoneBucket bucket = volume.getBucket(legacyBuckName);
+
+    String keyName = "abc/def/mpu-key1";
+
+    OmMultipartUploadCompleteInfo
+        omMultipartUploadCompleteInfo =
+        uploadMPUWithDirectoryExists(bucket, keyName);
+    // successfully uploaded MPU key
+    Assert.assertNotNull(omMultipartUploadCompleteInfo);
+
+    // Test-2: Upload an MPU key to a LEGACY layout bucket with a
+    // directory in its path
+    legacyBuckName = UUID.randomUUID().toString();
+    builder = BucketArgs.newBuilder();
+    builder.setStorageType(StorageType.DISK);
+    builder.setBucketLayout(BucketLayout.LEGACY);
+    omBucketArgs = builder.build();
+    volume.createBucket(legacyBuckName, omBucketArgs);
+    bucket = volume.getBucket(legacyBuckName);
+
+    try {
+      uploadMPUWithDirectoryExists(bucket, keyName);
+      Assert.fail("Must throw error as there is "
+          + "already a directory in the given path");
+    } catch (OMException ome) {
+      Assert.assertEquals(OMException.ResultCodes.NOT_A_FILE, ome.getResult());
+    }
+  }
+
+  private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists(
+      OzoneBucket bucket, String keyName) throws IOException {
+    OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
+        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE));
+
+    Assert.assertNotNull(omMultipartInfo.getUploadID());
+
+    String uploadID = omMultipartInfo.getUploadID();
+
+    // upload part 1.
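+    // (Part numbers start at 1; the commit info captured from the stream
+    // after close() is what completeMultipartUpload() needs below.)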
+    byte[] data = generateData(128, (byte) RandomUtils.nextLong());
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+        data.length, 1, uploadID);
+    ozoneOutputStream.write(data, 0, data.length);
+    ozoneOutputStream.close();
+
+    if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) {
+      // Create a key with a trailing slash. OBS stores it as a plain
+      // flat key, whereas LEGACY would treat it as a directory.
+      OzoneOutputStream stream = bucket.createKey(
+          omMultipartInfo.getKeyName() + OzoneConsts.OZONE_URI_DELIMITER, 0);
+      stream.close();
+    } else if (bucket.getBucketLayout() == BucketLayout.LEGACY) {
+      // Create a key under keyName; with LEGACY layout this turns
+      // keyName into an intermediate directory.
+      OzoneOutputStream stream = bucket.createKey(
+          keyName + OzoneConsts.OZONE_URI_DELIMITER + "newKey-1", 0);
+      stream.close();
+    }
+
+    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
+        ozoneOutputStream.getCommitUploadPartInfo();
+
+    Map<Integer, String> partsMap = new LinkedHashMap<>();
+    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
+    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
+        bucket.completeMultipartUpload(keyName,
+            uploadID, partsMap);
+    return omMultipartUploadCompleteInfo;
+  }
+
+  private byte[] generateData(int size, byte val) {
+    byte[] chars = new byte[size];
+    Arrays.fill(chars, val);
+    return chars;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index ab903416e22d..d3890a8a9fa7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -180,7 +180,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);

       // Check for directory exists with same name, if it exists throw error.
-      if (ozoneManager.getEnableFileSystemPaths()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("BucketName: {}, BucketLayout: {}",
+            omBucketInfo.getBucketName(), omBucketInfo.getBucketLayout());
+      }
+      if (omBucketInfo.getBucketLayout()
+          .shouldNormalizePaths(ozoneManager.getEnableFileSystemPaths())) {
         if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
             omMetadataManager)) {
           throw new OMException("Can not create file: " + keyName +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index 99986a7410cd..8d51e90efe8e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -237,7 +237,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

       // If FILE_EXISTS we just override like how we used to do for Key Create.
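+      // Clarifying note: shouldNormalizePaths() is expected to be true for
+      // LEGACY buckets only when ozone.om.enable.filesystem.paths is set,
+      // and false for OBJECT_STORE buckets, so OBS keys skip the directory
+      // check in the branch below.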
       List<OzoneAcl> inheritAcls;
-      if (ozoneManager.getEnableFileSystemPaths()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("BucketName: {}, BucketLayout: {}",
+            bucketInfo.getBucketName(), bucketInfo.getBucketLayout());
+      }
+      if (bucketInfo.getBucketLayout()
+          .shouldNormalizePaths(ozoneManager.getEnableFileSystemPaths())) {
         OMFileRequest.OMPathInfo pathInfo =
             OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName,
                 bucketName, keyName, Paths.get(keyName));
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index b3f501d807d2..d51ed15be3b0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -166,8 +166,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
     OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
         .getMultipartInfoTable().get(multipartKey);

-    // Check for directory exists with same name, if it exists throw error.
-    checkDirectoryAlreadyExists(ozoneManager, volumeName, bucketName, keyName,
+    // Check for directory exists with same name for the LEGACY_FS,
+    // if it exists throw error.
+    checkDirectoryAlreadyExists(ozoneManager, omBucketInfo, keyName,
         omMetadataManager);

     if (multipartKeyInfo == null) {
@@ -309,11 +310,16 @@ protected OMClientResponse getOmClientResponse(String multipartKey,
   }

   protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager,
-      String volumeName, String bucketName, String keyName,
+      OmBucketInfo omBucketInfo, String keyName,
       OMMetadataManager omMetadataManager) throws IOException {
-    if (ozoneManager.getEnableFileSystemPaths()) {
-      if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
-          omMetadataManager)) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("BucketName: {}, BucketLayout: {}",
+          omBucketInfo.getBucketName(), omBucketInfo.getBucketLayout());
+    }
+    if (omBucketInfo.getBucketLayout()
+        .shouldNormalizePaths(ozoneManager.getEnableFileSystemPaths())) {
+      if (checkDirectoryAlreadyExists(omBucketInfo.getVolumeName(),
+          omBucketInfo.getBucketName(), keyName, omMetadataManager)) {
         throw new OMException("Can not Complete MPU for file: " + keyName +
             " as there is already directory in the given path", NOT_A_FILE);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
index 0e25276a6b63..dfcdda06bffd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
@@ -59,13 +59,14 @@ public S3MultipartUploadCompleteRequestWithFSO(OMRequest omRequest,

   @Override
   protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager,
-      String volumeName, String bucketName, String keyName,
+      OmBucketInfo omBucketInfo, String keyName,
       OMMetadataManager omMetadataManager) throws IOException {
     Path keyPath = Paths.get(keyName);
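+    // Clarifying note: verifyDirectoryKeysInPath resolves keyPath against
+    // the FSO directory table so the DIRECTORY_EXISTS check below can
+    // reject an MPU key that collides with an existing directory.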
     OMFileRequest.OMPathInfoWithFSO pathInfoFSO =
         OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
-            volumeName, bucketName, keyName, keyPath);
+            omBucketInfo.getVolumeName(), omBucketInfo.getBucketName(),
+            keyName, keyPath);
     // Check for directory exists with same name, if it exists throw error.
     if (pathInfoFSO.getDirectoryResult() == DIRECTORY_EXISTS) {
       throw new OMException("Can not Complete MPU for file: " + keyName +