From 615ce2a3fa543298f5cbda2fe2c4e59712f9a1ea Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Fri, 12 Apr 2024 12:40:35 +0200
Subject: [PATCH 1/8] HDDS-8450. Dedicated acceptance test suite for s3a
(#6458)
(cherry picked from commit a523fd9d234752e3b758222ca19a89b085550003)
---
hadoop-ozone/dev-support/checks/_lib.sh | 15 +++
.../dev-support/checks/_mvn_unit_report.sh | 4 +-
hadoop-ozone/dev-support/checks/acceptance.sh | 52 ++++++--
.../dist/src/main/compose/common/s3a-test.sh | 112 ++++++++++++++++++
.../dist/src/main/compose/ozone/test-s3a.sh | 25 ++++
.../main/compose/ozonesecure-ha/test-s3a.sh | 27 +++++
.../dist/src/main/compose/test-all.sh | 7 +-
hadoop-ozone/dist/src/main/compose/testlib.sh | 4 +-
8 files changed, 230 insertions(+), 16 deletions(-)
create mode 100644 hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
create mode 100644 hadoop-ozone/dist/src/main/compose/ozone/test-s3a.sh
create mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3a.sh
diff --git a/hadoop-ozone/dev-support/checks/_lib.sh b/hadoop-ozone/dev-support/checks/_lib.sh
index b81acf989930..134c8f53c6e8 100644
--- a/hadoop-ozone/dev-support/checks/_lib.sh
+++ b/hadoop-ozone/dev-support/checks/_lib.sh
@@ -149,3 +149,18 @@ install_spotbugs() {
_install_spotbugs() {
curl -LSs https://repo.maven.apache.org/maven2/com/github/spotbugs/spotbugs/3.1.12/spotbugs-3.1.12.tgz | tar -xz -f -
}
+
+download_hadoop_aws() {
+ local dir="$1"
+
+ if [[ -z ${dir} ]]; then
+ echo "Required argument: target directory for Hadoop AWS sources" >&2
+ return 1
+ fi
+
+ if [[ ! -e "${dir}" ]] || [[ ! -d "${dir}"/src/test/resources ]]; then
+ mkdir -p "${dir}"
+ [[ -f "${dir}.tar.gz" ]] || curl -LSs -o "${dir}.tar.gz" https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz
+ tar -x -z -C "${dir}" --strip-components=3 -f "${dir}.tar.gz" --wildcards 'hadoop-*-src/hadoop-tools/hadoop-aws' || return 1
+ fi
+}
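
A minimal usage sketch for the new helper (illustrative only; acceptance.sh below resolves HADOOP_VERSION via mvn help:evaluate, so the literal version here is an assumption):

    export HADOOP_VERSION=3.3.6                      # assumed version, normally derived from the build
    source hadoop-ozone/dev-support/checks/_lib.sh
    download_hadoop_aws "$PWD/target/hadoop-src"     # fetches and extracts the hadoop-aws module sources
    ls target/hadoop-src/src/test/resources          # the directory the helper checks for
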
diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
index 5139dddcd8c1..e6059cd82566 100755
--- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
+++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
@@ -81,8 +81,8 @@ for failed_test in $(< ${REPORT_DIR}/summary.txt); do
\( -name "${failed_test}.txt" -or -name "${failed_test}-output.txt" -or -name "TEST-${failed_test}.xml" \)); do
dir=$(dirname "${file}")
dest_dir=$(_realpath --relative-to="${PWD}" "${dir}/../..") || continue
- mkdir -p "${REPORT_DIR}/${dest_dir}"
- mv "${file}" "${REPORT_DIR}/${dest_dir}"/
+ mkdir -pv "${REPORT_DIR}/${dest_dir}"
+ mv -v "${file}" "${REPORT_DIR}/${dest_dir}"/
done
done
diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh
index 0489fa24384a..5be3f7b5879a 100755
--- a/hadoop-ozone/dev-support/checks/acceptance.sh
+++ b/hadoop-ozone/dev-support/checks/acceptance.sh
@@ -19,15 +19,20 @@ set -u -o pipefail
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/../../.." || exit 1
-source "${DIR}/_lib.sh"
+OZONE_ROOT=$(pwd -P)
+
+: ${HADOOP_AWS_DIR:=""}
+: ${OZONE_ACCEPTANCE_SUITE:=""}
+: ${OZONE_TEST_SELECTOR:=""}
+: ${OZONE_ACCEPTANCE_TEST_TYPE:="robot"}
+: ${OZONE_WITH_COVERAGE:="false"}
-install_virtualenv
-install_robot
+source "${DIR}/_lib.sh"
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/acceptance"}
+REPORT_DIR=${OUTPUT_DIR:-"${OZONE_ROOT}/target/acceptance"}
OZONE_VERSION=$(mvn help:evaluate -Dexpression=ozone.version -q -DforceStdout)
-DIST_DIR="$DIR/../../dist/target/ozone-$OZONE_VERSION"
+DIST_DIR="${OZONE_ROOT}/hadoop-ozone/dist/target/ozone-$OZONE_VERSION"
if [ ! -d "$DIST_DIR" ]; then
echo "Distribution dir is missing. Doing a full build"
@@ -36,15 +41,42 @@ fi
mkdir -p "$REPORT_DIR"
-export OZONE_ACCEPTANCE_SUITE
+if [[ "${OZONE_ACCEPTANCE_SUITE}" == "s3a" ]]; then
+ OZONE_ACCEPTANCE_TEST_TYPE="maven"
+
+ if [[ -z "${HADOOP_AWS_DIR}" ]]; then
+ HADOOP_VERSION=$(mvn help:evaluate -Dexpression=hadoop.version -q -DforceStdout)
+ export HADOOP_AWS_DIR=${OZONE_ROOT}/target/hadoop-src
+ fi
+
+ download_hadoop_aws "${HADOOP_AWS_DIR}"
+fi
+
+if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "robot" ]]; then
+ install_virtualenv
+ install_robot
+fi
+
+export OZONE_ACCEPTANCE_SUITE OZONE_ACCEPTANCE_TEST_TYPE
cd "$DIST_DIR/compose" || exit 1
./test-all.sh 2>&1 | tee "${REPORT_DIR}/output.log"
RES=$?
-cp -rv result/* "$REPORT_DIR/"
-cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html"
-find "$REPORT_DIR" -type f -empty -print0 | xargs -0 rm -v
-grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_DIR}/summary.txt"
+if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "maven" ]]; then
+ pushd result
+ source "${DIR}/_mvn_unit_report.sh"
+ find . -name junit -print0 | xargs -r -0 rm -frv
+ cp -rv * "${REPORT_DIR}"/
+ popd
+else
+ cp -rv result/* "$REPORT_DIR/"
+ if [[ -f "${REPORT_DIR}/log.html" ]]; then
+ cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html"
+ fi
+ grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_DIR}/summary.txt"
+fi
+
+find "$REPORT_DIR" -type f -empty -not -name summary.txt -print0 | xargs -0 rm -v
exit $RES
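
With this change the new suite can be invoked like the existing acceptance checks; a hedged example from the source tree root (assumes Docker and a built distribution are available):

    # runs only compose environments tagged '#suite:s3a' and reports JUnit results via Maven
    OZONE_ACCEPTANCE_SUITE=s3a hadoop-ozone/dev-support/checks/acceptance.sh
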
diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
new file mode 100644
index 000000000000..85dbc5feced2
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script runs S3A contract tests against various bucket types on
+# a Docker Compose-based Ozone cluster.
+# Requires HADOOP_AWS_DIR to point to the directory containing hadoop-aws sources.
+
+if [[ -z ${HADOOP_AWS_DIR} ]] || [[ ! -e ${HADOOP_AWS_DIR} ]]; then
+ echo "Skipping S3A tests due to missing HADOOP_AWS_DIR (directory with hadoop-aws sources)" >&2
+ exit
+fi
+
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../testlib.sh"
+
+## @description Run S3A contract tests against Ozone.
+## @param Ozone S3 bucket
+execute_s3a_tests() {
+ local bucket="$1"
+
+ pushd "${HADOOP_AWS_DIR}"
+
+ # S3A contract tests are enabled by presence of `auth-keys.xml`.
+ # https://hadoop.apache.org/docs/r3.3.6/hadoop-aws/tools/hadoop-aws/testing.html#Setting_up_the_tests
+ cat > src/test/resources/auth-keys.xml <<-EOF
+	<?xml version="1.0" encoding="UTF-8"?>
+	<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+	<configuration>
+	  <property>
+	    <name>fs.s3a.endpoint</name>
+	    <value>http://localhost:9878</value>
+	  </property>
+	  <property>
+	    <name>test.fs.s3a.endpoint</name>
+	    <value>http://localhost:9878</value>
+	  </property>
+	  <property>
+	    <name>fs.contract.test.fs.s3a</name>
+	    <value>s3a://${bucket}/</value>
+	  </property>
+	  <property>
+	    <name>test.fs.s3a.name</name>
+	    <value>s3a://${bucket}/</value>
+	  </property>
+	  <property>
+	    <name>test.fs.s3a.sts.enabled</name>
+	    <value>false</value>
+	  </property>
+	  <property>
+	    <name>fs.s3a.path.style.access</name>
+	    <value>true</value>
+	  </property>
+	  <property>
+	    <name>fs.s3a.directory.marker.retention</name>
+	    <value>keep</value>
+	  </property>
+	</configuration>
+EOF
+
+ # Some tests are skipped due to known issues.
+ # - ITestS3AContractDistCp: HDDS-10616
+ # - ITestS3AContractEtag, ITestS3AContractRename: HDDS-10615
+ # - ITestS3AContractGetFileStatusV1List: HDDS-10617
+ # - ITestS3AContractMkdir: HDDS-10572
+ mvn -B -V --fail-never --no-transfer-progress \
+ -Dtest='ITestS3AContract*, !ITestS3AContractDistCp, !ITestS3AContractEtag, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractMkdir, !ITestS3AContractRename' \
+ clean test
+
+ local target="${RESULT_DIR}/junit/${bucket}/target"
+ mkdir -p "${target}"
+ mv -iv target/surefire-reports "${target}"/
+ popd
+}
+
+start_docker_env
+
+if [[ ${SECURITY_ENABLED} == "true" ]]; then
+ execute_command_in_container s3g kinit -kt /etc/security/keytabs/testuser.keytab "testuser/s3g@EXAMPLE.COM"
+ access=$(execute_command_in_container s3g ozone s3 getsecret -e)
+ eval "$access"
+else
+ export AWS_ACCESS_KEY_ID="s3a-contract"
+ export AWS_SECRET_ACCESS_KEY="unsecure"
+fi
+
+execute_command_in_container s3g ozone sh bucket create --layout OBJECT_STORE /s3v/obs-bucket
+execute_command_in_container s3g ozone sh bucket create --layout LEGACY /s3v/leg-bucket
+execute_command_in_container s3g ozone sh bucket create --layout FILE_SYSTEM_OPTIMIZED /s3v/fso-bucket
+
+for bucket in obs-bucket leg-bucket fso-bucket; do
+ execute_s3a_tests "$bucket"
+done
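
The per-environment wrappers that follow let this shared script be run directly from a single compose directory; an illustrative invocation (the hadoop-src path is an assumption):

    cd hadoop-ozone/dist/target/ozone-*/compose/ozone
    HADOOP_AWS_DIR=/path/to/hadoop-src ./test-s3a.sh
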
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test-s3a.sh b/hadoop-ozone/dist/src/main/compose/ozone/test-s3a.sh
new file mode 100644
index 000000000000..c277e71a4bf0
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone/test-s3a.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#suite:s3a
+
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export COMPOSE_DIR
+
+export SECURITY_ENABLED=false
+
+source "$COMPOSE_DIR/../common/s3a-test.sh"
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3a.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3a.sh
new file mode 100644
index 000000000000..78b8b51d9d81
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3a.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#suite:s3a
+
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export COMPOSE_DIR
+
+export SECURITY_ENABLED=true
+export OM_SERVICE_ID="omservice"
+export SCM=scm1.org
+
+source "$COMPOSE_DIR/../common/s3a-test.sh"
diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh
index a998690032a7..85294b6b7938 100755
--- a/hadoop-ozone/dist/src/main/compose/test-all.sh
+++ b/hadoop-ozone/dist/src/main/compose/test-all.sh
@@ -27,6 +27,7 @@ rm "$ALL_RESULT_DIR"/* || true
source "$SCRIPT_DIR"/testlib.sh
+: ${OZONE_ACCEPTANCE_TEST_TYPE:="robot"}
: ${OZONE_WITH_COVERAGE:="false"}
if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then
@@ -46,7 +47,9 @@ if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then
cp /tmp/jacoco-combined.exec "$SCRIPT_DIR"/result
fi
-generate_report "acceptance" "${ALL_RESULT_DIR}" "${XUNIT_RESULT_DIR}"
-
+if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "robot" ]]; then
+ # does not apply to JUnit tests run via Maven
+ generate_report "acceptance" "${ALL_RESULT_DIR}" "${XUNIT_RESULT_DIR}"
+fi
exit $RESULT
diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh
index 505cb1ae77c9..11db3879bb16 100755
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/testlib.sh
@@ -429,12 +429,12 @@ copy_results() {
target_dir="${target_dir}/${test_script_name}"
fi
- if [[ -n "$(find "${result_dir}" -name "*.xml")" ]]; then
+ if command -v rebot > /dev/null 2>&1 && [[ -n "$(find "${result_dir}" -name "*.xml")" ]]; then
rebot --nostatusrc -N "${test_name}" -l NONE -r NONE -o "${all_result_dir}/${test_name}.xml" "${result_dir}"/*.xml \
&& rm -fv "${result_dir}"/*.xml "${result_dir}"/log.html "${result_dir}"/report.html
fi
- mkdir -p "${target_dir}"
+ mkdir -pv "${target_dir}"
mv -v "${result_dir}"/* "${target_dir}"/
}
From 9bb6575d24c42054aed7cb20a52afe0f5fc46b2d Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Thu, 28 Mar 2024 02:31:26 +0800
Subject: [PATCH 2/8] HDDS-10574. Improve TestObjectPut (#6426)
(cherry picked from commit e68183e169dc3b2dc19786894a8e976579c33b7e)
---
.../ozone/client/ClientProtocolStub.java | 2 +-
.../hadoop/ozone/client/OzoneBucketStub.java | 82 ++--
.../hadoop/ozone/client/OzoneVolumeStub.java | 9 +-
.../ozone/s3/endpoint/TestObjectPut.java | 368 +++++++-----------
4 files changed, 179 insertions(+), 282 deletions(-)
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
index 8bf0a8821863..299653d0e7cf 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
@@ -491,7 +491,7 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName,
@Override
public void createDirectory(String volumeName, String bucketName,
String keyName) throws IOException {
-
+ getBucket(volumeName, bucketName).createDirectory(keyName);
}
@Override
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index 1e28c310b852..b5f37aaef3e0 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -37,12 +37,11 @@
import javax.xml.bind.DatatypeConverter;
import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
import org.apache.hadoop.ozone.OzoneAcl;
@@ -56,6 +55,8 @@
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH;
@@ -64,7 +65,9 @@
/**
* In-memory ozone bucket for testing.
*/
-public class OzoneBucketStub extends OzoneBucket {
+public final class OzoneBucketStub extends OzoneBucket {
+
+ private static final Logger LOG = LoggerFactory.getLogger(OzoneBucketStub.class);
private Map<String, OzoneKeyDetails> keyDetails = new HashMap<>();
@@ -81,7 +84,7 @@ public static Builder newBuilder() {
return new Builder();
}
- public OzoneBucketStub(Builder b) {
+ private OzoneBucketStub(Builder b) {
super(b);
this.replicationConfig = super.getReplicationConfig();
}
@@ -94,43 +97,6 @@ public static final class Builder extends OzoneBucket.Builder {
private Builder() {
}
- @Override
- public Builder setVolumeName(String volumeName) {
- super.setVolumeName(volumeName);
- return this;
- }
-
- @Override
- public Builder setName(String name) {
- super.setName(name);
- return this;
- }
-
- @Override
- public Builder setDefaultReplicationConfig(
- DefaultReplicationConfig defaultReplicationConfig) {
- super.setDefaultReplicationConfig(defaultReplicationConfig);
- return this;
- }
-
- @Override
- public Builder setStorageType(StorageType storageType) {
- super.setStorageType(storageType);
- return this;
- }
-
- @Override
- public Builder setVersioning(Boolean versioning) {
- super.setVersioning(versioning);
- return this;
- }
-
- @Override
- public Builder setCreationTime(long creationTime) {
- super.setCreationTime(creationTime);
- return this;
- }
-
@Override
public OzoneBucketStub build() {
return new OzoneBucketStub(this);
@@ -150,31 +116,16 @@ public OzoneOutputStream createKey(String key, long size,
ReplicationFactor factor,
Map<String, String> metadata)
throws IOException {
- ByteArrayOutputStream byteArrayOutputStream =
- new ByteArrayOutputStream((int) size) {
- @Override
- public void close() throws IOException {
- keyContents.put(key, toByteArray());
- keyDetails.put(key, new OzoneKeyDetails(
- getVolumeName(),
- getName(),
- key,
- size,
- System.currentTimeMillis(),
- System.currentTimeMillis(),
- new ArrayList<>(), replicationConfig, metadata, null,
- () -> readKey(key), true
- ));
- super.close();
- }
- };
- return new OzoneOutputStream(byteArrayOutputStream, null);
+ ReplicationConfig replication = ReplicationConfig.fromTypeAndFactor(type, factor);
+ return createKey(key, size, replication, metadata);
}
@Override
public OzoneOutputStream createKey(String key, long size,
ReplicationConfig rConfig, Map<String, String> metadata)
throws IOException {
+ assertDoesNotExist(key + "/");
+
final ReplicationConfig repConfig;
if (rConfig == null) {
repConfig = getReplicationConfig();
@@ -209,6 +160,8 @@ public OzoneDataStreamOutput createStreamKey(String key, long size,
ReplicationConfig rConfig,
Map<String, String> keyMetadata)
throws IOException {
+ assertDoesNotExist(key + "/");
+
ByteBufferStreamOutput byteBufferStreamOutput =
new KeyMetadataAwareByteBufferStreamOutput(keyMetadata) {
@@ -629,6 +582,9 @@ public ReplicationConfig getReplicationConfig() {
@Override
public void createDirectory(String keyName) throws IOException {
+ assertDoesNotExist(StringUtils.stripEnd(keyName, "/"));
+
+ LOG.info("createDirectory({})", keyName);
keyDetails.put(keyName, new OzoneKeyDetails(
getVolumeName(),
getName(),
@@ -640,6 +596,12 @@ public void createDirectory(String keyName) throws IOException {
() -> readKey(keyName), false));
}
+ private void assertDoesNotExist(String keyName) throws OMException {
+ if (keyDetails.get(keyName) != null) {
+ throw new OMException("already exists", ResultCodes.FILE_ALREADY_EXISTS);
+ }
+ }
+
/**
* ByteArrayOutputStream stub with metadata.
*/
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java
index 9fab5a181b56..4ce18b41f1cf 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java
@@ -38,17 +38,17 @@
/**
* Ozone volume with in-memory state for testing.
*/
-public class OzoneVolumeStub extends OzoneVolume {
+public final class OzoneVolumeStub extends OzoneVolume {
- private Map<String, OzoneBucket> buckets = new HashMap<>();
+ private final Map<String, OzoneBucket> buckets = new HashMap<>();
- private ArrayList<OzoneAcl> aclList = new ArrayList<>();
+ private final ArrayList<OzoneAcl> aclList = new ArrayList<>();
public static Builder newBuilder() {
return new Builder();
}
- public OzoneVolumeStub(Builder b) {
+ private OzoneVolumeStub(Builder b) {
super(b);
}
@@ -124,6 +124,7 @@ public void createBucket(String bucketName, BucketArgs bucketArgs) {
.setDefaultReplicationConfig(new DefaultReplicationConfig(
RatisReplicationConfig.getInstance(
HddsProtos.ReplicationFactor.THREE)))
+ .setBucketLayout(bucketArgs.getBucketLayout())
.setStorageType(bucketArgs.getStorageType())
.setVersioning(bucketArgs.getVersioning())
.setCreationTime(Time.now())
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
index b8c3a1c805ba..bdb657cf3ce2 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
@@ -23,6 +23,7 @@
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.util.stream.Stream;
import java.io.OutputStream;
import java.security.MessageDigest;
import javax.ws.rs.core.HttpHeaders;
@@ -30,27 +31,30 @@
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response;
import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.http.HttpStatus;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
import org.mockito.MockedStatic;
import static java.nio.charset.StandardCharsets.UTF_8;
@@ -60,6 +64,7 @@
import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Utils.urlEncode;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -67,8 +72,6 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.mockStatic;
import static org.mockito.Mockito.spy;
@@ -79,110 +82,102 @@
/**
* Test put object.
*/
-public class TestObjectPut {
- public static final String CONTENT = "0123456789";
- private String bucketName = "b1";
- private String keyName = "key=value/1";
- private String destBucket = "b2";
- private String destkey = "key=value/2";
- private String nonexist = "nonexist";
+class TestObjectPut {
+ private static final String CONTENT = "0123456789";
+ private static final String FSO_BUCKET_NAME = "fso-bucket";
+ private static final String BUCKET_NAME = "b1";
+ private static final String KEY_NAME = "key=value/1";
+ private static final String DEST_BUCKET_NAME = "b2";
+ private static final String DEST_KEY = "key=value/2";
+ private static final String NO_SUCH_BUCKET = "nonexist";
+
private OzoneClient clientStub;
private ObjectEndpoint objectEndpoint;
+ private HttpHeaders headers;
+ private OzoneBucket bucket;
+ private OzoneBucket fsoBucket;
+
+ static Stream<Arguments> argumentsForPutObject() {
+ ReplicationConfig ratis3 = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
+ ECReplicationConfig ec = new ECReplicationConfig("rs-3-2-1024K");
+ return Stream.of(
+ Arguments.of(10, ratis3),
+ Arguments.of(10, ec)
+ );
+ }
@BeforeEach
- public void setup() throws IOException {
+ void setup() throws IOException {
+ OzoneConfiguration config = new OzoneConfiguration();
+
//Create client stub and object store stub.
clientStub = new OzoneClientStub();
// Create bucket
- clientStub.getObjectStore().createS3Bucket(bucketName);
- clientStub.getObjectStore().createS3Bucket(destBucket);
+ clientStub.getObjectStore().createS3Bucket(BUCKET_NAME);
+ bucket = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME);
+ clientStub.getObjectStore().createS3Bucket(DEST_BUCKET_NAME);
// Create PutObject and setClient to OzoneClientStub
objectEndpoint = spy(new ObjectEndpoint());
objectEndpoint.setClient(clientStub);
- objectEndpoint.setOzoneConfiguration(new OzoneConfiguration());
+ objectEndpoint.setOzoneConfiguration(config);
+
+ headers = mock(HttpHeaders.class);
+ objectEndpoint.setHeaders(headers);
+
+ String volumeName = config.get(OzoneConfigKeys.OZONE_S3_VOLUME_NAME,
+ OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT);
+ OzoneVolume volume = clientStub.getObjectStore().getVolume(volumeName);
+ BucketArgs fsoBucketArgs = BucketArgs.newBuilder()
+ .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED)
+ .build();
+ volume.createBucket(FSO_BUCKET_NAME, fsoBucketArgs);
+ fsoBucket = volume.getBucket(FSO_BUCKET_NAME);
}
- @Test
- public void testPutObject() throws IOException, OS3Exception {
+ @ParameterizedTest
+ @MethodSource("argumentsForPutObject")
+ void testPutObject(int length, ReplicationConfig replication) throws IOException, OS3Exception {
//GIVEN
- HttpHeaders headers = Mockito.mock(HttpHeaders.class);
- ByteArrayInputStream body =
- new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
- objectEndpoint.setHeaders(headers);
+ final String content = RandomStringUtils.randomAlphanumeric(length);
+ ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8));
+ bucket.setReplicationConfig(replication);
//WHEN
- Response response = objectEndpoint.put(bucketName, keyName, CONTENT
- .length(), 1, null, body);
-
+ Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, body);
//THEN
- OzoneInputStream ozoneInputStream =
- clientStub.getObjectStore().getS3Bucket(bucketName)
- .readKey(keyName);
- String keyContent =
- IOUtils.toString(ozoneInputStream, UTF_8);
- OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName);
-
assertEquals(200, response.getStatus());
- assertEquals(CONTENT, keyContent);
- assertNotNull(keyDetails.getMetadata());
- assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG)));
- }
- @Test
- public void testPutObjectWithECReplicationConfig()
- throws IOException, OS3Exception {
- //GIVEN
- HttpHeaders headers = Mockito.mock(HttpHeaders.class);
- ByteArrayInputStream body =
- new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
- objectEndpoint.setHeaders(headers);
- ECReplicationConfig ecReplicationConfig =
- new ECReplicationConfig("rs-3-2-1024K");
- clientStub.getObjectStore().getS3Bucket(bucketName)
- .setReplicationConfig(ecReplicationConfig);
- Response response = objectEndpoint.put(bucketName, keyName, CONTENT
- .length(), 1, null, body);
-
- assertEquals(ecReplicationConfig,
- clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName)
- .getReplicationConfig());
- OzoneInputStream ozoneInputStream =
- clientStub.getObjectStore().getS3Bucket(bucketName)
- .readKey(keyName);
- String keyContent =
- IOUtils.toString(ozoneInputStream, UTF_8);
- OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName);
+ String keyContent;
+ try (InputStream input = bucket.readKey(KEY_NAME)) {
+ keyContent = IOUtils.toString(input, UTF_8);
+ }
+ assertEquals(content, keyContent);
- assertEquals(200, response.getStatus());
- assertEquals(CONTENT, keyContent);
+ OzoneKeyDetails keyDetails = bucket.getKey(KEY_NAME);
+ assertEquals(replication, keyDetails.getReplicationConfig());
assertNotNull(keyDetails.getMetadata());
- assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG)));
+ assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty();
}
@Test
- public void testPutObjectContentLength() throws IOException, OS3Exception {
+ void testPutObjectContentLength() throws IOException, OS3Exception {
// The contentLength specified when creating the Key should be the same as
// the Content-Length, the key Commit will compare the Content-Length with
// the actual length of the data written.
- HttpHeaders headers = Mockito.mock(HttpHeaders.class);
ByteArrayInputStream body =
new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
- objectEndpoint.setHeaders(headers);
long dataSize = CONTENT.length();
- objectEndpoint.put(bucketName, keyName, dataSize, 0, null, body);
- assertEquals(dataSize, getKeyDataSize(keyName));
+ objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, body);
+ assertEquals(dataSize, getKeyDataSize());
}
@Test
- public void testPutObjectContentLengthForStreaming()
+ void testPutObjectContentLengthForStreaming()
throws IOException, OS3Exception {
- HttpHeaders headers = Mockito.mock(HttpHeaders.class);
- objectEndpoint.setHeaders(headers);
-
String chunkedContent = "0a;chunk-signature=signature\r\n"
+ "1234567890\r\n"
+ "05;chunk-signature=signature\r\n"
@@ -193,22 +188,19 @@ public void testPutObjectContentLengthForStreaming()
when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER))
.thenReturn("15");
- objectEndpoint.put(bucketName, keyName, chunkedContent.length(), 0, null,
+ objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null,
new ByteArrayInputStream(chunkedContent.getBytes(UTF_8)));
- assertEquals(15, getKeyDataSize(keyName));
+ assertEquals(15, getKeyDataSize());
}
- private long getKeyDataSize(String key) throws IOException {
- return clientStub.getObjectStore().getS3Bucket(bucketName)
- .getKey(key).getDataSize();
+ private long getKeyDataSize() throws IOException {
+ return clientStub.getObjectStore().getS3Bucket(BUCKET_NAME)
+ .getKey(KEY_NAME).getDataSize();
}
@Test
- public void testPutObjectWithSignedChunks() throws IOException, OS3Exception {
+ void testPutObjectWithSignedChunks() throws IOException, OS3Exception {
//GIVEN
- HttpHeaders headers = Mockito.mock(HttpHeaders.class);
- objectEndpoint.setHeaders(headers);
-
String chunkedContent = "0a;chunk-signature=signature\r\n"
+ "1234567890\r\n"
+ "05;chunk-signature=signature\r\n"
@@ -220,21 +212,21 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception {
.thenReturn("15");
//WHEN
- Response response = objectEndpoint.put(bucketName, keyName,
+ Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME,
chunkedContent.length(), 1, null,
new ByteArrayInputStream(chunkedContent.getBytes(UTF_8)));
//THEN
OzoneInputStream ozoneInputStream =
- clientStub.getObjectStore().getS3Bucket(bucketName)
- .readKey(keyName);
+ clientStub.getObjectStore().getS3Bucket(BUCKET_NAME)
+ .readKey(KEY_NAME);
String keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
- OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName);
+ OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME);
assertEquals(200, response.getStatus());
assertEquals("1234567890abcde", keyContent);
assertNotNull(keyDetails.getMetadata());
- assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG)));
+ assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty();
}
@Test
@@ -246,12 +238,10 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception
.thenThrow(IOException.class);
when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest);
- HttpHeaders headers = mock(HttpHeaders.class);
ByteArrayInputStream body =
new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
- objectEndpoint.setHeaders(headers);
try {
- objectEndpoint.put(bucketName, keyName, CONTENT
+ objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT
.length(), 1, null, body);
fail("Should throw IOException");
} catch (IOException ignored) {
@@ -263,13 +253,10 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception
}
@Test
- public void testCopyObject() throws IOException, OS3Exception {
+ void testCopyObject() throws IOException, OS3Exception {
// Put object in to source bucket
- HttpHeaders headers = Mockito.mock(HttpHeaders.class);
ByteArrayInputStream body =
new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
- objectEndpoint.setHeaders(headers);
- keyName = "sourceKey";
// Add some custom metadata
MultivaluedMap<String, String> metadataHeaders = new MultivaluedHashMap<>();
@@ -279,20 +266,20 @@ public void testCopyObject() throws IOException, OS3Exception {
// Add COPY metadata directive (default)
when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY");
- Response response = objectEndpoint.put(bucketName, keyName,
+ Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME,
CONTENT.length(), 1, null, body);
OzoneInputStream ozoneInputStream = clientStub.getObjectStore()
- .getS3Bucket(bucketName)
- .readKey(keyName);
+ .getS3Bucket(BUCKET_NAME)
+ .readKey(KEY_NAME);
String keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
- OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName);
+ OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME);
assertEquals(200, response.getStatus());
assertEquals(CONTENT, keyContent);
assertNotNull(keyDetails.getMetadata());
- assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG)));
+ assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty();
assertEquals("custom-value-1", keyDetails.getMetadata().get("custom-key-1"));
assertEquals("custom-value-2", keyDetails.getMetadata().get("custom-key-2"));
@@ -303,25 +290,25 @@ public void testCopyObject() throws IOException, OS3Exception {
// Add copy header, and then call put
when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
- bucketName + "/" + urlEncode(keyName));
+ BUCKET_NAME + "/" + urlEncode(KEY_NAME));
- response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1,
+ response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1,
null, body);
// Check destination key and response
- ozoneInputStream = clientStub.getObjectStore().getS3Bucket(destBucket)
- .readKey(destkey);
+ ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME)
+ .readKey(DEST_KEY);
keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
OzoneKeyDetails sourceKeyDetails = clientStub.getObjectStore()
- .getS3Bucket(bucketName).getKey(keyName);
+ .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME);
OzoneKeyDetails destKeyDetails = clientStub.getObjectStore()
- .getS3Bucket(destBucket).getKey(destkey);
+ .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY);
assertEquals(200, response.getStatus());
assertEquals(CONTENT, keyContent);
assertNotNull(keyDetails.getMetadata());
- assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG)));
+ assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty();
// Source key eTag should remain unchanged and the dest key should have
// the same Etag since the key content is the same
assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG));
@@ -335,17 +322,17 @@ public void testCopyObject() throws IOException, OS3Exception {
metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1");
metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2");
- response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1,
- null, body);
+ response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1,
+ null, body);
- ozoneInputStream = clientStub.getObjectStore().getS3Bucket(destBucket)
- .readKey(destkey);
+ ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME)
+ .readKey(DEST_KEY);
keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
sourceKeyDetails = clientStub.getObjectStore()
- .getS3Bucket(bucketName).getKey(keyName);
+ .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME);
destKeyDetails = clientStub.getObjectStore()
- .getS3Bucket(destBucket).getKey(destkey);
+ .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY);
assertEquals(200, response.getStatus());
assertEquals(CONTENT, keyContent);
@@ -363,46 +350,40 @@ public void testCopyObject() throws IOException, OS3Exception {
// wrong copy metadata directive
when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID");
OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
- destBucket, destkey, CONTENT.length(), 1, null, body),
- "test copy object failed");
- assertEquals(400, e.getHttpCode());
- assertEquals("InvalidArgument", e.getCode());
- assertTrue(e.getErrorMessage().contains("The metadata directive specified is invalid"));
+ DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body),
+ "test copy object failed");
+ assertThat(e.getHttpCode()).isEqualTo(400);
+ assertThat(e.getCode()).isEqualTo("InvalidArgument");
+ assertThat(e.getErrorMessage()).contains("The metadata directive specified is invalid");
when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY");
- // source and dest same
- e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
- bucketName, keyName, CONTENT.length(), 1, null, body),
- "test copy object failed");
- assertTrue(e.getErrorMessage().contains("This copy request is illegal"));
-
// source bucket not found
when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
- nonexist + "/" + urlEncode(keyName));
- e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(destBucket,
- destkey, CONTENT.length(), 1, null, body), "test copy object failed");
- assertTrue(e.getCode().contains("NoSuchBucket"));
+ NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME));
+ e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME,
+ DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed");
+ assertThat(e.getCode()).contains("NoSuchBucket");
// dest bucket not found
when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
- bucketName + "/" + urlEncode(keyName));
- e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist,
- destkey, CONTENT.length(), 1, null, body), "test copy object failed");
+ BUCKET_NAME + "/" + urlEncode(KEY_NAME));
+ e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET,
+ DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed");
assertTrue(e.getCode().contains("NoSuchBucket"));
//Both source and dest bucket not found
when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
- nonexist + "/" + urlEncode(keyName));
- e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist,
- destkey, CONTENT.length(), 1, null, body), "test copy object failed");
+ NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME));
+ e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET,
+ DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed");
assertTrue(e.getCode().contains("NoSuchBucket"));
// source key not found
when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
- bucketName + "/" + urlEncode(nonexist));
+ BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET));
e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
- "nonexistent", keyName, CONTENT.length(), 1, null, body),
+ "nonexistent", KEY_NAME, CONTENT.length(), 1, null, body),
"test copy object failed");
assertTrue(e.getCode().contains("NoSuchBucket"));
}
@@ -410,26 +391,23 @@ public void testCopyObject() throws IOException, OS3Exception {
@Test
public void testCopyObjectMessageDigestResetDuringException() throws IOException, OS3Exception {
// Put object in to source bucket
- HttpHeaders headers = mock(HttpHeaders.class);
ByteArrayInputStream body =
new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
- objectEndpoint.setHeaders(headers);
- keyName = "sourceKey";
- Response response = objectEndpoint.put(bucketName, keyName,
+ Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME,
CONTENT.length(), 1, null, body);
OzoneInputStream ozoneInputStream = clientStub.getObjectStore()
- .getS3Bucket(bucketName)
- .readKey(keyName);
+ .getS3Bucket(BUCKET_NAME)
+ .readKey(KEY_NAME);
String keyContent = IOUtils.toString(ozoneInputStream, UTF_8);
- OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName);
+ OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME);
assertEquals(200, response.getStatus());
assertEquals(CONTENT, keyContent);
assertNotNull(keyDetails.getMetadata());
- assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG)));
+ assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty();
MessageDigest messageDigest = mock(MessageDigest.class);
try (MockedStatic<IOUtils> mocked = mockStatic(IOUtils.class)) {
@@ -440,10 +418,10 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException
// Add copy header, and then call put
when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
- bucketName + "/" + urlEncode(keyName));
+ BUCKET_NAME + "/" + urlEncode(KEY_NAME));
try {
- objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1,
+ objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1,
null, body);
fail("Should throw IOException");
} catch (IOException ignored) {
@@ -455,113 +433,69 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException
}
@Test
- public void testInvalidStorageType() throws IOException {
- HttpHeaders headers = Mockito.mock(HttpHeaders.class);
+ void testInvalidStorageType() {
ByteArrayInputStream body =
new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
- objectEndpoint.setHeaders(headers);
- keyName = "sourceKey";
when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random");
OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
- bucketName, keyName, CONTENT.length(), 1, null, body));
+ BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body));
assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(),
e.getErrorMessage());
assertEquals("random", e.getResource());
}
@Test
- public void testEmptyStorageType() throws IOException, OS3Exception {
- HttpHeaders headers = Mockito.mock(HttpHeaders.class);
+ void testEmptyStorageType() throws IOException, OS3Exception {
ByteArrayInputStream body =
new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
- objectEndpoint.setHeaders(headers);
- keyName = "sourceKey";
when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("");
- objectEndpoint.put(bucketName, keyName, CONTENT
+ objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT
.length(), 1, null, body);
OzoneKeyDetails key =
- clientStub.getObjectStore().getS3Bucket(bucketName)
- .getKey(keyName);
-
+ clientStub.getObjectStore().getS3Bucket(BUCKET_NAME)
+ .getKey(KEY_NAME);
//default type is set
- assertEquals(ReplicationType.RATIS, key.getReplicationType());
+ assertEquals(
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
+ key.getReplicationConfig());
}
@Test
- public void testDirectoryCreation() throws IOException,
+ void testDirectoryCreation() throws IOException,
OS3Exception {
// GIVEN
final String path = "dir";
- final long length = 0L;
- final int partNumber = 0;
- final String uploadId = "";
- final InputStream body = null;
- final HttpHeaders headers = Mockito.mock(HttpHeaders.class);
- final ObjectEndpoint objEndpoint = new ObjectEndpoint();
- objEndpoint.setOzoneConfiguration(new OzoneConfiguration());
- objEndpoint.setHeaders(headers);
- final OzoneClient client = Mockito.mock(OzoneClient.class);
- objEndpoint.setClient(client);
- final ObjectStore objectStore = Mockito.mock(ObjectStore.class);
- final OzoneVolume volume = Mockito.mock(OzoneVolume.class);
- final OzoneBucket bucket = Mockito.mock(OzoneBucket.class);
- final ClientProtocol protocol = Mockito.mock(ClientProtocol.class);
// WHEN
- when(client.getObjectStore()).thenReturn(objectStore);
- when(client.getObjectStore().getS3Volume()).thenReturn(volume);
- when(volume.getBucket(bucketName)).thenReturn(bucket);
- when(bucket.getBucketLayout())
- .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED);
- when(client.getProxy()).thenReturn(protocol);
- final Response response = objEndpoint.put(bucketName, path, length,
- partNumber, uploadId, body);
+ try (Response response = objectEndpoint.put(fsoBucket.getName(), path,
+ 0L, 0, "", null)) {
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ }
// THEN
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- Mockito.verify(protocol).createDirectory(any(), eq(bucketName), eq(path));
+ OzoneKeyDetails key = fsoBucket.getKey(path);
+ assertThat(key.isFile()).as("directory").isFalse();
}
@Test
- public void testDirectoryCreationOverFile() throws IOException {
+ void testDirectoryCreationOverFile() throws IOException, OS3Exception {
// GIVEN
- final String path = "key";
- final long length = 0L;
- final int partNumber = 0;
- final String uploadId = "";
+ final String path = "dir";
final ByteArrayInputStream body =
- new ByteArrayInputStream("content".getBytes(UTF_8));
- final HttpHeaders headers = Mockito.mock(HttpHeaders.class);
- final ObjectEndpoint objEndpoint = new ObjectEndpoint();
- objEndpoint.setOzoneConfiguration(new OzoneConfiguration());
- objEndpoint.setHeaders(headers);
- final OzoneClient client = Mockito.mock(OzoneClient.class);
- objEndpoint.setClient(client);
- final ObjectStore objectStore = Mockito.mock(ObjectStore.class);
- final OzoneVolume volume = Mockito.mock(OzoneVolume.class);
- final OzoneBucket bucket = Mockito.mock(OzoneBucket.class);
- final ClientProtocol protocol = Mockito.mock(ClientProtocol.class);
+ new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
+ objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", body);
// WHEN
- when(client.getObjectStore()).thenReturn(objectStore);
- when(client.getObjectStore().getS3Volume()).thenReturn(volume);
- when(volume.getBucket(bucketName)).thenReturn(bucket);
- when(bucket.getBucketLayout())
- .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED);
- when(client.getProxy()).thenReturn(protocol);
- doThrow(new OMException(OMException.ResultCodes.FILE_ALREADY_EXISTS))
- .when(protocol)
- .createDirectory(any(), any(), any());
+ final OS3Exception exception = assertThrows(OS3Exception.class,
+ () -> objectEndpoint
+ .put(FSO_BUCKET_NAME, path, 0, 0, "", null)
+ .close());
// THEN
- final OS3Exception exception = assertThrows(OS3Exception.class,
- () -> objEndpoint
- .put(bucketName, path, length, partNumber, uploadId, body));
- assertEquals("Conflict", exception.getCode());
- assertEquals(409, exception.getHttpCode());
- Mockito.verify(protocol, times(1)).createDirectory(any(), any(), any());
+ assertEquals(S3ErrorTable.NO_OVERWRITE.getCode(), exception.getCode());
+ assertEquals(S3ErrorTable.NO_OVERWRITE.getHttpCode(), exception.getHttpCode());
}
}
From 29d1ab8bdffa6a054489239c8f842caa3dceaeaf Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Fri, 29 Mar 2024 09:29:56 +0100
Subject: [PATCH 3/8] HDDS-10570. S3A: `fs -touch` creates directory instead of
empty file in FSO bucket (#6452)
(cherry picked from commit 85c9c97fc5d11018233dea7d82726b86ad0e878e)
---
.../dist/src/main/compose/ozone-ha/test.sh | 8 ++++---
.../src/main/smoketest/s3/commonawslib.robot | 24 +++++++++----------
.../src/main/smoketest/s3/objecthead.robot | 17 ++++++-------
.../src/main/smoketest/s3/objectputget.robot | 24 +++----------------
.../ozone/s3/endpoint/ObjectEndpoint.java | 4 +++-
.../ozone/s3/endpoint/TestObjectPut.java | 8 ++++---
6 files changed, 36 insertions(+), 49 deletions(-)
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
index 1361a4c0c335..976e490d32ca 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
@@ -38,9 +38,11 @@ execute_robot_test ${SCM} -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-ofs-link
## Exclude virtual-host tests. This is tested separately as it requires additional config.
exclude="--exclude virtual-host"
for bucket in generated; do
- execute_robot_test ${SCM} -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3
- # some tests are independent of the bucket type, only need to be run once
- exclude="--exclude virtual-host --exclude no-bucket-type"
+ for layout in OBJECT_STORE LEGACY FILE_SYSTEM_OPTIMIZED; do
+ execute_robot_test ${SCM} -v BUCKET:${bucket} -v BUCKET_LAYOUT:${layout} -N s3-${layout}-${bucket} ${exclude} s3
+ # some tests are independent of the bucket type, only need to be run once
+ exclude="--exclude virtual-host --exclude no-bucket-type"
+ done
done
execute_robot_test ${SCM} freon
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
index 840fb963d8d1..b20537014dd1 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
@@ -23,6 +23,7 @@ ${ENDPOINT_URL} http://s3g:9878
${OZONE_S3_HEADER_VERSION} v4
${OZONE_S3_SET_CREDENTIALS} true
${BUCKET} generated
+${BUCKET_LAYOUT} OBJECT_STORE
${KEY_NAME} key1
${OZONE_S3_TESTS_SET_UP} ${FALSE}
${OZONE_AWS_ACCESS_KEY_ID} ${EMPTY}
@@ -127,16 +128,12 @@ Create bucket with name
${result} = Execute AWSS3APICli create-bucket --bucket ${bucket}
Should contain ${result} Location
Should contain ${result} ${bucket}
-Create legacy bucket
- ${postfix} = Generate Ozone String
- ${legacy_bucket} = Set Variable legacy-bucket-${postfix}
- ${result} = Execute and checkrc ozone sh bucket create -l LEGACY s3v/${legacy_bucket} 0
- [Return] ${legacy_bucket}
-Create obs bucket
+Create bucket with layout
+ [Arguments] ${layout}
${postfix} = Generate Ozone String
- ${bucket} = Set Variable obs-bucket-${postfix}
- ${result} = Execute and checkrc ozone sh bucket create -l OBJECT_STORE s3v/${bucket} 0
+ ${bucket} = Set Variable bucket-${postfix}
+ ${result} = Execute ozone sh bucket create --layout ${layout} s3v/${bucket}
[Return] ${bucket}
Setup s3 tests
@@ -144,7 +141,7 @@ Setup s3 tests
Run Keyword Generate random prefix
Run Keyword Install aws cli
Run Keyword if '${OZONE_S3_SET_CREDENTIALS}' == 'true' Setup v4 headers
- Run Keyword if '${BUCKET}' == 'generated' Create generated bucket
+ Run Keyword if '${BUCKET}' == 'generated' Create generated bucket ${BUCKET_LAYOUT}
Run Keyword if '${BUCKET}' == 'link' Setup links for S3 tests
Run Keyword if '${BUCKET}' == 'encrypted' Create encrypted bucket
Run Keyword if '${BUCKET}' == 'erasure' Create EC bucket
@@ -154,18 +151,19 @@ Setup links for S3 tests
${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/link
Return From Keyword If ${exists}
Execute ozone sh volume create o3://${OM_SERVICE_ID}/legacy
- Execute ozone sh bucket create o3://${OM_SERVICE_ID}/legacy/source-bucket
+ Execute ozone sh bucket create --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/legacy/source-bucket
Create link link
Create generated bucket
- ${BUCKET} = Create bucket
+ [Arguments] ${layout}=OBJECT_STORE
+ ${BUCKET} = Create bucket with layout ${layout}
Set Global Variable ${BUCKET}
Create encrypted bucket
Return From Keyword if '${SECURITY_ENABLED}' == 'false'
${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/encrypted
Return From Keyword If ${exists}
- Execute ozone sh bucket create -k ${KEY_NAME} o3://${OM_SERVICE_ID}/s3v/encrypted
+ Execute ozone sh bucket create -k ${KEY_NAME} --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/encrypted
Create link
[arguments] ${bucket}
@@ -175,7 +173,7 @@ Create link
Create EC bucket
${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/erasure
Return From Keyword If ${exists}
- Execute ozone sh bucket create --replication rs-3-2-1024k --type EC o3://${OM_SERVICE_ID}/s3v/erasure
+ Execute ozone sh bucket create --replication rs-3-2-1024k --type EC --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/erasure
Generate random prefix
${random} = Generate Ozone String
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot
index be0582edd1f2..66f3461b01dd 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot
@@ -40,22 +40,23 @@ Head object in non existing bucket
${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET}-non-existent --key ${PREFIX}/headobject/key=value/f1 255
Should contain ${result} 404
Should contain ${result} Not Found
+
Head object where path is a directory
- ${legacy-bucket} = Create legacy bucket
- ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${legacy-bucket} --key ${PREFIX}/headobject/keyvalue/f1 --body /tmp/testfile 0
- ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${legacy-bucket} --key ${PREFIX}/headobject/keyvalue/ 255
+ Pass Execution If '${BUCKET_LAYOUT}' == 'FILE_SYSTEM_OPTIMIZED' does not apply to FSO buckets
+ ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${BUCKET} --key ${PREFIX}/headobject/keyvalue/f1 --body /tmp/testfile 0
+ ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/headobject/keyvalue/ 255
Should contain ${result} 404
Should contain ${result} Not Found
Head directory objects
- ${obs-bucket} = Create obs bucket
- ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${obs-bucket} --key ${PREFIX}/mydir/ --body /tmp/testfile 0
- ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${obs-bucket} --key ${PREFIX}/mydir 255
+ Pass Execution If '${BUCKET_LAYOUT}' == 'FILE_SYSTEM_OPTIMIZED' does not apply to FSO buckets
+ ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${BUCKET} --key ${PREFIX}/mydir/ --body /tmp/testfile 0
+ ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/mydir 255
Should contain ${result} 404
Should contain ${result} Not Found
- ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${obs-bucket} --key ${PREFIX}/mydir/ 0
+ ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/mydir/ 0
Head non existing key
${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/non-existent 255
Should contain ${result} 404
- Should contain ${result} Not Found
\ No newline at end of file
+ Should contain ${result} Not Found
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
index 5d340e53324a..bbff89e71f83 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
@@ -44,6 +44,8 @@ Put object to s3
Get object from s3
${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile.result
Compare files /tmp/testfile /tmp/testfile.result
+ ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte /tmp/zerobyte.result
+ Compare files /tmp/zerobyte /tmp/zerobyte.result
#This test depends on the previous test case. Can't be executed alone
Get object with wrong signature
@@ -151,34 +153,14 @@ Incorrect values for end and start offset
Should Be Equal ${expectedData} ${actualData}
Zero byte file
- ${result} = Execute ozone sh bucket info /s3v/${BUCKET}
- ${linked} = Execute echo '${result}' | jq -j '.sourceVolume,"/",.sourceBucket'
- ${eval} = Evaluate "source" in """${linked}"""
- IF ${eval} == ${True}
- ${result} = Execute ozone sh bucket info ${linked}
- END
- ${fsolayout} = Evaluate "OPTIMIZED" in """${result}"""
-
${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte --range bytes=0-0 /tmp/testfile2.result 255
- IF ${fsolayout} == ${True}
- Should contain ${result} NoSuchKey
- ELSE
Should contain ${result} InvalidRange
- END
${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte --range bytes=0-1 /tmp/testfile2.result 255
- IF ${fsolayout} == ${True}
- Should contain ${result} NoSuchKey
- ELSE
Should contain ${result} InvalidRange
- END
${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte --range bytes=0-10000 /tmp/testfile2.result 255
- IF ${fsolayout} == ${True}
- Should contain ${result} NoSuchKey
- ELSE
Should contain ${result} InvalidRange
- END
Create file with user defined metadata
Execute echo "Randomtext" > /tmp/testfile2
@@ -258,4 +240,4 @@ Create key twice with different content and expect different ETags
# clean up
Execute AWSS3Cli rm s3://${BUCKET}/test_key_to_check_etag_differences
Execute rm -rf /tmp/file1
- Execute rm -rf /tmp/file2
\ No newline at end of file
+ Execute rm -rf /tmp/file2
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 44b53f5bd877..731da3a1b3d6 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -275,7 +275,9 @@ public Response put(
boolean hasAmzDecodedLengthZero = amzDecodedLength != null &&
Long.parseLong(amzDecodedLength) == 0;
if (canCreateDirectory &&
- (length == 0 || hasAmzDecodedLengthZero)) {
+ (length == 0 || hasAmzDecodedLengthZero) &&
+ StringUtils.endsWith(keyPath, "/")
+ ) {
s3GAction = S3GAction.CREATE_DIRECTORY;
getClientProtocol()
.createDirectory(volume.getName(), bucketName, keyPath);
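Note for readers skimming the hunk above: the extra StringUtils.endsWith check means an empty PUT alone no longer creates a directory; the key must also end with "/". A minimal self-contained sketch of the rule (class and method names here are illustrative, not part of the patch):

    final class DirectoryPutCheck {
      // Mirrors the tightened condition in ObjectEndpoint#put: the payload
      // must be empty AND the key path must have a trailing slash.
      static boolean isDirectoryPut(String keyPath, long length,
          String amzDecodedLength) {
        boolean emptyPayload = length == 0
            || (amzDecodedLength != null
                && Long.parseLong(amzDecodedLength) == 0);
        return emptyPayload && keyPath != null && keyPath.endsWith("/");
      }
    }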
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
index bdb657cf3ce2..2074eed69887 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
@@ -101,7 +101,9 @@ static Stream<Arguments> argumentsForPutObject() {
ReplicationConfig ratis3 = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
ECReplicationConfig ec = new ECReplicationConfig("rs-3-2-1024K");
return Stream.of(
+ Arguments.of(0, ratis3),
Arguments.of(10, ratis3),
+ Arguments.of(0, ec),
Arguments.of(10, ec)
);
}
@@ -467,7 +469,7 @@ void testEmptyStorageType() throws IOException, OS3Exception {
void testDirectoryCreation() throws IOException,
OS3Exception {
// GIVEN
- final String path = "dir";
+ final String path = "dir/";
// WHEN
try (Response response = objectEndpoint.put(fsoBucket.getName(), path,
@@ -483,7 +485,7 @@ void testDirectoryCreation() throws IOException,
@Test
void testDirectoryCreationOverFile() throws IOException, OS3Exception {
// GIVEN
- final String path = "dir";
+ final String path = "key";
final ByteArrayInputStream body =
new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", body);
@@ -491,7 +493,7 @@ void testDirectoryCreationOverFile() throws IOException, OS3Exception {
// WHEN
final OS3Exception exception = assertThrows(OS3Exception.class,
() -> objectEndpoint
- .put(FSO_BUCKET_NAME, path, 0, 0, "", null)
+ .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null)
.close());
// THEN
From 137ba8e67db051b55454f27c000d47a6d684bbf0 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Tue, 16 Apr 2024 20:43:55 +0800
Subject: [PATCH 4/8] HDDS-10615. ETag change detected in S3A contract test
(#6519)
(cherry picked from commit 92f2449ccaf6c45c464d92b2584d9f7efe226142)
---
.../hadoop/ozone/client/OzoneBucket.java | 51 +++++++++----------
.../ozone/om/helpers/BasicOmKeyInfo.java | 27 ++++++++--
.../dist/src/main/compose/common/s3a-test.sh | 4 +-
.../src/main/proto/OmClientProtocol.proto | 1 +
.../ozone/om/request/RequestAuditor.java | 7 +++
.../ozone/s3/endpoint/BucketEndpoint.java | 6 ++-
.../ozone/s3/endpoint/ObjectEndpoint.java | 4 +-
7 files changed, 64 insertions(+), 36 deletions(-)
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 6cb69ca77bb1..b831ae8fee50 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -62,6 +62,7 @@
import java.util.NoSuchElementException;
import java.util.stream.Collectors;
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
@@ -1271,25 +1272,33 @@ protected void initDelimiterKeyPrefix() {
protected List<OzoneKey> buildKeysWithKeyPrefix(
List<OzoneFileStatusLight> statuses) {
return statuses.stream()
- .map(status -> {
- BasicOmKeyInfo keyInfo = status.getKeyInfo();
- String keyName = keyInfo.getKeyName();
- if (status.isDirectory()) {
- // add trailing slash to represent directory
- keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
- }
- return new OzoneKey(keyInfo.getVolumeName(),
- keyInfo.getBucketName(), keyName,
- keyInfo.getDataSize(), keyInfo.getCreationTime(),
- keyInfo.getModificationTime(),
- keyInfo.getReplicationConfig(), keyInfo.isFile());
- })
+ .map(OzoneBucket::toOzoneKey)
.filter(key -> StringUtils.startsWith(key.getName(), getKeyPrefix()))
.collect(Collectors.toList());
}
}
+ private static OzoneKey toOzoneKey(OzoneFileStatusLight status) {
+ BasicOmKeyInfo keyInfo = status.getKeyInfo();
+ String keyName = keyInfo.getKeyName();
+ final Map<String, String> metadata;
+ if (status.isDirectory()) {
+ // add trailing slash to represent directory
+ keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
+ metadata = Collections.emptyMap();
+ } else {
+ metadata = Collections.singletonMap(ETAG, keyInfo.getETag());
+ }
+ return new OzoneKey(keyInfo.getVolumeName(),
+ keyInfo.getBucketName(), keyName,
+ keyInfo.getDataSize(), keyInfo.getCreationTime(),
+ keyInfo.getModificationTime(),
+ keyInfo.getReplicationConfig(),
+ metadata,
+ keyInfo.isFile());
+ }
+
/**
* An Iterator to iterate over {@link OzoneKey} list.
@@ -1657,21 +1666,7 @@ private boolean getChildrenKeys(String keyPrefix, String startKey,
for (int indx = 0; indx < statuses.size(); indx++) {
OzoneFileStatusLight status = statuses.get(indx);
BasicOmKeyInfo keyInfo = status.getKeyInfo();
- String keyName = keyInfo.getKeyName();
-
- OzoneKey ozoneKey;
- // Add dir to the dirList
- if (status.isDirectory()) {
- // add trailing slash to represent directory
- keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
- }
- ozoneKey = new OzoneKey(keyInfo.getVolumeName(),
- keyInfo.getBucketName(), keyName,
- keyInfo.getDataSize(), keyInfo.getCreationTime(),
- keyInfo.getModificationTime(),
- keyInfo.getReplicationConfig(),
- keyInfo.isFile());
-
+ OzoneKey ozoneKey = toOzoneKey(status);
keysResultList.add(ozoneKey);
if (status.isDirectory()) {
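The extracted toOzoneKey helper centralizes a rule previously duplicated in buildKeysWithKeyPrefix and getChildrenKeys: directories are listed with a trailing "/" and empty metadata, while files expose their stored ETag. A simplified sketch with flattened types (not the actual Ozone classes):

    import java.util.Collections;
    import java.util.Map;

    final class KeyViewSketch {
      // Directories carry no metadata; files expose the ETag metadata entry.
      static Map<String, String> metadataFor(boolean isDirectory, String eTag) {
        return isDirectory
            ? Collections.emptyMap()
            : Collections.singletonMap("ETag", eTag);
      }

      // Directory names are normalized with a trailing slash.
      static String displayName(String keyName, boolean isDirectory) {
        return isDirectory && !keyName.endsWith("/") ? keyName + "/" : keyName;
      }
    }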
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java
index 9c9a5027774f..679f4b72c079 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java
@@ -25,6 +25,8 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BasicKeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest;
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
+
/**
* Lightweight OmKeyInfo class.
*/
@@ -38,11 +40,12 @@ public class BasicOmKeyInfo {
private long modificationTime;
private ReplicationConfig replicationConfig;
private boolean isFile;
+ private final String eTag;
@SuppressWarnings("parameternumber")
public BasicOmKeyInfo(String volumeName, String bucketName, String keyName,
long dataSize, long creationTime, long modificationTime,
- ReplicationConfig replicationConfig, boolean isFile) {
+ ReplicationConfig replicationConfig, boolean isFile, String eTag) {
this.volumeName = volumeName;
this.bucketName = bucketName;
this.keyName = keyName;
@@ -51,6 +54,7 @@ public BasicOmKeyInfo(String volumeName, String bucketName, String keyName,
this.modificationTime = modificationTime;
this.replicationConfig = replicationConfig;
this.isFile = isFile;
+ this.eTag = eTag;
}
public String getVolumeName() {
@@ -85,6 +89,10 @@ public boolean isFile() {
return isFile;
}
+ public String getETag() {
+ return eTag;
+ }
+
/**
* Builder of BasicOmKeyInfo.
*/
@@ -97,6 +105,7 @@ public static class Builder {
private long modificationTime;
private ReplicationConfig replicationConfig;
private boolean isFile;
+ private String eTag;
public Builder setVolumeName(String volumeName) {
this.volumeName = volumeName;
@@ -138,9 +147,14 @@ public Builder setIsFile(boolean isFile) {
return this;
}
+ public Builder setETag(String etag) {
+ this.eTag = etag;
+ return this;
+ }
+
public BasicOmKeyInfo build() {
return new BasicOmKeyInfo(volumeName, bucketName, keyName, dataSize,
- creationTime, modificationTime, replicationConfig, isFile);
+ creationTime, modificationTime, replicationConfig, isFile, eTag);
}
}
@@ -157,6 +171,9 @@ public BasicKeyInfo getProtobuf() {
} else {
builder.setFactor(ReplicationConfig.getLegacyFactor(replicationConfig));
}
+ if (eTag != null) {
+ builder.setETag(eTag);
+ }
return builder.build();
}
@@ -181,6 +198,7 @@ public static BasicOmKeyInfo getFromProtobuf(BasicKeyInfo basicKeyInfo,
basicKeyInfo.getType(),
basicKeyInfo.getFactor(),
basicKeyInfo.getEcReplicationConfig()))
+ .setETag(basicKeyInfo.getETag())
.setIsFile(!keyName.endsWith("/"));
return builder.build();
@@ -205,6 +223,7 @@ public static BasicOmKeyInfo getFromProtobuf(String volumeName,
basicKeyInfo.getType(),
basicKeyInfo.getFactor(),
basicKeyInfo.getEcReplicationConfig()))
+ .setETag(basicKeyInfo.getETag())
.setIsFile(!keyName.endsWith("/"));
return builder.build();
@@ -225,6 +244,7 @@ public boolean equals(Object o) {
creationTime == basicOmKeyInfo.creationTime &&
modificationTime == basicOmKeyInfo.modificationTime &&
replicationConfig.equals(basicOmKeyInfo.replicationConfig) &&
+ Objects.equals(eTag, basicOmKeyInfo.eTag) &&
isFile == basicOmKeyInfo.isFile;
}
@@ -241,6 +261,7 @@ public static BasicOmKeyInfo fromOmKeyInfo(OmKeyInfo omKeyInfo) {
omKeyInfo.getCreationTime(),
omKeyInfo.getModificationTime(),
omKeyInfo.getReplicationConfig(),
- omKeyInfo.isFile());
+ omKeyInfo.isFile(),
+ omKeyInfo.getMetadata().get(ETAG));
}
}
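Assuming the Builder setters visible in this file (only setETag is new), a caller populating the added field might look like this hedged sketch:

    BasicOmKeyInfo info = new BasicOmKeyInfo.Builder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("dir/key1")
        .setIsFile(true)
        .setETag("d41d8cd98f00b204e9800998ecf8427e")  // MD5-style ETag value
        .build();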
diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
index 85dbc5feced2..d58b91b4e526 100644
--- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
+++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
@@ -79,11 +79,11 @@ EOF
# Some tests are skipped due to known issues.
# - ITestS3AContractDistCp: HDDS-10616
- # - ITestS3AContractEtag, ITestS3AContractRename: HDDS-10615
# - ITestS3AContractGetFileStatusV1List: HDDS-10617
# - ITestS3AContractMkdir: HDDS-10572
+ # - ITestS3AContractRename: HDDS-10665
mvn -B -V --fail-never --no-transfer-progress \
- -Dtest='ITestS3AContract*, !ITestS3AContractDistCp, !ITestS3AContractEtag, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractMkdir, !ITestS3AContractRename' \
+ -Dtest='ITestS3AContract*, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractMkdir, !ITestS3AContractRename' \
clean test
local target="${RESULT_DIR}/junit/${bucket}/target"
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 8b931c49c961..7f15ee996339 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1113,6 +1113,7 @@ message BasicKeyInfo {
optional hadoop.hdds.ReplicationType type = 5;
optional hadoop.hdds.ReplicationFactor factor = 6;
optional hadoop.hdds.ECReplicationConfig ecReplicationConfig = 7;
+ optional string eTag = 8;
}
message DirectoryInfo {
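Reading the new field on the Java side follows standard protobuf codegen for an optional string: an unset field reads as "", so callers that want null should guard with hasETag(). A hedged one-liner, assuming the generated BasicKeyInfo accessors:

    // null when the OM never recorded an ETag for this key
    String eTag = basicKeyInfo.hasETag() ? basicKeyInfo.getETag() : null;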
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
index c0872db0fd61..78e67bb8ed5c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
@@ -32,6 +32,8 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.UserInfo;
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
+
/**
* Interface for OM Requests to convert to audit objects.
*/
@@ -80,6 +82,11 @@ default Map<String, String> buildKeyArgsAuditMap(KeyArgs keyArgs) {
auditMap.put(OzoneConsts.REPLICATION_CONFIG,
ECReplicationConfig.toString(keyArgs.getEcReplicationConfig()));
}
+ for (HddsProtos.KeyValue item : keyArgs.getMetadataList()) {
+ if (ETAG.equals(item.getKey())) {
+ auditMap.put(ETAG, item.getValue());
+ }
+ }
return auditMap;
}
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index b8cd56d5f954..80933c67d158 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -69,6 +69,7 @@
import java.util.List;
import java.util.Set;
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -714,7 +715,10 @@ private void addKey(ListObjectResponse response, OzoneKey next) {
keyMetadata.setKey(EncodingTypeObject.createNullable(next.getName(),
response.getEncodingType()));
keyMetadata.setSize(next.getDataSize());
- keyMetadata.setETag("" + next.getModificationTime());
+ String eTag = next.getMetadata().get(ETAG);
+ if (eTag != null) {
+ keyMetadata.setETag(ObjectEndpoint.wrapInQuotes(eTag));
+ }
if (next.getReplicationType().toString().equals(ReplicationType
.STAND_ALONE.toString())) {
keyMetadata.setStorageClass(S3StorageType.REDUCED_REDUNDANCY.toString());
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 731da3a1b3d6..b77f6b733ebe 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -602,7 +602,7 @@ public Response head(
// Should not return ETag header if the ETag is not set
// doing so will result in "null" string being returned instead
// which breaks some AWS SDK implementation
- response.header(ETAG, "" + wrapInQuotes(key.getMetadata().get(ETAG)));
+ response.header(ETAG, wrapInQuotes(key.getMetadata().get(ETAG)));
}
addLastModifiedDate(response, key);
@@ -1348,7 +1348,7 @@ public boolean isDatastreamEnabled() {
return datastreamEnabled;
}
- private String wrapInQuotes(String value) {
+ static String wrapInQuotes(String value) {
return "\"" + value + "\"";
}
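Making wrapInQuotes package-visible lets BucketEndpoint format listing ETags exactly as head() does: the hash wrapped in double quotes, the conventional HTTP ETag header form. Illustrative check only:

    String header = ObjectEndpoint.wrapInQuotes("abc123");
    assert "\"abc123\"".equals(header);  // ETag: "abc123"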
From 79d0dffbbd567d784855740680cbc9a364fc1462 Mon Sep 17 00:00:00 2001
From: Tejaskriya <87555809+tejaskriya@users.noreply.github.com>
Date: Fri, 7 Jun 2024 15:17:58 +0800
Subject: [PATCH 5/8] HDDS-10572. Implement multiDelete using
OMKeysDeleteRequest (#6751)
(cherry picked from commit 4b8767a9343f392f0d89b88d38b58f1969e12dc8)
---
.../hadoop/ozone/client/OzoneBucket.java | 11 ++++
.../ozone/client/protocol/ClientProtocol.java | 13 ++++
.../hadoop/ozone/client/rpc/RpcClient.java | 13 ++++
.../hadoop/ozone/om/helpers/ErrorInfo.java | 49 +++++++++++++++
.../om/protocol/OzoneManagerProtocol.java | 17 ++++++
...ManagerProtocolClientSideTranslatorPB.java | 23 ++++++-
.../dist/src/main/compose/common/s3a-test.sh | 3 +-
.../src/main/proto/OmClientProtocol.proto | 7 +++
.../om/request/key/OMKeysDeleteRequest.java | 17 +++++-
.../key/OmKeysDeleteRequestWithFSO.java | 12 +++-
.../request/key/TestOMKeysDeleteRequest.java | 7 +++
.../ozone/s3/endpoint/BucketEndpoint.java | 61 ++++++++++---------
.../ozone/client/ClientProtocolStub.java | 9 +++
.../hadoop/ozone/client/OzoneBucketStub.java | 12 ++++
.../s3/endpoint/TestPermissionCheck.java | 8 ++-
15 files changed, 224 insertions(+), 38 deletions(-)
create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ErrorInfo.java
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index b831ae8fee50..30c889205204 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -621,6 +622,16 @@ public void deleteKeys(List<String> keyList) throws IOException {
proxy.deleteKeys(volumeName, name, keyList);
}
+ /**
+ * Deletes the given list of keys from the bucket.
+ * @param keyList List of the key names to be deleted.
+ * @param quiet flag to not throw exception if delete fails
+ * @throws IOException
+ */
+ public Map<String, ErrorInfo> deleteKeys(List<String> keyList, boolean quiet) throws IOException {
+ return proxy.deleteKeys(volumeName, name, keyList, quiet);
+ }
+
/**
* Rename the keyname from fromKeyName to toKeyName.
* @param fromKeyName The original key name.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 91b407e631ae..2a22ae305df0 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -49,6 +49,7 @@
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.DeleteTenantState;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
@@ -404,6 +405,18 @@ void deleteKeys(String volumeName, String bucketName,
List<String> keyNameList)
throws IOException;
+ /**
+ * Deletes the keys given in the list.
+ * @param volumeName Name of the Volume
+ * @param bucketName Name of the Bucket
+ * @param keyNameList List of the Key
+ * @param quiet flag to not throw exception if delete fails
+ * @throws IOException
+ */
+ Map<String, ErrorInfo> deleteKeys(String volumeName, String bucketName,
+ List<String> keyNameList, boolean quiet)
+ throws IOException;
+
/**
* Renames an existing key within a bucket.
* @param volumeName Name of the Volume
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 4fda47553f54..bd95ac6dffd9 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -98,6 +98,7 @@
import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.DeleteTenantState;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext;
import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -1556,6 +1557,18 @@ public void deleteKeys(
ozoneManagerClient.deleteKeys(omDeleteKeys);
}
+ @Override
+ public Map<String, ErrorInfo> deleteKeys(
+ String volumeName, String bucketName, List<String> keyNameList, boolean quiet)
+ throws IOException {
+ verifyVolumeName(volumeName);
+ verifyBucketName(bucketName);
+ Preconditions.checkNotNull(keyNameList);
+ OmDeleteKeys omDeleteKeys = new OmDeleteKeys(volumeName, bucketName,
+ keyNameList);
+ return ozoneManagerClient.deleteKeys(omDeleteKeys, quiet);
+ }
+
@Override
public void renameKey(String volumeName, String bucketName,
String fromKeyName, String toKeyName) throws IOException {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ErrorInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ErrorInfo.java
new file mode 100644
index 000000000000..7889a568be84
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ErrorInfo.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+/**
+ * Represents the error (code and message) thrown for any operation.
+ */
+public class ErrorInfo {
+ private String code;
+ private String message;
+
+ public ErrorInfo(String errorCode, String errorMessage) {
+ this.code = errorCode;
+ this.message = errorMessage;
+ }
+
+ public String getCode() {
+ return code;
+ }
+
+ public void setCode(String errorCode) {
+ this.code = errorCode;
+ }
+
+ public String getMessage() {
+ return message;
+ }
+
+ public void setMessage(String errorMessage) {
+ this.message = errorMessage;
+ }
+
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index ad394bf4d1db..306d32eb3968 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -21,6 +21,7 @@
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
+import java.util.Map;
import java.util.UUID;
import javax.annotation.Nonnull;
@@ -32,6 +33,7 @@
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.DBUpdates;
import org.apache.hadoop.ozone.om.helpers.DeleteTenantState;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext;
import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -359,6 +361,21 @@ default void deleteKeys(OmDeleteKeys deleteKeys) throws IOException {
"this to be implemented, as write requests use a new approach.");
}
+ /**
+ * Deletes one or more existing keys. Supports deleting multiple
+ * keys in one call as well as a single key. Used when deleting files
+ * through OzoneFileSystem.
+ *
+ * @param deleteKeys
+ * @param quiet - flag to not throw exception if delete fails
+ * @throws IOException
+ */
+ default Map<String, ErrorInfo> deleteKeys(OmDeleteKeys deleteKeys, boolean quiet)
+ throws IOException {
+ throw new UnsupportedOperationException("OzoneManager does not require " +
+ "this to be implemented, as write requests use a new approach.");
+ }
+
/**
* Deletes an existing empty bucket from volume.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index e1d6aca863cb..809c1daab789 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.time.Instant;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
@@ -41,6 +42,7 @@
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.ListKeysLightResult;
import org.apache.hadoop.ozone.om.helpers.ListKeysResult;
import org.apache.hadoop.ozone.om.helpers.DBUpdates;
@@ -943,6 +945,12 @@ public void deleteKey(OmKeyArgs args) throws IOException {
*/
@Override
public void deleteKeys(OmDeleteKeys deleteKeys) throws IOException {
+ deleteKeys(deleteKeys, false);
+ }
+
+ @Override
+ public Map<String, ErrorInfo> deleteKeys(OmDeleteKeys deleteKeys, boolean quiet)
+ throws IOException {
DeleteKeysRequest.Builder req = DeleteKeysRequest.newBuilder();
DeleteKeyArgs deletedKeys = DeleteKeyArgs.newBuilder()
.setBucketName(deleteKeys.getBucket())
@@ -952,9 +960,20 @@ public void deleteKeys(OmDeleteKeys deleteKeys) throws IOException {
OMRequest omRequest = createOMRequest(Type.DeleteKeys)
.setDeleteKeysRequest(req)
.build();
+ OMResponse omResponse = submitRequest(omRequest);
- handleError(submitRequest(omRequest));
-
+ Map<String, ErrorInfo> keyToErrors = new HashMap<>();
+ if (quiet) {
+ List<OzoneManagerProtocolProtos.DeleteKeyError> errors =
+ omResponse.getDeleteKeysResponse().getErrorsList();
+ for (OzoneManagerProtocolProtos.DeleteKeyError deleteKeyError : errors) {
+ keyToErrors.put(deleteKeyError.getKey(),
+ new ErrorInfo(deleteKeyError.getErrorCode(), deleteKeyError.getErrorMsg()));
+ }
+ } else {
+ handleError(omResponse);
+ }
+ return keyToErrors;
}
/**
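A hedged usage sketch of the new quiet mode: with quiet=true the call returns a per-key error map instead of raising on PARTIAL_DELETE (variable names illustrative):

    OmDeleteKeys toDelete =
        new OmDeleteKeys("vol1", "bucket1", Arrays.asList("k1", "k2"));
    Map<String, ErrorInfo> failed = ozoneManagerClient.deleteKeys(toDelete, true);
    failed.forEach((key, err) ->
        LOG.warn("failed to delete {}: {} ({})",
            key, err.getCode(), err.getMessage()));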
diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
index d58b91b4e526..8df0e380a44d 100644
--- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
+++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
@@ -80,10 +80,9 @@ EOF
# Some tests are skipped due to known issues.
# - ITestS3AContractDistCp: HDDS-10616
# - ITestS3AContractGetFileStatusV1List: HDDS-10617
- # - ITestS3AContractMkdir: HDDS-10572
# - ITestS3AContractRename: HDDS-10665
mvn -B -V --fail-never --no-transfer-progress \
- -Dtest='ITestS3AContract*, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractMkdir, !ITestS3AContractRename' \
+ -Dtest='ITestS3AContract*, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractRename' \
clean test
local target="${RESULT_DIR}/junit/${bucket}/target"
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 7f15ee996339..8a20e0a0efe0 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1289,9 +1289,16 @@ message DeleteKeyArgs {
repeated string keys = 3;
}
+message DeleteKeyError {
+ optional string key = 1;
+ optional string errorCode = 2;
+ optional string errorMsg = 3;
+}
+
message DeleteKeysResponse {
optional DeleteKeyArgs unDeletedKeys = 1;
optional bool status = 2;
+ repeated DeleteKeyError errors = 3;
}
message DeleteKeyResponse {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
index aa99ac8afeca..5213df0fdecf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.ozone.om.ResolvedBucket;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
@@ -54,6 +55,7 @@
import java.io.IOException;
import java.nio.file.InvalidPathException;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -92,6 +94,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
Exception exception = null;
OMClientResponse omClientResponse = null;
Result result = null;
+ Map<String, ErrorInfo> keyToError = new HashMap<>();
OMMetrics omMetrics = ozoneManager.getMetrics();
omMetrics.incNumKeyDeletes();
@@ -147,6 +150,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
objectKey);
deleteKeys.remove(keyName);
unDeletedKeys.addKeys(keyName);
+ keyToError.put(keyName, new ErrorInfo(OMException.ResultCodes.KEY_NOT_FOUND.name(), "Key does not exist"));
continue;
}
@@ -164,6 +168,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
LOG.error("Acl check failed for Key: {}", objectKey, ex);
deleteKeys.remove(keyName);
unDeletedKeys.addKeys(keyName);
+ keyToError.put(keyName, new ErrorInfo(OMException.ResultCodes.ACCESS_DENIED.name(), "ACL check failed"));
}
}
@@ -181,7 +186,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
final long volumeId = omMetadataManager.getVolumeId(volumeName);
omClientResponse =
getOmClientResponse(ozoneManager, omKeyInfoList, dirList, omResponse,
- unDeletedKeys, deleteStatus, omBucketInfo, volumeId);
+ unDeletedKeys, keyToError, deleteStatus, omBucketInfo, volumeId);
result = Result.SUCCESS;
@@ -195,6 +200,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
// Add all keys which are failed due to any other exception .
for (int i = indexFailed; i < length; i++) {
unDeletedKeys.addKeys(deleteKeyArgs.getKeys(i));
+ keyToError.put(deleteKeyArgs.getKeys(i), new ErrorInfo(OMException.ResultCodes.INTERNAL_ERROR.name(),
+ ex.getMessage()));
}
omResponse.setDeleteKeysResponse(
@@ -256,12 +263,18 @@ protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager,
List omKeyInfoList, List dirList,
OMResponse.Builder omResponse,
OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys,
+ Map<String, ErrorInfo> keyToErrors,
boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId) {
OMClientResponse omClientResponse;
+ List<OzoneManagerProtocolProtos.DeleteKeyError> deleteKeyErrors = new ArrayList<>();
+ for (Map.Entry<String, ErrorInfo> key : keyToErrors.entrySet()) {
+ deleteKeyErrors.add(OzoneManagerProtocolProtos.DeleteKeyError.newBuilder().setKey(key.getKey())
+ .setErrorCode(key.getValue().getCode()).setErrorMsg(key.getValue().getMessage()).build());
+ }
omClientResponse = new OMKeysDeleteResponse(omResponse
.setDeleteKeysResponse(
DeleteKeysResponse.newBuilder().setStatus(deleteStatus)
- .setUnDeletedKeys(unDeletedKeys))
+ .setUnDeletedKeys(unDeletedKeys).addAllErrors(deleteKeyErrors))
.setStatus(deleteStatus ? OK : PARTIAL_DELETE).setSuccess(deleteStatus)
.build(), omKeyInfoList, ozoneManager.isRatisEnabled(),
omBucketInfo.copyObject());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java
index 7dd6798f0f46..9e7703ec054f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
@@ -34,7 +35,9 @@
import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.List;
+import java.util.Map;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE;
@@ -134,12 +137,19 @@ protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager,
List<OmKeyInfo> omKeyInfoList, List<OmKeyInfo> dirList,
OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys,
+ Map<String, ErrorInfo> keyToErrors,
boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId) {
OMClientResponse omClientResponse;
+ List<OzoneManagerProtocolProtos.DeleteKeyError> deleteKeyErrors = new ArrayList<>();
+ for (Map.Entry<String, ErrorInfo> key : keyToErrors.entrySet()) {
+ deleteKeyErrors.add(OzoneManagerProtocolProtos.DeleteKeyError.newBuilder()
+ .setKey(key.getKey()).setErrorCode(key.getValue().getCode())
+ .setErrorMsg(key.getValue().getMessage()).build());
+ }
omClientResponse = new OMKeysDeleteResponseWithFSO(omResponse
.setDeleteKeysResponse(
OzoneManagerProtocolProtos.DeleteKeysResponse.newBuilder()
- .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys))
+ .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys).addAllErrors(deleteKeyErrors))
.setStatus(deleteStatus ? OK : PARTIAL_DELETE).setSuccess(deleteStatus)
.build(), omKeyInfoList, dirList, ozoneManager.isRatisEnabled(),
omBucketInfo.copyObject(), volumeId);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java
index 91801e1d5199..2075bbc8de64 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyError;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.junit.jupiter.api.Assertions;
@@ -70,6 +71,9 @@ protected void checkDeleteKeysResponse(
.getUnDeletedKeys();
Assertions.assertEquals(0,
unDeletedKeys.getKeysCount());
+ List<DeleteKeyError> keyErrors = omClientResponse.getOMResponse().getDeleteKeysResponse()
+ .getErrorsList();
+ Assertions.assertEquals(0, keyErrors.size());
// Check all keys are deleted.
for (String deleteKey : deleteKeyList) {
@@ -120,6 +124,9 @@ protected void checkDeleteKeysResponseForFailure(
.getDeleteKeysResponse().getUnDeletedKeys();
Assertions.assertEquals(1,
unDeletedKeys.getKeysCount());
+ List<DeleteKeyError> keyErrors = omClientResponse.getOMResponse().getDeleteKeysResponse()
+ .getErrorsList();
+ Assertions.assertEquals(1, keyErrors.size());
Assertions.assertEquals("dummy", unDeletedKeys.getKeys(0));
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 80933c67d158..6f4a4d20f369 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.s3.commontypes.EncodingTypeObject;
import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata;
@@ -67,6 +68,7 @@
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
@@ -445,47 +447,48 @@ public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName,
OzoneBucket bucket = getBucket(bucketName);
MultiDeleteResponse result = new MultiDeleteResponse();
+ List<String> deleteKeys = new ArrayList<>();
+
if (request.getObjects() != null) {
+ Map<String, ErrorInfo> undeletedKeyResultMap;
for (DeleteObject keyToDelete : request.getObjects()) {
- long startNanos = Time.monotonicNowNanos();
- try {
- bucket.deleteKey(keyToDelete.getKey());
- getMetrics().updateDeleteKeySuccessStats(startNanos);
-
- if (!request.isQuiet()) {
- result.addDeleted(new DeletedObject(keyToDelete.getKey()));
- }
- } catch (OMException ex) {
- if (isAccessDenied(ex)) {
- getMetrics().updateDeleteKeyFailureStats(startNanos);
- result.addError(
- new Error(keyToDelete.getKey(), "PermissionDenied",
- ex.getMessage()));
- } else if (ex.getResult() != ResultCodes.KEY_NOT_FOUND) {
- getMetrics().updateDeleteKeyFailureStats(startNanos);
- result.addError(
- new Error(keyToDelete.getKey(), "InternalError",
- ex.getMessage()));
- } else {
+ deleteKeys.add(keyToDelete.getKey());
+ }
+ long startNanos = Time.monotonicNowNanos();
+ try {
+ undeletedKeyResultMap = bucket.deleteKeys(deleteKeys, true);
+ for (DeleteObject d : request.getObjects()) {
+ ErrorInfo error = undeletedKeyResultMap.get(d.getKey());
+ boolean deleted = error == null ||
+ // if the key is not found, it is assumed to be successfully deleted
+ ResultCodes.KEY_NOT_FOUND.name().equals(error.getCode());
+ if (deleted) {
+ deleteKeys.remove(d.getKey());
if (!request.isQuiet()) {
- result.addDeleted(new DeletedObject(keyToDelete.getKey()));
+ result.addDeleted(new DeletedObject(d.getKey()));
}
- getMetrics().updateDeleteKeySuccessStats(startNanos);
+ } else {
+ result.addError(new Error(d.getKey(), error.getCode(), error.getMessage()));
}
- } catch (Exception ex) {
- getMetrics().updateDeleteKeyFailureStats(startNanos);
- result.addError(
- new Error(keyToDelete.getKey(), "InternalError",
- ex.getMessage()));
}
+ getMetrics().updateDeleteKeySuccessStats(startNanos);
+ } catch (IOException ex) {
+ LOG.error("Delete key failed: {}", ex.getMessage());
+ getMetrics().updateDeleteKeyFailureStats(startNanos);
+ result.addError(
+ new Error("ALL", "InternalError",
+ ex.getMessage()));
}
}
+
+ Map<String, String> auditMap = getAuditParameters();
+ auditMap.put("failedDeletes", deleteKeys.toString());
if (result.getErrors().size() != 0) {
AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction,
- getAuditParameters(), new Exception("MultiDelete Exception")));
+ auditMap, new Exception("MultiDelete Exception")));
} else {
AUDIT.logWriteSuccess(
- buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
+ buildAuditMessageForSuccess(s3GAction, auditMap));
}
return result;
}
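The per-key success test used above, shown in isolation (helper name is hypothetical): a key with no ErrorInfo, or whose error is KEY_NOT_FOUND, counts as deleted, matching S3 DeleteObjects semantics where removing a missing key succeeds.

    static boolean isDeleted(ErrorInfo error) {
      // S3 treats deletion of a non-existent key as success
      return error == null
          || ResultCodes.KEY_NOT_FOUND.name().equals(error.getCode());
    }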
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
index 299653d0e7cf..f9b659cd1caa 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.om.helpers.DeleteTenantState;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
@@ -57,6 +58,7 @@
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -250,6 +252,13 @@ public void deleteKeys(String volumeName, String bucketName,
}
+ @Override
+ public Map<String, ErrorInfo> deleteKeys(String volumeName, String bucketName,
+ List<String> keyNameList, boolean quiet)
+ throws IOException {
+ return new HashMap<>();
+ }
+
@Override
public void renameKey(String volumeName, String bucketName,
String fromKeyName, String toKeyName)
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index b5f37aaef3e0..a886e6cfad7b 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts.PartInfo;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.util.Time;
@@ -349,6 +350,17 @@ public void deleteKey(String key) throws IOException {
keyDetails.remove(key);
}
+ @Override
+ public Map<String, ErrorInfo> deleteKeys(List<String> keyList, boolean quiet) throws IOException {
+ Map<String, ErrorInfo> keyErrorMap = new HashMap<>();
+ for (String key : keyList) {
+ if (keyDetails.remove(key) == null) {
+ keyErrorMap.put(key, new ErrorInfo("KEY_NOT_FOUND", "Key does not exist"));
+ }
+ }
+ return keyErrorMap;
+ }
+
@Override
public void renameKey(String fromKeyName, String toKeyName)
throws IOException {
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
index e9a93e3c5213..65f7f6c74edc 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -38,6 +39,7 @@
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -165,7 +167,9 @@ public void testListKey() throws IOException {
public void testDeleteKeys() throws IOException, OS3Exception {
Mockito.when(objectStore.getVolume(anyString())).thenReturn(volume);
Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
- doThrow(exception).when(bucket).deleteKey(any());
+ Map<String, ErrorInfo> deleteErrors = new HashMap<>();
+ deleteErrors.put("deleteKeyName", new ErrorInfo("ACCESS_DENIED", "ACL check failed"));
+ when(bucket.deleteKeys(any(), anyBoolean())).thenReturn(deleteErrors);
BucketEndpoint bucketEndpoint = new BucketEndpoint();
bucketEndpoint.setClient(client);
MultiDeleteRequest request = new MultiDeleteRequest();
@@ -178,7 +182,7 @@ public void testDeleteKeys() throws IOException, OS3Exception {
bucketEndpoint.multiDelete("BucketName", "keyName", request);
assertEquals(1, response.getErrors().size());
assertTrue(
- response.getErrors().get(0).getCode().equals("PermissionDenied"));
+ response.getErrors().get(0).getCode().equals("ACCESS_DENIED"));
}
@Test
From d01327ee118dc3de4297f7d4904e020d418c4062 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Sun, 21 Apr 2024 14:33:24 +0800
Subject: [PATCH 6/8] HDDS-10719. Avoid empty ETag for key created outside of
S3 (#6563)
(cherry picked from commit 96fc70e66cec3bbbf843c3f1cfa293ccc7e13470)
---
.../org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java | 5 +++--
.../dist/src/main/smoketest/basic/ozone-shell-lib.robot | 1 +
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java
index 679f4b72c079..d69c14807cec 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.util.Objects;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BasicKeyInfo;
@@ -54,7 +55,7 @@ public BasicOmKeyInfo(String volumeName, String bucketName, String keyName,
this.modificationTime = modificationTime;
this.replicationConfig = replicationConfig;
this.isFile = isFile;
- this.eTag = eTag;
+ this.eTag = StringUtils.isNotEmpty(eTag) ? eTag : null;
}
public String getVolumeName() {
@@ -171,7 +172,7 @@ public BasicKeyInfo getProtobuf() {
} else {
builder.setFactor(ReplicationConfig.getLegacyFactor(replicationConfig));
}
- if (eTag != null) {
+ if (StringUtils.isNotEmpty(eTag)) {
builder.setETag(eTag);
}
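With this normalization, a key created outside S3 (no ETag metadata) never serializes an empty eTag. A quick hedged check, assuming a replicationConfig instance in scope:

    BasicOmKeyInfo noETag = new BasicOmKeyInfo(
        "vol", "bucket", "key", 0L, 0L, 0L, replicationConfig, true, "");
    assert noETag.getETag() == null;  // "" is normalized to null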
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot
index d1a6c5c7d00c..685f57fd2bb8 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot
@@ -167,6 +167,7 @@ Test key handling
Should Not Contain ${result} NOTICE.txt.1 exists
${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | jq -r '. | select(.name=="key1")'
Should contain ${result} creationTime
+ Should not contain ${result} ETag
${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | jq -r '.[] | select(.name=="key1") | .name'
Should Be Equal ${result} key1
Execute ozone sh key rename ${protocol}${server}/${volume}/bb1 key1 key2
From 57f69e3173c2a3a11887388e451b0d2d4048a3a5 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Wed, 17 Apr 2024 23:54:55 +0800
Subject: [PATCH 7/8] HDDS-10679. Enable ITestS3ACommitterMRJob (#6539)
(cherry picked from commit 233c0690835b9f55fc2f19f9f27cd78a964b520a)
---
.../dist/src/main/compose/common/s3a-test.sh | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
index 8df0e380a44d..554b22b5a394 100644
--- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
+++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
@@ -59,11 +59,26 @@ execute_s3a_tests() {
    <value>s3a://${bucket}/</value>
  </property>
+
+  <property>
+    <name>fs.s3a.access.key</name>
+    <value>${AWS_ACCESS_KEY_ID}</value>
+  </property>
+
+  <property>
+    <name>fs.s3a.secret.key</name>
+    <value>${AWS_SECRET_ACCESS_KEY}</value>
+  </property>

  <property>
    <name>test.fs.s3a.sts.enabled</name>
    <value>false</value>
  </property>
+
+  <property>
+    <name>fs.s3a.committer.staging.conflict-mode</name>
+    <value>replace</value>
+  </property>

  <property>
    <name>fs.s3a.path.style.access</name>
    <value>true</value>
  </property>
@@ -82,7 +97,7 @@ EOF
# - ITestS3AContractGetFileStatusV1List: HDDS-10617
# - ITestS3AContractRename: HDDS-10665
mvn -B -V --fail-never --no-transfer-progress \
- -Dtest='ITestS3AContract*, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractRename' \
+ -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractRename' \
clean test
local target="${RESULT_DIR}/junit/${bucket}/target"
From ffcbb21010964e78374491ae32adbf3678cdbdfc Mon Sep 17 00:00:00 2001
From: Ivan Zlenko <241953+ivanzlenko@users.noreply.github.com>
Date: Wed, 10 Jul 2024 15:06:45 +0400
Subject: [PATCH 8/8] HDDS-11040. Disable REST endpoint for S3 secret
manipulation by username (#6839)
(cherry picked from commit 56ce59166d2a7354b1b459dc325dba5e2e65c6fb)
---
.../dist/src/main/smoketest/s3/secretgenerate.robot | 4 +++-
hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot | 4 +++-
.../hadoop/ozone/s3secret/S3SecretManagementEndpoint.java | 7 +++++--
.../apache/hadoop/ozone/s3secret/TestSecretGenerate.java | 2 ++
.../org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java | 2 ++
5 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
index 70dcfa1abede..e9b5dd5df724 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
@@ -45,13 +45,15 @@ S3 Gateway Secret Already Exists
Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True
S3 Gateway Generate Secret By Username
+ [Tags] robot:skip # TODO: Enable after HDDS-11041 is done.
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
Should contain ${result} HTTP/1.1 200 OK ignore_case=True
Should Match Regexp ${result} <awsAccessKey>.*</awsAccessKey><awsSecret>.*</awsSecret>
S3 Gateway Generate Secret By Username For Other User
+ [Tags] robot:skip # TODO: Enable after HDDS-11041 is done.
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2
Should contain ${result} HTTP/1.1 200 OK ignore_case=True
- Should Match Regexp ${result} <awsAccessKey>.*</awsAccessKey><awsSecret>.*</awsSecret>
\ No newline at end of file
+ Should Match Regexp ${result} <awsAccessKey>.*</awsAccessKey><awsSecret>.*</awsSecret>
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
index 0f15f23067b0..59725c0416c9 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
@@ -38,13 +38,15 @@ S3 Gateway Revoke Secret
Should contain ${result} HTTP/1.1 200 OK ignore_case=True
S3 Gateway Revoke Secret By Username
+ [Tags] robot:skip # TODO: Enable after HDDS-11041 is done.
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
Execute ozone s3 getsecret -u testuser ${OM_HA_PARAM}
${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
Should contain ${result} HTTP/1.1 200 OK ignore_case=True
S3 Gateway Revoke Secret By Username For Other User
+ [Tags] robot:skip # TODO: Enable after HDDS-11041 is done.
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM}
${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2
- Should contain ${result} HTTP/1.1 200 OK ignore_case=True
\ No newline at end of file
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java
index a86a92820c06..7448bda3001c 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java
@@ -33,6 +33,7 @@
import java.io.IOException;
import static javax.ws.rs.core.Response.Status.BAD_REQUEST;
+import static javax.ws.rs.core.Response.Status.METHOD_NOT_ALLOWED;
import static javax.ws.rs.core.Response.Status.NOT_FOUND;
/**
@@ -53,7 +54,8 @@ public Response generate() throws IOException {
@Path("/{username}")
public Response generate(@PathParam("username") String username)
throws IOException {
- return generateInternal(username);
+ // TODO: It is a temporary solution. To be removed after HDDS-11041 is done.
+ return Response.status(METHOD_NOT_ALLOWED).build();
}
private Response generateInternal(@Nullable String username) throws IOException {
@@ -93,7 +95,8 @@ public Response revoke() throws IOException {
@Path("/{username}")
public Response revoke(@PathParam("username") String username)
throws IOException {
- return revokeInternal(username);
+ // TODO: It is a temporary solution. To be removed after HDDS-11041 is done.
+ return Response.status(METHOD_NOT_ALLOWED).build();
}
private Response revokeInternal(@Nullable String username)
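Until HDDS-11041 lands, both by-username endpoints short-circuit to HTTP 405 regardless of caller identity. A hedged expectation sketch against the JAX-RS endpoint:

    Response r = endpoint.generate("testuser");
    assert r.getStatus() == 405;  // METHOD_NOT_ALLOWED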
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
index 007fa9099ee3..78efa464c671 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
+import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -111,6 +112,7 @@ void testIfSecretAlreadyExists() throws IOException {
}
@Test
+ @Unhealthy("HDDS-11041")
void testSecretGenerateWithUsername() throws IOException {
hasNoSecretYet();
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java
index a319496419db..6461cfe80bc8 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -97,6 +98,7 @@ void testSecretRevoke() throws IOException {
}
@Test
+ @Unhealthy("HDDS-11041")
void testSecretRevokeWithUsername() throws IOException {
endpoint.revoke(OTHER_USER_NAME);
verify(objectStore, times(1))