From 08d012ba536c8e901398ef68483704b2241df66d Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Fri, 14 Feb 2025 21:51:54 +0800 Subject: [PATCH 1/6] HDDS-6611. Remove chunksPath and metadataPath from container yaml file --- .../hdds/upgrade/HDDSLayoutFeature.java | 3 +- .../org/apache/hadoop/ozone/OzoneConsts.java | 3 + .../ozone/container/ContainerTestHelper.java | 1 + .../common/helpers/ContainerUtils.java | 2 +- .../common/impl/ContainerDataYaml.java | 42 +- .../statemachine/DatanodeConfiguration.java | 3 + .../DeleteBlocksCommandHandler.java | 2 + .../container/keyvalue/KeyValueContainer.java | 31 +- .../keyvalue/KeyValueContainerCheck.java | 3 +- .../keyvalue/KeyValueContainerData.java | 33 +- .../KeyValueContainerMetadataInspector.java | 3 +- .../keyvalue/TarContainerPacker.java | 5 +- .../keyvalue/helpers/BlockUtils.java | 8 +- .../KeyValueContainerLocationUtil.java | 2 +- .../helpers/KeyValueContainerUtil.java | 16 +- .../background/BlockDeletingTask.java | 3 +- .../upgrade/VersionedDatanodeFeatures.java | 28 ++ .../common/TestBlockDeletingService.java | 10 +- .../TestSchemaOneBackwardsCompatibility.java | 2 +- .../common/impl/TestContainerPersistence.java | 3 +- .../TestDeleteBlocksCommandHandler.java | 3 + .../keyvalue/ContainerTestVersionInfo.java | 5 +- .../keyvalue/TestKeyValueContainer.java | 10 +- .../keyvalue/TestKeyValueContainerCheck.java | 2 + .../TestDatanodeUpgradeToSchemaV3.java | 3 + .../TestDatanodeUpgradeToSchemaV4.java | 385 ++++++++++++++++++ .../compatibility/dn-one-rocksdb.robot | 2 +- 27 files changed, 557 insertions(+), 56 deletions(-) create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java index 02e68515f389..7b0689214c8a 100644 --- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java @@ -41,7 +41,8 @@ public enum HDDSLayoutFeature implements LayoutFeature { HADOOP_PRC_PORTS_IN_DATANODEDETAILS(7, "Adding Hadoop RPC ports " + "to DatanodeDetails."), HBASE_SUPPORT(8, "Datanode RocksDB Schema Version 3 has an extra table " + - "for the last chunk of blocks to support HBase.)"); + "for the last chunk of blocks to support HBase.)"), + DATANODE_SCHEMA_V4(9, "Container yaml file doesn't require chunksPath and metadataPath"); ////////////////////////////// ////////////////////////////// diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index d03cc2a22fe5..949a964bd85c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -243,6 +243,9 @@ private OzoneConsts() { // V3: Column families definitions are close to V2, // but have containerID as key prefixes. public static final String SCHEMA_V3 = "3"; + // V4: Column families is same as V3, + // removed chunkPath and metadataPath in .container file + public static final String SCHEMA_V4 = "4"; // Supported store types. 
public static final String OZONE = "ozone"; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 6277a3584c6b..a13a58dd8537 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -316,6 +316,7 @@ private static Builder getContainerCommandRequestBuilder(long containerID, request.setCreateContainer( ContainerProtos.CreateContainerRequestProto.getDefaultInstance().toBuilder().setState(state).build()); request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); + request.setPipelineID(pipeline.getId().getId().toString()); return request; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java index bbc012d3cb98..bbe6bc694165 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java @@ -201,7 +201,7 @@ public static void verifyChecksum(ContainerData containerData, String storedChecksum = containerData.getChecksum(); Yaml yaml = ContainerDataYaml.getYamlForContainerType( - containerData.getContainerType(), + containerData.getContainerType(), containerData, containerData instanceof KeyValueContainerData && ((KeyValueContainerData)containerData).getReplicaIndex() > 0); containerData.computeAndSetChecksum(yaml); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java index ac57be2e2638..c5ac8d4bed6e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java @@ -17,8 +17,15 @@ package org.apache.hadoop.ozone.container.common.impl; -import static org.apache.hadoop.ozone.OzoneConsts.REPLICA_INDEX; import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.KEYVALUE_YAML_TAG; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.OzoneConsts.REPLICA_INDEX; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; +import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS; + import com.google.common.base.Preconditions; import java.io.ByteArrayInputStream; @@ -41,6 +48,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.DumperOptions; @@ -88,7 +96,7 @@ public static void createContainerFile(ContainerType containerType, ((KeyValueContainerData) containerData).getReplicaIndex() > 0; // Create Yaml for given container type - Yaml yaml = getYamlForContainerType(containerType, withReplicaIndex); + Yaml yaml = getYamlForContainerType(containerType, containerData, withReplicaIndex); // Compute Checksum and update ContainerData containerData.computeAndSetChecksum(yaml); @@ -122,9 +130,16 
@@ public static ContainerData readContainerFile(File containerFile) throws IOException { Preconditions.checkNotNull(containerFile, "containerFile cannot be null"); try (FileInputStream inputFileStream = new FileInputStream(containerFile)) { - return readContainer(inputFileStream); + KeyValueContainerData containerData = (KeyValueContainerData) readContainer(inputFileStream); + if (containerData.getChunksPath() == null) { + containerData.setChunksPath(containerFile.getParentFile().getParentFile().getAbsolutePath() + .concat(OZONE_URI_DELIMITER).concat(STORAGE_DIR_CHUNKS)); + } + if (containerData.getMetadataPath() == null) { + containerData.setMetadataPath(containerFile.getParentFile().getAbsolutePath()); + } + return containerData; } - } /** @@ -183,11 +198,12 @@ public static ContainerData readContainer(InputStream input) * the container properties. * * @param containerType type of container + @param containerData container data + @param withReplicaIndex in the container yaml * @return Yamal representation of container properties * @throws StorageContainerException if the type is unrecognized */ - public static Yaml getYamlForContainerType(ContainerType containerType, + public static Yaml getYamlForContainerType(ContainerType containerType, ContainerData containerData, boolean withReplicaIndex) throws StorageContainerException { PropertyUtils propertyUtils = new PropertyUtils(); @@ -201,6 +217,11 @@ public static Yaml getYamlForContainerType(ContainerType containerType, yamlFields = new ArrayList<>(yamlFields); yamlFields.add(REPLICA_INDEX); } + if (!isSameSchemaVersion(((KeyValueContainerData)containerData).getSchemaVersion(), SCHEMA_V4)) { + yamlFields = new ArrayList<>(yamlFields); + yamlFields.add(METADATA_PATH); + yamlFields.add(CHUNKS_PATH); + } Representer representer = new ContainerDataRepresenter(yamlFields); representer.setPropertyUtils(propertyUtils); representer.addClassTag( @@ -299,9 +320,12 @@ public Object construct(Node node) {
kvData.setContainerDBType((String)nodes.get( OzoneConsts.CONTAINER_DB_TYPE)); - kvData.setMetadataPath((String) nodes.get( - OzoneConsts.METADATA_PATH)); - kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH)); + String schemaVersion = (String) nodes.get(OzoneConsts.SCHEMA_VERSION); + kvData.setSchemaVersion(schemaVersion); + if (!kvData.hasSchema(SCHEMA_V4)) { + kvData.setMetadataPath((String) nodes.get(OzoneConsts.METADATA_PATH)); + kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH)); + } Map meta = (Map) nodes.get(OzoneConsts.METADATA); kvData.setMetadata(meta); kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM)); @@ -310,8 +334,6 @@ public Object construct(Node node) { String state = (String) nodes.get(OzoneConsts.STATE); kvData .setState(ContainerProtos.ContainerDataProto.State.valueOf(state)); - String schemaVersion = (String) nodes.get(OzoneConsts.SCHEMA_VERSION); - kvData.setSchemaVersion(schemaVersion); final Object replicaIndex = nodes.get(REPLICA_INDEX); if (replicaIndex != null) { kvData.setReplicaIndex( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java index 7731fdf1e273..c102c07fe131 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java @@ -73,6 +73,9 @@ public class DatanodeConfiguration extends ReconfigurableConfig { "hdds.datanode.wait.on.all.followers"; public static final String CONTAINER_SCHEMA_V3_ENABLED = "hdds.datanode.container.schema.v3.enabled"; + public static final String CONTAINER_SCHEMA_V4_ENABLED = + "hdds.datanode.container.schema.v4.enabled"; + public static final boolean 
CONTAINER_SCHEMA_V4_ENABLED_DEFAULT = true; static final boolean CHUNK_DATA_VALIDATION_CHECK_DEFAULT = false; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index 80c078c5087d..774305092add 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -105,6 +106,7 @@ public DeleteBlocksCommandHandler(OzoneContainer container, schemaHandlers.put(SCHEMA_V1, this::markBlocksForDeletionSchemaV1); schemaHandlers.put(SCHEMA_V2, this::markBlocksForDeletionSchemaV2); schemaHandlers.put(SCHEMA_V3, this::markBlocksForDeletionSchemaV3); + schemaHandlers.put(SCHEMA_V4, this::markBlocksForDeletionSchemaV3); ThreadFactory threadFactory = new ThreadFactoryBuilder() .setNameFormat(threadNamePrefix + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index c5af0c7d9ed2..d74cc5b21a9b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -28,6 +28,8 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -176,7 +178,7 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy containerVolume, clusterId); // Set schemaVersion before the dbFile since we have to // choose the dbFile location based on schema version. - String schemaVersion = VersionedDatanodeFeatures.SchemaV3 + String schemaVersion = VersionedDatanodeFeatures.SchemaV4 .chooseSchemaVersion(config); containerData.setSchemaVersion(schemaVersion); @@ -655,8 +657,17 @@ public void importContainerData(InputStream input, new File(getContainerData().getContainerPath())); } catch (Exception deleteex) { LOG.error( - "Can not cleanup destination directories after a container import" - + " error (cid: {}", containerId, deleteex); + "Can not cleanup container directory after a container import" + + " error (cid: {})", containerId, deleteex); + } finally { + if (containerData.sharedDB()) { + try { + BlockUtils.removeContainerFromDB(containerData, config); + LOG.debug("Container {} metadata is removed from DB", containerId); + } catch (IOException e) { + LOG.error("Can not remove container metadata from DB (cid: {})", containerId, e); + } + } } throw ex; } finally { @@ -669,12 +680,18 @@ public void importContainerData(KeyValueContainerData originalContainerData) containerData.setState(originalContainerData.getState()); containerData 
.setContainerDBType(originalContainerData.getContainerDBType()); - containerData.setSchemaVersion(originalContainerData.getSchemaVersion()); + // migrate V3 to V4 on container import + if (VersionedDatanodeFeatures.SchemaV4.isFinalizedAndEnabled(config) && + originalContainerData.hasSchema(SCHEMA_V3)) { + containerData.setSchemaVersion(SCHEMA_V4); + } else { + containerData.setSchemaVersion(originalContainerData.getSchemaVersion()); + } //rewriting the yaml file with new checksum calculation. update(originalContainerData.getMetadata(), true); - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { // load metadata from received dump files before we try to parse kv BlockUtils.loadKVContainerDataFromFiles(containerData, config); } @@ -702,7 +719,7 @@ public void exportContainerData(OutputStream destination, } try { - if (!containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (!containerData.sharedDB()) { compactDB(); // Close DB (and remove from cache) to avoid concurrent modification // while packing it. @@ -1000,7 +1017,7 @@ private File createTempFile(File file) throws IOException { private void packContainerToDestination(OutputStream destination, ContainerPacker packer) throws IOException { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { // Synchronize the dump and pack operation, // so concurrent exports don't get dump files overwritten. 
// We seldom got concurrent exports for a container, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index 7b392896b5f2..fa9d61221021 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChecksumData; import org.apache.hadoop.ozone.container.common.helpers.BlockData; @@ -266,7 +265,7 @@ private ScanResult scanData(DataTransferThrottler throttler, } else { // If schema V3 and container details not in DB or // if containerDBPath is removed - if ((onDiskContainerData.hasSchema(OzoneConsts.SCHEMA_V3) && + if ((onDiskContainerData.sharedDB() && db.getStore().getMetadataTable().get( onDiskContainerData.getBcsIdKey()) == null) || !new File(onDiskContainerData.getDbFile() diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index f0e350c2fb31..31b2bd732935 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -31,8 +31,10 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import 
static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_VERSION; import static org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix; +import static org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.isFinalized; import com.fasterxml.jackson.annotation.JsonIgnore; import com.google.common.base.Preconditions; @@ -46,12 +48,14 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos + .ContainerDataProto; +import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; import org.yaml.snakeyaml.nodes.Tag; @@ -68,6 +72,8 @@ public class KeyValueContainerData extends ContainerData { // Fields need to be stored in .container file. private static final List KV_YAML_FIELDS; + // Fields need to be stored in .container file for Schema V4; + private static final List KV_YAML_FIELDS_SCHEMA_V4; // Path to Container metadata Level DB/RocksDB Store and .container file. 
private String metadataPath; @@ -98,6 +104,11 @@ public class KeyValueContainerData extends ContainerData { KV_YAML_FIELDS.add(CHUNKS_PATH); KV_YAML_FIELDS.add(CONTAINER_DB_TYPE); KV_YAML_FIELDS.add(SCHEMA_VERSION); + + KV_YAML_FIELDS_SCHEMA_V4 = Lists.newArrayList(); + KV_YAML_FIELDS_SCHEMA_V4.addAll(YAML_FIELDS); + KV_YAML_FIELDS_SCHEMA_V4.add(CONTAINER_DB_TYPE); + KV_YAML_FIELDS_SCHEMA_V4.add(SCHEMA_VERSION); } /** @@ -150,7 +161,7 @@ public String getSchemaVersion() { * @throws UnsupportedOperationException If no valid schema version is found. */ public String getSupportedSchemaVersionOrDefault() { - String[] versions = {SCHEMA_V1, SCHEMA_V2, SCHEMA_V3}; + String[] versions = {SCHEMA_V1, SCHEMA_V2, SCHEMA_V3, SCHEMA_V4}; for (String version : versions) { if (this.hasSchema(version)) { @@ -336,7 +347,11 @@ public ContainerDataProto getProtoBufMessage() { } public static List getYamlFields() { - return Collections.unmodifiableList(KV_YAML_FIELDS); + if (isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4)) { + return KV_YAML_FIELDS_SCHEMA_V4; + } else { + return Collections.unmodifiableList(KV_YAML_FIELDS); + } } /** @@ -426,7 +441,7 @@ public KeyPrefixFilter getDeletingBlockKeyFilter() { * for other schemas just return null. */ public String startKeyEmpty() { - if (hasSchema(SCHEMA_V3)) { + if (sharedDB()) { return getContainerKeyPrefix(getContainerID()); } return null; @@ -437,7 +452,7 @@ public String startKeyEmpty() { * for other schemas just return null. 
*/ public String containerPrefix() { - if (hasSchema(SCHEMA_V3)) { + if (sharedDB()) { return getContainerKeyPrefix(getContainerID()); } return ""; @@ -451,7 +466,7 @@ public String containerPrefix() { * @return formatted key */ private String formatKey(String key) { - if (hasSchema(SCHEMA_V3)) { + if (sharedDB()) { key = getContainerKeyPrefix(getContainerID()) + key; } return key; @@ -461,4 +476,8 @@ public boolean hasSchema(String version) { return KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, version); } + public boolean sharedDB() { + return KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, SCHEMA_V3) || + KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, SCHEMA_V4); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java index a4ebabe3c31f..546d412011c5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.keyvalue; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; @@ -292,7 +293,7 @@ static ObjectNode getAggregateValues(DatanodeStore store, (DatanodeStoreSchemaTwoImpl) store; pendingDelete = countPendingDeletesSchemaV2(schemaTwoStore, containerData); - } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + } else if (isSharedDBVersion(schemaVersion)) { 
DatanodeStoreSchemaThreeImpl schemaThreeStore = (DatanodeStoreSchemaThreeImpl) store; pendingDelete = diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java index 5d3c001eaf73..f9ce3ef1aff1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java @@ -19,7 +19,6 @@ import static java.util.stream.Collectors.toList; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import com.google.common.annotations.VisibleForTesting; import java.io.BufferedOutputStream; @@ -185,7 +184,7 @@ public byte[] unpackContainerDescriptor(InputStream input) } public static Path getDbPath(KeyValueContainerData containerData) { - if (containerData.hasSchema(SCHEMA_V3)) { + if (containerData.sharedDB()) { return DatanodeStoreSchemaThreeImpl.getDumpDir( new File(containerData.getMetadataPath())).toPath(); } else { @@ -203,7 +202,7 @@ public static Path getDbPath(Path baseDir, Path dbPath = Paths.get(containerData.getDbFile().getPath()); Path relativePath = containerPath.relativize(dbPath); - if (containerData.hasSchema(SCHEMA_V3)) { + if (containerData.sharedDB()) { Path metadataDir = KeyValueContainerLocationUtil.getContainerMetaDataPath( baseDir.toString()).toPath(); return DatanodeStoreSchemaThreeImpl.getDumpDir(metadataDir.toFile()) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 730689539f94..be8bd2cb3833 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -25,6 +25,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID; import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import com.google.common.base.Preconditions; import java.io.File; @@ -79,7 +80,7 @@ public static DatanodeStore getUncachedDatanodeStore( store = new DatanodeStoreSchemaOneImpl(conf, containerDBPath, readOnly); } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V2)) { store = new DatanodeStoreSchemaTwoImpl(conf, containerDBPath, readOnly); - } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + } else if (isSharedDBVersion(schemaVersion)) { store = new DatanodeStoreSchemaThreeImpl(conf, containerDBPath, readOnly); } else { @@ -124,7 +125,7 @@ public static DBHandle getDB(KeyValueContainerData containerData, String containerDBPath = containerData.getDbFile().getAbsolutePath(); try { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { DatanodeStoreCache cache = DatanodeStoreCache.getInstance(); Preconditions.checkNotNull(cache); return cache.getDB(containerDBPath, conf); @@ -154,6 +155,7 @@ public static void removeDB(KeyValueContainerData container, Preconditions.checkNotNull(container); Preconditions.checkNotNull(container.getDbFile()); Preconditions.checkState(!container.hasSchema(OzoneConsts.SCHEMA_V3)); + Preconditions.checkState(!container.hasSchema(OzoneConsts.SCHEMA_V4)); ContainerCache cache = ContainerCache.getInstance(conf); 
Preconditions.checkNotNull(cache); @@ -180,7 +182,7 @@ public static void shutdownCache(ConfigurationSource config) { */ public static void addDB(DatanodeStore store, String containerDBPath, ConfigurationSource conf, String schemaVersion) { - if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { DatanodeStoreCache cache = DatanodeStoreCache.getInstance(); Preconditions.checkNotNull(cache); cache.addDB(containerDBPath, new RawDB(store, containerDBPath)); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java index d2882780a794..c978629cb959 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java @@ -122,7 +122,7 @@ private static String getContainerSubDirectory(long containerId) { * Return containerDB File. 
*/ public static File getContainerDBFile(KeyValueContainerData containerData) { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { Preconditions.checkNotNull(containerData.getVolume().getDbParentDir(), "Base Directory cannot be null"); return new File(containerData.getVolume().getDbParentDir(), OzoneConsts.CONTAINER_DB_NAME); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index ab87875dbdc3..904dc7be6d17 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.container.keyvalue.helpers; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import com.google.common.base.Preconditions; import java.io.File; @@ -104,8 +106,8 @@ public static void createContainerMetaData( } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V2)) { store = new DatanodeStoreSchemaTwoImpl(conf, dbFile.getAbsolutePath(), false); - } else if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { - // We don't create per-container store for schema v3 containers, + } else if (isSharedDBVersion(schemaVersion)) { + // We don't create per-container store for schema v3/v4 containers, // they should use per-volume db store. 
return; } else { @@ -142,7 +144,7 @@ public static void removeContainer( public static void removeContainerDB( KeyValueContainerData containerData, ConfigurationSource conf) throws IOException { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { // DB failure is catastrophic, the disk needs to be replaced. // In case of an exception, LOG the message and rethrow the exception. try { @@ -230,7 +232,7 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, config.getObject(DatanodeConfiguration.class); boolean bCheckChunksFilePath = dnConf.getCheckEmptyContainerDir(); - if (kvContainerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (kvContainerData.sharedDB()) { try (DBHandle db = BlockUtils.getDB(kvContainerData, config)) { populateContainerMetadata(kvContainerData, db.getStore(), bCheckChunksFilePath); @@ -434,6 +436,12 @@ public static boolean isSameSchemaVersion(String schema, String other) { return effective1.equals(effective2); } + public static boolean isSharedDBVersion(String schema) { + String effective = schema != null ? 
schema : SCHEMA_V1; + return effective.equals(SCHEMA_V3) || effective.equals(SCHEMA_V4); + } + + /** * Moves container directory to a new location * under "volume/hdds/cluster-id/tmp/deleted-containers" diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java index 38fe872f30e4..4ca4c093d56d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import java.io.File; import java.io.IOException; @@ -147,7 +146,7 @@ private ContainerBackgroundTaskResult handleDeleteTask() throws Exception { crr = deleteViaSchema1(meta, container, dataDir, startTime); } else if (containerData.hasSchema(SCHEMA_V2)) { crr = deleteViaSchema2(meta, container, dataDir, startTime); - } else if (containerData.hasSchema(SCHEMA_V3)) { + } else if (containerData.sharedDB()) { crr = deleteViaSchema3(meta, container, dataDir, startTime); } else { throw new UnsupportedOperationException( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java index ffd038afdcc2..aad48538bff9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java @@ -17,6 +17,9 @@ package org.apache.hadoop.ozone.container.upgrade; +import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED; +import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED_DEFAULT; + import java.io.File; import java.io.IOException; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -148,4 +151,29 @@ public static boolean isFinalizedAndEnabled(ConfigurationSource conf) { return false; } } + + /** + * Utilities for container Schema V4 layout feature. + * This schema puts all container metadata info into a per-disk + * rocksdb instance instead of a per-container instance. + */ + public static class SchemaV4 { + public static String chooseSchemaVersion(ConfigurationSource conf) { + if (isFinalizedAndEnabled(conf)) { + return OzoneConsts.SCHEMA_V4; + } else { + return SchemaV3.chooseSchemaVersion(conf); + } + } + + public static boolean isFinalizedAndEnabled(ConfigurationSource conf) { + DatanodeConfiguration dcf = conf.getObject(DatanodeConfiguration.class); + if (isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4) && + dcf.getContainerSchemaV3Enabled() && + conf.getBoolean(CONTAINER_SCHEMA_V4_ENABLED, CONTAINER_SCHEMA_V4_ENABLED_DEFAULT)) { + return true; + } + return false; + } + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 34a5553f311c..abf761ee7a4c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -22,13
+22,13 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -189,7 +189,7 @@ private KeyValueContainerData createToDeleteBlocks(ContainerSet containerSet, createPendingDeleteBlocksSchema1(numOfBlocksPerContainer, data, containerID, numOfChunksPerBlock, buffer, chunkManager, container); } else if (isSameSchemaVersion(schemaVersion, SCHEMA_V2) - || isSameSchemaVersion(schemaVersion, SCHEMA_V3)) { + || isSharedDBVersion(schemaVersion)) { createPendingDeleteBlocksViaTxn(numOfBlocksPerContainer, txnID, containerID, numOfChunksPerBlock, buffer, chunkManager, container, data); @@ -274,7 +274,7 @@ private void createTxn(KeyValueContainerData data, List containerBlocks, .initBatchOperation()) { DatanodeStore ds = metadata.getStore(); - if (isSameSchemaVersion(schemaVersion, SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { DatanodeStoreSchemaThreeImpl dnStoreThreeImpl = 
(DatanodeStoreSchemaThreeImpl) ds; dnStoreThreeImpl.getDeleteTransactionTable() @@ -381,7 +381,7 @@ private int getUnderDeletionBlocksCount(DBHandle meta, } } return pendingBlocks; - } else if (data.hasSchema(SCHEMA_V3)) { + } else if (data.sharedDB()) { int pendingBlocks = 0; DatanodeStore ds = meta.getStore(); DatanodeStoreSchemaThreeImpl dnStoreThreeImpl = @@ -983,7 +983,7 @@ public void testContainerMaxLockHoldingTime( (containerData.get(0).getBytesUsed() == 0), 100, 3000); if (schemaVersion != null && ( - schemaVersion.equals(SCHEMA_V2) || schemaVersion.equals(SCHEMA_V3))) { + schemaVersion.equals(SCHEMA_V2) || isSharedDBVersion(schemaVersion))) { // Since MaxLockHoldingTime is -1, every "deletion transaction" triggers // a timeout except the last one, where a "deletion transaction" diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index aa3ec32280fc..ad0867a90b6f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -605,7 +605,7 @@ private KeyValueContainerData newKvData() throws IOException { // Changing the paths above affects the checksum, so it was also removed // from the container file and calculated at run time. 
Yaml yaml = ContainerDataYaml.getYamlForContainerType( - kvData.getContainerType(), + kvData.getContainerType(), kvData, kvData.getReplicaIndex() > 0); kvData.computeAndSetChecksum(yaml); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index db6ca37fa652..42082962fa76 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData; import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -258,7 +259,7 @@ public void testAddingBlockToDeletedContainer( initSchemaAndVersionInfo(versionInfo); // With schema v3, we don't have a container dedicated db, // so skip check the behaviors related to it. 
- assumeFalse(isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)); + assumeFalse(isSharedDBVersion(schemaVersion)); long testContainerID = getTestContainerID(); Thread.sleep(100); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index 5b9a5abe0d85..f8ef7b70b6a4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler.DeleteBlockTransactionExecutionResult; @@ -135,10 +136,12 @@ private void setup() throws Exception { TestSchemaHandler testSchemaHandler1 = spy(new TestSchemaHandler()); TestSchemaHandler testSchemaHandler2 = spy(new TestSchemaHandler()); TestSchemaHandler testSchemaHandler3 = spy(new TestSchemaHandler()); + TestSchemaHandler testSchemaHandler4 = spy(new TestSchemaHandler()); handler.getSchemaHandlers().put(SCHEMA_V1, testSchemaHandler1); handler.getSchemaHandlers().put(SCHEMA_V2, testSchemaHandler2); 
handler.getSchemaHandlers().put(SCHEMA_V3, testSchemaHandler3); + handler.getSchemaHandlers().put(SCHEMA_V4, testSchemaHandler4); } @AfterEach diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java index bb336482c3dc..b96c914dc1ee 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.container.keyvalue; -import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; @@ -55,6 +55,7 @@ public class ContainerTestVersionInfo { OzoneConsts.SCHEMA_V1, OzoneConsts.SCHEMA_V2, OzoneConsts.SCHEMA_V3, + OzoneConsts.SCHEMA_V4 }; private final String schemaVersion; @@ -93,7 +94,7 @@ public static List getLayoutList() { } public static void setTestSchemaVersion(String schemaVersion, OzoneConfiguration conf) { - if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { ContainerTestUtils.enableSchemaV3(conf); } else { ContainerTestUtils.disableSchemaV3(conf); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 083afa4b0560..9014af1a0a19 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -21,7 +21,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; -import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; +import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -582,7 +583,7 @@ public void testDeleteContainer(ContainerTestVersionInfo versionInfo) assertFalse(keyValueContainer.getContainerFile().exists(), "Container File still exists"); - if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { assertTrue(keyValueContainer.getContainerDBFile().exists()); } else { assertFalse(keyValueContainer.getContainerDBFile().exists(), @@ -766,7 +767,7 @@ public void testDBProfileAffectsDBOptions( } // DBOtions should be different, except SCHEMA-V3 - if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) { + if (isSharedDBVersion(schemaVersion)) { assertEquals( outProfile1.getDBOptions().compactionReadaheadSize(), outProfile2.getDBOptions().compactionReadaheadSize()); @@ -810,7 +811,7 @@ public void testKeyValueDataProtoBufMsg(ContainerTestVersionInfo versionInfo) void testAutoCompactionSmallSstFile( ContainerTestVersionInfo versionInfo) throws Exception { init(versionInfo); - assumeTrue(isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)); + 
assumeTrue(isSharedDBVersion(schemaVersion)); // Create a new HDDS volume String volumeDirPath = Files.createDirectory(folder.toPath().resolve("volumeDir")).toFile() @@ -1032,6 +1033,7 @@ private void testMixedSchemaImport(String dir, boolean schemaV3Enabled) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); final String dir1 = dir + (schemaV3Enabled ? "/v3" : "/v2"); + conf.setBoolean(CONTAINER_SCHEMA_V4_ENABLED, false); // create HddsVolume HddsVolume hddsVolume1 = new HddsVolume.Builder(dir1) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 87a15d58af7b..3ceda8ded73c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -186,6 +186,8 @@ void testKeyValueContainerCheckDeleted(ContainerTestVersionInfo versionInfo) .thenReturn(containerData.getContainerDBType()); when(mockContainerData.getSchemaVersion()) .thenReturn(containerData.getSchemaVersion()); + when(mockContainerData.sharedDB()) + .thenReturn(containerData.sharedDB()); // Mimic the scenario where scanning starts just before // blocks are marked for deletion. 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index c873a4b6f0e5..c61664f981d3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -444,6 +444,8 @@ public void testWrite(boolean enable, String expectedVersion) // Set SchemaV3 enable status conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, enable); + conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED, + false); dsm = UpgradeTestHelper.restartDatanode(conf, dsm, false, tempFolder, address, HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), false); @@ -459,6 +461,7 @@ public void testWrite(boolean enable, String expectedVersion) // If SchemaV3 is still disabled, new data should still be saved as SchemaV2 assertEquals(expectedVersion, container.getContainerData().getSchemaVersion()); + conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED, true); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java new file mode 100644 index 000000000000..6d8a4a07b273 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java @@ -0,0 +1,385 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.upgrade; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.hadoop.ozone.container.common.ScmTestMock; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; +import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; +import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.io.TempDir; 
+import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.net.InetSocketAddress; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.attribute.FileTime; +import java.util.Collections; +import java.util.stream.Stream; + +import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; +import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS; +import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +/** + * Tests upgrading a single datanode from container Schema V4. 
+ */ +public class TestDatanodeUpgradeToSchemaV4 { + @TempDir + private File tempFolder; + + private DatanodeStateMachine dsm; + private OzoneConfiguration conf; + private static final String CLUSTER_ID = "clusterID"; + + private RPC.Server scmRpcServer; + private InetSocketAddress address; + + private void initTests(Boolean enable) throws Exception { + boolean schemaV3Enabled = enable; + conf = new OzoneConfiguration(); + conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, schemaV3Enabled); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + setup(); + } + + private void setup() throws Exception { + address = SCMTestUtils.getReuseableAddress(); + conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFolder.toString()); + } + + @AfterEach + public void teardown() throws Exception { + if (scmRpcServer != null) { + scmRpcServer.stop(); + } + + if (dsm != null) { + dsm.close(); + } + } + + public static Stream parameters() { + return Stream.of( + arguments(true, false), + arguments(true, true), + arguments(false, false), + arguments(false, true) + ); + } + + /** + * a. new container will be schema V2/V3 before DATANODE_SCHEMA_V4 is finalized, + * depending on whether V3 is enabled or not. + * b. new container will be schema V2/V4 after DATANODE_SCHEMA_V4 is finalized, + * depending on whether V3 is enabled or not. 
+ */ + @ParameterizedTest(name = "schema V3 enabled :{0}, SchemaV4 finalized: {1}") + @MethodSource("parameters") + public void testContainerSchemaV4(boolean schemaV3Enabled, boolean finalize) throws Exception { + initTests(schemaV3Enabled); + // start DN and SCM + ScmTestMock scmTestMock = new ScmTestMock(CLUSTER_ID); + scmRpcServer = SCMTestUtils.startScmRpcServer(conf, scmTestMock, address, 10); + UpgradeTestHelper.addHddsVolume(conf, tempFolder.toPath()); + dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder.toPath(), dsm, address, + HDDSLayoutFeature.HBASE_SUPPORT.layoutVersion()); + ContainerDispatcher dispatcher = dsm.getContainer().getDispatcher(); + dispatcher.setClusterId(CLUSTER_ID); + if (finalize) { + dsm.finalizeUpgrade(); + } + + final Pipeline pipeline = MockPipeline.createPipeline( + Collections.singletonList(dsm.getDatanodeDetails())); + // Create a container to write data. + final long containerID1 = UpgradeTestHelper.addContainer(dispatcher, pipeline); + UpgradeTestHelper.putBlock(dispatcher, containerID1, pipeline); + UpgradeTestHelper.closeContainer(dispatcher, containerID1, pipeline); + KeyValueContainer container = (KeyValueContainer) + dsm.getContainer().getContainerSet().getContainer(containerID1); + File yamlFile = container.getContainerFile(); + String content = + FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); + System.out.println(content); + if (finalize) { + if (schemaV3Enabled) { + assertThat(content).doesNotContain(METADATA_PATH); + assertThat(content).doesNotContain(CHUNKS_PATH); + } else { + assertThat(content).contains(METADATA_PATH); + assertThat(content).contains(CHUNKS_PATH); + } + } else { + assertThat(content).contains(METADATA_PATH); + assertThat(content).contains(CHUNKS_PATH); + } + assertEquals(yamlFile.getParentFile().getParentFile().toPath().resolve(STORAGE_DIR_CHUNKS).toString(), + container.getContainerData().getChunksPath()); + assertEquals(yamlFile.getParentFile().getAbsolutePath(), 
container.getContainerData().getMetadataPath()); + File containerDir = new File(container.getContainerData().getContainerPath()); + assertTrue(containerDir.exists() && containerDir.isDirectory()); + FileTime creationTime1 = (FileTime) Files.getAttribute(containerDir.toPath(), "creationTime"); + + // export the container + File folderToExport = Files.createFile( + tempFolder.toPath().resolve("export-testContainerSchemaV4.tar")).toFile(); + TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION); + + //export the container + try (FileOutputStream fos = new FileOutputStream(folderToExport)) { + container.exportContainerData(fos, packer); + } + + //delete the original one + KeyValueContainerUtil.removeContainer(container.getContainerData(), conf); + container.delete(); + assertFalse(new File(container.getContainerData().getContainerPath()).exists()); + if (schemaV3Enabled) { + assertTrue(container.getContainerData().getDbFile().exists()); + } + + //create a new one + KeyValueContainerData oldContainerData = container.getContainerData(); + KeyValueContainerData newContainerData = + new KeyValueContainerData(containerID1, + oldContainerData.getLayoutVersion(), + oldContainerData.getMaxSize(), pipeline.getId().getId().toString(), + dsm.getDatanodeDetails().getUuidString()); + newContainerData.setSchemaVersion(oldContainerData.getSchemaVersion()); + KeyValueContainer newContainer = new KeyValueContainer(newContainerData, conf); + newContainer.populatePathFields(scmTestMock.getClusterId(), oldContainerData.getVolume()); + + // verify yaml file checksum + try (FileInputStream fis = new FileInputStream(folderToExport)) { + byte[] containerDescriptorYaml = packer.unpackContainerDescriptor(fis); + KeyValueContainerData data = (KeyValueContainerData) ContainerDataYaml + .readContainer(containerDescriptorYaml); + ContainerUtils.verifyChecksum(data, conf); + } + + // sleep 1s to make sure creationTime will change + Thread.sleep(1000); + try (FileInputStream fis = new 
FileInputStream(folderToExport)) { + newContainer.importContainerData(fis, packer); + } + + assertEquals(newContainerData.getContainerDBType(), oldContainerData.getContainerDBType()); + assertEquals(newContainerData.getState(), oldContainerData.getState()); + assertEquals(newContainerData.getBlockCount(), oldContainerData.getBlockCount()); + assertEquals(newContainerData.getLayoutVersion(), oldContainerData.getLayoutVersion()); + assertEquals(newContainerData.getMaxSize(), oldContainerData.getMaxSize()); + assertEquals(newContainerData.getBytesUsed(), oldContainerData.getBytesUsed()); + assertEquals(newContainerData.getMetadataPath(), oldContainerData.getMetadataPath()); + assertEquals(newContainerData.getChunksPath(), oldContainerData.getChunksPath()); + assertEquals(newContainerData.getContainerPath(), oldContainerData.getContainerPath()); + assertTrue(new File(newContainerData.getContainerPath()).exists()); + assertTrue(new File(newContainerData.getChunksPath()).exists()); + assertTrue(new File(newContainerData.getMetadataPath()).exists()); + if (schemaV3Enabled) { + assertTrue(newContainerData.getDbFile().exists()); + assertEquals(newContainerData.getDbFile(), oldContainerData.getDbFile()); + } + yamlFile = newContainer.getContainerFile(); + content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); + System.out.println(content); + if (finalize) { + if (schemaV3Enabled) { + assertThat(content).doesNotContain(METADATA_PATH); + assertThat(content).doesNotContain(CHUNKS_PATH); + } else { + assertThat(content).contains(METADATA_PATH); + assertThat(content).contains(CHUNKS_PATH); + } + } else { + assertThat(content).contains(METADATA_PATH); + assertThat(content).contains(CHUNKS_PATH); + } + assertEquals(yamlFile.getParentFile().getParentFile().toPath().resolve(STORAGE_DIR_CHUNKS).toString(), + newContainer.getContainerData().getChunksPath()); + assertEquals(yamlFile.getParentFile().getAbsolutePath(), newContainer.getContainerData().getMetadataPath()); 
+ FileTime creationTime2 = (FileTime) Files.getAttribute( + Paths.get(newContainer.getContainerData().getContainerPath()), "creationTime"); + assertNotEquals(creationTime1.toInstant(), creationTime2.toInstant()); + } + + /** + * Test that a container created before finalization is still accessible. + * V3 container will be automatically migrated to V4 if there is any container yaml file update on disk. + */ + @ParameterizedTest(name = "schema V3 enabled :{0}, export container before finalization: {1}") + @MethodSource("parameters") + public void testContainerBeforeFinalization( + boolean schemaV3Enabled, boolean exportBeforeFinalization) throws Exception { + initTests(schemaV3Enabled); + // start DN and SCM + ScmTestMock scmTestMock = new ScmTestMock(CLUSTER_ID); + scmRpcServer = SCMTestUtils.startScmRpcServer(conf, scmTestMock, address, 10); + UpgradeTestHelper.addHddsVolume(conf, tempFolder.toPath()); + dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder.toPath(), dsm, address, + HDDSLayoutFeature.HBASE_SUPPORT.layoutVersion()); + ContainerDispatcher dispatcher = dsm.getContainer().getDispatcher(); + dispatcher.setClusterId(CLUSTER_ID); + + // create container + final Pipeline pipeline = MockPipeline.createPipeline( + Collections.singletonList(dsm.getDatanodeDetails())); + // Create a container to write data.
+ final long containerID1 = UpgradeTestHelper.addContainer(dispatcher, pipeline); + UpgradeTestHelper.putBlock(dispatcher, containerID1, pipeline); + UpgradeTestHelper.closeContainer(dispatcher, containerID1, pipeline); + KeyValueContainer container = (KeyValueContainer) + dsm.getContainer().getContainerSet().getContainer(containerID1); + File yamlFile = container.getContainerFile(); + String content = + FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); + System.out.println(content); + // yaml file contains chunkPath and metadataPath + assertThat(content).contains(METADATA_PATH); + assertThat(content).contains(CHUNKS_PATH); + assertEquals(yamlFile.getParentFile().getParentFile().toPath().resolve(STORAGE_DIR_CHUNKS).toString(), + container.getContainerData().getChunksPath()); + assertEquals(yamlFile.getParentFile().getAbsolutePath(), container.getContainerData().getMetadataPath()); + File containerDir = new File(container.getContainerData().getContainerPath()); + assertTrue(containerDir.exists() && containerDir.isDirectory()); + FileTime creationTime1 = (FileTime) Files.getAttribute(containerDir.toPath(), "creationTime"); + + File folderToExport = Files.createFile( + tempFolder.toPath().resolve("export-testContainerBeforeFinalization.tar")).toFile(); + TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION); + if (exportBeforeFinalization) { + //export the container + try (FileOutputStream fos = new FileOutputStream(folderToExport)) { + container.exportContainerData(fos, packer); + } + } + + dsm.finalizeUpgrade(); + + if (!exportBeforeFinalization) { + //export the container + try (FileOutputStream fos = new FileOutputStream(folderToExport)) { + container.exportContainerData(fos, packer); + } + } + + //delete the original one + KeyValueContainerUtil.removeContainer(container.getContainerData(), conf); + container.delete(); + assertFalse(new File(container.getContainerData().getContainerPath()).exists()); + if (schemaV3Enabled) { + 
assertTrue(container.getContainerData().getDbFile().exists()); + } + + //create a new one + KeyValueContainerData oldContainerData = container.getContainerData(); + KeyValueContainerData newContainerData = + new KeyValueContainerData(containerID1, + oldContainerData.getLayoutVersion(), + oldContainerData.getMaxSize(), pipeline.getId().getId().toString(), + dsm.getDatanodeDetails().getUuidString()); + newContainerData.setSchemaVersion(oldContainerData.getSchemaVersion()); + KeyValueContainer newContainer = new KeyValueContainer(newContainerData, conf); + newContainer.populatePathFields(scmTestMock.getClusterId(), oldContainerData.getVolume()); + + // verify yaml file checksum + try (FileInputStream fis = new FileInputStream(folderToExport)) { + byte[] containerDescriptorYaml = packer.unpackContainerDescriptor(fis); + KeyValueContainerData data = (KeyValueContainerData) ContainerDataYaml + .readContainer(containerDescriptorYaml); + ContainerUtils.verifyChecksum(data, conf); + } + + // sleep 1s to make sure creationTime will change + Thread.sleep(1000); + try (FileInputStream fis = new FileInputStream(folderToExport)) { + newContainer.importContainerData(fis, packer); + } + + assertEquals(newContainerData.getContainerDBType(), oldContainerData.getContainerDBType()); + assertEquals(newContainerData.getState(), oldContainerData.getState()); + assertEquals(newContainerData.getBlockCount(), oldContainerData.getBlockCount()); + assertEquals(newContainerData.getLayoutVersion(), oldContainerData.getLayoutVersion()); + assertEquals(newContainerData.getMaxSize(), oldContainerData.getMaxSize()); + assertEquals(newContainerData.getBytesUsed(), oldContainerData.getBytesUsed()); + assertEquals(newContainerData.getMetadataPath(), oldContainerData.getMetadataPath()); + assertEquals(newContainerData.getChunksPath(), oldContainerData.getChunksPath()); + assertEquals(newContainerData.getContainerPath(), oldContainerData.getContainerPath()); + assertTrue(new 
File(newContainerData.getContainerPath()).exists()); + assertTrue(new File(newContainerData.getChunksPath()).exists()); + assertTrue(new File(newContainerData.getMetadataPath()).exists()); + if (schemaV3Enabled) { + assertTrue(newContainerData.getDbFile().exists()); + assertEquals(newContainerData.getDbFile(), oldContainerData.getDbFile()); + } + yamlFile = newContainer.getContainerFile(); + content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); + System.out.println(content); + if (schemaV3Enabled) { + assertThat(content).doesNotContain(METADATA_PATH); + assertThat(content).doesNotContain(CHUNKS_PATH); + // V3 migrate to V4 automatically + assertTrue(newContainer.getContainerData().hasSchema(SCHEMA_V4)); + } else { + assertThat(content).contains(METADATA_PATH); + assertThat(content).contains(CHUNKS_PATH); + assertTrue(newContainer.getContainerData().hasSchema(SCHEMA_V2)); + } + + assertEquals(yamlFile.getParentFile().getParentFile().toPath().resolve(STORAGE_DIR_CHUNKS).toString(), + newContainer.getContainerData().getChunksPath()); + assertEquals(yamlFile.getParentFile().getAbsolutePath(), newContainer.getContainerData().getMetadataPath()); + FileTime creationTime2 = (FileTime) Files.getAttribute( + Paths.get(newContainer.getContainerData().getContainerPath()), "creationTime"); + assertNotEquals(creationTime1.toInstant(), creationTime2.toInstant()); + } +} diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot index 282aa8f168fe..09fd28e0da3c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot @@ -26,4 +26,4 @@ Create a container and check container schema version ${output} = Execute ozone admin container create Should not contain ${output} Failed ${output} = Execute ozone debug datanode container list - Should contain ${output} 
\"schemaVersion\" : \"3\" + Should contain ${output} \"schemaVersion\" : \"4\" From 22932f70fddaa8b4064312c909a9f94fcd7651b6 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Wed, 19 Feb 2025 18:48:06 +0800 Subject: [PATCH 2/6] fix import order --- .../common/impl/ContainerDataYaml.java | 6 +- .../container/keyvalue/KeyValueContainer.java | 2 +- .../keyvalue/KeyValueContainerData.java | 2 +- .../TestDatanodeUpgradeToSchemaV4.java | 62 +++++++++---------- 4 files changed, 34 insertions(+), 38 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java index c5ac8d4bed6e..f21d3c4c1490 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java @@ -17,15 +17,14 @@ package org.apache.hadoop.ozone.container.common.impl; -import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.KEYVALUE_YAML_TAG; -import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.OzoneConsts.REPLICA_INDEX; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS; - +import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.KEYVALUE_YAML_TAG; +import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; import com.google.common.base.Preconditions; import java.io.ByteArrayInputStream; @@ 
-48,7 +47,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.DumperOptions; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index d74cc5b21a9b..6fce1104b5f0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -27,9 +27,9 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; -import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; +import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 31b2bd732935..121f00edfaa0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -54,8 +54,8 @@ import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; import org.yaml.snakeyaml.nodes.Tag; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java index 6d8a4a07b273..0fa031329dcc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java @@ -1,13 +1,12 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +17,29 @@ package org.apache.hadoop.ozone.container.upgrade; +import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; +import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS; +import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.net.InetSocketAddress; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.attribute.FileTime; +import java.util.Collections; +import java.util.stream.Stream; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -44,30 +66,6 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; 
-import java.net.InetSocketAddress; -import java.nio.charset.Charset; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.nio.file.attribute.FileTime; -import java.util.Collections; -import java.util.stream.Stream; - -import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; -import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; -import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS; -import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.params.provider.Arguments.arguments; - /** * Tests upgrading a single datanode from container Schema V4. 
*/ From 8edb068f0424090bdac63799ee1c4e6e501f575e Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Mon, 24 Feb 2025 16:27:12 +0800 Subject: [PATCH 3/6] address comments --- .../common/impl/ContainerDataYaml.java | 4 +- .../statemachine/DatanodeConfiguration.java | 3 - .../container/keyvalue/KeyValueContainer.java | 27 +++---- .../keyvalue/KeyValueContainerData.java | 17 +++-- .../upgrade/VersionedDatanodeFeatures.java | 10 +-- .../keyvalue/TestKeyValueContainer.java | 9 +-- .../TestDatanodeUpgradeToSchemaV3.java | 10 +-- .../TestDatanodeUpgradeToSchemaV4.java | 71 ++++++++++--------- 8 files changed, 74 insertions(+), 77 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java index f21d3c4c1490..1c090780efff 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java @@ -215,7 +215,7 @@ public static Yaml getYamlForContainerType(ContainerType containerType, Containe yamlFields = new ArrayList<>(yamlFields); yamlFields.add(REPLICA_INDEX); } - if (!isSameSchemaVersion(((KeyValueContainerData)containerData).getSchemaVersion(), SCHEMA_V4)) { + if (((KeyValueContainerData)containerData).olderSchemaThan(SCHEMA_V4)) { yamlFields = new ArrayList<>(yamlFields); yamlFields.add(METADATA_PATH); yamlFields.add(CHUNKS_PATH); @@ -320,7 +320,7 @@ public Object construct(Node node) { OzoneConsts.CONTAINER_DB_TYPE)); String schemaVersion = (String) nodes.get(OzoneConsts.SCHEMA_VERSION); kvData.setSchemaVersion(schemaVersion); - if (!kvData.hasSchema(SCHEMA_V4)) { + if (kvData.olderSchemaThan(SCHEMA_V4)) { kvData.setMetadataPath((String) nodes.get(OzoneConsts.METADATA_PATH)); kvData.setChunksPath((String) 
nodes.get(OzoneConsts.CHUNKS_PATH)); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java index c102c07fe131..7731fdf1e273 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java @@ -73,9 +73,6 @@ public class DatanodeConfiguration extends ReconfigurableConfig { "hdds.datanode.wait.on.all.followers"; public static final String CONTAINER_SCHEMA_V3_ENABLED = "hdds.datanode.container.schema.v3.enabled"; - public static final String CONTAINER_SCHEMA_V4_ENABLED = - "hdds.datanode.container.schema.v4.enabled"; - public static final boolean CONTAINER_SCHEMA_V4_ENABLED_DEFAULT = true; static final boolean CHUNK_DATA_VALIDATION_CHECK_DEFAULT = false; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 6fce1104b5f0..c538a23b3db3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -30,6 +30,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; +import static org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.isFinalized; import com.google.common.annotations.VisibleForTesting; import 
com.google.common.base.Preconditions; @@ -59,6 +60,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.nativeio.NativeIO; @@ -296,6 +298,10 @@ private void writeToContainerFile(File containerFile, boolean isCreate) long containerId = containerData.getContainerID(); try { tempContainerFile = createTempFile(containerFile); + if (containerData.hasSchema(SCHEMA_V3) && isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4)) { + // convert container from V3 to V4 on yaml file update + containerData.setSchemaVersion(SCHEMA_V4); + } ContainerDataYaml.createContainerFile( ContainerType.KeyValueContainer, containerData, tempContainerFile); @@ -648,7 +654,7 @@ public void importContainerData(InputStream input, // delete all other temporary data in case of any exception. 
try { - if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + if (containerData.sharedDB()) { BlockUtils.removeContainerFromDB(containerData, config); } FileUtils.deleteDirectory(new File(containerData.getMetadataPath())); @@ -657,17 +663,8 @@ public void importContainerData(InputStream input, new File(getContainerData().getContainerPath())); } catch (Exception deleteex) { LOG.error( - "Can not cleanup container directory after a container import" - + " error (cid: {})", containerId, deleteex); - } finally { - if (containerData.sharedDB()) { - try { - BlockUtils.removeContainerFromDB(containerData, config); - LOG.debug("Container {} metadata is removed from DB", containerId); - } catch (IOException e) { - LOG.error("Can not remove container metadata from DB (cid: {})", containerId, e); - } - } + "Can not cleanup destination directories after a container import" + + " error (cid: {}", containerId, deleteex); } throw ex; } finally { @@ -680,10 +677,14 @@ public void importContainerData(KeyValueContainerData originalContainerData) containerData.setState(originalContainerData.getState()); containerData .setContainerDBType(originalContainerData.getContainerDBType()); - // migrate V3 to V4 on container import if (VersionedDatanodeFeatures.SchemaV4.isFinalizedAndEnabled(config) && originalContainerData.hasSchema(SCHEMA_V3)) { + // migrate V3 to V4 on container import containerData.setSchemaVersion(SCHEMA_V4); + } else if (!VersionedDatanodeFeatures.SchemaV4.isFinalizedAndEnabled(config) && + originalContainerData.hasSchema(SCHEMA_V4)) { + // if V4 is not finalized, covert V4 back to V3 on container import + containerData.setSchemaVersion(SCHEMA_V3); } else { containerData.setSchemaVersion(originalContainerData.getSchemaVersion()); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 121f00edfaa0..e6317476a492 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -347,11 +347,10 @@ public ContainerDataProto getProtoBufMessage() { } public static List getYamlFields() { - if (isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4)) { - return KV_YAML_FIELDS_SCHEMA_V4; - } else { - return Collections.unmodifiableList(KV_YAML_FIELDS); - } + List list = isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4) + ? KV_YAML_FIELDS_SCHEMA_V4 + : KV_YAML_FIELDS; + return Collections.unmodifiableList(list); } /** @@ -480,4 +479,12 @@ public boolean sharedDB() { return KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, SCHEMA_V3) || KeyValueContainerUtil.isSameSchemaVersion(schemaVersion, SCHEMA_V4); } + + /** + * Whether this container's schema version is lower than @param version + */ + public boolean olderSchemaThan(String version) { + String target = version != null ? 
version : SCHEMA_V1; + return Integer.parseInt(schemaVersion) < Integer.parseInt(target); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java index aad48538bff9..8f6a15bfd1ce 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java @@ -17,9 +17,6 @@ package org.apache.hadoop.ozone.container.upgrade; -import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED; -import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED_DEFAULT; - import java.io.File; import java.io.IOException; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -154,8 +151,7 @@ public static boolean isFinalizedAndEnabled(ConfigurationSource conf) { /** * Utilities for container Schema V4 layout feature. - * This schema put all container metadata info into a per-disk - * rocksdb instance instead of a per-container instance. + * Compared to Schema V3, this schema doesn't save chunksPath and metadataPath into container yaml file. 
*/ public static class SchemaV4 { public static String chooseSchemaVersion(ConfigurationSource conf) { @@ -168,9 +164,7 @@ public static String chooseSchemaVersion(ConfigurationSource conf) { public static boolean isFinalizedAndEnabled(ConfigurationSource conf) { DatanodeConfiguration dcf = conf.getObject(DatanodeConfiguration.class); - if (isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4) && - dcf.getContainerSchemaV3Enabled() && - conf.getBoolean(CONTAINER_SCHEMA_V4_ENABLED, CONTAINER_SCHEMA_V4_ENABLED_DEFAULT)) { + if (isFinalized(HDDSLayoutFeature.DATANODE_SCHEMA_V4) && dcf.getContainerSchemaV3Enabled()) { return true; } return false; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 9014af1a0a19..925cce537ff4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; -import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.assertj.core.api.Assertions.assertThat; @@ -1033,7 +1033,6 @@ private void 
testMixedSchemaImport(String dir, boolean schemaV3Enabled) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); final String dir1 = dir + (schemaV3Enabled ? "/v3" : "/v2"); - conf.setBoolean(CONTAINER_SCHEMA_V4_ENABLED, false); // create HddsVolume HddsVolume hddsVolume1 = new HddsVolume.Builder(dir1) @@ -1061,7 +1060,8 @@ private void testMixedSchemaImport(String dir, // verify container schema if (schemaV3Enabled) { - assertEquals(SCHEMA_V3, + // After HDDS-6611, it's V4 when schemaV3Enabled is true + assertEquals(SCHEMA_V4, container.getContainerData().getSchemaVersion()); } else { assertEquals(SCHEMA_V2, @@ -1094,7 +1094,8 @@ private void testMixedSchemaImport(String dir, importedContainer.importContainerData(fio, packer); } - assertEquals(schemaV3Enabled ? SCHEMA_V3 : SCHEMA_V2, + // After HDDS-6611, it's V4 when schemaV3Enabled is true + assertEquals(schemaV3Enabled ? SCHEMA_V4 : SCHEMA_V2, importedContainer.getContainerData().getSchemaVersion()); assertEquals(pendingDeleteBlockCount, importedContainer.getContainerData().getNumPendingDeletionBlocks()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index c61664f981d3..fa5b5ec8f16a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -84,10 +84,6 @@ private void initTests(Boolean enable) throws Exception { conf = new OzoneConfiguration(); conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, schemaV3Enabled); - conf.setBoolean( - OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); - conf.setBoolean( - 
OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); setup(); } @@ -413,7 +409,8 @@ public void testWriteWithV3Enabled(boolean schemaV3Enabled) throws Exception { public void testWriteWithV3Disabled(boolean schemaV3Enabled) throws Exception { initTests(schemaV3Enabled); - testWrite(true, OzoneConsts.SCHEMA_V3); + // After HDDS-6611, it's V4 + testWrite(true, OzoneConsts.SCHEMA_V4); } public void testWrite(boolean enable, String expectedVersion) @@ -444,8 +441,6 @@ public void testWrite(boolean enable, String expectedVersion) // Set SchemaV3 enable status conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, enable); - conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED, - false); dsm = UpgradeTestHelper.restartDatanode(conf, dsm, false, tempFolder, address, HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion(), false); @@ -461,7 +456,6 @@ public void testWrite(boolean enable, String expectedVersion) // If SchemaV3 is still disabled, new data should still be saved as SchemaV2 assertEquals(expectedVersion, container.getContainerData().getSchemaVersion()); - conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V4_ENABLED, true); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java index 0fa031329dcc..d45dc1c50a8b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; +import static 
org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; @@ -41,6 +42,7 @@ import java.util.Collections; import java.util.stream.Stream; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -48,7 +50,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.ozone.container.common.ScmTestMock; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; @@ -84,8 +85,6 @@ private void initTests(Boolean enable) throws Exception { boolean schemaV3Enabled = enable; conf = new OzoneConfiguration(); conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, schemaV3Enabled); - conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); - conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); setup(); } @@ -148,18 +147,25 @@ public void testContainerSchemaV4(boolean schemaV3Enabled, boolean finalize) thr File yamlFile = container.getContainerFile(); String content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); - System.out.println(content); if (finalize) { if (schemaV3Enabled) { assertThat(content).doesNotContain(METADATA_PATH); assertThat(content).doesNotContain(CHUNKS_PATH); + // V3 is converted to V4 on container yaml file update during container close. 
+ assertTrue(container.getContainerData().getSchemaVersion().equals(SCHEMA_V4)); } else { assertThat(content).contains(METADATA_PATH); assertThat(content).contains(CHUNKS_PATH); + assertTrue(container.getContainerData().getSchemaVersion().equals(SCHEMA_V2)); } } else { assertThat(content).contains(METADATA_PATH); assertThat(content).contains(CHUNKS_PATH); + if (schemaV3Enabled) { + assertTrue(container.getContainerData().getSchemaVersion().equals(SCHEMA_V3)); + } else { + assertTrue(container.getContainerData().getSchemaVersion().equals(SCHEMA_V2)); + } } assertEquals(yamlFile.getParentFile().getParentFile().toPath().resolve(STORAGE_DIR_CHUNKS).toString(), container.getContainerData().getChunksPath()); @@ -183,7 +189,7 @@ public void testContainerSchemaV4(boolean schemaV3Enabled, boolean finalize) thr container.delete(); assertFalse(new File(container.getContainerData().getContainerPath()).exists()); if (schemaV3Enabled) { - assertTrue(container.getContainerData().getDbFile().exists()); + assertThat(container.getContainerData().getDbFile()).exists(); } //create a new one @@ -205,31 +211,22 @@ public void testContainerSchemaV4(boolean schemaV3Enabled, boolean finalize) thr ContainerUtils.verifyChecksum(data, conf); } - // sleep 1s to make sure creationTime will change + // sleep 1s to make sure creationTime will have different value. 
Thread.sleep(1000); try (FileInputStream fis = new FileInputStream(folderToExport)) { newContainer.importContainerData(fis, packer); } - assertEquals(newContainerData.getContainerDBType(), oldContainerData.getContainerDBType()); - assertEquals(newContainerData.getState(), oldContainerData.getState()); - assertEquals(newContainerData.getBlockCount(), oldContainerData.getBlockCount()); - assertEquals(newContainerData.getLayoutVersion(), oldContainerData.getLayoutVersion()); - assertEquals(newContainerData.getMaxSize(), oldContainerData.getMaxSize()); - assertEquals(newContainerData.getBytesUsed(), oldContainerData.getBytesUsed()); - assertEquals(newContainerData.getMetadataPath(), oldContainerData.getMetadataPath()); - assertEquals(newContainerData.getChunksPath(), oldContainerData.getChunksPath()); - assertEquals(newContainerData.getContainerPath(), oldContainerData.getContainerPath()); - assertTrue(new File(newContainerData.getContainerPath()).exists()); - assertTrue(new File(newContainerData.getChunksPath()).exists()); - assertTrue(new File(newContainerData.getMetadataPath()).exists()); + assertTrue(isContainerEqual(newContainerData, oldContainerData)); + assertThat(new File(newContainerData.getContainerPath())).exists(); + assertThat(new File(newContainerData.getChunksPath())).exists(); + assertThat(new File(newContainerData.getMetadataPath())).exists(); if (schemaV3Enabled) { - assertTrue(newContainerData.getDbFile().exists()); + assertThat(newContainerData.getDbFile()).exists(); assertEquals(newContainerData.getDbFile(), oldContainerData.getDbFile()); } yamlFile = newContainer.getContainerFile(); content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); - System.out.println(content); if (finalize) { if (schemaV3Enabled) { assertThat(content).doesNotContain(METADATA_PATH); @@ -337,26 +334,18 @@ public void testContainerBeforeFinalization( ContainerUtils.verifyChecksum(data, conf); } - // sleep 1s to make sure creationTime will change + // sleep 
1s to make sure creationTime will have different value. Thread.sleep(1000); try (FileInputStream fis = new FileInputStream(folderToExport)) { newContainer.importContainerData(fis, packer); } - assertEquals(newContainerData.getContainerDBType(), oldContainerData.getContainerDBType()); - assertEquals(newContainerData.getState(), oldContainerData.getState()); - assertEquals(newContainerData.getBlockCount(), oldContainerData.getBlockCount()); - assertEquals(newContainerData.getLayoutVersion(), oldContainerData.getLayoutVersion()); - assertEquals(newContainerData.getMaxSize(), oldContainerData.getMaxSize()); - assertEquals(newContainerData.getBytesUsed(), oldContainerData.getBytesUsed()); - assertEquals(newContainerData.getMetadataPath(), oldContainerData.getMetadataPath()); - assertEquals(newContainerData.getChunksPath(), oldContainerData.getChunksPath()); - assertEquals(newContainerData.getContainerPath(), oldContainerData.getContainerPath()); - assertTrue(new File(newContainerData.getContainerPath()).exists()); - assertTrue(new File(newContainerData.getChunksPath()).exists()); - assertTrue(new File(newContainerData.getMetadataPath()).exists()); + assertTrue(isContainerEqual(newContainerData, oldContainerData)); + assertThat(new File(newContainerData.getContainerPath())).exists(); + assertThat(new File(newContainerData.getChunksPath())).exists(); + assertThat(new File(newContainerData.getMetadataPath())).exists(); if (schemaV3Enabled) { - assertTrue(newContainerData.getDbFile().exists()); + assertThat(newContainerData.getDbFile()).exists(); assertEquals(newContainerData.getDbFile(), oldContainerData.getDbFile()); } yamlFile = newContainer.getContainerFile(); @@ -380,4 +369,18 @@ public void testContainerBeforeFinalization( Paths.get(newContainer.getContainerData().getContainerPath()), "creationTime"); assertNotEquals(creationTime1.toInstant(), creationTime2.toInstant()); } + + private boolean isContainerEqual(KeyValueContainerData containerData1, KeyValueContainerData 
containerData2) { + return new EqualsBuilder() + .append(containerData1.getContainerID(), containerData2.getContainerID()) + .append(containerData1.getContainerDBType(), containerData2.getContainerDBType()) + .append(containerData1.getState(), containerData2.getState()) + .append(containerData1.getLayoutVersion(), containerData2.getLayoutVersion()) + .append(containerData1.getBlockCount(), containerData2.getBlockCount()) + .append(containerData1.getBytesUsed(), containerData2.getBytesUsed()) + .append(containerData1.getMetadataPath(), containerData2.getMetadataPath()) + .append(containerData1.getContainerPath(), containerData2.getContainerPath()) + .append(containerData1.getChunksPath(), containerData2.getChunksPath()) + .isEquals(); + } } From 25ff254b11e88a7a8ec9b11158a015412d3476d8 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Mon, 24 Feb 2025 16:34:37 +0800 Subject: [PATCH 4/6] fix checkstyle --- .../hadoop/ozone/container/common/impl/ContainerDataYaml.java | 1 - .../hadoop/ozone/container/keyvalue/KeyValueContainerData.java | 2 +- .../hadoop/ozone/container/keyvalue/TestKeyValueContainer.java | 1 - .../ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java | 1 - 4 files changed, 1 insertion(+), 4 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java index 1c090780efff..779852c32e65 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java @@ -24,7 +24,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR_CHUNKS; import static 
org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.KEYVALUE_YAML_TAG; -import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; import com.google.common.base.Preconditions; import java.io.ByteArrayInputStream; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index e6317476a492..5a8ffa5d7af1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -481,7 +481,7 @@ public boolean sharedDB() { } /** - * Whether this container's schema version is lower than @param version + * Whether this container's schema version is lower than @param version. */ public boolean olderSchemaThan(String version) { String target = version != null ? 
version : SCHEMA_V1; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 925cce537ff4..4e6e0af798ba 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; -import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V4; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSharedDBVersion; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index fa5b5ec8f16a..0881c230eefa 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; From 
55720bd0e6cb1f1d7d8b3da1ff6a3f847a99e2ed Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Mon, 24 Feb 2025 19:34:35 +0800 Subject: [PATCH 5/6] fix failed UT --- .../hadoop/ozone/container/keyvalue/KeyValueContainerData.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 5a8ffa5d7af1..e91b0bd84197 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -485,6 +485,7 @@ public boolean sharedDB() { */ public boolean olderSchemaThan(String version) { String target = version != null ? version : SCHEMA_V1; - return Integer.parseInt(schemaVersion) < Integer.parseInt(target); + String self = schemaVersion != null ? 
schemaVersion : SCHEMA_V1; + return Integer.parseInt(self) < Integer.parseInt(target); } } From b3c584d37255dfeb0518e982cee482a4c8ba1109 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Tue, 25 Feb 2025 16:56:44 +0800 Subject: [PATCH 6/6] remove System.out.println(content) which is for debug --- .../ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java index d45dc1c50a8b..0d16ea6d9639 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV4.java @@ -277,7 +277,6 @@ public void testContainerBeforeFinalization( File yamlFile = container.getContainerFile(); String content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); - System.out.println(content); // yaml file contains chunkPath and metadataPath assertThat(content).contains(METADATA_PATH); assertThat(content).contains(CHUNKS_PATH); @@ -350,7 +349,6 @@ public void testContainerBeforeFinalization( } yamlFile = newContainer.getContainerFile(); content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); - System.out.println(content); if (schemaV3Enabled) { assertThat(content).doesNotContain(METADATA_PATH); assertThat(content).doesNotContain(CHUNKS_PATH);