From b3555cd4b13dff0d8654562e7673616131625d6c Mon Sep 17 00:00:00 2001 From: Rakesh Radhakrishnan Date: Fri, 10 Jul 2020 15:53:54 +0530 Subject: [PATCH 001/165] HDDS-3947: Sort DNs for client when the key is a file for #getFileStatus #listStatus APIs --- .../hadoop/ozone/client/rpc/RpcClient.java | 2 + ...ManagerProtocolClientSideTranslatorPB.java | 2 + .../hadoop/ozone/om/TestKeyManagerImpl.java | 47 ++++++++++--------- .../hadoop/ozone/om/KeyManagerImpl.java | 21 ++++++--- .../apache/hadoop/ozone/om/OzoneManager.java | 5 +- .../hadoop/ozone/om/fs/OzoneManagerFS.java | 30 +++++++++++- .../OzoneManagerRequestHandler.java | 2 + 7 files changed, 75 insertions(+), 34 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 56c867d45ea0..a44aa53954fa 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1016,6 +1016,7 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, .setBucketName(bucketName) .setKeyName(keyName) .setRefreshPipeline(true) + .setSortDatanodesInPipeline(topologyAwareReadEnabled) .build(); return ozoneManagerClient.getFileStatus(keyArgs); } @@ -1098,6 +1099,7 @@ public List listStatus(String volumeName, String bucketName, .setBucketName(bucketName) .setKeyName(keyName) .setRefreshPipeline(true) + .setSortDatanodesInPipeline(topologyAwareReadEnabled) .build(); return ozoneManagerClient .listStatus(keyArgs, recursive, startKey, numEntries); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index ae2c622be6db..647d545f645e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -1147,6 +1147,7 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) + .setSortDatanodes(args.getSortDatanodes()) .build(); GetFileStatusRequest req = GetFileStatusRequest.newBuilder() @@ -1360,6 +1361,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) + .setSortDatanodes(args.getSortDatanodes()) .build(); ListStatusRequest listStatusRequest = ListStatusRequest.newBuilder() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 7a1cb5b278dd..bbfa0d2c8b0f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -206,7 +206,8 @@ public static void cleanup() throws Exception { @After public void cleanupTest() throws IOException { List fileStatuses = keyManager - .listStatus(createBuilder().setKeyName("").build(), true, "", 100000); + 
.listStatus(createBuilder().setKeyName("").build(), true, "", 100000, + null); for (OzoneFileStatus fileStatus : fileStatuses) { if (fileStatus.isFile()) { keyManager.deleteKey( @@ -314,7 +315,7 @@ public void testCreateDirectory() throws IOException { Path path = Paths.get(keyName); while (path != null) { // verify parent directories are created - Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory()); + Assert.assertTrue(keyManager.getFileStatus(keyArgs, null).isDirectory()); path = path.getParent(); } @@ -344,7 +345,7 @@ public void testCreateDirectory() throws IOException { .setKeyName(keyName) .build(); keyManager.createDirectory(keyArgs); - Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory()); + Assert.assertTrue(keyManager.getFileStatus(keyArgs, null).isDirectory()); // create directory where parent is root keyName = RandomStringUtils.randomAlphabetic(5); @@ -352,7 +353,7 @@ public void testCreateDirectory() throws IOException { .setKeyName(keyName) .build(); keyManager.createDirectory(keyArgs); - OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs); + OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs, null); Assert.assertTrue(fileStatus.isDirectory()); Assert.assertTrue(fileStatus.getKeyInfo().getKeyLocationVersions().get(0) .getLocationList().isEmpty()); @@ -407,7 +408,7 @@ public void testOpenFile() throws IOException { keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); keyManager.commitKey(keyArgs, keySession.getId()); Assert.assertTrue(keyManager - .getFileStatus(keyArgs).isFile()); + .getFileStatus(keyArgs, null).isFile()); // try creating a file over a directory keyArgs = createBuilder() @@ -823,17 +824,17 @@ public void testListStatusWithTableCache() throws Exception { OmKeyArgs rootDirArgs = createKeyArgs(""); // Get entries in both TableCache and DB List fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 1000); + keyManager.listStatus(rootDirArgs, true, "", 1000, null); Assert.assertEquals(100, fileStatuses.size()); // Get entries with startKey=prefixKeyInDB fileStatuses = - keyManager.listStatus(rootDirArgs, true, prefixKeyInDB, 1000); + keyManager.listStatus(rootDirArgs, true, prefixKeyInDB, 1000, null); Assert.assertEquals(50, fileStatuses.size()); // Get entries with startKey=prefixKeyInCache fileStatuses = - keyManager.listStatus(rootDirArgs, true, prefixKeyInCache, 1000); + keyManager.listStatus(rootDirArgs, true, prefixKeyInCache, 1000, null); Assert.assertEquals(100, fileStatuses.size()); // Clean up cache by marking those keys in cache as deleted @@ -865,12 +866,12 @@ public void testListStatusWithTableCacheRecursive() throws Exception { OmKeyArgs rootDirArgs = createKeyArgs(""); // Test listStatus with recursive=false, should only have dirs under root List fileStatuses = - keyManager.listStatus(rootDirArgs, false, "", 1000); + keyManager.listStatus(rootDirArgs, false, "", 1000, null); Assert.assertEquals(2, fileStatuses.size()); // Test listStatus with recursive=true, should have dirs under root and fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 1000); + keyManager.listStatus(rootDirArgs, true, "", 1000, null); Assert.assertEquals(3, fileStatuses.size()); // Add a total of 10 key entries to DB and TableCache under dir1 @@ -894,12 +895,12 @@ public void testListStatusWithTableCacheRecursive() throws Exception { // Test non-recursive, should return the dir under root fileStatuses = - keyManager.listStatus(rootDirArgs, false, "", 1000); + keyManager.listStatus(rootDirArgs, 
false, "", 1000, null); Assert.assertEquals(2, fileStatuses.size()); // Test recursive, should return the dir and the keys in it fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 1000); + keyManager.listStatus(rootDirArgs, true, "", 1000, null); Assert.assertEquals(10 + 3, fileStatuses.size()); // Clean up @@ -944,12 +945,12 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { OmKeyArgs rootDirArgs = createKeyArgs(""); List fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 1000); + keyManager.listStatus(rootDirArgs, true, "", 1000, null); // Should only get entries that are not marked as deleted. Assert.assertEquals(50, fileStatuses.size()); // Test startKey fileStatuses = - keyManager.listStatus(rootDirArgs, true, prefixKey, 1000); + keyManager.listStatus(rootDirArgs, true, prefixKey, 1000, null); // Should only get entries that are not marked as deleted. Assert.assertEquals(50, fileStatuses.size()); // Verify result @@ -981,7 +982,7 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { existKeySet.removeAll(deletedKeySet); fileStatuses = keyManager.listStatus( - rootDirArgs, true, "", 1000); + rootDirArgs, true, "", 1000, null); // Should only get entries that are not marked as deleted. Assert.assertEquals(50 / 2, fileStatuses.size()); @@ -1000,7 +1001,7 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { expectedKeys.clear(); do { fileStatuses = keyManager.listStatus( - rootDirArgs, true, startKey, batchSize); + rootDirArgs, true, startKey, batchSize, null); // Note fileStatuses will never be empty since we are using the last // keyName as the startKey of next batch, // the startKey itself will show up in the next batch of results. @@ -1048,11 +1049,11 @@ public void testListStatus() throws IOException { OmKeyArgs rootDirArgs = createKeyArgs(""); List fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 100); + keyManager.listStatus(rootDirArgs, true, "", 100, null); // verify the number of status returned is same as number of entries Assert.assertEquals(numEntries, fileStatuses.size()); - fileStatuses = keyManager.listStatus(rootDirArgs, false, "", 100); + fileStatuses = keyManager.listStatus(rootDirArgs, false, "", 100, null); // the number of immediate children of root is 1 Assert.assertEquals(1, fileStatuses.size()); @@ -1060,19 +1061,19 @@ public void testListStatus() throws IOException { // return all the entries. 
String startKey = children.iterator().next(); fileStatuses = keyManager.listStatus(rootDirArgs, true, - startKey.substring(0, startKey.length() - 1), 100); + startKey.substring(0, startKey.length() - 1), 100, null); Assert.assertEquals(numEntries, fileStatuses.size()); for (String directory : directorySet) { // verify status list received for each directory with recursive flag set // to false OmKeyArgs dirArgs = createKeyArgs(directory); - fileStatuses = keyManager.listStatus(dirArgs, false, "", 100); + fileStatuses = keyManager.listStatus(dirArgs, false, "", 100, null); verifyFileStatus(directory, fileStatuses, directorySet, fileSet, false); // verify status list received for each directory with recursive flag set // to true - fileStatuses = keyManager.listStatus(dirArgs, true, "", 100); + fileStatuses = keyManager.listStatus(dirArgs, true, "", 100, null); verifyFileStatus(directory, fileStatuses, directorySet, fileSet, true); // verify list status call with using the startKey parameter and @@ -1086,7 +1087,7 @@ public void testListStatus() throws IOException { tempFileStatus != null ? tempFileStatus.get(tempFileStatus.size() - 1).getKeyInfo() .getKeyName() : null, - 2); + 2, null); tmpStatusSet.addAll(tempFileStatus); } while (tempFileStatus.size() == 2); verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet, @@ -1104,7 +1105,7 @@ public void testListStatus() throws IOException { tempFileStatus.get(tempFileStatus.size() - 1).getKeyInfo() .getKeyName() : null, - 2); + 2, null); tmpStatusSet.addAll(tempFileStatus); } while (tempFileStatus.size() == 2); verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 9e12e131899f..c75239b5b684 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -1611,7 +1611,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) keyInfo = metadataManager.getOpenKeyTable().get(objectKey); } else { try { - OzoneFileStatus fileStatus = getFileStatus(args); + OzoneFileStatus fileStatus = getFileStatus(args, null); keyInfo = fileStatus.getKeyInfo(); } catch (IOException e) { // OzoneFS will check whether the key exists when write a new key. 
@@ -1689,7 +1689,8 @@ private void validateOzoneObj(OzoneObj obj) throws OMException { * @throws IOException if there is error in the db * invalid arguments */ - public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { + public OzoneFileStatus getFileStatus(OmKeyArgs args, String clientAddress) + throws IOException { Preconditions.checkNotNull(args, "Key args can not be null"); String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); @@ -1712,6 +1713,9 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { if (args.getRefreshPipeline()) { refreshPipeline(fileKeyInfo); } + if (args.getSortDatanodes()) { + sortDatanodeInPipeline(fileKeyInfo, clientAddress); + } // this is a file return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false); } @@ -1832,7 +1836,7 @@ public OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, try { OzoneFileStatus fileStatus; try { - fileStatus = getFileStatus(args); + fileStatus = getFileStatus(args, null); if (fileStatus.isDirectory()) { throw new OMException("Can not write to directory: " + keyName, ResultCodes.NOT_A_FILE); @@ -1881,7 +1885,7 @@ public OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName); try { - OzoneFileStatus fileStatus = getFileStatus(args); + OzoneFileStatus fileStatus = getFileStatus(args, null); if (fileStatus.isFile()) { if (args.getRefreshPipeline()) { refreshPipeline(fileStatus.getKeyInfo()); @@ -1951,7 +1955,7 @@ private void listStatusFindKeyInTableCache( * @return list of file status */ public List listStatus(OmKeyArgs args, boolean recursive, - String startKey, long numEntries) throws IOException { + String startKey, long numEntries, String clientAddress) throws IOException { Preconditions.checkNotNull(args, "Key args can not be null"); List fileStatusList = new ArrayList<>(); @@ -1971,7 +1975,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, bucketName); try { if (Strings.isNullOrEmpty(startKey)) { - OzoneFileStatus fileStatus = getFileStatus(args); + OzoneFileStatus fileStatus = getFileStatus(args, null); if (fileStatus.isFile()) { return Collections.singletonList(fileStatus); } @@ -2070,6 +2074,9 @@ public List listStatus(OmKeyArgs args, boolean recursive, if (args.getRefreshPipeline()) { refreshPipeline(entry.getValue().getKeyInfo()); } + if (args.getSortDatanodes()) { + sortDatanodeInPipeline(entry.getValue().getKeyInfo(), clientAddress); + } fileStatusList.add(entry.getValue()); countEntries++; if (countEntries >= numEntries) { @@ -2127,7 +2134,7 @@ private OzoneFileStatus verifyNoFilesInPath(String volumeName, String keyName = path.toString(); try { OzoneFileStatus fileStatus = - getFileStatus(argsBuilder.setKeyName(keyName).build()); + getFileStatus(argsBuilder.setKeyName(keyName).build(), null); if (fileStatus.isFile()) { LOG.error("Unable to create directory (File already exists): " + "volume: {} bucket: {} key: {}", volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 1b75b95a1598..96d81f5943e7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2678,7 +2678,7 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { boolean 
auditSuccess = true; try { metrics.incNumGetFileStatus(); - return keyManager.getFileStatus(args); + return keyManager.getFileStatus(args, getClientAddress()); } catch (IOException ex) { metrics.incNumGetFileStatusFails(); auditSuccess = false; @@ -2787,7 +2787,8 @@ public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive, boolean auditSuccess = true; try { metrics.incNumListStatus(); - return keyManager.listStatus(args, recursive, startKey, numEntries); + return keyManager.listStatus(args, recursive, startKey, numEntries, + getClientAddress()); } catch (Exception ex) { metrics.incNumListStatusFails(); auditSuccess = false; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java index 647931af0d04..4bcba45b6b9c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java @@ -31,7 +31,18 @@ * Ozone Manager FileSystem interface. */ public interface OzoneManagerFS extends IOzoneAcl { - OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException; + + /** + * Get file status for a file or a directory. + * + * @param args the args of the key provided by the client. + * @param clientAddress a hint to the key manager to order the datanodes in the + * returned pipeline by distance between the client and each datanode. + * @return file status. + * @throws IOException + */ + OzoneFileStatus getFileStatus(OmKeyArgs args, String clientAddress) + throws IOException; void createDirectory(OmKeyArgs args) throws IOException; @@ -49,6 +60,21 @@ OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, */ OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) throws IOException; + /** + * List the status for a file or a directory and its contents. + * + * @param keyArgs the args of the key provided by the client. + * @param recursive For a directory, if true, all the descendants of the + * directory are listed. + * @param startKey Key from which listing needs to start. If startKey + * exists, its status is included in the final list. + * @param numEntries Number of entries to list from the start key. + * @param clientAddress a hint to the key manager to order the datanodes in the + * returned pipeline by distance between the client and each datanode. 
+ * @return list of file status + * @throws IOException + */ List<OzoneFileStatus> listStatus(OmKeyArgs keyArgs, boolean recursive, - String startKey, long numEntries) throws IOException; + String startKey, long numEntries, String clientAddress) + throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 3183aa15ff75..af4d15260688 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -536,6 +536,7 @@ private GetFileStatusResponse getOzoneFileStatus( .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .setRefreshPipeline(true) + .setSortDatanodesInPipeline(keyArgs.getSortDatanodes()) .build(); GetFileStatusResponse.Builder rb = GetFileStatusResponse.newBuilder(); @@ -568,6 +569,7 @@ private ListStatusResponse listStatus( .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .setRefreshPipeline(true) + .setSortDatanodesInPipeline(keyArgs.getSortDatanodes()) .build(); List<OzoneFileStatus> statuses = impl.listStatus(omKeyArgs, request.getRecursive(), From adaa3f7a46d2322d5b19b16cf5b5eb3e0a6f0f98 Mon Sep 17 00:00:00 2001 From: Rakesh Radhakrishnan Date: Fri, 10 Jul 2020 22:00:44 +0530 Subject: [PATCH 002/165] HDDS-3947: Fixed checkstyle warning --- .../main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index c75239b5b684..c0230ab37975 100--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -1955,7 +1955,8 @@ private void listStatusFindKeyInTableCache( * @return list of file status */ public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive, - String startKey, long numEntries, String clientAddress) throws IOException { + String startKey, long numEntries, String clientAddress) + throws IOException { Preconditions.checkNotNull(args, "Key args can not be null"); List<OzoneFileStatus> fileStatusList = new ArrayList<>(); From 67e8ce2d1c6c6ae49239df1d1779841d5aad864e Mon Sep 17 00:00:00 2001 From: Rakesh Radhakrishnan Date: Tue, 14 Jul 2020 11:54:05 +0530 Subject: [PATCH 003/165] trigger new CI check From 8a480e29ea2c307ea4d48a568e6ce4f45031684d Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Fri, 10 Jul 2020 12:50:15 -0700 Subject: [PATCH 004/165] HDDS-3914. 
Remove LevelDB configuration option for DN Metastore (#1166) --- .../apache/hadoop/ozone/OzoneConfigKeys.java | 9 ---- .../org/apache/hadoop/ozone/OzoneConsts.java | 2 + .../src/main/resources/ozone-default.xml | 20 -------- .../container/keyvalue/KeyValueContainer.java | 5 -- .../keyvalue/KeyValueContainerCheck.java | 8 ++-- .../keyvalue/KeyValueContainerData.java | 3 +- .../keyvalue/TestKeyValueBlockIterator.java | 16 ++----- .../keyvalue/TestKeyValueContainerCheck.java | 19 ++------ .../hadoop/hdds/utils/LevelDBStore.java | 1 + .../hdds/utils/LevelDBStoreIterator.java | 1 + .../hdds/utils/MetadataStoreBuilder.java | 25 ++++------ .../hadoop/hdds/utils/TestMetadataStore.java | 48 +++++++++++-------- .../hdds/utils/TestRocksDBStoreMBean.java | 3 -- .../apache/hadoop/ozone/om/TestOmSQLCli.java | 21 -------- .../hadoop/ozone/om/TestScmSafeMode.java | 3 -- .../ozone/recon/ReconServerConfigKeys.java | 7 --- 16 files changed, 55 insertions(+), 136 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 7d46b01a6dbf..212931142f83 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -79,15 +79,6 @@ public final class OzoneConfigKeys { "ozone.trace.enabled"; public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false; - public static final String OZONE_METADATA_STORE_IMPL = - "ozone.metastore.impl"; - public static final String OZONE_METADATA_STORE_IMPL_LEVELDB = - "LevelDB"; - public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB = - "RocksDB"; - public static final String OZONE_METADATA_STORE_IMPL_DEFAULT = - OZONE_METADATA_STORE_IMPL_ROCKSDB; - public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = "ozone.metastore.rocksdb.statistics"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index a6833a5ee990..ea0466f6cea5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -360,4 +360,6 @@ private OzoneConsts() { public static final String TRANSACTION_INFO_KEY = "#TRANSACTIONINFO"; public static final String TRANSACTION_INFO_SPLIT_KEY = "#"; + public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB"; + public static final String CONTAINER_DB_TYPE_LEVELDB = "LevelDB"; } diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 8b724d9818e7..c72dad0cee93 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -634,17 +634,6 @@ dfs.container.ratis.datanode.storage.dir be configured separately. - - ozone.metastore.impl - RocksDB - OZONE, OM, SCM, CONTAINER, STORAGE - - Ozone metadata store implementation. Ozone metadata are well - distributed to multiple services such as ozoneManager, scm. They are stored in - some local key-value databases. This property determines which database - library to use. Supported value is either LevelDB or RocksDB. - - ozone.metastore.rocksdb.statistics @@ -2291,15 +2280,6 @@ Whether to enable topology aware read to improve the read performance. 
- - ozone.recon.container.db.impl - RocksDB - OZONE, RECON, STORAGE - - Ozone Recon container DB store implementation.Supported value is either - LevelDB or RocksDB. - - ozone.recon.om.db.dir diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 49b907f10ec5..a80841f60035 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; @@ -129,12 +128,8 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath, chunksPath, dbFile, config); - String impl = config.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT); - //Set containerData for the KeyValueContainer. containerData.setChunksPath(chunksPath.getPath()); - containerData.setContainerDBType(impl); containerData.setDbFile(dbFile); containerData.setVolume(containerVolume); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index 95795e64c953..d6c4ff0c5575 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -47,8 +47,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_LEVELDB; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB; /** * Class to run integrity checks on Datanode Containers. 
@@ -186,8 +186,8 @@ private void checkContainerFile() throws IOException { } dbType = onDiskContainerData.getContainerDBType(); - if (!dbType.equals(OZONE_METADATA_STORE_IMPL_ROCKSDB) && - !dbType.equals(OZONE_METADATA_STORE_IMPL_LEVELDB)) { + if (!dbType.equals(CONTAINER_DB_TYPE_ROCKSDB) && + !dbType.equals(CONTAINER_DB_TYPE_LEVELDB)) { String errStr = "Unknown DBType [" + dbType + "] in Container File for [" + containerID + "]"; throw new IOException(errStr); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 373b3223a68d..5698d7267882 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicInteger; import static java.lang.Math.max; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB; import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COUNT_KEY; import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_BYTES_USED_KEY; @@ -65,7 +66,7 @@ public class KeyValueContainerData extends ContainerData { private String metadataPath; //Type of DB used to store key to chunks mapping - private String containerDBType; + private String containerDBType = CONTAINER_DB_TYPE_ROCKSDB; private File dbFile = null; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index 62097b38e8b3..aff0528bdef4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -45,9 +45,6 @@ import com.google.common.primitives.Longs; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK; import org.junit.After; @@ -71,22 +68,18 @@ public class TestKeyValueBlockIterator { private OzoneConfiguration conf; private File testRoot; - private final String storeImpl; private final ChunkLayOutVersion layout; - public TestKeyValueBlockIterator(String metadataImpl, - ChunkLayOutVersion layout) { - this.storeImpl = metadataImpl; + public TestKeyValueBlockIterator(ChunkLayOutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Collection data() { return Arrays.asList(new Object[][] { - {OZONE_METADATA_STORE_IMPL_LEVELDB, FILE_PER_CHUNK}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB, FILE_PER_CHUNK}, - {OZONE_METADATA_STORE_IMPL_LEVELDB, FILE_PER_BLOCK}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB, FILE_PER_BLOCK}}); + {FILE_PER_CHUNK}, 
+ {FILE_PER_BLOCK} + }); } @Before @@ -94,7 +87,6 @@ public void setUp() throws Exception { testRoot = GenericTestUtils.getRandomizedTestDir(); conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); - conf.set(OZONE_METADATA_STORE_IMPL, storeImpl); volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index cb8ef3406c63..4583a54f5c54 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -58,10 +58,6 @@ import java.util.UUID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -76,7 +72,6 @@ private static final Logger LOG = LoggerFactory.getLogger(TestKeyValueContainerCheck.class); - private final String storeImpl; private final ChunkLayoutTestInfo chunkManagerTestInfo; private KeyValueContainer container; private KeyValueContainerData containerData; @@ -85,28 +80,22 @@ private File testRoot; private ChunkManager chunkManager; - public TestKeyValueContainerCheck(String metadataImpl, - ChunkLayoutTestInfo chunkManagerTestInfo) { - this.storeImpl = metadataImpl; + public TestKeyValueContainerCheck(ChunkLayoutTestInfo chunkManagerTestInfo) { this.chunkManagerTestInfo = chunkManagerTestInfo; } @Parameterized.Parameters public static Collection data() { return Arrays.asList(new Object[][] { - {OZONE_METADATA_STORE_IMPL_LEVELDB, ChunkLayoutTestInfo.FILE_PER_CHUNK}, - {OZONE_METADATA_STORE_IMPL_LEVELDB, ChunkLayoutTestInfo.FILE_PER_BLOCK}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB, ChunkLayoutTestInfo.FILE_PER_CHUNK}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB, ChunkLayoutTestInfo.FILE_PER_BLOCK} + {ChunkLayoutTestInfo.FILE_PER_CHUNK}, + {ChunkLayoutTestInfo.FILE_PER_BLOCK} }); } @Before public void setUp() throws Exception { - LOG.info("Testing store:{} layout:{}", - storeImpl, chunkManagerTestInfo.getLayout()); + LOG.info("Testing layout:{}", chunkManagerTestInfo.getLayout()); this.testRoot = GenericTestUtils.getRandomizedTestDir(); conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); - conf.set(OZONE_METADATA_STORE_IMPL, storeImpl); chunkManagerTestInfo.updateConfig(conf); volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf); chunkManager = chunkManagerTestInfo.createChunkManager(true, null); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java index f5088683efff..8998f6c9d6d4 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java @@ -42,6 +42,7 @@ /** * LevelDB 
interface. */ +@Deprecated public class LevelDBStore implements MetadataStore { private static final Logger LOG = diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java index f5b6769b70de..0a7abe33a60e 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java @@ -25,6 +25,7 @@ /** * LevelDB store iterator. */ +@Deprecated public class LevelDBStoreIterator implements MetaStoreIterator { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java index d697fdfaccda..5e1a5a8dad9a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java @@ -26,14 +26,14 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConfigKeys; import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_LEVELDB; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB; + import org.iq80.leveldb.Options; import org.rocksdb.BlockBasedTableConfig; import org.rocksdb.Statistics; @@ -104,22 +104,19 @@ public MetadataStore build() throws IOException { final ConfigurationSource conf = optionalConf.orElse(DEFAULT_CONF); if (dbType == null) { - LOG.debug("dbType is null, using "); - dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT); - LOG.debug("dbType is null, using dbType {} from ozone configuration", - dbType); + dbType = CONTAINER_DB_TYPE_ROCKSDB; + LOG.debug("dbType is null, using dbType {}.", dbType); } else { LOG.debug("Using dbType {} for metastore", dbType); } - if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(dbType)) { + if (CONTAINER_DB_TYPE_LEVELDB.equals(dbType)) { Options options = new Options(); options.createIfMissing(createIfMissing); if (cacheSize > 0) { options.cacheSize(cacheSize); } return new LevelDBStore(dbFile, options); - } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) { + } else if (CONTAINER_DB_TYPE_ROCKSDB.equals(dbType)) { org.rocksdb.Options opts; // Used cached options if config object passed down is the same if (CACHED_OPTS.containsKey(conf)) { @@ -147,10 +144,8 @@ public MetadataStore build() throws IOException { return new RocksDBStore(dbFile, opts); } - throw new IllegalArgumentException("Invalid argument for " - + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL - + ". 
Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB - + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB - + ", but met " + dbType); + throw new IllegalArgumentException("Invalid Container DB type. Expecting " + + CONTAINER_DB_TYPE_LEVELDB + " or " + + CONTAINER_DB_TYPE_ROCKSDB + ", but met " + dbType); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java index 3eb832f21084..ed9bfb3851c3 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java @@ -32,28 +32,30 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import com.google.common.collect.Lists; -import static java.nio.charset.StandardCharsets.UTF_8; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.ImmutablePair; -import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; + import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import static org.junit.runners.Parameterized.Parameters; import org.slf4j.event.Level; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_LEVELDB; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB; +import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.runners.Parameterized.Parameters; +import static java.nio.charset.StandardCharsets.UTF_8; /** * Test class for ozone metadata store. */ @@ -74,14 +76,14 @@ public TestMetadataStore(String metadataImpl) { @Parameters public static Collection data() { return Arrays.asList(new Object[][] { - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB} + {CONTAINER_DB_TYPE_LEVELDB}, + {CONTAINER_DB_TYPE_ROCKSDB} }); } @Before public void init() throws IOException { - if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) { + if (CONTAINER_DB_TYPE_ROCKSDB.equals(storeImpl)) { // The initialization of RocksDB fails on Windows assumeNotWindows(); } @@ -90,12 +92,12 @@ public void init() throws IOException { + "-" + storeImpl.toLowerCase()); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); store = MetadataStoreBuilder.newBuilder() .setConf(conf) .setCreateIfMissing(true) .setDbFile(testDir) + .setDBType(storeImpl) .build(); // Add 20 entries. 
@@ -110,12 +112,13 @@ public void init() throws IOException { @Test public void testIterator() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); + File dbDir = GenericTestUtils.getRandomizedTestDir(); MetadataStore dbStore = MetadataStoreBuilder.newBuilder() .setConf(conf) .setCreateIfMissing(true) .setDbFile(dbDir) + .setDBType(storeImpl) .build(); //As database is empty, check whether iterator is working as expected or @@ -166,15 +169,15 @@ public void testIterator() throws Exception { public void testMetaStoreConfigDifferentFromType() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); + String dbType; GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG); GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG); - if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) { - dbType = "RocksDB"; - } else { + if (storeImpl.equals(CONTAINER_DB_TYPE_LEVELDB)) { dbType = "LevelDB"; + } else { + dbType = "RocksDB"; } File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() @@ -193,7 +196,7 @@ public void testMetaStoreConfigDifferentFromType() throws IOException { public void testdbTypeNotSet() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); + GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG); GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG); @@ -203,7 +206,7 @@ public void testdbTypeNotSet() throws IOException { MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf) .setCreateIfMissing(true).setDbFile(dbDir).build(); assertTrue(logCapturer.getOutput().contains("dbType is null, using dbType" + - " " + storeImpl)); + " " + CONTAINER_DB_TYPE_ROCKSDB)); dbStore.close(); dbStore.destroy(); FileUtils.deleteDirectory(dbDir); @@ -213,8 +216,11 @@ public void testdbTypeNotSet() throws IOException { @After public void cleanup() throws IOException { if (store != null) { + System.out.println("--- Closing Store: " + store.getClass()); store.close(); store.destroy(); + } else { + System.out.println("--- Store already closed: " + store.getClass()); } if (testDir != null) { FileUtils.deleteDirectory(testDir); @@ -460,7 +466,6 @@ public void testInvalidStartKey() throws IOException { public void testDestroyDB() throws IOException { // create a new DB to test db destroy OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() + "-" + storeImpl.toLowerCase() + "-toDestroy"); @@ -468,6 +473,7 @@ public void testDestroyDB() throws IOException { .setConf(conf) .setCreateIfMissing(true) .setDbFile(dbDir) + .setDBType(storeImpl) .build(); dbStore.put(getBytes("key1"), getBytes("value1")); @@ -485,7 +491,6 @@ public void testDestroyDB() throws IOException { @Test public void testBatchWrite() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() + "-" + storeImpl.toLowerCase() + "-batchWrite"); @@ -493,6 +498,7 @@ public void testBatchWrite() throws IOException { .setConf(conf) 
.setCreateIfMissing(true) .setDbFile(dbDir) + .setDBType(storeImpl) .build(); List expectedResult = Lists.newArrayList(); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java index 610e898a2d70..8b3554a014cc 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java @@ -53,9 +53,6 @@ public class TestRocksDBStoreMBean { @Before public void init() throws Exception { conf = new OzoneConfiguration(); - - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java index a082e995afa1..b1ce4ba81cda 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java @@ -26,7 +26,6 @@ import java.sql.Statement; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.UUID; @@ -34,7 +33,6 @@ import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.scm.cli.SQLCLI; @@ -51,13 +49,10 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; /** * This class tests the CLI that transforms om.db into SQLite DB files. */ -@RunWith(Parameterized.class) public class TestOmSQLCli { /** @@ -82,21 +77,6 @@ public class TestOmSQLCli { private String keyName2 = "key2"; private String keyName3 = "key3"; - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList(new Object[][] { - // Uncomment the below line if we support leveldb in future. - //{OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB} - }); - } - - private String metaStoreType; - - public TestOmSQLCli(String type) { - metaStoreType = type; - } - /** * Create a MiniDFSCluster for testing. *

@@ -123,7 +103,6 @@ public void setup() throws Exception { cluster.getOzoneManager().stop(); cluster.getStorageContainerManager().stop(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType); cli = new SQLCLI(conf); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 6c858abe7032..d3e228f09c84 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.TestStorageContainerManagerHelper; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -289,8 +288,6 @@ public void testSCMSafeMode() throws Exception { @Test(timeout = 300_000) public void testSCMSafeModeRestrictedOp() throws Exception { - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB); cluster.stop(); cluster = builder.build(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index 360589688a60..704c18e18357 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.ozone.recon; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; - import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -80,11 +78,6 @@ public final class ReconServerConfigKeys { public static final String RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT = "1m"; - public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL = - "ozone.recon.container.db.impl"; - public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL_DEFAULT = - OZONE_METADATA_STORE_IMPL_ROCKSDB; - public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL = "recon.om.snapshot.task.interval.delay"; public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT From b86424a9a71b8451e3258347fa67069337163640 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Mon, 13 Jul 2020 14:38:34 +0800 Subject: [PATCH 005/165] =?UTF-8?q?HDDS-3921.=20IllegalArgumentException?= =?UTF-8?q?=20triggered=20in=20SCMContainerPlacemen=E2=80=A6=20(#1162)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../hdds/scm/container/ReplicationManager.java | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java index 617e231073c7..7a250687c7ee 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java @@ -512,7 +512,7 @@ private void forceCloseContainer(final ContainerInfo container, */ private void handleUnderReplicatedContainer(final ContainerInfo container, final Set replicas) { - LOG.debug("Handling underreplicated container: {}", + LOG.debug("Handling under-replicated container: {}", container.getContainerID()); try { final ContainerID id = container.containerID(); @@ -543,14 +543,18 @@ private void handleUnderReplicatedContainer(final ContainerInfo container, List targetReplicas = new ArrayList<>(source); // Then add any pending additions targetReplicas.addAll(replicationInFlight); - - int delta = replicationFactor - getReplicaCount(id, replicas); final ContainerPlacementStatus placementStatus = containerPlacement.validateContainerPlacement( targetReplicas, replicationFactor); + int delta = replicationFactor - getReplicaCount(id, replicas); final int misRepDelta = placementStatus.misReplicationCount(); final int replicasNeeded = delta < misRepDelta ? misRepDelta : delta; + if (replicasNeeded <= 0) { + LOG.debug("Container {} meets replication requirement with " + + "inflight replicas", id); + return; + } final List excludeList = replicas.stream() .map(ContainerReplica::getDatanodeDetails) @@ -611,7 +615,7 @@ private void handleOverReplicatedContainer(final ContainerInfo container, final ContainerID id = container.containerID(); final int replicationFactor = container.getReplicationFactor().getNumber(); - // Dont consider inflight replication while calculating excess here. + // Don't consider inflight replication while calculating excess here. int excess = replicas.size() - replicationFactor - inflightDeletion.getOrDefault(id, Collections.emptyList()).size(); From eed4697d045425ef95eabf6ffeb6514bcda1ad64 Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Sun, 12 Jul 2020 23:39:24 -0700 Subject: [PATCH 006/165] HDDS-3937. Update jquery to v3.5.1 (#1177) --- LICENSE.txt | 4 ++-- hadoop-hdds/docs/pom.xml | 2 +- hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html | 2 +- .../docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js | 2 -- .../docs/themes/ozonedoc/static/js/jquery-3.5.1.min.js | 2 ++ .../src/main/resources/webapps/static/jquery-3.4.1.min.js | 2 -- .../src/main/resources/webapps/static/jquery-3.5.1.min.js | 2 ++ hadoop-hdds/pom.xml | 2 +- .../server-scm/src/main/resources/webapps/scm/index.html | 2 +- hadoop-ozone/dist/src/main/license/bin/LICENSE.txt | 2 +- .../src/main/resources/webapps/ozoneManager/index.html | 2 +- hadoop-ozone/pom.xml | 2 +- hadoop-ozone/s3gateway/src/main/resources/browser.html | 2 +- .../s3gateway/src/main/resources/webapps/static/index.html | 2 +- 14 files changed, 15 insertions(+), 15 deletions(-) delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js create mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.5.1.min.js delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.5.1.min.js diff --git a/LICENSE.txt b/LICENSE.txt index 98fd68deb1cc..e7a35220a330 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -235,5 +235,5 @@ hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.7.9.min. 
hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.7.9.min.js -hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js -hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js +hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.5.1.min.js +hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.5.1.min.js diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index 8c1b6225d959..dd4e5b74927b 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -55,7 +55,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> static/slides/* themes/ozonedoc/static/js/bootstrap.min.js - themes/ozonedoc/static/js/jquery-3.4.1.min.js + themes/ozonedoc/static/js/jquery-3.5.1.min.js themes/ozonedoc/static/css/bootstrap-theme.min.css themes/ozonedoc/static/css/bootstrap.min.css.map diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html index 0e5ca0fec2b1..20bf76e176a2 100644 --- a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html +++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html @@ -17,6 +17,6 @@ - + diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js b/hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js deleted file mode 100644 index a1c07fd803b5..000000000000 --- a/hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */ -!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],E=C.document,r=Object.getPrototypeOf,s=t.slice,g=t.concat,u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.4.1",k=function(e,t){return new k.fn.init(e,t)},p=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return 
r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return 
a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+k+"-]").length||v.push("~="),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+k+"+*").length||v.push(".#.+[+~]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",$)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e===C||e.ownerDocument===m&&y(m,e)?-1:t===C||t.ownerDocument===m&&y(m,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===C?-1:t===C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]===m?-1:s[r]===m?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if((e.ownerDocument||e)!==C&&T(e),d.matchesSelector&&E&&!A[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var 
n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){A(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=p[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&p(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?k.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?k.grep(e,function(e){return e===n!==r}):"string"!=typeof n?k.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(k.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:L.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof k?t[0]:t,k.merge(this,k.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),D.test(r[1])&&k.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(k):k.makeArray(e,this)}).prototype=k.fn,q=k(E);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}k.fn.extend({has:function(e){var t=k(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,""],thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?k.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;nx",y.noCloneChecked=!!me.cloneNode(!0).lastChild.defaultValue;var Te=/^key/,Ce=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ee=/^([^.]*)(?:\.(.+)|)/;function ke(){return!0}function Se(){return!1}function Ne(e,t){return e===function(){try{return E.activeElement}catch(e){}}()==("focus"===t)}function Ae(e,t,n,r,i,o){var a,s;if("object"==typeof t){for(s in"string"!=typeof n&&(r=r||n,n=void 0),t)Ae(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=Se;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return k().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=k.guid++)),e.each(function(){k.event.add(this,t,i,r,n)})}function De(e,i,o){o?(Q.set(e,i,!1),k.event.add(e,i,{namespace:!1,handler:function(e){var t,n,r=Q.get(this,i);if(1&e.isTrigger&&this[i]){if(r.length)(k.event.special[i]||{}).delegateType&&e.stopPropagation();else if(r=s.call(arguments),Q.set(this,i,r),t=o(this,i),this[i](),r!==(n=Q.get(this,i))||t?Q.set(this,i,!1):n={},r!==n)return e.stopImmediatePropagation(),e.preventDefault(),n.value}else r.length&&(Q.set(this,i,{value:k.event.trigger(k.extend(r[0],k.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===Q.get(e,i)&&k.event.add(e,i,ke)}k.event={global:{},add:function(t,e,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.get(t);if(v){n.handler&&(n=(o=n).handler,i=o.selector),i&&k.find.matchesSelector(ie,i),n.guid||(n.guid=k.guid++),(u=v.events)||(u=v.events={}),(a=v.handle)||(a=v.handle=function(e){return"undefined"!=typeof k&&k.event.triggered!==e.type?k.event.dispatch.apply(t,arguments):void 0}),l=(e=(e||"").match(R)||[""]).length;while(l--)d=g=(s=Ee.exec(e[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=k.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=k.event.special[d]||{},c=k.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&k.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(t,r,h,a)||t.addEventListener&&t.addEventListener(d,a)),f.add&&(f.add.call(t,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),k.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.hasData(e)&&Q.get(e);if(v&&(u=v.events)){l=(t=(t||"").match(R)||[""]).length;while(l--)if(d=g=(s=Ee.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d){f=k.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,v.handle)||k.removeEvent(e,d,v.handle),delete u[d])}else for(d in u)k.event.remove(e,d+t[l],n,r,!0);k.isEmptyObject(u)&&Q.remove(e,"handle events")}},dispatch:function(e){var t,n,r,i,o,a,s=k.event.fix(e),u=new Array(arguments.length),l=(Q.get(this,"events")||{})[s.type]||[],c=k.event.special[s.type]||{};for(u[0]=s,t=1;t\x20\t\r\n\f]*)[^>]*)\/>/gi,qe=/\s*$/g;function Oe(e,t){return 
A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&k(e).children("tbody")[0]||e}function Pe(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Re(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Me(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(Q.hasData(e)&&(o=Q.access(e),a=Q.set(t,o),l=o.events))for(i in delete a.handle,a.events={},l)for(n=0,r=l[i].length;n")},clone:function(e,t,n){var r,i,o,a,s,u,l,c=e.cloneNode(!0),f=oe(e);if(!(y.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||k.isXMLDoc(e)))for(a=ve(c),r=0,i=(o=ve(e)).length;r").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Vt,Gt=[],Yt=/(=)\?(?=&|$)|\?\?/;k.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Gt.pop()||k.expando+"_"+kt++;return this[e]=!0,e}}),k.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Yt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Yt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Yt,"$1"+r):!1!==e.jsonp&&(e.url+=(St.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||k.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?k(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Gt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Vt=E.implementation.createHTMLDocument("").body).innerHTML="

",2===Vt.childNodes.length),k.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=D.exec(e))?[t.createElement(i[1])]:(i=we([e],t,o),o&&o.length&&k(o).remove(),k.merge([],i.childNodes)));var r,i,o},k.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(k.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},k.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){k.fn[t]=function(e){return this.on(t,e)}}),k.expr.pseudos.animated=function(t){return k.grep(k.timers,function(e){return t===e.elem}).length},k.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=k.css(e,"position"),c=k(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=k.css(e,"top"),u=k.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,k.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},k.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){k.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===k.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===k.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=k(e).offset()).top+=k.css(e,"borderTopWidth",!0),i.left+=k.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-k.css(r,"marginTop",!0),left:t.left-i.left-k.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===k.css(e,"position"))e=e.offsetParent;return e||ie})}}),k.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;k.fn[t]=function(e){return _(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),k.each(["top","left"],function(e,n){k.cssHooks[n]=ze(y.pixelPosition,function(e,t){if(t)return t=_e(e,n),$e.test(t)?k(e).position()[n]+"px":t})}),k.each({Height:"height",Width:"width"},function(a,s){k.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){k.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return _(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?k.css(e,t,i):k.style(e,t,n,i)},s,n?e:void 0,n)}})}),k.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){k.fn[n]=function(e,t){return 0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new 
RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in 
d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return 
e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function D(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||j,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,j=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var 
e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function qe(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function Le(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function He(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Oe(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Ut,Xt=[],Vt=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Xt.pop()||S.expando+"_"+Ct.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Vt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Vt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Vt,"$1"+r):!1!==e.jsonp&&(e.url+=(Et.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Xt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Ut=E.implementation.createHTMLDocument("").body).innerHTML="
",2===Ut.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):("number"==typeof f.top&&(f.top+="px"),"number"==typeof f.left&&(f.left+="px"),c.css(f))}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=$e(y.pixelPosition,function(e,t){if(t)return t=Be(e,n),Me.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 
1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in 
e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+k+"-]").length||v.push("~="),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+k+"+*").length||v.push(".#.+[+~]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",$)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return 
e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e===C||e.ownerDocument===m&&y(m,e)?-1:t===C||t.ownerDocument===m&&y(m,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===C?-1:t===C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]===m?-1:s[r]===m?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if((e.ownerDocument||e)!==C&&T(e),d.matchesSelector&&E&&!A[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){A(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=p[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&p(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?k.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?k.grep(e,function(e){return e===n!==r}):"string"!=typeof n?k.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(k.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:L.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof k?t[0]:t,k.merge(this,k.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),D.test(r[1])&&k.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(k):k.makeArray(e,this)}).prototype=k.fn,q=k(E);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}k.fn.extend({has:function(e){var t=k(e,this),n=t.length;return this.filter(function(){for(var 
e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,""],thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?k.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;nx",y.noCloneChecked=!!me.cloneNode(!0).lastChild.defaultValue;var Te=/^key/,Ce=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ee=/^([^.]*)(?:\.(.+)|)/;function ke(){return!0}function Se(){return!1}function Ne(e,t){return e===function(){try{return E.activeElement}catch(e){}}()==("focus"===t)}function Ae(e,t,n,r,i,o){var a,s;if("object"==typeof t){for(s in"string"!=typeof n&&(r=r||n,n=void 0),t)Ae(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=Se;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return k().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=k.guid++)),e.each(function(){k.event.add(this,t,i,r,n)})}function De(e,i,o){o?(Q.set(e,i,!1),k.event.add(e,i,{namespace:!1,handler:function(e){var t,n,r=Q.get(this,i);if(1&e.isTrigger&&this[i]){if(r.length)(k.event.special[i]||{}).delegateType&&e.stopPropagation();else if(r=s.call(arguments),Q.set(this,i,r),t=o(this,i),this[i](),r!==(n=Q.get(this,i))||t?Q.set(this,i,!1):n={},r!==n)return e.stopImmediatePropagation(),e.preventDefault(),n.value}else r.length&&(Q.set(this,i,{value:k.event.trigger(k.extend(r[0],k.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===Q.get(e,i)&&k.event.add(e,i,ke)}k.event={global:{},add:function(t,e,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.get(t);if(v){n.handler&&(n=(o=n).handler,i=o.selector),i&&k.find.matchesSelector(ie,i),n.guid||(n.guid=k.guid++),(u=v.events)||(u=v.events={}),(a=v.handle)||(a=v.handle=function(e){return"undefined"!=typeof k&&k.event.triggered!==e.type?k.event.dispatch.apply(t,arguments):void 0}),l=(e=(e||"").match(R)||[""]).length;while(l--)d=g=(s=Ee.exec(e[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=k.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=k.event.special[d]||{},c=k.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&k.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(t,r,h,a)||t.addEventListener&&t.addEventListener(d,a)),f.add&&(f.add.call(t,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),k.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.hasData(e)&&Q.get(e);if(v&&(u=v.events)){l=(t=(t||"").match(R)||[""]).length;while(l--)if(d=g=(s=Ee.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d){f=k.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,v.handle)||k.removeEvent(e,d,v.handle),delete u[d])}else for(d in u)k.event.remove(e,d+t[l],n,r,!0);k.isEmptyObject(u)&&Q.remove(e,"handle events")}},dispatch:function(e){var t,n,r,i,o,a,s=k.event.fix(e),u=new Array(arguments.length),l=(Q.get(this,"events")||{})[s.type]||[],c=k.event.special[s.type]||{};for(u[0]=s,t=1;t\x20\t\r\n\f]*)[^>]*)\/>/gi,qe=/\s*$/g;function Oe(e,t){return 
A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&k(e).children("tbody")[0]||e}function Pe(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Re(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Me(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(Q.hasData(e)&&(o=Q.access(e),a=Q.set(t,o),l=o.events))for(i in delete a.handle,a.events={},l)for(n=0,r=l[i].length;n")},clone:function(e,t,n){var r,i,o,a,s,u,l,c=e.cloneNode(!0),f=oe(e);if(!(y.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||k.isXMLDoc(e)))for(a=ve(c),r=0,i=(o=ve(e)).length;r").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Vt,Gt=[],Yt=/(=)\?(?=&|$)|\?\?/;k.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Gt.pop()||k.expando+"_"+kt++;return this[e]=!0,e}}),k.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Yt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Yt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Yt,"$1"+r):!1!==e.jsonp&&(e.url+=(St.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||k.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?k(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Gt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Vt=E.implementation.createHTMLDocument("").body).innerHTML="
",2===Vt.childNodes.length),k.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=D.exec(e))?[t.createElement(i[1])]:(i=we([e],t,o),o&&o.length&&k(o).remove(),k.merge([],i.childNodes)));var r,i,o},k.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(k.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},k.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){k.fn[t]=function(e){return this.on(t,e)}}),k.expr.pseudos.animated=function(t){return k.grep(k.timers,function(e){return t===e.elem}).length},k.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=k.css(e,"position"),c=k(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=k.css(e,"top"),u=k.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,k.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},k.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){k.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===k.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===k.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=k(e).offset()).top+=k.css(e,"borderTopWidth",!0),i.left+=k.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-k.css(r,"marginTop",!0),left:t.left-i.left-k.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===k.css(e,"position"))e=e.offsetParent;return e||ie})}}),k.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;k.fn[t]=function(e){return _(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),k.each(["top","left"],function(e,n){k.cssHooks[n]=ze(y.pixelPosition,function(e,t){if(t)return t=_e(e,n),$e.test(t)?k(e).position()[n]+"px":t})}),k.each({Height:"height",Width:"width"},function(a,s){k.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){k.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return _(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?k.css(e,t,i):k.style(e,t,n,i)},s,n?e:void 0,n)}})}),k.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){k.fn[n]=function(e,t){return 0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new 
RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in 
d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return 
e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function D(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||j,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,j=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var 
e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function qe(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function Le(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function He(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Oe(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Ut,Xt=[],Vt=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Xt.pop()||S.expando+"_"+Ct.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Vt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Vt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Vt,"$1"+r):!1!==e.jsonp&&(e.url+=(Et.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Xt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Ut=E.implementation.createHTMLDocument("").body).innerHTML="
",2===Ut.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):("number"==typeof f.top&&(f.top+="px"),"number"==typeof f.left&&(f.left+="px"),c.css(f))}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=$e(y.pixelPosition,function(e,t){if(t)return t=Be(e,n),Me.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 
1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0 src/main/resources/webapps/static/nvd3-1.8.5.min.css src/main/resources/webapps/static/nvd3-1.8.5.min.js.map src/main/resources/webapps/static/nvd3-1.8.5.min.js - src/main/resources/webapps/static/jquery-3.4.1.min.js + src/main/resources/webapps/static/jquery-3.5.1.min.js src/main/resources/webapps/static/bootstrap-3.4.1/** src/test/resources/additionalfields.container src/test/resources/incorrect.checksum.container diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html index d5868563d71d..5122caea53c3 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html @@ -63,7 +63,7 @@ - + diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index 54c8fe2ce206..df63c6e7debb 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -424,7 +424,7 @@ bootstrap.min.js angular-route-1.7.9.min.js angular-nvd3-1.0.9.min.js angular-1.7.9.min.js -jquery-3.4.1.min.js +jquery-3.5.1.min.js -------------------------------------------------------------------------------- recon server uses a huge number of javascript and css dependencies. See the diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html index 5fb6e44f83ee..8a5297b40f75 100644 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html +++ b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html @@ -57,7 +57,7 @@ - + diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 9455fd28f954..1b28ea45790e 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -291,7 +291,7 @@ webapps/static/angular-route-1.7.9.min.js webapps/static/bootstrap-3.4.1/** webapps/static/d3-3.5.17.min.js - webapps/static/jquery-3.4.1.min.js + webapps/static/jquery-3.5.1.min.js webapps/static/jquery.dataTables.min.js webapps/static/nvd3-1.8.5.min.css.map webapps/static/nvd3-1.8.5.min.css diff --git a/hadoop-ozone/s3gateway/src/main/resources/browser.html b/hadoop-ozone/s3gateway/src/main/resources/browser.html index 0405b17e90b5..4e6a00bfec9d 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/browser.html +++ b/hadoop-ozone/s3gateway/src/main/resources/browser.html @@ -138,7 +138,7 @@ - + diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html index b20bf3530da0..81158f1a1306 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html +++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html @@ -75,7 +75,7 @@

S3 gateway

- + From 6498ff77a8f0b9b737bd6de49b873d6dfe46e272 Mon Sep 17 00:00:00 2001 From: runzhiwang <51938049+runzhiwang@users.noreply.github.com> Date: Mon, 13 Jul 2020 14:41:46 +0800 Subject: [PATCH 007/165] HDDS-3941. Enable core dump when crash in C++ (#1186) --- hadoop-hdds/common/src/main/conf/hadoop-env.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.sh b/hadoop-hdds/common/src/main/conf/hadoop-env.sh index e43cd95b047e..51ee585ba5f5 100644 --- a/hadoop-hdds/common/src/main/conf/hadoop-env.sh +++ b/hadoop-hdds/common/src/main/conf/hadoop-env.sh @@ -30,6 +30,9 @@ ## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults ## +# Enable core dump when crash in C++ +ulimit -c unlimited + # Many of the options here are built from the perspective that users # may want to provide OVERWRITING values on the command line. # For example: From 093ee131f4fe3a007d21720b3daa09e8e83e21f2 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Mon, 13 Jul 2020 14:51:52 +0800 Subject: [PATCH 008/165] =?UTF-8?q?HDDS-3920.=20Too=20many=20redudant=20re?= =?UTF-8?q?plications=20due=20to=20fail=20to=20get=20node's=20a=E2=80=A6?= =?UTF-8?q?=20(#1163)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../hadoop/hdds/protocol/DatanodeDetails.java | 1 + .../hdds/scm/block/DeletedBlockLogImpl.java | 2 +- .../scm/container/ContainerReportHandler.java | 9 ++- .../IncrementalContainerReportHandler.java | 13 +++- .../hadoop/hdds/scm/node/SCMNodeManager.java | 63 +++++++++++-------- ...TestIncrementalContainerReportHandler.java | 36 ++++++++++- .../hdds/scm/node/TestNodeReportHandler.java | 5 +- .../hdds/scm/node/TestSCMNodeManager.java | 2 + .../ozone/scm/node/TestSCMNodeMetrics.java | 5 +- .../ozone/om/TestContainerReportWithKeys.java | 10 +++ ...econIncrementalContainerReportHandler.java | 12 +++- ...econIncrementalContainerReportHandler.java | 30 +++++++-- 12 files changed, 140 insertions(+), 48 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 7fa77654ee7b..1b6a2141e0f0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -78,6 +78,7 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.hostName = datanodeDetails.hostName; this.ports = datanodeDetails.ports; this.setNetworkName(datanodeDetails.getNetworkName()); + this.setParent(datanodeDetails.getParent()); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index 08639baaac1e..bf0317431b02 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -132,7 +132,7 @@ public void incrementCount(List txIDs) throws IOException { if (block == null) { // Should we make this an error ? How can we not find the deleted // TXID? 
- LOG.warn("Deleted TXID not found."); + LOG.warn("Deleted TXID {} not found.", txID); continue; } DeletedBlocksTransaction.Builder builder = block.toBuilder(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java index 1b0b81f41eb2..8432e29ddbb8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java @@ -102,8 +102,15 @@ public ContainerReportHandler(final NodeManager nodeManager, public void onMessage(final ContainerReportFromDatanode reportFromDatanode, final EventPublisher publisher) { - final DatanodeDetails datanodeDetails = + final DatanodeDetails dnFromReport = reportFromDatanode.getDatanodeDetails(); + DatanodeDetails datanodeDetails = + nodeManager.getNodeByUuid(dnFromReport.getUuidString()); + if (datanodeDetails == null) { + LOG.warn("Received container report from unknown datanode {}", + dnFromReport); + return; + } final ContainerReportsProto containerReport = reportFromDatanode.getReport(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java index 017cc5c06225..c2148df17dc2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java @@ -54,16 +54,23 @@ public IncrementalContainerReportHandler( @Override public void onMessage(final IncrementalContainerReportFromDatanode report, final EventPublisher publisher) { + final DatanodeDetails dnFromReport = report.getDatanodeDetails(); if (LOG.isDebugEnabled()) { LOG.debug("Processing incremental container report from data node {}", - report.getDatanodeDetails().getUuid()); + dnFromReport.getUuid()); + } + DatanodeDetails dd = + nodeManager.getNodeByUuid(dnFromReport.getUuidString()); + if (dd == null) { + LOG.warn("Received container report from unknown datanode {}", + dnFromReport); + return; } boolean success = true; for (ContainerReplicaProto replicaProto : report.getReport().getReportList()) { try { - final DatanodeDetails dd = report.getDatanodeDetails(); final ContainerID id = ContainerID.valueof( replicaProto.getContainerID()); if (!replicaProto.getState().equals( @@ -81,7 +88,7 @@ public void onMessage(final IncrementalContainerReportFromDatanode report, } catch (IOException e) { success = false; LOG.error("Exception while processing ICR for container {}", - replicaProto.getContainerID()); + replicaProto.getContainerID(), e); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 5a3851a2d87a..005881c01175 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -241,34 +241,43 @@ public RegisteredCommand register( DatanodeDetails datanodeDetails, NodeReportProto nodeReport, PipelineReportsProto pipelineReportsProto) { - InetAddress dnAddress = 
Server.getRemoteIp(); - if (dnAddress != null) { - // Mostly called inside an RPC, update ip and peer hostname - datanodeDetails.setHostName(dnAddress.getHostName()); - datanodeDetails.setIpAddress(dnAddress.getHostAddress()); - } - try { - String dnsName; - String networkLocation; - datanodeDetails.setNetworkName(datanodeDetails.getUuidString()); - if (useHostname) { - dnsName = datanodeDetails.getHostName(); - } else { - dnsName = datanodeDetails.getIpAddress(); - } - networkLocation = nodeResolve(dnsName); - if (networkLocation != null) { - datanodeDetails.setNetworkLocation(networkLocation); + if (!isNodeRegistered(datanodeDetails)) { + InetAddress dnAddress = Server.getRemoteIp(); + if (dnAddress != null) { + // Mostly called inside an RPC, update ip and peer hostname + datanodeDetails.setHostName(dnAddress.getHostName()); + datanodeDetails.setIpAddress(dnAddress.getHostAddress()); } - nodeStateManager.addNode(datanodeDetails); - clusterMap.add(datanodeDetails); - addEntryTodnsToUuidMap(dnsName, datanodeDetails.getUuidString()); - // Updating Node Report, as registration is successful - processNodeReport(datanodeDetails, nodeReport); - LOG.info("Registered Data node : {}", datanodeDetails); - } catch (NodeAlreadyExistsException e) { - if (LOG.isTraceEnabled()) { - LOG.trace("Datanode is already registered. Datanode: {}", + try { + String dnsName; + String networkLocation; + datanodeDetails.setNetworkName(datanodeDetails.getUuidString()); + if (useHostname) { + dnsName = datanodeDetails.getHostName(); + } else { + dnsName = datanodeDetails.getIpAddress(); + } + networkLocation = nodeResolve(dnsName); + if (networkLocation != null) { + datanodeDetails.setNetworkLocation(networkLocation); + } + + clusterMap.add(datanodeDetails); + nodeStateManager.addNode(datanodeDetails); + // Check that datanode in nodeStateManager has topology parent set + DatanodeDetails dn = nodeStateManager.getNode(datanodeDetails); + Preconditions.checkState(dn.getParent() != null); + addEntryTodnsToUuidMap(dnsName, datanodeDetails.getUuidString()); + // Updating Node Report, as registration is successful + processNodeReport(datanodeDetails, nodeReport); + LOG.info("Registered Data node : {}", datanodeDetails); + } catch (NodeAlreadyExistsException e) { + if (LOG.isTraceEnabled()) { + LOG.trace("Datanode is already registered. 
Datanode: {}", + datanodeDetails.toString()); + } + } catch (NodeNotFoundException e) { + LOG.error("Cannot find datanode {} from nodeStateManager", datanodeDetails.toString()); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java index efa333dd85e2..1af2f732a342 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -26,10 +26,16 @@ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher .IncrementalContainerReportFromDatanode; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -37,7 +43,10 @@ import org.mockito.Mockito; import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Set; +import java.util.UUID; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; @@ -55,9 +64,18 @@ public class TestIncrementalContainerReportHandler { @Before public void setup() throws IOException { - final ConfigurationSource conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); + final String path = + GenericTestUtils.getTempPath(UUID.randomUUID().toString()); + Path scmPath = Paths.get(path, "scm-meta"); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); this.containerManager = Mockito.mock(ContainerManager.class); - this.nodeManager = Mockito.mock(NodeManager.class); + NetworkTopology clusterMap = new NetworkTopologyImpl(conf); + EventQueue eventQueue = new EventQueue(); + SCMStorageConfig storageConfig = new SCMStorageConfig(conf); + this.nodeManager = + new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap); + this.containerStateManager = new ContainerStateManager(conf); this.publisher = Mockito.mock(EventPublisher.class); @@ -105,6 +123,9 @@ public void testClosingToClosed() throws IOException { final DatanodeDetails datanodeOne = randomDatanodeDetails(); final DatanodeDetails datanodeTwo = randomDatanodeDetails(); final DatanodeDetails datanodeThree = randomDatanodeDetails(); + nodeManager.register(datanodeOne, null, null); + nodeManager.register(datanodeTwo, null, null); + nodeManager.register(datanodeThree, 
null, null); final Set containerReplicas = getReplicas( container.containerID(), ContainerReplicaProto.State.CLOSING, @@ -139,6 +160,9 @@ public void testClosingToQuasiClosed() throws IOException { final DatanodeDetails datanodeOne = randomDatanodeDetails(); final DatanodeDetails datanodeTwo = randomDatanodeDetails(); final DatanodeDetails datanodeThree = randomDatanodeDetails(); + nodeManager.register(datanodeOne, null, null); + nodeManager.register(datanodeTwo, null, null); + nodeManager.register(datanodeThree, null, null); final Set containerReplicas = getReplicas( container.containerID(), ContainerReplicaProto.State.CLOSING, @@ -174,6 +198,9 @@ public void testQuasiClosedToClosed() throws IOException { final DatanodeDetails datanodeOne = randomDatanodeDetails(); final DatanodeDetails datanodeTwo = randomDatanodeDetails(); final DatanodeDetails datanodeThree = randomDatanodeDetails(); + nodeManager.register(datanodeOne, null, null); + nodeManager.register(datanodeTwo, null, null); + nodeManager.register(datanodeThree, null, null); final Set containerReplicas = getReplicas( container.containerID(), ContainerReplicaProto.State.CLOSING, @@ -212,6 +239,9 @@ public void testDeleteContainer() throws IOException { final DatanodeDetails datanodeOne = randomDatanodeDetails(); final DatanodeDetails datanodeTwo = randomDatanodeDetails(); final DatanodeDetails datanodeThree = randomDatanodeDetails(); + nodeManager.register(datanodeOne, null, null); + nodeManager.register(datanodeTwo, null, null); + nodeManager.register(datanodeThree, null, null); final Set containerReplicas = getReplicas( container.containerID(), ContainerReplicaProto.State.CLOSED, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java index 7b4d841fe76f..69b031c552f1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.Event; @@ -56,9 +57,9 @@ public void resetEventCollector() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class); Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1"); + NetworkTopology clusterMap = new NetworkTopologyImpl(conf); nodeManager = - new SCMNodeManager(conf, storageConfig, new EventQueue(), Mockito.mock( - NetworkTopology.class)); + new SCMNodeManager(conf, storageConfig, new EventQueue(), clusterMap); nodeReportHandler = new NodeReportHandler(nodeManager); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index b167a38b7254..df5cb2de2550 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -1133,6 +1133,8 @@ public void testScmRegisterNodeWith4LayerNetworkTopology() List nodeList = nodeManager.getAllNodes(); nodeList.stream().forEach(node -> Assert.assertTrue(node.getNetworkLocation().startsWith("/rack1/ng"))); + nodeList.stream().forEach(node -> + Assert.assertTrue(node.getParent() != null)); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java index 3625e3475d4a..7576e8babd45 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; @@ -45,7 +45,6 @@ import static org.junit.Assert.assertEquals; import org.junit.BeforeClass; import org.junit.Test; -import org.mockito.Mockito; /** * Test cases to verify the metrics exposed by SCMNodeManager. @@ -64,7 +63,7 @@ public static void setup() throws Exception { SCMStorageConfig config = new SCMStorageConfig(NodeType.DATANODE, new File("/tmp"), "storage"); nodeManager = new SCMNodeManager(source, config, publisher, - Mockito.mock(NetworkTopology.class)); + new NetworkTopologyImpl(source)); registeredDatanode = DatanodeDetails.newBuilder() .setHostName("localhost") diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index 8984e76ea92b..7f049a3f6585 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -22,7 +22,9 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.*; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -32,6 +34,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -42,6 +45,7 @@ import java.io.IOException; import java.util.HashMap; +import java.util.Set; /** * This class tests container report with DN container state info. 
@@ -122,6 +126,12 @@ public void testContainerReportKeyWrite() throws Exception { ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID()); + Set replicas = + scm.getContainerManager().getContainerReplicas( + new ContainerID(keyInfo.getContainerID())); + Assert.assertTrue(replicas.size() == 1); + replicas.stream().forEach(rp -> + Assert.assertTrue(rp.getDatanodeDetails().getParent() != null)); LOG.info("SCM Container Info keyCount: {} usedBytes: {}", cinfo.getNumberOfKeys(), cinfo.getUsedBytes()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java index a5d946e7f3db..b538caf4b26a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java @@ -50,9 +50,18 @@ public ReconIncrementalContainerReportHandler(NodeManager nodeManager, @Override public void onMessage(final IncrementalContainerReportFromDatanode report, final EventPublisher publisher) { + final DatanodeDetails dnFromReport = report.getDatanodeDetails(); if (LOG.isDebugEnabled()) { LOG.debug("Processing incremental container report from data node {}", - report.getDatanodeDetails()); + dnFromReport); + } + + DatanodeDetails dd = + getNodeManager().getNodeByUuid(dnFromReport.getUuidString()); + if (dd == null) { + LOG.warn("Received container report from unknown datanode {}", + dnFromReport); + return; } ReconContainerManager containerManager = @@ -61,7 +70,6 @@ public void onMessage(final IncrementalContainerReportFromDatanode report, for (ContainerReplicaProto replicaProto : report.getReport().getReportList()) { try { - final DatanodeDetails dd = report.getDatanodeDetails(); final ContainerID id = ContainerID.valueof( replicaProto.getContainerID()); try { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java index d4f28c01fec5..dacf29381779 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java @@ -22,21 +22,30 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.UUID; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import 
org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; /** @@ -59,17 +68,26 @@ public void testProcessICR() throws IOException, NodeNotFoundException { datanodeDetails.getUuidString()); when(reportMock.getReport()).thenReturn(containerReport); - NodeManager nodeManagerMock = mock(NodeManager.class); + final String path = + GenericTestUtils.getTempPath(UUID.randomUUID().toString()); + Path scmPath = Paths.get(path, "scm-meta"); + final OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); + NetworkTopology clusterMap = new NetworkTopologyImpl(conf); + EventQueue eventQueue = new EventQueue(); + SCMStorageConfig storageConfig = new SCMStorageConfig(conf); + NodeManager nodeManager = + new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap); + nodeManager.register(datanodeDetails, null, null); ReconContainerManager containerManager = getContainerManager(); ReconIncrementalContainerReportHandler reconIcr = - new ReconIncrementalContainerReportHandler(nodeManagerMock, + new ReconIncrementalContainerReportHandler(nodeManager, containerManager); EventPublisher eventPublisherMock = mock(EventPublisher.class); reconIcr.onMessage(reportMock, eventPublisherMock); - verify(nodeManagerMock, times(1)) - .addContainer(datanodeDetails, containerID); + nodeManager.addContainer(datanodeDetails, containerID); assertTrue(containerManager.exists(containerID)); assertEquals(1, containerManager.getContainerReplicas(containerID).size()); } From 28c352db8a77faa26ad5fcaed55a402c85458f8a Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Tue, 14 Jul 2020 01:51:29 +0800 Subject: [PATCH 009/165] HDDS-3951. Rename the num.write.chunk.thread key. 
(#1187) --- .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 8 +++++--- .../java/org/apache/hadoop/ozone/OzoneConfigKeys.java | 11 +++++++---- .../common/src/main/resources/ozone-default.xml | 2 +- .../transport/server/ratis/XceiverServerRatis.java | 6 ++++-- .../apache/hadoop/ozone/MiniOzoneChaosCluster.java | 3 ++- .../src/test/resources/ozone-site.xml | 2 +- 6 files changed, 20 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 2efd82b62977..ddb988ae7e1e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -44,9 +44,11 @@ public final class ScmConfigKeys { = "dfs.container.ratis.rpc.type"; public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = "GRPC"; - public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY - = "dfs.container.ratis.num.write.chunk.threads"; - public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT + public static final String + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME + = "dfs.container.ratis.num.write.chunk.threads.per.volume"; + public static final int + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10; public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY = "dfs.container.ratis.replication.level"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 212931142f83..dc8b231fbbdc 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -239,10 +239,13 @@ public final class OzoneConfigKeys { = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT; + public static final String + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY + = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; + public static final int + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + = ScmConfigKeys. + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; public static final ReplicationLevel diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index c72dad0cee93..d8fc591a1795 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -196,7 +196,7 @@
- dfs.container.ratis.num.write.chunk.threads + dfs.container.ratis.num.write.chunk.threads.per.volume 10 OZONE, RATIS, PERFORMANCE Maximum number of threads in the thread pool that Datanode diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 53fa2d8e5263..6c98e81f1c43 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -782,8 +782,10 @@ private static List createChunkExecutors( ConfigurationSource conf) { // TODO create single pool with N threads if using non-incremental chunks final int threadCountPerDisk = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT); + OzoneConfigKeys + .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + OzoneConfigKeys + .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = MutableVolumeSet.getDatanodeStorageDirs(conf).size(); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 2a5cf24603e3..3267976f7670 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -225,7 +225,8 @@ protected void initializeConfiguration() throws IOException { conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY, + OzoneConfigKeys + .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, 4); conf.setInt( OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml index 6d5dee1e3b16..0c5ae1fa8845 100644 --- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml @@ -27,7 +27,7 @@ - dfs.container.ratis.num.write.chunk.threads + dfs.container.ratis.num.write.chunk.threads.per.volume 4 From b89978c731472a8226c641b40edf63aefd22e218 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 13 Jul 2020 17:30:10 -0700 Subject: [PATCH 010/165] HDDS-3685. Remove replay logic from actual request logic. 
(#1082) --- .../ozone/om/exceptions/OMException.java | 2 - .../om/exceptions/OMReplayException.java | 50 -------- .../src/main/proto/OmClientProtocol.proto | 3 - .../src/main/proto/proto.lock | 4 - .../om/ratis/OzoneManagerStateMachine.java | 8 -- .../ozone/om/request/OMClientRequest.java | 39 +----- .../request/bucket/OMBucketCreateRequest.java | 26 +--- .../request/bucket/OMBucketDeleteRequest.java | 26 +--- .../bucket/OMBucketSetPropertyRequest.java | 12 +- .../bucket/acl/OMBucketAclRequest.java | 11 -- .../file/OMDirectoryCreateRequest.java | 47 ++----- .../om/request/file/OMFileCreateRequest.java | 48 ++----- .../request/key/OMAllocateBlockRequest.java | 50 ++------ .../om/request/key/OMKeyCommitRequest.java | 74 ++--------- .../om/request/key/OMKeyCreateRequest.java | 51 ++------ .../om/request/key/OMKeyDeleteRequest.java | 34 +---- .../om/request/key/OMKeyPurgeRequest.java | 97 +------------- .../om/request/key/OMKeyRenameRequest.java | 120 ++++-------------- .../om/request/key/OMKeysDeleteRequest.java | 37 ++---- .../om/request/key/acl/OMKeyAclRequest.java | 24 +--- .../request/key/acl/OMKeyAddAclRequest.java | 6 - .../key/acl/OMKeyRemoveAclRequest.java | 6 - .../request/key/acl/OMKeySetAclRequest.java | 6 - .../key/acl/prefix/OMPrefixAclRequest.java | 30 +---- .../key/acl/prefix/OMPrefixAddAclRequest.java | 6 - .../acl/prefix/OMPrefixRemoveAclRequest.java | 6 - .../key/acl/prefix/OMPrefixSetAclRequest.java | 6 - .../S3InitiateMultipartUploadRequest.java | 5 - .../S3MultipartUploadAbortRequest.java | 20 --- .../S3MultipartUploadCommitPartRequest.java | 34 ++--- .../S3MultipartUploadCompleteRequest.java | 82 ++---------- .../request/volume/OMVolumeCreateRequest.java | 23 +--- .../request/volume/OMVolumeDeleteRequest.java | 10 -- .../volume/OMVolumeSetOwnerRequest.java | 57 +++------ .../volume/OMVolumeSetQuotaRequest.java | 20 +-- .../volume/acl/OMVolumeAclRequest.java | 52 +++----- .../volume/acl/OMVolumeAddAclRequest.java | 6 - .../volume/acl/OMVolumeRemoveAclRequest.java | 6 - .../volume/acl/OMVolumeSetAclRequest.java | 6 - .../ozone/om/response/OMClientResponse.java | 2 +- .../bucket/OMBucketCreateResponse.java | 2 +- .../bucket/OMBucketDeleteResponse.java | 2 +- .../bucket/OMBucketSetPropertyResponse.java | 2 +- .../bucket/acl/OMBucketAclResponse.java | 2 +- .../file/OMDirectoryCreateResponse.java | 44 +++---- .../response/file/OMFileCreateResponse.java | 4 +- .../response/key/OMAllocateBlockResponse.java | 2 +- .../om/response/key/OMKeyCommitResponse.java | 24 +--- .../om/response/key/OMKeyCreateResponse.java | 2 +- .../om/response/key/OMKeyDeleteResponse.java | 46 +++---- .../om/response/key/OMKeyPurgeResponse.java | 11 +- .../om/response/key/OMKeyRenameResponse.java | 59 ++------- .../om/response/key/acl/OMKeyAclResponse.java | 2 +- .../key/acl/prefix/OMPrefixAclResponse.java | 2 +- .../S3InitiateMultipartUploadResponse.java | 2 +- .../S3MultipartUploadAbortResponse.java | 2 +- .../S3MultipartUploadCommitPartResponse.java | 40 ++---- .../S3MultipartUploadCompleteResponse.java | 48 +++---- .../volume/OMVolumeAclOpResponse.java | 2 +- .../volume/OMVolumeCreateResponse.java | 2 +- .../volume/OMVolumeDeleteResponse.java | 2 +- .../volume/OMVolumeSetOwnerResponse.java | 4 +- .../volume/OMVolumeSetQuotaResponse.java | 2 +- .../bucket/TestOMBucketCreateRequest.java | 28 ---- .../bucket/TestOMBucketDeleteRequest.java | 42 ------ .../TestOMBucketSetPropertyRequest.java | 29 ----- .../request/file/TestOMFileCreateRequest.java | 29 ----- .../om/request/key/TestOMKeyAclRequest.java | 9 
+- .../request/key/TestOMKeyCommitRequest.java | 79 ------------ .../request/key/TestOMKeyCreateRequest.java | 42 ------ .../request/key/TestOMKeyDeleteRequest.java | 37 ------ .../key/TestOMKeyPurgeRequestAndResponse.java | 117 ----------------- .../request/key/TestOMKeyRenameRequest.java | 102 --------------- .../request/key/TestOMPrefixAclRequest.java | 11 +- .../volume/TestOMVolumeCreateRequest.java | 26 ---- .../volume/TestOMVolumeDeleteRequest.java | 38 ------ .../volume/TestOMVolumeSetOwnerRequest.java | 31 ----- .../volume/TestOMVolumeSetQuotaRequest.java | 30 ----- .../volume/acl/TestOMVolumeAddAclRequest.java | 33 ----- .../acl/TestOMVolumeRemoveAclRequest.java | 44 ------- .../volume/acl/TestOMVolumeSetAclRequest.java | 35 ----- .../file/TestOMDirectoryCreateResponse.java | 5 +- .../response/key/TestOMKeyDeleteResponse.java | 2 +- 83 files changed, 282 insertions(+), 1947 deletions(-) delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index 58d5a02e1f30..1eed619dc321 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -222,7 +222,5 @@ public enum ResultCodes { DIRECTORY_ALREADY_EXISTS, INVALID_VOLUME_NAME, - - REPLAY // When ratis logs are replayed. } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java deleted file mode 100644 index 0eeb873a58f8..000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.exceptions; - -import java.io.IOException; - -/** - * Exception thrown by Ozone Manager when a transaction is replayed. This - * exception should not be thrown to client. It is used in - * OMClientRequest#validateAndUpdateCache to log error and continue in case - * of replay transaction. - */ -public class OMReplayException extends IOException { - - private final boolean needsDBOperations; - - public OMReplayException() { - this(false); - } - - /** - * When the transaction is a replay but still needs some DB operations to - * be performed (such as cleanup of old keys). - * @param needsDBOperations - */ - public OMReplayException(boolean needsDBOperations) { - // Dummy message. This exception is not thrown to client. - super("Replayed transaction"); - this.needsDBOperations = needsDBOperations; - } - - public boolean isDBOperationNeeded() { - return needsDBOperations; - } -} \ No newline at end of file diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index ba193c70d90c..f4cf79a6c567 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -302,9 +302,6 @@ enum Status { DIRECTORY_ALREADY_EXISTS = 60; INVALID_VOLUME_NAME = 61; - - // When transactions are replayed - REPLAY = 100; } /** diff --git a/hadoop-ozone/interface-client/src/main/proto/proto.lock b/hadoop-ozone/interface-client/src/main/proto/proto.lock index 2d90e1cd442a..f591ad1b2da6 100644 --- a/hadoop-ozone/interface-client/src/main/proto/proto.lock +++ b/hadoop-ozone/interface-client/src/main/proto/proto.lock @@ -415,10 +415,6 @@ { "name": "INVALID_VOLUME_NAME", "integer": 61 - }, - { - "name": "REPLAY", - "integer": 100 } ] }, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 3aff87a3941b..c042fcb7eedd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -19,7 +19,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.protobuf.ServiceException; import java.io.IOException; @@ -64,7 +63,6 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.METADATA_ERROR; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.REPLAY; /** * The OM StateMachine is the state machine for OM Ratis server. 
It is @@ -258,12 +256,6 @@ public CompletableFuture applyTransaction(TransactionContext trx) { terminate(omResponse, OMException.ResultCodes.INTERNAL_ERROR); } else if (omResponse.getStatus() == METADATA_ERROR) { terminate(omResponse, OMException.ResultCodes.METADATA_ERROR); - } else if (omResponse.getStatus() == REPLAY) { - // For replay we do not add response to double buffer, so update - // LastAppliedIndex for the replay transactions here. - computeAndUpdateLastAppliedIndex(trxLogIndex, - trx.getLogEntry().getTerm(), Lists.newArrayList(trxLogIndex), - true); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 03531440e5bf..3ce059fcb6ef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -54,7 +53,6 @@ import javax.annotation.Nonnull; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.REPLAY; /** * OMClientRequest provides methods which every write OM request should @@ -71,8 +69,6 @@ public abstract class OMClientRequest implements RequestAuditor { public enum Result { SUCCESS, // The request was executed successfully - REPLAY, // The request is a replay and was ignored - FAILURE // The request failed and exception was thrown } @@ -256,7 +252,6 @@ protected OMResponse createOperationKeysErrorOMResponse( /** * Add the client response to double buffer and set the flush future. - * For responses which has status set to REPLAY it is a no-op. * @param trxIndex * @param omClientResponse * @param omDoubleBufferHelper @@ -265,13 +260,8 @@ protected void addResponseToDoubleBuffer(long trxIndex, OMClientResponse omClientResponse, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { if (omClientResponse != null) { - // For replay transaction we do not need to add to double buffer, as - // for these transactions there is nothing needs to be done for - // addDBToBatch. - if (omClientResponse.getOMResponse().getStatus() != REPLAY) { - omClientResponse.setFlushFuture( - omDoubleBufferHelper.add(omClientResponse, trxIndex)); - } + omClientResponse.setFlushFuture( + omDoubleBufferHelper.add(omClientResponse, trxIndex)); } } @@ -313,29 +303,4 @@ public Map buildVolumeAuditMap(String volume) { auditMap.put(OzoneConsts.VOLUME, volume); return auditMap; } - - /** - * Check if the transaction is a replay. - * @param ozoneObj OMVolumeArgs or OMBucketInfo or OMKeyInfo object whose - * updateID needs to be compared with - * @param transactionID the current transaction ID - * @return true if transactionID is less than or equal to updateID, false - * otherwise. 
- */ - protected boolean isReplay(OzoneManager om, WithObjectID ozoneObj, - long transactionID) { - return om.isRatisEnabled() && ozoneObj.isUpdateIDset() && - transactionID <= ozoneObj.getUpdateID(); - } - - /** - * Return a dummy OMClientResponse for when the transactions are replayed. - */ - protected OMResponse createReplayOMResponse( - @Nonnull OMResponse.Builder omResponse) { - - omResponse.setSuccess(false); - omResponse.setStatus(REPLAY); - return omResponse.build(); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 8181a64e3a72..9d7d133eca54 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -69,6 +69,8 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -167,27 +169,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, //Check if the volume exists if (omVolumeArgs == null) { LOG.debug("volume: {} not found ", volumeName); - throw new OMException("Volume doesn't exist", - OMException.ResultCodes.VOLUME_NOT_FOUND); + throw new OMException("Volume doesn't exist", VOLUME_NOT_FOUND); } //Check if bucket already exists - OmBucketInfo dbBucketInfo = metadataManager.getBucketTable() - .getReadCopy(bucketKey); - if (dbBucketInfo != null) { - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, dbBucketInfo, transactionLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", - transactionLogIndex, createBucketRequest); - return new OMBucketCreateResponse(createReplayOMResponse(omResponse)); - } else { - LOG.debug("bucket: {} already exists ", bucketName); - throw new OMException("Bucket already exist", - OMException.ResultCodes.BUCKET_ALREADY_EXISTS); - } + if (metadataManager.getBucketTable().isExist(bucketKey)) { + LOG.debug("bucket: {} already exists ", bucketName); + throw new OMException("Bucket already exist", BUCKET_ALREADY_EXISTS); } // Add objectID and updateID @@ -211,7 +199,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } catch (IOException ex) { exception = ex; omClientResponse = new OMBucketCreateResponse( - createErrorOMResponse(omResponse, exception), omBucketInfo); + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java index 18bf3ae3bb13..91aef6a2d44a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.OzoneConsts; @@ -52,6 +51,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; @@ -102,7 +102,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName, bucketName, null); } - // acquire lock acquiredVolumeLock = omMetadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName); @@ -111,25 +110,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName, bucketName); // No need to check volume exists here, as bucket cannot be created - // with out volume creation. - //Check if bucket exists + // with out volume creation. Check if bucket exists String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable() - .getReadCopy(bucketKey); - if (omBucketInfo == null) { - LOG.debug("bucket: {} not found ", bucketName); - throw new OMException("Bucket doesn't exist", - OMException.ResultCodes.BUCKET_NOT_FOUND); - } - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omBucketInfo, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", - transactionLogIndex, deleteBucketRequest); - return new OMBucketDeleteResponse(createReplayOMResponse(omResponse)); + if (!omMetadataManager.getBucketTable().isExist(bucketKey)) { + LOG.debug("bucket: {} not found ", bucketName); + throw new OMException("Bucket already exist", BUCKET_NOT_FOUND); } //Check if bucket is empty @@ -155,7 +141,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, success = false; exception = ex; omClientResponse = new OMBucketDeleteResponse( - createErrorOMResponse(omResponse, exception), volumeName, bucketName); + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 2288de73aa8e..d90f08ea7c8c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -121,16 +121,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMException.ResultCodes.BUCKET_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. - // If a replay, then the response has already been returned to the - // client. So take no further action and return a dummy OMClientResponse. - if (isReplay(ozoneManager, dbBucketInfo, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", - transactionLogIndex, setBucketPropertyRequest); - return new OMBucketSetPropertyResponse( - createReplayOMResponse(omResponse)); - } - OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder(); bucketInfoBuilder.setVolumeName(dbBucketInfo.getVolumeName()) .setBucketName(dbBucketInfo.getBucketName()) @@ -190,7 +180,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, success = false; exception = ex; omClientResponse = new OMBucketSetPropertyResponse( - createErrorOMResponse(omResponse, exception), omBucketInfo); + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java index f162e8836a46..a493f9fa1472 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.bucket.acl.OMBucketAclResponse; import org.apache.hadoop.ozone.util.BooleanBiFunction; import org.apache.hadoop.ozone.om.request.util.ObjectParser; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -106,16 +105,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, throw new 
OMException(OMException.ResultCodes.BUCKET_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omBucketInfo, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", - transactionLogIndex, getOmRequest()); - return new OMBucketAclResponse(createReplayOMResponse(omResponse)); - } - operationResult = omBucketAclOp.apply(ozoneAcls, omBucketInfo); omBucketInfo.setUpdateID(transactionLogIndex, ozoneManager.isRatisEnabled()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 7f860fc2194d..ec51333a5710 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -29,7 +29,6 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -91,8 +90,6 @@ public class OMDirectoryCreateRequest extends OMKeyRequest { public enum Result { SUCCESS, // The request was executed successfully - REPLAY, // The request is a replay and was ignored - DIRECTORY_ALREADY_EXISTS, // Directory key already exists in DB FAILURE // The request failed and exception was thrown @@ -197,34 +194,20 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName, bucketName, Optional.of(dirKeyInfo), Optional.of(missingParentInfos), trxnLogIndex); - - omClientResponse = new OMDirectoryCreateResponse(omResponse.build(), - dirKeyInfo, missingParentInfos); result = Result.SUCCESS; + omClientResponse = new OMDirectoryCreateResponse(omResponse.build(), + dirKeyInfo, missingParentInfos, result); } else { // omDirectoryResult == DIRECTORY_EXITS - // Check if this is a replay of ratis logs - String dirKey = omMetadataManager.getOzoneDirKey(volumeName, - bucketName, keyName); - OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().get(dirKey); - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - throw new OMReplayException(); - } else { - result = Result.DIRECTORY_ALREADY_EXISTS; - omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS); - omClientResponse = new OMDirectoryCreateResponse(omResponse.build()); - } + result = Result.DIRECTORY_ALREADY_EXISTS; + omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS); + omClientResponse = new OMDirectoryCreateResponse(omResponse.build(), + result); } } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMDirectoryCreateResponse( - createReplayOMResponse(omResponse)); - } else { - exception = ex; - omClientResponse = new OMDirectoryCreateResponse( - createErrorOMResponse(omResponse, exception)); - } + exception = ex; + omClientResponse = new OMDirectoryCreateResponse( + createErrorOMResponse(omResponse, exception), result); } finally { 
addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -234,10 +217,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } - if (result != Result.REPLAY) { - auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY, - auditMap, exception, userInfo)); - } + auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY, + auditMap, exception, userInfo)); logResult(createDirectoryRequest, keyArgs, omMetrics, result, trxnLogIndex, exception); @@ -314,12 +295,6 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, volumeName, bucketName, keyName); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - createDirectoryRequest); - } - break; case DIRECTORY_ALREADY_EXISTS: if (LOG.isDebugEnabled()) { LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 4db8f8014f2c..3b0b02bf549d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -32,7 +32,6 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; import org.slf4j.Logger; @@ -216,27 +215,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMException.ResultCodes.NOT_A_FILE); } - // Check if Key already exists in KeyTable and this transaction is a - // replay. String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable() .getIfExist(ozoneKey); - if (dbKeyInfo != null) { - // Check if this transaction is a replay of ratis logs. - // We check only the KeyTable here and not the OpenKeyTable. In case - // this transaction is a replay but the transaction was not committed - // to the KeyTable, then we recreate the key in OpenKey table. This is - // okay as all the subsequent transactions would also be replayed and - // the openKey table would eventually reach the same state. - // The reason we do not check the OpenKey table is to avoid a DB read - // in regular non-replay scenario. - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy response. 
- throw new OMReplayException(); - } - } OMFileRequest.OMPathInfo pathInfo = OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName, @@ -312,18 +294,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMFileCreateResponse(createReplayOMResponse( - omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - omMetrics.incNumCreateFileFails(); - omResponse.setCmdType(Type.CreateFile); - omClientResponse = new OMFileCreateResponse(createErrorOMResponse( + result = Result.FAILURE; + exception = ex; + omMetrics.incNumCreateFileFails(); + omResponse.setCmdType(Type.CreateFile); + omClientResponse = new OMFileCreateResponse(createErrorOMResponse( omResponse, exception)); - } } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -334,18 +310,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } // Audit Log outside the lock - if (result != Result.REPLAY) { - Map auditMap = buildKeyArgsAuditMap(keyArgs); - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.CREATE_FILE, auditMap, exception, - getOmRequest().getUserInfo())); - } + Map auditMap = buildKeyArgsAuditMap(keyArgs); + auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.CREATE_FILE, auditMap, exception, + getOmRequest().getUserInfo())); switch (result) { - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - createFileRequest); - break; case SUCCESS: LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index 348c96a2ae6d..1a39e0b19b80 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -25,7 +25,6 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -169,9 +168,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getOmRequest()); OMClientResponse omClientResponse = null; - OmKeyInfo openKeyInfo = null; + OmKeyInfo openKeyInfo; IOException exception = null; - Result result = null; try { // check Acl @@ -186,30 +184,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKeyName); if (openKeyInfo == null) { - // Check if this transaction is a replay of ratis logs. - // If the Key was already committed and this transaction is being - // replayed, we should ignore this transaction. - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, - bucketName, keyName); - OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - if (dbKeyInfo != null) { - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - // This transaction is a replay. 
Send replay response. - throw new OMReplayException(); - } - } throw new OMException("Open Key not found " + openKeyName, KEY_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. - // Check the updateID of the openKey to verify that it is not greater - // than the current transactionLogIndex - if (isReplay(ozoneManager, openKeyInfo, trxnLogIndex)) { - // This transaction is a replay. Send replay response. - throw new OMReplayException(); - } - // Append new block openKeyInfo.appendNewBlocks(Collections.singletonList( OmKeyLocationInfo.getFromProtobuf(blockLocation)), false); @@ -229,35 +207,23 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setKeyLocation(blockLocation).build()); omClientResponse = new OMAllocateBlockResponse(omResponse.build(), openKeyInfo, clientID); - result = Result.SUCCESS; LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}", volumeName, bucketName, openKeyName); } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMAllocateBlockResponse(createReplayOMResponse( - omResponse)); - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - allocateBlockRequest); - } else { - result = Result.FAILURE; - omMetrics.incNumBlockAllocateCallFails(); - exception = ex; - omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse( - omResponse, exception)); - LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " + + omMetrics.incNumBlockAllocateCallFails(); + exception = ex; + omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse( + omResponse, exception)); + LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " + "Exception:{}", volumeName, bucketName, openKeyName, exception); - } } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); } - if (result != Result.REPLAY) { - auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap, - exception, getOmRequest().getUserInfo())); - } + auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap, + exception, getOmRequest().getUserInfo())); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 7ee7db51772d..edeea3d2d449 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -41,7 +41,6 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -71,13 +70,6 @@ public class OMKeyCommitRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(OMKeyCommitRequest.class); - private enum Result { - SUCCESS, - REPLAY, - DELETE_OPEN_KEY_ONLY, - FAILURE - } - public OMKeyCommitRequest(OMRequest omRequest) { super(omRequest); } @@ -152,44 +144,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, 
locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation)); } - bucketLockAcquired = omMetadataManager.getLock().acquireLock(BUCKET_LOCK, - volumeName, bucketName); + bucketLockAcquired = omMetadataManager.getLock().acquireWriteLock( + BUCKET_LOCK, volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - // Revisit this logic to see how we can skip this check when ratis is - // enabled. - if (ozoneManager.isRatisEnabled()) { - // Check if OzoneKey already exists in DB - OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable() - .getIfExist(dbOzoneKey); - if (dbKeyInfo != null) { - // Check if this transaction is a replay of ratis logs - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - // During KeyCreate, we do not check the OpenKey Table for replay. - // This is so as to avoid an extra DB read during KeyCreate. - // If KeyCommit is a replay, the KeyCreate request could also have - // been replayed. And since we do not check for replay in KeyCreate, - // we should scrub the key from OpenKey table now, is it exists. - - omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey); - if (omKeyInfo != null) { - omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(dbOpenKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); - - throw new OMReplayException(true); - } - throw new OMReplayException(); - } - } - } - omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey); + if (omKeyInfo == null) { throw new OMException("Failed to commit key, as " + dbOpenKey + "entry is not found in the OpenKey table", KEY_NOT_FOUND); } + omKeyInfo.setDataSize(commitKeyArgs.getDataSize()); omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime()); @@ -214,22 +180,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - if (((OMReplayException) ex).isDBOperationNeeded()) { - result = Result.DELETE_OPEN_KEY_ONLY; - omClientResponse = new OMKeyCommitResponse(omResponse.build(), - dbOpenKey); - } else { - result = Result.REPLAY; - omClientResponse = new OMKeyCommitResponse(createReplayOMResponse( - omResponse)); - } - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = new OMKeyCommitResponse(createErrorOMResponse( + result = Result.FAILURE; + exception = ex; + omClientResponse = new OMKeyCommitResponse(createErrorOMResponse( omResponse, exception)); - } } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -240,11 +194,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } - // Performing audit logging outside of the lock. - if (result != Result.REPLAY && result != Result.DELETE_OPEN_KEY_ONLY) { - auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap, + auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap, exception, getOmRequest().getUserInfo())); - } switch (result) { case SUCCESS: @@ -253,21 +204,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // As key also can have multiple versions, we need to increment keys // only if version is 0. Currently we have not complete support of // versioning of keys. So, this can be revisited later. - if (omKeyInfo.getKeyLocationVersions().size() == 1) { omMetrics.incNumKeys(); } LOG.debug("Key commited. 
Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - commitKeyRequest); - break; - case DELETE_OPEN_KEY_ONLY: - LOG.debug("Replayed Transaction {}. Deleting old key {} from OpenKey " + - "table. Request: {}", trxnLogIndex, dbOpenKey, commitKeyRequest); - break; case FAILURE: LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}. Exception:{}", volumeName, bucketName, keyName, exception); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 3f4266f635be..c6a7e52f744e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -201,22 +200,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, keyName); OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().getIfExist(dbKeyName); - if (dbKeyInfo != null) { - // Check if this transaction is a replay of ratis logs. - // We check only the KeyTable here and not the OpenKeyTable. In case - // this transaction is a replay but the transaction was not committed - // to the KeyTable, then we recreate the key in OpenKey table. This is - // okay as all the subsequent transactions would also be replayed and - // the openKey table would eventually reach the same state. - // The reason we do not check the OpenKey table is to avoid a DB read - // in regular non-replay scenario. - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. 
- throw new OMReplayException(); - } - } OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)); @@ -254,18 +237,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMKeyCreateResponse(createReplayOMResponse( - omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - omMetrics.incNumKeyAllocateFails(); - omResponse.setCmdType(Type.CreateKey); - omClientResponse = new OMKeyCreateResponse(createErrorOMResponse( - omResponse, exception)); - } + result = Result.FAILURE; + exception = ex; + omMetrics.incNumKeyAllocateFails(); + omResponse.setCmdType(Type.CreateKey); + omClientResponse = new OMKeyCreateResponse( + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -276,22 +253,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } // Audit Log outside the lock - if (result != Result.REPLAY) { - Map auditMap = buildKeyArgsAuditMap(keyArgs); - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.ALLOCATE_KEY, auditMap, exception, - getOmRequest().getUserInfo())); - } + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.ALLOCATE_KEY, auditMap, exception, + getOmRequest().getUserInfo())); + switch (result) { case SUCCESS: LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - createKeyRequest); - break; case FAILURE: LOG.error("Key creation failed. Volume:{}, Bucket:{}, Key{}. " + "Exception:{}", volumeName, bucketName, keyName, exception); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 167330a302f1..b0eb6fd0be7b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse; @@ -130,14 +129,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, throw new OMException("Key not found", KEY_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. 
- throw new OMReplayException(); - } - // Set the UpdateID to current transactionLogIndex omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -158,16 +149,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMKeyDeleteResponse(createReplayOMResponse( - omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = new OMKeyDeleteResponse(createErrorOMResponse( - omResponse, exception)); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = new OMKeyDeleteResponse( + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -178,10 +163,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } // Performing audit logging outside of the lock. - if (result != Result.REPLAY) { - auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap, - exception, userInfo)); - } + auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap, + exception, userInfo)); + switch (result) { case SUCCESS: @@ -189,10 +173,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - deleteKeyRequest); - break; case FAILURE: omMetrics.incNumKeyDeleteFails(); LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key:{}.", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index f7783dbe42c6..ce7f1e98f9c8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -18,12 +18,8 @@ package org.apache.hadoop.ozone.om.request.key; -import java.io.IOException; import java.util.ArrayList; -import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -37,8 +33,6 @@ import java.util.List; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - /** * Handles purging of keys from OM DB. 
*/ @@ -54,9 +48,6 @@ public OMKeyPurgeRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest(); List bucketDeletedKeysList = purgeKeysRequest .getDeletedKeysList(); @@ -65,97 +56,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); OMClientResponse omClientResponse = null; - boolean success = true; - IOException exception = null; - // Filter the keys that have updateID > transactionLogIndex. This is done so - // that in case this transaction is a replay, we do not purge keys - // created after the original purge request. - // PurgeKeys request has keys belonging to same bucket grouped together. - // We get each bucket lock and check the above condition. - for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { - boolean acquiredLock = false; - String volumeName = bucketWithDeleteKeys.getVolumeName(); - String bucketName = bucketWithDeleteKeys.getBucketName(); - ArrayList keysNotPurged = new ArrayList<>(); - Result result = null; - try { - acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - for (String deletedKey : bucketWithDeleteKeys.getKeysList()) { - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(deletedKey); - boolean purgeKey = true; - if (repeatedOmKeyInfo != null) { - for (OmKeyInfo omKeyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) { - // Discard those keys whose updateID is > transactionLogIndex. - // This could happen when the PurgeRequest is replayed. - if (isReplay(ozoneManager, omKeyInfo, - trxnLogIndex)) { - purgeKey = false; - result = Result.REPLAY; - break; - } - // TODO: If a deletedKey has any one OmKeyInfo which was - // deleted after the original PurgeRequest (updateID > - // trxnLogIndex), we avoid purging that whole key in the - // replay request. Instead of discarding the whole key, we can - // identify the OmKeyInfo's which have updateID < - // trxnLogIndex and purge only those OMKeyInfo's from the - // deletedKey in DeletedTable. - } - if (purgeKey) { - keysToBePurgedList.add(deletedKey); - } else { - keysNotPurged.add(deletedKey); - } - } - } - } catch (IOException ex) { - success = false; - exception = ex; - break; - } finally { - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - if (result == Result.REPLAY) { - LOG.debug("Replayed Transaction {}. 
Request: {}", trxnLogIndex, - purgeKeysRequest); - if (!keysNotPurged.isEmpty()) { - StringBuilder notPurgeList = new StringBuilder(); - for (String key : keysNotPurged) { - notPurgeList.append(", ").append(key); - } - LOG.debug("Following keys from Volume:{}, Bucket:{} will not be" + - " purged: {}", notPurgeList.toString().substring(2)); - } + for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { + for (String deletedKey : bucketWithDeleteKeys.getKeysList()) { + keysToBePurgedList.add(deletedKey); } } - if (success) { - if (LOG.isDebugEnabled()) { - if (keysToBePurgedList.isEmpty()) { - LOG.debug("No keys will be purged as part of KeyPurgeRequest: {}", - purgeKeysRequest); - } else { - LOG.debug("Following keys will be purged as part of " + - "KeyPurgeRequest: {} - {}", purgeKeysRequest, - String.join(",", keysToBePurgedList)); - } - } - omClientResponse = new OMKeyPurgeResponse(omResponse.build(), + omClientResponse = new OMKeyPurgeResponse(omResponse.build(), keysToBePurgedList); - } else { - omClientResponse = new OMKeyPurgeResponse(createErrorOMResponse( - omResponse, exception)); - } - addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); + return omClientResponse; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index f0069a168161..dc83ff633f79 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -71,16 +71,6 @@ public OMKeyRenameRequest(OMRequest omRequest) { super(omRequest); } - /** - * Stores the result of request execution for Rename Requests. - */ - private enum Result { - SUCCESS, - DELETE_FROM_KEY_ONLY, - REPLAY, - FAILURE, - } - @Override public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @@ -162,87 +152,40 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo toKeyValue = omMetadataManager.getKeyTable().get(toKey); if (toKeyValue != null) { - - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, toKeyValue, trxnLogIndex)) { - - // Check if fromKey is still in the DB and created before this - // replay. - // For example, lets say we have the following sequence of - // transactions. - // Trxn 1 : Create Key1 - // Trnx 2 : Rename Key1 to Key2 -> Deletes Key1 and Creates Key2 - // Now if these transactions are replayed: - // Replay Trxn 1 : Creates Key1 again as Key1 does not exist in DB - // Replay Trxn 2 : Key2 is not created as it exists in DB and the - // request would be deemed a replay. But Key1 - // is still in the DB and needs to be deleted. - fromKeyValue = omMetadataManager.getKeyTable().get(fromKey); - if (fromKeyValue != null) { - // Check if this replay transaction was after the fromKey was - // created. If so, we have to delete the fromKey. - if (ozoneManager.isRatisEnabled() && - trxnLogIndex > fromKeyValue.getUpdateID()) { - // Add to cache. Only fromKey should be deleted. ToKey already - // exists in DB as this transaction is a replay. 
- result = Result.DELETE_FROM_KEY_ONLY; - Table keyTable = omMetadataManager - .getKeyTable(); - keyTable.addCacheEntry(new CacheKey<>(fromKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); - - omClientResponse = new OMKeyRenameResponse(omResponse - .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(), - fromKeyName, fromKeyValue); - } - } - - if (result == null) { - result = Result.REPLAY; - // If toKey exists and fromKey does not, then no further action is - // required. Return a dummy OMClientResponse. - omClientResponse = new OMKeyRenameResponse(createReplayOMResponse( - omResponse)); - } - } else { - // This transaction is not a replay. toKeyName should not exist - throw new OMException("Key already exists " + toKeyName, + throw new OMException("Key already exists " + toKeyName, OMException.ResultCodes.KEY_ALREADY_EXISTS); - } - } else { - - // This transaction is not a replay. + } - // fromKeyName should exist - fromKeyValue = omMetadataManager.getKeyTable().get(fromKey); - if (fromKeyValue == null) { + // fromKeyName should exist + fromKeyValue = omMetadataManager.getKeyTable().get(fromKey); + if (fromKeyValue == null) { // TODO: Add support for renaming open key - throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND); - } + throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND); + } - fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); - fromKeyValue.setKeyName(toKeyName); - //Set modification time - fromKeyValue.setModificationTime(renameKeyArgs.getModificationTime()); + fromKeyValue.setKeyName(toKeyName); - // Add to cache. - // fromKey should be deleted, toKey should be added with newly updated - // omKeyInfo. - Table keyTable = omMetadataManager.getKeyTable(); + //Set modification time + fromKeyValue.setModificationTime(renameKeyArgs.getModificationTime()); - keyTable.addCacheEntry(new CacheKey<>(fromKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); + // Add to cache. + // fromKey should be deleted, toKey should be added with newly updated + // omKeyInfo. + Table keyTable = omMetadataManager.getKeyTable(); - keyTable.addCacheEntry(new CacheKey<>(toKey), - new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex)); + keyTable.addCacheEntry(new CacheKey<>(fromKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); - omClientResponse = new OMKeyRenameResponse(omResponse - .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(), - fromKeyName, toKeyName, fromKeyValue); + keyTable.addCacheEntry(new CacheKey<>(toKey), + new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex)); - result = Result.SUCCESS; - } + omClientResponse = new OMKeyRenameResponse(omResponse + .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(), + fromKeyName, toKeyName, fromKeyValue); + + result = Result.SUCCESS; } catch (IOException ex) { result = Result.FAILURE; exception = ex; @@ -257,10 +200,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } - if (result == Result.SUCCESS || result == Result.FAILURE) { - auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap, - exception, getOmRequest().getUserInfo())); - } + auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap, + exception, getOmRequest().getUserInfo())); switch (result) { case SUCCESS: @@ -268,15 +209,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, " fromKey:{} toKey:{}. 
", volumeName, bucketName, fromKeyName, toKeyName); break; - case DELETE_FROM_KEY_ONLY: - LOG.debug("Replayed transaction {}: {}. Renamed Key {} already exists. " + - "Deleting old key {}.", trxnLogIndex, renameKeyRequest, toKey, - fromKey); - break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - renameKeyRequest); - break; case FAILURE: ozoneManager.getMetrics().incNumKeyRenameFails(); LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} " + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index b5e8dc8cbc41..9a7d9935ab3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -151,13 +150,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, throw new OMException("Key not found: " + keyName, KEY_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - throw new OMReplayException(); - } } omClientResponse = new OMKeysDeleteResponse(omResponse @@ -165,29 +157,20 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omKeyInfoList, trxnLogIndex, ozoneManager.isRatisEnabled()); result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMKeyDeleteResponse(createReplayOMResponse( - omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - - omClientResponse = new OMKeyDeleteResponse( - createOperationKeysErrorOMResponse(omResponse, exception, - unDeletedKeys)); - } + result = Result.FAILURE; + exception = ex; + + omClientResponse = new OMKeyDeleteResponse( + createOperationKeysErrorOMResponse(omResponse, exception, + unDeletedKeys)); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); } - // Performing audit logging outside of the lock. - if (result != Result.REPLAY) { - auditLog(auditLogger, buildAuditMessage( - OMAction.DELETE_KEY, auditMap, exception, userInfo)); - } + auditLog(auditLogger, buildAuditMessage( + OMAction.DELETE_KEY, auditMap, exception, userInfo)); switch (result) { case SUCCESS: @@ -195,10 +178,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", - trxnLogIndex, deleteKeyRequest); - break; case FAILURE: omMetrics.incNumKeyDeleteFails(); LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key{}." 
+ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java index 025c25843adc..9fae4988fa68 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.OMClientRequest; @@ -93,14 +92,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, throw new OMException(OMException.ResultCodes.KEY_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) { - throw new OMReplayException(); - } - operationResult = apply(omKeyInfo, trxnLogIndex); omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -112,14 +103,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = onSuccess(omResponse, omKeyInfo, operationResult); result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = onReplay(omResponse); - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = onFailure(omResponse, ex); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = onFailure(omResponse, ex); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -170,10 +156,6 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, return new OMKeyAclResponse(createErrorOMResponse(omResponse, exception)); } - OMClientResponse onReplay(OMResponse.Builder omResponse) { - return new OMKeyAclResponse(createReplayOMResponse(omResponse)); - } - /** * Completion hook for final processing before return without lock. * Usually used for logging without lock and metric update. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java index 444c0df6efd7..3697cb8f98d2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java @@ -87,12 +87,6 @@ void onComplete(Result result, boolean operationResult, } } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Add acl {} to path {} failed!", ozoneAcls, path, exception); break; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java index 18e999d450a2..67b891aa73b9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java @@ -88,12 +88,6 @@ void onComplete(Result result, boolean operationResult, } } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Remove acl {} to path {} failed!", ozoneAcls, path, exception); break; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java index d8dbe773428a..70f7b28bc0b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java @@ -84,12 +84,6 @@ void onComplete(Result result, boolean operationResult, LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, path); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Set acl {} to path {} failed!", ozoneAcls, path, exception); break; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index 7cde2c257e9f..e928402643ef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -26,12 +26,10 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.PrefixManagerImpl; import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -86,14 +84,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omPrefixInfo = omMetadataManager.getPrefixTable().get(prefixPath); - // Check if this transaction is a replay of ratis logs. 
- if (omPrefixInfo != null) { - if (isReplay(ozoneManager, omPrefixInfo, trxnLogIndex)) { - // This is a replayed transaction. Return dummy response. - throw new OMReplayException(); - } - } - try { operationResult = apply(prefixManager, omPrefixInfo, trxnLogIndex); } catch (IOException ex) { @@ -129,14 +119,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = onReplay(omResponse); - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = onFailure(omResponse, ex); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = onFailure(omResponse, ex); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -186,15 +171,6 @@ abstract OMClientResponse onSuccess( abstract OMClientResponse onFailure(OMResponse.Builder omResponse, IOException exception); - /** - * Get the OM Client Response on replayed transactions. - * @param omResonse - * @return OMClientResponse - */ - OMClientResponse onReplay(OMResponse.Builder omResonse) { - return new OMPrefixAclResponse(createReplayOMResponse(omResonse)); - } - /** * Completion hook for final processing before return without lock. * Usually used for logging without lock and metric update. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java index bd25e07d3f4a..7160042b0974 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java @@ -106,12 +106,6 @@ void onComplete(boolean operationResult, IOException exception, } } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: omMetrics.incNumBucketUpdateFails(); LOG.error("Add acl {} to path {} failed!", ozoneAcls, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java index 72c199cc9b57..3731ad17dcf4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java @@ -103,12 +103,6 @@ void onComplete(boolean operationResult, IOException exception, } } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: omMetrics.incNumBucketUpdateFails(); LOG.error("Remove acl {} to path {} failed!", ozoneAcls, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java index 122ada18ac3c..44bc43b52120 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java @@ -99,12 +99,6 @@ void onComplete(boolean operationResult, IOException exception, ozoneObj.getPath()); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: omMetrics.incNumBucketUpdateFails(); LOG.error("Set acl {} to path {} failed!", ozoneAcls, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index f51cba8f3478..4f95fe445654 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -121,11 +121,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - // We do not check if this transaction is a replay here to avoid extra - // DB reads. Even if this transaction is replayed, in - // S3MultipartUploadComplete request, we would delete this entry from - // the openKeyTable. Hence, it is safe to replay this transaction here. - // We are adding uploadId to key, because if multiple users try to // perform multipart upload on the same key, each will try to upload, who // ever finally commit the key, we see that key in ozone. Suppose if we diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 8c8e0103fbf4..4518a3b9b6cf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -122,26 +122,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } - // We do not check if this transaction is a replay. If OmKeyInfo - // exists, then we should delete it from OpenKeyTable irrespective of - // whether this transaction is a replay. There are 3 scenarios: - // Trxn 1 : Initiate Multipart Upload request for key1 - // (openKey = openKey1) - // Trxn 2 : Abort Multipart Upload request for opneKey1 - // - // Scenario 1 : This is not a replay transaction. - // omKeyInfo is not null and we proceed with the abort request to - // deleted openKey1 from openKeyTable. 
- // Scenario 2 : Trxn 1 and 2 are replayed. - // Replay of Trxn 1 would create openKey1 in openKeyTable as we do - // not check for replay in S3InitiateMultipartUploadRequest. - // Hence, we should replay Trxn 2 also to maintain consistency. - // Scenario 3 : Trxn 2 is replayed and not Trxn 1. - // This will result in omKeyInfo == null as openKey1 would already - // have been deleted from openKeyTable. - // So in both scenarios 1 and 2 (omKeyInfo not null), we should go - // ahead with this request irrespective of whether it is a replay or not. - multipartKeyInfo = omMetadataManager.getMultipartInfoTable() .get(multipartKey); multipartKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index d9004c0c13a0..346ff87ff186 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; @@ -133,15 +132,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); if (omKeyInfo == null) { - // Check the KeyTable if this transaction is a replay of ratis logs. 
- String ozoneKey = omMetadataManager.getOzoneKey(volumeName, - bucketName, keyName); - OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - if (dbKeyInfo != null) { - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - throw new OMReplayException(); - } - } throw new OMException("Failed to commit Multipart Upload key, as " + openKey + "entry is not found in the openKey table", KEY_NOT_FOUND); @@ -212,21 +202,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setPartName(partName)); omClientResponse = new S3MultipartUploadCommitPartResponse( omResponse.build(), multipartKey, openKey, - multipartKeyInfo, oldPartKeyInfo, ozoneManager.isRatisEnabled()); + multipartKeyInfo, oldPartKeyInfo, omKeyInfo, + ozoneManager.isRatisEnabled()); result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new S3MultipartUploadCommitPartResponse( - createReplayOMResponse(omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = new S3MultipartUploadCommitPartResponse( - createErrorOMResponse(omResponse, exception), openKey, omKeyInfo, - ozoneManager.isRatisEnabled()); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3MultipartUploadCommitPartResponse( + createErrorOMResponse(omResponse, exception), multipartKey, openKey, + multipartKeyInfo, oldPartKeyInfo, omKeyInfo, + ozoneManager.isRatisEnabled()); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -243,10 +229,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getOmRequest().getUserInfo())); switch (result) { - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", - trxnLogIndex, multipartCommitUploadPartRequest); - break; case SUCCESS: LOG.debug("MultipartUpload Commit is successfully for Key:{} in " + "Volume/Bucket {}/{}", keyName, volumeName, bucketName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 17a8c6154b9b..c4e315cddace 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -68,13 +67,6 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class); - private enum Result { - SUCCESS, - REPLAY, - DELETE_OPEN_KEY_ONLY, - FAILURE - } - public S3MultipartUploadCompleteRequest(OMRequest omRequest) { super(omRequest); } @@ -132,36 +124,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - if (omKeyInfo != null) { - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) { - // During S3InitiateMultipartUpload or KeyCreate, we do not check - // the OpenKey Table for replay. This is so as to avoid an extra - // DB read during KeyCreate. - // If this transaction is a replay, the S3InitiateMultipartUpload - // and part key KeyCreate request could also have been replayed. - // And since we do not check for replay there, we should scrub - // the key from OpenKey table and MultipartInfo table now, if it - // exists. - - OmKeyInfo openMultipartKeyInfo = omMetadataManager - .getOpenKeyTable().get(multipartKey); - if (openMultipartKeyInfo != null) { - omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); - omMetadataManager.getMultipartInfoTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); - - throw new OMReplayException(true); - } - throw new OMReplayException(false); - } - } - OmMultipartKeyInfo multipartKeyInfo = omMetadataManager .getMultipartInfoTable().get(multipartKey); @@ -259,6 +221,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, HddsProtos.ReplicationFactor factor = partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor(); + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); if (omKeyInfo == null) { // This is a newly added key, it does not have any versions. 
OmKeyLocationInfoGroup keyLocationInfoGroup = new @@ -329,22 +292,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } catch (IOException ex) { - if (ex instanceof OMReplayException) { - if (((OMReplayException) ex).isDBOperationNeeded()) { - result = Result.DELETE_OPEN_KEY_ONLY; - omClientResponse = new S3MultipartUploadCompleteResponse( - omResponse.build(), multipartKey); - } else { - result = Result.REPLAY; - omClientResponse = new S3MultipartUploadCompleteResponse( - createReplayOMResponse(omResponse)); - } - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = new S3MultipartUploadCompleteResponse( - createErrorOMResponse(omResponse, exception)); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3MultipartUploadCompleteResponse( + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -354,30 +305,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } - if (result != Result.REPLAY && result != Result.DELETE_OPEN_KEY_ONLY) { - Map auditMap = buildKeyArgsAuditMap(keyArgs); - auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString()); + Map auditMap = buildKeyArgsAuditMap(keyArgs); + auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString()); - // audit log - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.COMPLETE_MULTIPART_UPLOAD, auditMap, exception, - getOmRequest().getUserInfo())); - } + // audit log + auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.COMPLETE_MULTIPART_UPLOAD, auditMap, exception, + getOmRequest().getUserInfo())); switch (result) { case SUCCESS: LOG.debug("MultipartUpload Complete request is successfull for Key: {} " + "in Volume/Bucket {}/{}", keyName, volumeName, bucketName); break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", - trxnLogIndex, multipartUploadCompleteRequest); - break; - case DELETE_OPEN_KEY_ONLY: - LOG.debug("Replayed Transaction {}. Deleting old key {} from OpenKey " + - "table and MultipartInfo table. 
Request: {}", trxnLogIndex, - multipartKey, multipartUploadCompleteRequest); - break; case FAILURE: ozoneManager.getMetrics().incNumCompleteMultipartUploadFails(); LOG.error("MultipartUpload Complete request failed for Key: {} " + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index 765a20cfca30..7e2ccd99eec7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -147,11 +147,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - OmVolumeArgs dbVolumeArgs = - omMetadataManager.getVolumeTable().get(dbVolumeKey); - UserVolumeInfo volumeList = null; - if (dbVolumeArgs == null) { + if (omMetadataManager.getVolumeTable().isExist(dbVolumeKey)) { + LOG.debug("volume:{} already exists", omVolumeArgs.getVolume()); + throw new OMException("Volume already exists", + OMException.ResultCodes.VOLUME_ALREADY_EXISTS); + } else { String dbUserKey = omMetadataManager.getUserKey(owner); volumeList = omMetadataManager.getUserTable().get(dbUserKey); volumeList = addVolumeToOwnerList(volumeList, volume, owner, @@ -164,20 +165,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new OMVolumeCreateResponse(omResponse.build(), omVolumeArgs, volumeList); LOG.debug("volume:{} successfully created", omVolumeArgs.getVolume()); - } else { - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, dbVolumeArgs, transactionLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - LOG.debug("Replayed Transaction {} ignored. Request: {}", - transactionLogIndex, createVolumeRequest); - return new OMVolumeCreateResponse(createReplayOMResponse(omResponse)); - } else { - LOG.debug("volume:{} already exists", omVolumeArgs.getVolume()); - throw new OMException("Volume already exists", - OMException.ResultCodes.VOLUME_ALREADY_EXISTS); - } } } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java index 4d2f0557ea6e..ce93e269e250 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java @@ -99,16 +99,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmVolumeArgs omVolumeArgs = getVolumeInfo(omMetadataManager, volume); - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omVolumeArgs, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", - transactionLogIndex, deleteVolumeRequest); - return new OMVolumeDeleteResponse(createReplayOMResponse(omResponse)); - } - owner = omVolumeArgs.getOwnerName(); acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(USER_LOCK, owner); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java index 1eea419ef607..6873086750a4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java @@ -18,40 +18,34 @@ package org.apache.hadoop.ozone.om.request.volume; -import java.io.IOException; -import java.util.Map; - -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import com.google.common.base.Optional; import com.google.common.base.Preconditions; - +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetOwnerResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetVolumePropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetVolumePropertyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; @@ -124,30 +118,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } long maxUserVolumeCount = ozoneManager.getMaxUserVolumeCount(); - String dbVolumeKey = 
omMetadataManager.getVolumeKey(volume); OzoneManagerProtocolProtos.UserVolumeInfo oldOwnerVolumeList = null; OzoneManagerProtocolProtos.UserVolumeInfo newOwnerVolumeList = null; OmVolumeArgs omVolumeArgs = null; acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock( VOLUME_LOCK, volume); - omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); - if (omVolumeArgs == null) { - LOG.debug("Changing volume ownership failed for user:{} volume:{}", - newOwner, volume); - throw new OMException("Volume " + volume + " is not found", - OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omVolumeArgs, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", - transactionLogIndex, setVolumePropertyRequest); - return new OMVolumeSetOwnerResponse(createReplayOMResponse(omResponse)); - } + omVolumeArgs = getVolumeInfo(omMetadataManager, volume); oldOwner = omVolumeArgs.getOwnerName(); // Return OK immediately if newOwner is the same as oldOwner. @@ -194,7 +171,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new CacheValue<>(Optional.of(oldOwnerVolumeList), transactionLogIndex)); omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), + new CacheKey<>(omMetadataManager.getVolumeKey(volume)), new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); omResponse.setSetVolumePropertyResponse( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java index 7e0cb72e237d..746a1a60f0a4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetQuotaResponse; @@ -127,23 +126,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, acquireVolumeLock = omMetadataManager.getLock().acquireWriteLock( VOLUME_LOCK, volume); - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); - if (omVolumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omVolumeArgs, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", - transactionLogIndex, setVolumePropertyRequest); - return new OMVolumeSetQuotaResponse(createReplayOMResponse(omResponse)); - } + omVolumeArgs = getVolumeInfo(omMetadataManager, volume); omVolumeArgs.setQuotaInBytes(setVolumePropertyRequest.getQuotaInBytes()); omVolumeArgs.setUpdateID(transactionLogIndex, @@ -153,7 +137,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // update cache. omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), + new CacheKey<>(omMetadataManager.getVolumeKey(volume)), new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); omResponse.setSetVolumePropertyResponse( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java index f2610e57e7c9..de7f0c0a36d0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java @@ -25,12 +25,10 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.volume.OMVolumeRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -46,7 +44,7 @@ /** * Base class for OMVolumeAcl Request. */ -public abstract class OMVolumeAclRequest extends OMClientRequest { +public abstract class OMVolumeAclRequest extends OMVolumeRequest { private CheckedBiFunction, OmVolumeArgs, IOException> omVolumeAclOp; @@ -84,19 +82,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } lockAcquired = omMetadataManager.getLock().acquireWriteLock( VOLUME_LOCK, volume); - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); - if (omVolumeArgs == null) { - throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omVolumeArgs, trxnLogIndex)) { - throw new OMReplayException(); - } + omVolumeArgs = getVolumeInfo(omMetadataManager, volume); // result is false upon add existing acl or remove non-existing acl boolean applyAcl = true; @@ -106,27 +92,23 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, applyAcl = false; } - // We set the updateID even if applyAcl = false to catch the replay - // transactions. 
- omVolumeArgs.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + // Update only when + if (applyAcl) { + omVolumeArgs.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); - // update cache. - omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), - new CacheValue<>(Optional.of(omVolumeArgs), trxnLogIndex)); + // update cache. + omMetadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(omMetadataManager.getVolumeKey(volume)), + new CacheValue<>(Optional.of(omVolumeArgs), trxnLogIndex)); + } omClientResponse = onSuccess(omResponse, omVolumeArgs, applyAcl); result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = onReplay(omResponse); - } else { - result = Result.FAILURE; - exception = ex; - omMetrics.incNumVolumeUpdateFails(); - omClientResponse = onFailure(omResponse, ex); - } + result = Result.FAILURE; + exception = ex; + omMetrics.incNumVolumeUpdateFails(); + omClientResponse = onFailure(omResponse, ex); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -183,10 +165,6 @@ abstract OMClientResponse onSuccess( abstract OMClientResponse onFailure(OMResponse.Builder omResponse, IOException ex); - OMClientResponse onReplay(OMResponse.Builder omResonse) { - return new OMVolumeAclOpResponse(createReplayOMResponse(omResonse)); - } - /** * Completion hook for final processing before return without lock. * Usually used for logging without lock. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java index e0f9b3d5e79a..12008e245477 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java @@ -104,12 +104,6 @@ void onComplete(Result result, IOException ex, long trxnLogIndex) { getVolumeName()); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Add acl {} to volume {} failed!", getAcl(), getVolumeName(), ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java index 6e9073192306..461ad481e6a9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java @@ -103,12 +103,6 @@ void onComplete(Result result, IOException ex, long trxnLogIndex) { getVolumeName()); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Remove acl {} from volume {} failed!", getAcl(), getVolumeName(), ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java index 8d5bc61ceede..c73e19e75241 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java @@ -100,12 +100,6 @@ void onComplete(Result result, IOException ex, long trxnLogIndex) { getVolumeName()); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Set acls {} to volume {} failed!", getAcls(), getVolumeName(), ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java index aa3b3465d570..4af78fe3306c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java @@ -42,7 +42,7 @@ public OMClientResponse(OMResponse omResponse) { } /** - * For error or replay cases, check that the status of omResponse is not OK. + * For error case, check that the status of omResponse is not OK. */ public void checkStatusNotOK() { Preconditions.checkArgument(!omResponse.getStatus().equals( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java index 6948b678385f..cb1f32247b68 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java @@ -48,7 +48,7 @@ public OMBucketCreateResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMBucketCreateResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java index 44167579b80f..c3c7fefc5bba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java @@ -48,7 +48,7 @@ public OMBucketDeleteResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMBucketDeleteResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java index c827e68eb8bf..b9d3cf04f992 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java @@ -45,7 +45,7 @@ public OMBucketSetPropertyResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMBucketSetPropertyResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java index 442dcd1abaeb..0a725231925a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java @@ -46,7 +46,7 @@ public OMBucketAclResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMBucketAclResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java index 499b6f1a2201..2608a1b372e2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java @@ -21,6 +21,8 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result; + import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; @@ -29,7 +31,6 @@ import org.slf4j.LoggerFactory; import javax.annotation.Nonnull; -import javax.annotation.Nullable; import java.io.IOException; import java.util.List; @@ -46,51 +47,50 @@ public class OMDirectoryCreateResponse extends OMClientResponse { private OmKeyInfo dirKeyInfo; private List parentKeyInfos; + private Result result; public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse, - @Nullable OmKeyInfo dirKeyInfo, - @Nullable List parentKeyInfos) { - + @Nonnull OmKeyInfo dirKeyInfo, + @Nonnull List parentKeyInfos, @Nonnull Result result) { super(omResponse); this.dirKeyInfo = dirKeyInfo; this.parentKeyInfos = parentKeyInfos; + this.result = result; } /** - * For when the request is not successful or it is a replay transaction or - * the directory already exists. 
+ * For when the request is not successful or the directory already exists. */ - public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse) { + public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse, + @Nonnull Result result) { super(omResponse); + this.result = result; } @Override protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { - if (dirKeyInfo != null) { - if (parentKeyInfos != null) { - for (OmKeyInfo parentKeyInfo : parentKeyInfos) { - String parentKey = omMetadataManager - .getOzoneDirKey(parentKeyInfo.getVolumeName(), - parentKeyInfo.getBucketName(), parentKeyInfo.getKeyName()); - LOG.debug("putWithBatch parent : key {} info : {}", parentKey, - parentKeyInfo); - omMetadataManager.getKeyTable() - .putWithBatch(batchOperation, parentKey, parentKeyInfo); - } + if (Result.SUCCESS == result) { + // Add all parent keys to batch. + for (OmKeyInfo parentKeyInfo : parentKeyInfos) { + String parentKey = omMetadataManager + .getOzoneDirKey(parentKeyInfo.getVolumeName(), + parentKeyInfo.getBucketName(), parentKeyInfo.getKeyName()); + LOG.debug("putWithBatch parent : key {} info : {}", parentKey, + parentKeyInfo); + omMetadataManager.getKeyTable() + .putWithBatch(batchOperation, parentKey, parentKeyInfo); } String dirKey = omMetadataManager.getOzoneKey(dirKeyInfo.getVolumeName(), dirKeyInfo.getBucketName(), dirKeyInfo.getKeyName()); omMetadataManager.getKeyTable().putWithBatch(batchOperation, dirKey, dirKeyInfo); - - } else { + } else if (Result.DIRECTORY_ALREADY_EXISTS == result) { // When directory already exists, we don't add it to cache. And it is // not an error, in this case dirKeyInfo will be null. - LOG.debug("Response Status is OK, dirKeyInfo is null in " + - "OMDirectoryCreateResponse"); + LOG.debug("Directory already exists. addToDBBatch is a no-op"); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java index de069cc0990f..e54379b1616b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java @@ -34,12 +34,12 @@ public class OMFileCreateResponse extends OMKeyCreateResponse { public OMFileCreateResponse(@Nonnull OMResponse omResponse, @Nonnull OmKeyInfo omKeyInfo, - List parentKeyInfos, long openKeySessionID) { + @Nonnull List parentKeyInfos, long openKeySessionID) { super(omResponse, omKeyInfo, parentKeyInfos, openKeySessionID); } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMFileCreateResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java index 7d1bd44915ae..5ea44a75bc6d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java @@ -48,7 +48,7 @@ public OMAllocateBlockResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMAllocateBlockResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java index 9f97bbb1a8f5..c0216eb3a054 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java @@ -50,21 +50,7 @@ public OMKeyCommitResponse(@Nonnull OMResponse omResponse, } /** - * When the KeyCommit request is a replay but the openKey should be deleted - * from the OpenKey table. - * Note that this response will result in openKey deletion only. Key will - * not be added to Key table. - * @param openKeyName openKey to be deleted from OpenKey table - */ - public OMKeyCommitResponse(@Nonnull OMResponse omResponse, - String openKeyName) { - super(omResponse); - this.omKeyInfo = null; - this.openKeyName = openKeyName; - } - - /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMKeyCommitResponse(@Nonnull OMResponse omResponse) { @@ -80,12 +66,8 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation, openKeyName); - // Add entry to Key table if omKeyInfo is available i.e. it is not a - // replayed transaction. - if (omKeyInfo != null) { - omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName, - omKeyInfo); - } + omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName, + omKeyInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java index db9815ac1ea7..4d0899df250c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java @@ -56,7 +56,7 @@ public OMKeyCreateResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMKeyCreateResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index e0228f6c202f..41853da907e7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; @@ -54,7 +53,7 @@ public OMKeyDeleteResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMKeyDeleteResponse(@Nonnull OMResponse omResponse) { @@ -68,31 +67,28 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, // For OmResponse with failure, this should do nothing. This method is // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - ozoneKey); + String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); + omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, ozoneKey); - // If Key is not empty add this to delete table. - if (!isKeyEmpty(omKeyInfo)) { - // If a deleted key is put in the table where a key with the same - // name already exists, then the old deleted key information would be - // lost. To avoid this, first check if a key with same name exists. - // deletedTable in OM Metadata stores . - // The RepeatedOmKeyInfo is the structure that allows us to store a - // list of OmKeyInfo that can be tied to same key name. For a keyName - // if RepeatedOMKeyInfo structure is null, we create a new instance, - // if it is not null, then we simply add to the list and store this - // instance in deletedTable. - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(ozoneKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(), - isRatisEnabled); - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, + // If Key is not empty add this to delete table. + if (!isKeyEmpty(omKeyInfo)) { + // If a deleted key is put in the table where a key with the same + // name already exists, then the old deleted key information would be + // lost. To avoid this, first check if a key with same name exists. + // deletedTable in OM Metadata stores . + // The RepeatedOmKeyInfo is the structure that allows us to store a + // list of OmKeyInfo that can be tied to same key name. 
For a keyName + // if RepeatedOMKeyInfo structure is null, we create a new instance, + // if it is not null, then we simply add to the list and store this + // instance in deletedTable. + RepeatedOmKeyInfo repeatedOmKeyInfo = + omMetadataManager.getDeletedTable().get(ozoneKey); + repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(), + isRatisEnabled); + omMetadataManager.getDeletedTable().putWithBatch(batchOperation, ozoneKey, repeatedOmKeyInfo); - } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index e26433f7a21a..01b7457085cc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -39,20 +39,11 @@ public class OMKeyPurgeResponse extends OMClientResponse { private List purgeKeyList; public OMKeyPurgeResponse(@Nonnull OMResponse omResponse, - List keyList) { + @Nonnull List keyList) { super(omResponse); this.purgeKeyList = keyList; } - /** - * For when the request is not successful or it is a replay transaction. - * For a successful request, the other constructor should be used. - */ - public OMKeyPurgeResponse(@Nonnull OMResponse omResponse) { - super(omResponse); - checkStatusNotOK(); - } - @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java index 3e640722ddc2..7470b3788443 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.response.key; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; @@ -40,38 +39,18 @@ public class OMKeyRenameResponse extends OMClientResponse { private String fromKeyName; private String toKeyName; - private OmKeyInfo newKeyInfo; + private OmKeyInfo renameKeyInfo; public OMKeyRenameResponse(@Nonnull OMResponse omResponse, String fromKeyName, String toKeyName, @Nonnull OmKeyInfo renameKeyInfo) { super(omResponse); this.fromKeyName = fromKeyName; this.toKeyName = toKeyName; - this.newKeyInfo = renameKeyInfo; + this.renameKeyInfo = renameKeyInfo; } /** - * When Rename request is replayed and toKey already exists, but fromKey - * has not been deleted. - * For example, lets say we have the following sequence of transactions - * Trxn 1 : Create Key1 - * Trnx 2 : Rename Key1 to Key2 -> Deletes Key1 and Creates Key2 - * Now if these transactions are replayed: - * Replay Trxn 1 : Creates Key1 again as Key1 does not exist in DB - * Replay Trxn 2 : Key2 is not created as it exists in DB and the request - * would be deemed a replay. But Key1 is still in the DB and needs to be - * deleted. 
- */ - public OMKeyRenameResponse(@Nonnull OMResponse omResponse, - String fromKeyName, OmKeyInfo fromKeyInfo) { - super(omResponse); - this.fromKeyName = fromKeyName; - this.newKeyInfo = fromKeyInfo; - this.toKeyName = null; - } - - /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMKeyRenameResponse(@Nonnull OMResponse omResponse) { @@ -82,31 +61,13 @@ public OMKeyRenameResponse(@Nonnull OMResponse omResponse) { @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { - String volumeName = newKeyInfo.getVolumeName(); - String bucketName = newKeyInfo.getBucketName(); - // If toKeyName is null, then we need to only delete the fromKeyName from - // KeyTable. This is the case of replay where toKey exists but fromKey - // has not been deleted. - if (deleteFromKeyOnly()) { - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName)); - } else if (createToKeyAndDeleteFromKey()) { - // If both from and toKeyName are equal do nothing - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName)); - omMetadataManager.getKeyTable().putWithBatch(batchOperation, - omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName), - newKeyInfo); - } + String volumeName = renameKeyInfo.getVolumeName(); + String bucketName = renameKeyInfo.getBucketName(); + omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, + omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName)); + omMetadataManager.getKeyTable().putWithBatch(batchOperation, + omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName), + renameKeyInfo); } - @VisibleForTesting - public boolean deleteFromKeyOnly() { - return toKeyName == null && fromKeyName != null; - } - - @VisibleForTesting - public boolean createToKeyAndDeleteFromKey() { - return toKeyName != null && !toKeyName.equals(fromKeyName); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java index e3177f8b26de..2bbeae0addde 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java @@ -47,7 +47,7 @@ public OMKeyAclResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMKeyAclResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java index 225bad33e396..288a38fea0f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java @@ -45,7 +45,7 @@ public OMPrefixAclResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMPrefixAclResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java index 0cc8dff52434..ec1b3ae0ff61 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java @@ -52,7 +52,7 @@ public S3InitiateMultipartUploadResponse( } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public S3InitiateMultipartUploadResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java index b47b22b92d9c..47cde0870103 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java @@ -61,7 +61,7 @@ public S3MultipartUploadAbortResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public S3MultipartUploadAbortResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java index 298b73310e3f..28acdb5655d4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java @@ -41,6 +41,7 @@ .Status.OK; import javax.annotation.Nonnull; +import javax.annotation.Nullable; /** * Response for S3MultipartUploadCommitPart request. 
@@ -69,45 +70,19 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse { */ public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse, String multipartKey, String openKey, - @Nonnull OmMultipartKeyInfo omMultipartKeyInfo, - @Nonnull OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, + @Nullable OmMultipartKeyInfo omMultipartKeyInfo, + @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, + @Nullable OmKeyInfo openPartKeyInfoToBeDeleted, boolean isRatisEnabled) { super(omResponse); this.multipartKey = multipartKey; this.openKey = openKey; this.omMultipartKeyInfo = omMultipartKeyInfo; this.oldPartKeyInfo = oldPartKeyInfo; - this.isRatisEnabled = isRatisEnabled; - } - - /** - * For the case when Multipart Upload does not exist (could have been - * aborted). - * 1. Put the partKeyInfo from openKeyTable into DeletedTable - * 2. Deleted openKey from OpenKeyTable - * @param omResponse - * @param openKey - * @param openPartKeyInfoToBeDeleted - */ - public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse, - String openKey, @Nonnull OmKeyInfo openPartKeyInfoToBeDeleted, - boolean isRatisEnabled) { - super(omResponse); - checkStatusNotOK(); - this.openKey = openKey; this.openPartKeyInfoToBeDeleted = openPartKeyInfoToBeDeleted; this.isRatisEnabled = isRatisEnabled; } - /** - * For when the request is not successful or it is a replay transaction. - * For a successful request, the other constructor should be used. - */ - public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse) { - super(omResponse); - checkStatusNotOK(); - } - @Override public void checkAndUpdateDB(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { @@ -115,12 +90,13 @@ public void checkAndUpdateDB(OMMetadataManager omMetadataManager, if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) { // Means by the time we try to commit part, some one has aborted this // multipart upload. So, delete this part information. + RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable().get(openKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - openPartKeyInfoToBeDeleted, repeatedOmKeyInfo, - openPartKeyInfoToBeDeleted.getUpdateID(), isRatisEnabled); + repeatedOmKeyInfo = + OmUtils.prepareKeyForDelete(openPartKeyInfoToBeDeleted, + repeatedOmKeyInfo, omMultipartKeyInfo.getUpdateID(), isRatisEnabled); omMetadataManager.getDeletedTable().putWithBatch(batchOperation, openKey, repeatedOmKeyInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java index 093d1808db82..20e398eb4f09 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java @@ -59,20 +59,7 @@ public S3MultipartUploadCompleteResponse( } /** - * When the S3MultipartUploadCompleteRequest is a replay but the - * openKey should be deleted from the OpenKey table. - * Note that this response will result in openKey deletion and - * multipartInfo deletion only. Key will not be added to Key table. 
- */ - public S3MultipartUploadCompleteResponse( - @Nonnull OMResponse omResponse, - @Nonnull String multipartKey) { - super(omResponse); - this.multipartKey = multipartKey; - } - - /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public S3MultipartUploadCompleteResponse(@Nonnull OMResponse omResponse) { @@ -89,26 +76,23 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation, multipartKey); - if (omKeyInfo != null) { - String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().putWithBatch(batchOperation, - ozoneKey, omKeyInfo); + String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); + omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey, + omKeyInfo); - if (!partsUnusedList.isEmpty()) { - // Add unused parts to deleted key table. - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable() - .get(ozoneKey); - if (repeatedOmKeyInfo == null) { - repeatedOmKeyInfo = new RepeatedOmKeyInfo(partsUnusedList); - } else { - repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo); - } - - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - ozoneKey, repeatedOmKeyInfo); + if (!partsUnusedList.isEmpty()) { + // Add unused parts to deleted key table. + RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable() + .get(ozoneKey); + if (repeatedOmKeyInfo == null) { + repeatedOmKeyInfo = new RepeatedOmKeyInfo(partsUnusedList); + } else { + repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo); } + + omMetadataManager.getDeletedTable().putWithBatch(batchOperation, + ozoneKey, repeatedOmKeyInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java index 647123dff9b4..f9f0688c3a05 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java @@ -46,7 +46,7 @@ public OMVolumeAclOpResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMVolumeAclOpResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java index cd70dc2bcf1d..1b8e26e246b3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java @@ -53,7 +53,7 @@ public OMVolumeCreateResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. 
* For a successful request, the other constructor should be used. */ public OMVolumeCreateResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java index 80d9e8c3693e..db43fa641ed8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java @@ -52,7 +52,7 @@ public OMVolumeDeleteResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMVolumeDeleteResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java index 3ed8bb0220fc..a1efe703f957 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java @@ -58,8 +58,8 @@ public OMVolumeSetOwnerResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. - * Or when newOwner is the same as oldOwner. + * For when the request is not successful or when newOwner is the same as + * oldOwner. * For other successful requests, the other constructor should be used. */ public OMVolumeSetOwnerResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java index b50a923620e3..c6210254b9ec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java @@ -46,7 +46,7 @@ public OMVolumeSetQuotaResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMVolumeSetQuotaResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java index 7bef6b8957ab..06e140b2f55a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java @@ -219,32 +219,4 @@ public static void addCreateVolumeToTable(String volumeName, .setOwnerName(UUID.randomUUID().toString()).build(); TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OMRequest originalRequest = TestOMRequestUtils.createBucketRequest( - bucketName, volumeName, false, StorageTypeProto.SSD); - OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest( - originalRequest); - - // Manually add volume to DB table - addCreateVolumeToTable(volumeName, omMetadataManager); - - // Execute the original request - omBucketCreateRequest.preExecute(ozoneManager); - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java index f99e1b6114ea..1037baa8eaf9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java @@ -105,46 +105,4 @@ private OMRequest createDeleteBucketRequest(String volumeName, .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket) .setClientId(UUID.randomUUID().toString()).build(); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - // CreateBucket request - OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest( - TestOMRequestUtils.createBucketRequest(bucketName, volumeName, - false, OzoneManagerProtocolProtos.StorageTypeProto.SSD)); - - // Create volume entry in DB - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); - - // Execute CreateBucket request - omBucketCreateRequest.preExecute(ozoneManager); - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - // Execute the original DeleteBucket request - OMRequest omRequest = createDeleteBucketRequest(volumeName, bucketName); - OMBucketDeleteRequest omBucketDeleteRequest = new OMBucketDeleteRequest( - omRequest); - omBucketDeleteRequest.preExecute(ozoneManager); - 
omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 4, - ozoneManagerDoubleBufferHelper); - - // Create the bucket again - omBucketCreateRequest.preExecute(ozoneManager); - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 10, - ozoneManagerDoubleBufferHelper); - - // Replay the delete transaction - Execute the same request again - OMClientResponse omClientResponse = - omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 4, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java index 0670c3ed8340..cb0468ec757f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java @@ -119,33 +119,4 @@ private OMRequest createSetBucketPropertyRequest(String volumeName, .setCmdType(OzoneManagerProtocolProtos.Type.SetBucketProperty) .setClientId(UUID.randomUUID().toString()).build(); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - // Create request to enable versioning - OMRequest omRequest = createSetBucketPropertyRequest(volumeName, - bucketName, true); - OMBucketSetPropertyRequest omBucketSetPropertyRequest = - new OMBucketSetPropertyRequest(omRequest); - - // Execute the original request - omBucketSetPropertyRequest.preExecute(ozoneManager); - omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = omBucketSetPropertyRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index 7b6191c16192..c7aa6be9aa37 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -38,8 +38,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; -import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addKeyToTable; -import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND; import static 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS; @@ -401,31 +399,4 @@ private OMRequest createFileRequest( .setCreateFileRequest(createFileRequest).build(); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - OMRequest originalRequest = createFileRequest(volumeName, bucketName, - keyName, replicationFactor, replicationType, false, false); - OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest( - originalRequest); - - // Manually add volume, bucket and key to DB table - addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - addKeyToTable(false, false, volumeName, bucketName, keyName, clientID, - replicationType, replicationFactor, 1L, omMetadataManager); - - // Replay the transaction - Execute the createFile request again - OMClientResponse omClientResponse = - omFileCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java index 5228c5a9516c..5f704d357b0b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java @@ -36,7 +36,7 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest { @Test - public void testReplayRequest() throws Exception { + public void testAclRequest() throws Exception { // Manually add volume, bucket and key to DB TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); @@ -59,13 +59,6 @@ public void testReplayRequest() throws Exception { Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); - // Replay the original request - OMClientResponse replayResponse = omKeyAddAclRequest - .validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index f18ca8281677..b327b76e5136 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -207,85 +207,6 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { Assert.assertNull(omKeyInfo); } - @Test - public void testReplayRequest() throws Exception { - - // Manually add Volume, Bucket to DB - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - // Manually add Key to OpenKey table in DB - TestOMRequestUtils.addKeyToTable(true, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - 
omMetadataManager); - - OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest()); - - OMKeyCommitRequest omKeyCommitRequest = new OMKeyCommitRequest( - modifiedOmRequest); - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - // Key should not be there in key table, as validateAndUpdateCache is - // still not called. - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - Assert.assertNull(omKeyInfo); - - // Execute original KeyCommit request - omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 10L, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the createKey request again - OMClientResponse replayResponse = omKeyCommitRequest.validateAndUpdateCache( - ozoneManager, 10L, ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } - - @Test - public void testReplayRequestDeletesOpenKeyEntry() throws Exception { - - // Manually add Volume, Bucket to DB - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - // Manually add Key to OpenKey table in DB - TestOMRequestUtils.addKeyToTable(true, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - omMetadataManager); - - OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest()); - OMKeyCommitRequest omKeyCommitRequest = new OMKeyCommitRequest( - modifiedOmRequest); - - // Execute original KeyCommit request - omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 10L, - ozoneManagerDoubleBufferHelper); - - // Replay the Key Create request - add Key to OpenKey table manually again - TestOMRequestUtils.addKeyToTable(true, true, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - omMetadataManager); - - // Key should be present in OpenKey table - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); - OmKeyInfo openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - Assert.assertNotNull(openKeyInfo); - - // Replay the transaction - Execute the createKey request again - OMClientResponse replayResponse = omKeyCommitRequest.validateAndUpdateCache( - ozoneManager, 10L, ozoneManagerDoubleBufferHelper); - - // Replay should result in DELETE_OPEN_KEY_ONLY response and delete the - // key from OpenKey table - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - replayResponse.getOMResponse().getStatus()); - openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - Assert.assertNull(openKeyInfo); - } - /** * This method calls preExecute and verify the modified request. 
* @param originalOMRequest diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 7e9e09386fd5..b26505b32d37 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; -import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addKeyToTable; import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB; /** @@ -331,45 +330,4 @@ private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) { } - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setFactor(replicationFactor) - .setType(replicationType) - .build(); - - CreateKeyRequest.Builder req = CreateKeyRequest.newBuilder() - .setKeyArgs(keyArgs); - OMRequest originalRequest = OMRequest.newBuilder() - .setCreateKeyRequest(req) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) - .setClientId(UUID.randomUUID().toString()) - .build(); - - OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest( - originalRequest); - - // Manually add volume, bucket and key to DB table - addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - addKeyToTable(false, false, volumeName, bucketName, keyName, clientID, - replicationType, replicationFactor, 1L, omMetadataManager); - - // Replay the transaction - Execute the createKey request again - OMClientResponse omClientResponse = - omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index b60d68ea675f..b8e560308077 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -136,43 +136,6 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { omClientResponse.getOMResponse().getStatus()); } - @Test - public void testReplayRequest() throws Exception { - OMRequest modifiedOmRequest = - doPreExecute(createDeleteKeyRequest()); - - OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(modifiedOmRequest); - - // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 1L, omMetadataManager); - - // Delete the key manually. 
Lets say the Delete Requests - // TransactionLogIndex is 10. - long deleteTrxnLogIndex = 10L; - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - TestOMRequestUtils.deleteKey(ozoneKey, omMetadataManager, 10L); - - // Create the same key again with TransactionLogIndex > Delete requests - // TransactionLogIndex - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 20L, omMetadataManager); - - // Replay the original DeleteRequest. - OMClientResponse omClientResponse = omKeyDeleteRequest - .validateAndUpdateCache(ozoneManager, deleteTrxnLogIndex, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } - /** * This method calls preExecute and verify the modified request. * @param originalOmRequest diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index 10b45ad54a02..31e6975775e4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -27,7 +27,6 @@ import org.junit.Test; import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -154,120 +153,4 @@ public void testValidateAndUpdateCache() throws Exception { deletedKey)); } } - - @Test - public void testPurgeKeysAcrossBuckets() throws Exception { - String bucket1 = bucketName; - String bucket2 = UUID.randomUUID().toString(); - - // bucket1 is created during setup. Create bucket2 manually. - TestOMRequestUtils.addBucketToDB(volumeName, bucket2, omMetadataManager); - - // Create and Delete keys in Bucket1 and Bucket2. 
- List deletedKeyInBucket1 = createAndDeleteKeys(1, bucket1); - List deletedKeyInBucket2 = createAndDeleteKeys(1, bucket2); - List deletedKeyNames = new ArrayList<>(); - deletedKeyNames.addAll(deletedKeyInBucket1); - deletedKeyNames.addAll(deletedKeyInBucket2); - - // The keys should be present in the DeletedKeys table before purging - for (String deletedKey : deletedKeyNames) { - Assert.assertTrue(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - - // Create PurgeKeysRequest to purge the deleted keys - DeletedKeys deletedKeysInBucket1 = DeletedKeys.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucket1) - .addAllKeys(deletedKeyInBucket1) - .build(); - DeletedKeys deletedKeysInBucket2 = DeletedKeys.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucket2) - .addAllKeys(deletedKeyInBucket1) - .build(); - PurgeKeysRequest purgeKeysRequest = PurgeKeysRequest.newBuilder() - .addDeletedKeys(deletedKeysInBucket1) - .addDeletedKeys(deletedKeysInBucket2) - .build(); - - OMRequest omRequest = OMRequest.newBuilder() - .setPurgeKeysRequest(purgeKeysRequest) - .setCmdType(Type.PurgeKeys) - .setClientId(UUID.randomUUID().toString()) - .build(); - - OMRequest preExecutedRequest = preExecute(omRequest); - OMKeyPurgeRequest omKeyPurgeRequest = - new OMKeyPurgeRequest(preExecutedRequest); - - omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = OMResponse.newBuilder() - .setPurgeKeysResponse(PurgeKeysResponse.getDefaultInstance()) - .setCmdType(Type.PurgeKeys) - .setStatus(Status.OK) - .build(); - - BatchOperation batchOperation = - omMetadataManager.getStore().initBatchOperation(); - - OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse( - omResponse, deletedKeyNames); - omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // The keys should not exist in the DeletedKeys table - for (String deletedKey : deletedKeyNames) { - Assert.assertFalse(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - } - - @Test - public void testReplayRequest() throws Exception { - - // Create and Delete keys. The keys should be moved to DeletedKeys table - Integer trxnLogIndex = new Integer(1); - List deletedKeyNames = createAndDeleteKeys(trxnLogIndex, null); - int purgeRequestTrxnLogIndex = ++trxnLogIndex; - - // The keys should be present in the DeletedKeys table before purging - for (String deletedKey : deletedKeyNames) { - Assert.assertTrue(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - - // Execute PurgeKeys request to purge the keys from Deleted table. - // Create PurgeKeysRequest to replay the purge request - OMRequest omRequest = createPurgeKeysRequest(deletedKeyNames); - OMRequest preExecutedRequest = preExecute(omRequest); - OMKeyPurgeRequest omKeyPurgeRequest = - new OMKeyPurgeRequest(preExecutedRequest); - OMClientResponse omClientResponse = omKeyPurgeRequest - .validateAndUpdateCache(ozoneManager, purgeRequestTrxnLogIndex, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus().equals( - Status.OK)); - - // Create and delete the same keys again - createAndDeleteKeys(++trxnLogIndex, null); - - // Replay the PurgeKeys request. It should not purge the keys deleted - // after the original request was played. 
- OMClientResponse replayResponse = omKeyPurgeRequest - .validateAndUpdateCache(ozoneManager, purgeRequestTrxnLogIndex, - ozoneManagerDoubleBufferHelper); - - // Verify that the new deletedKeys exist in the DeletedKeys table - for (String deletedKey : deletedKeyNames) { - Assert.assertTrue(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java index eb79d7a6346d..fc7f9b85787e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java @@ -22,11 +22,9 @@ import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; @@ -199,106 +197,6 @@ public void testValidateAndUpdateCacheWithFromKeyInvalid() throws Exception { omKeyRenameResponse.getOMResponse().getStatus()); } - /** - * Test replay of RenameRequest when fromKey does not exist in DB. - */ - @Test - public void testReplayRequest() throws Exception { - String toKeyName = UUID.randomUUID().toString(); - OMRequest modifiedOmRequest = doPreExecute( - createRenameKeyRequest(toKeyName)); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - omMetadataManager); - - // Execute RenameRequest - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(modifiedOmRequest); - OMClientResponse omKeyRenameResponse = - omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 10L, - ozoneManagerDoubleBufferHelper); - - // Commit Batch operation to add the transaction to DB - BatchOperation batchOperation = omMetadataManager.getStore() - .initBatchOperation(); - omKeyRenameResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // Replay the RenameRequest. - OMClientResponse replayResponse = omKeyRenameRequest.validateAndUpdateCache( - ozoneManager, 10L, ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } - - /** - * Test replay of RenameRequest when fromKey exists in DB. 
- */ - @Test - public void testReplayRequestWhenFromKeyExists() throws Exception { - - String toKeyName = UUID.randomUUID().toString(); - OMRequest modifiedOmRequest = doPreExecute( - createRenameKeyRequest(toKeyName)); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 1L, omMetadataManager); - - // Execute RenameRequest - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(modifiedOmRequest); - OMClientResponse omKeyRenameResponse = omKeyRenameRequest - .validateAndUpdateCache(ozoneManager, 10L, - ozoneManagerDoubleBufferHelper); - - // Commit Batch operation to add the transaction to DB - BatchOperation batchOperation = omMetadataManager.getStore() - .initBatchOperation(); - omKeyRenameResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // Let's say the fromKey create transaction was also replayed. In this - // case, fromKey and toKey will both exist in the DB. Replaying the - // RenameRequest should then delete fromKey but not add toKey again. - - // Replay CreateKey request for fromKey - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 1L, omMetadataManager); - - // Verify fromKey exists in DB - String fromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - OmKeyInfo dbFromKeyInfo = omMetadataManager.getKeyTable().get(fromKey); - Assert.assertNotNull(dbFromKeyInfo); - - // Replay original RenameRequest - OMKeyRenameResponse replayResponse = - (OMKeyRenameResponse) omKeyRenameRequest.validateAndUpdateCache( - ozoneManager, 10L, ozoneManagerDoubleBufferHelper); - - // This replay response should delete fromKey from DB - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - replayResponse.getOMResponse().getStatus()); - Assert.assertTrue(replayResponse.deleteFromKeyOnly()); - - // Commit response to DB - batchOperation = omMetadataManager.getStore().initBatchOperation(); - replayResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // Verify fromKey is deleted from DB - dbFromKeyInfo = omMetadataManager.getKeyTable().get(fromKey); - Assert.assertNull(dbFromKeyInfo); - } - /** * This method calls preExecute and verify the modified request. 
* @param originalOmRequest diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java index c25ee7b5baa1..5690ff20cb65 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java @@ -40,7 +40,7 @@ public class TestOMPrefixAclRequest extends TestOMKeyRequest { @Test - public void testReplayRequest() throws Exception { + public void testAclRequest() throws Exception { PrefixManager prefixManager = new PrefixManagerImpl( ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); @@ -66,16 +66,9 @@ public void testReplayRequest() throws Exception { ozoneManagerDoubleBufferHelper); Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); - - // Replay the original request - OMClientResponse replayResponse = omKeyPrefixAclRequest - .validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); } + /** * Create OMRequest which encapsulates OMKeyAddAclRequest. */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java index 27973ed70d83..4ac1f494b009 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java @@ -244,30 +244,4 @@ private void verifyRequest(OMRequest modifiedRequest, Assert.assertNotEquals(original.getModificationTime(), updated.getModificationTime()); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String adminName = "user1"; - String ownerName = "user1"; - OMRequest originalRequest = createVolumeRequest(volumeName, adminName, - ownerName); - OMVolumeCreateRequest omVolumeCreateRequest = - new OMVolumeCreateRequest(originalRequest); - - // Execute the original request - omVolumeCreateRequest.preExecute(ozoneManager); - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java index 709f82149227..49f28d3ef9b1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java @@ -157,42 +157,4 @@ private OMRequest deleteVolumeRequest(String volumeName) { .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) .setDeleteVolumeRequest(deleteVolumeRequest).build(); } - - @Test - public void testReplayRequest() throws Exception { - - // create volume request - String volumeName = UUID.randomUUID().toString(); - String user = "user1"; - OMVolumeCreateRequest omVolumeCreateRequest = new OMVolumeCreateRequest( - createVolumeRequest(volumeName, user, user)); - - // Execute createVolume request - omVolumeCreateRequest.preExecute(ozoneManager); - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OMRequest originalDeleteRequest = deleteVolumeRequest(volumeName); - OMVolumeDeleteRequest omVolumeDeleteRequest = - new OMVolumeDeleteRequest(originalDeleteRequest); - - // Execute the original request - omVolumeDeleteRequest.preExecute(ozoneManager); - omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - // Create the volume again - omVolumeCreateRequest.preExecute(ozoneManager); - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 3, - ozoneManagerDoubleBufferHelper); - - // Replay the delete transaction - Execute the same request again - OMClientResponse omClientResponse = - omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java index 0e1ac5475277..4ccf195de42e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java @@ -169,37 +169,6 @@ public void testInvalidRequest() throws Exception { omResponse.getStatus()); } - @Test - public void testReplayRequest() throws Exception { - // create volume - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - // create request to set new owner - String newOwnerName = "user2"; - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, - newOwnerName); - OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = - new OMVolumeSetOwnerRequest(originalRequest); - - // Execute the original request - omVolumeSetOwnerRequest.preExecute(ozoneManager); - omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = - omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } - @Test public void testOwnSameVolumeTwice() throws Exception { diff 
--git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java index bd90222dc32e..4d78ef0a37c6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java @@ -150,34 +150,4 @@ public void testInvalidRequest() throws Exception { Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_REQUEST, omResponse.getStatus()); } - - @Test - public void testReplayRequest() throws Exception { - // create volume - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - // create request with quota set. - long quota = 100L; - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quota); - OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = - new OMVolumeSetQuotaRequest(originalRequest); - - // Execute the original request - omVolumeSetQuotaRequest.preExecute(ozoneManager); - omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = - omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java index 8c79e029fe18..66a122f298d5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java @@ -119,37 +119,4 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, omResponse.getStatus()); } - - @Test - public void testReplayRequest() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); - - OMRequest originalRequest = TestOMRequestUtils.createVolumeAddAclRequest( - volumeName, acl); - - OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest( - originalRequest); - omVolumeAddAclRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = omVolumeAddAclRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - // Replay the original request - OMClientResponse replayResponse = omVolumeAddAclRequest - .validateAndUpdateCache(ozoneManager, 1, - 
ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java index b1bbf13e0664..b2eb0bf99b6d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java @@ -129,48 +129,4 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, omResponse.getStatus()); } - - @Test - public void testReplayRequest() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); - - // add acl first - OMRequest addAclRequest = TestOMRequestUtils.createVolumeAddAclRequest( - volumeName, acl); - OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest( - addAclRequest); - omVolumeAddAclRequest.preExecute(ozoneManager); - OMClientResponse addAclResponse = omVolumeAddAclRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - addAclResponse.getOMResponse().getStatus()); - - // remove acl - OMRequest removeAclRequest = TestOMRequestUtils - .createVolumeRemoveAclRequest(volumeName, acl); - OMVolumeRemoveAclRequest omVolumeRemoveAclRequest = - new OMVolumeRemoveAclRequest(removeAclRequest); - omVolumeRemoveAclRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = omVolumeRemoveAclRequest - .validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - // Replay the original request - OMClientResponse replayResponse = omVolumeRemoveAclRequest - .validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java index 6d0f2b13ecbe..087ba713f6cf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java @@ -132,39 +132,4 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, omResponse.getStatus()); } - - @Test - public void testReplayRequest() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - 
TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OzoneAcl userAccessAcl = OzoneAcl.parseAcl("user:bilbo:rw[ACCESS]"); - OzoneAcl groupDefaultAcl = OzoneAcl.parseAcl( - "group:admin:rwdlncxy[DEFAULT]"); - - List acls = Lists.newArrayList(userAccessAcl, groupDefaultAcl); - - OMRequest originalRequest = TestOMRequestUtils.createVolumeSetAclRequest( - volumeName, acls); - - OMVolumeSetAclRequest omVolumeSetAclRequest = new OMVolumeSetAclRequest( - originalRequest); - omVolumeSetAclRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = omVolumeSetAclRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - OMClientResponse replayResponse = omVolumeSetAclRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java index cca0dad28616..fbd3af0b44a3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; @@ -36,6 +37,7 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; +import java.util.ArrayList; import java.util.UUID; /** @@ -75,7 +77,8 @@ public void testAddToDBBatch() throws Exception { .build(); OMDirectoryCreateResponse omDirectoryCreateResponse = - new OMDirectoryCreateResponse(omResponse, omKeyInfo, null); + new OMDirectoryCreateResponse(omResponse, omKeyInfo, + new ArrayList<>(), Result.SUCCESS); omDirectoryCreateResponse.addToDBBatch(omMetadataManager, batchOperation); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index f8b0a17dc3a0..b2626da18905 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -152,7 +152,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); - omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); + omKeyDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation); // Do manual commit and see whether addToBatch is successful or not. 
omMetadataManager.getStore().commitBatchOperation(batchOperation); From 4ffbf2e111b7770d00e225c12f2bc8b6829b70d1 Mon Sep 17 00:00:00 2001 From: runzhiwang <51938049+runzhiwang@users.noreply.github.com> Date: Tue, 14 Jul 2020 11:59:03 +0800 Subject: [PATCH 011/165] HDDS-3861. Fix handlePipelineFailure throw exception if role is follower (#1122) --- .../common/transport/server/ratis/XceiverServerRatis.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 6c98e81f1c43..3e6ac10e7e1a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -552,6 +552,14 @@ private void handlePipelineFailure(RaftGroupId groupId, msg = datanode + " is in candidate state for " + roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs() + "ms"; break; + case FOLLOWER: + msg = datanode + " closes pipeline when installSnapshot from leader " + + "because leader snapshot doesn't contain any data to replay, " + + "all the log entries prior to the snapshot might have been purged." + + "So follower should not try to install snapshot from leader but" + + "can close the pipeline here. It's in follower state for " + + roleInfoProto.getRoleElapsedTimeMs() + "ms"; + break; case LEADER: StringBuilder sb = new StringBuilder(); sb.append(datanode).append(" has not seen follower/s"); From 0b77403caa61c5978edc9c3327b9490afc756beb Mon Sep 17 00:00:00 2001 From: Lokesh Jain Date: Tue, 14 Jul 2020 16:33:48 +0530 Subject: [PATCH 012/165] HDDS-3789. Fix TestOzoneRpcClientAbstract#testDeletedKeyForGDPR. (#1198) --- .../hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 591f5c68b911..fb178826180f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -2673,7 +2673,6 @@ public void testKeyReadWriteForGDPR() throws Exception { * GDPR encryption details (flag, secret, algorithm). * @throws Exception */ - @Ignore @Test public void testDeletedKeyForGDPR() throws Exception { //Step 1 From c830efe5a86765c6394c2ac2ada92372ccc6205e Mon Sep 17 00:00:00 2001 From: Lokesh Jain Date: Tue, 14 Jul 2020 18:52:24 +0530 Subject: [PATCH 013/165] HDDS-3062. Fix TestOzoneRpcClientAbstract.testListVolume. 
(#1194) --- .../hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index fb178826180f..f0ffee342543 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -1234,9 +1234,6 @@ public void testRenameKey() Assert.assertEquals(toKeyName, key.getName()); } - // Listing all volumes in the cluster feature has to be fixed after HDDS-357. - // TODO: fix this - @Ignore @Test public void testListVolume() throws IOException { String volBase = "vol-" + RandomStringUtils.randomNumeric(3); From 92adf6e8b8e0020e240e0177916307ef172e4592 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Tue, 14 Jul 2020 15:30:01 +0200 Subject: [PATCH 014/165] HDDS-3062. Fix TestOzoneRpcClientAbstract.testListVolume - addendum for checkstyle --- .../hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index f0ffee342543..3b90815ec917 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -118,7 +118,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import org.junit.Ignore; import org.junit.Test; /** From 629188a6e774a46df65a2bbeff4d8543e96a5768 Mon Sep 17 00:00:00 2001 From: runzhiwang <51938049+runzhiwang@users.noreply.github.com> Date: Wed, 15 Jul 2020 00:20:16 +0800 Subject: [PATCH 015/165] HDDS-3957. 
Fix mixed use of Longs.toByteArray and Ints.fromByteArray (#1199) --- ...TopNOrderedContainerDeletionChoosingPolicy.java | 2 +- .../container/keyvalue/KeyValueContainerData.java | 14 +++++++------- .../keyvalue/helpers/KeyValueContainerUtil.java | 3 +-- .../container/common/TestBlockDeletingService.java | 7 ++++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java index 41fc26716c19..2cee75c00fe8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java @@ -45,7 +45,7 @@ public class TopNOrderedContainerDeletionChoosingPolicy private static final Comparator KEY_VALUE_CONTAINER_DATA_COMPARATOR = (KeyValueContainerData c1, KeyValueContainerData c2) -> - Integer.compare(c2.getNumPendingDeletionBlocks(), + Long.compare(c2.getNumPendingDeletionBlocks(), c1.getNumPendingDeletionBlocks()); @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 5698d7267882..68f01fbc437b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -38,7 +38,7 @@ import java.io.File; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import static java.lang.Math.max; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB; @@ -73,7 +73,7 @@ public class KeyValueContainerData extends ContainerData { /** * Number of pending deletion blocks in KeyValueContainer. 
*/ - private final AtomicInteger numPendingDeletionBlocks; + private final AtomicLong numPendingDeletionBlocks; private long deleteTransactionId; @@ -98,7 +98,7 @@ public KeyValueContainerData(long id, ChunkLayOutVersion layOutVersion, long size, String originPipelineId, String originNodeId) { super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion, size, originPipelineId, originNodeId); - this.numPendingDeletionBlocks = new AtomicInteger(0); + this.numPendingDeletionBlocks = new AtomicLong(0); this.deleteTransactionId = 0; } @@ -106,7 +106,7 @@ public KeyValueContainerData(ContainerData source) { super(source); Preconditions.checkArgument(source.getContainerType() == ContainerProtos.ContainerType.KeyValueContainer); - this.numPendingDeletionBlocks = new AtomicInteger(0); + this.numPendingDeletionBlocks = new AtomicLong(0); this.deleteTransactionId = 0; } @@ -188,7 +188,7 @@ public void setContainerDBType(String containerDBType) { * * @param numBlocks increment number */ - public void incrPendingDeletionBlocks(int numBlocks) { + public void incrPendingDeletionBlocks(long numBlocks) { this.numPendingDeletionBlocks.addAndGet(numBlocks); } @@ -197,14 +197,14 @@ public void incrPendingDeletionBlocks(int numBlocks) { * * @param numBlocks decrement number */ - public void decrPendingDeletionBlocks(int numBlocks) { + public void decrPendingDeletionBlocks(long numBlocks) { this.numPendingDeletionBlocks.addAndGet(-1 * numBlocks); } /** * Get the number of pending deletion blocks. */ - public int getNumPendingDeletionBlocks() { + public long getNumPendingDeletionBlocks() { return this.numPendingDeletionBlocks.get(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 2141bed143a1..d5fea23a882f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -23,7 +23,6 @@ import java.nio.file.Paths; import java.util.List; -import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -171,7 +170,7 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, containerDB.getStore().get(DB_PENDING_DELETE_BLOCK_COUNT_KEY); if (pendingDeleteBlockCount != null) { kvContainerData.incrPendingDeletionBlocks( - Ints.fromByteArray(pendingDeleteBlockCount)); + Longs.fromByteArray(pendingDeleteBlockCount)); } else { // Set pending deleted block count. 
MetadataKeyFilters.KeyPrefixFilter filter = diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index b8843dea87de..ba6999d05016 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -27,7 +27,6 @@ import java.util.concurrent.TimeoutException; import com.google.common.collect.Lists; -import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; @@ -180,7 +179,7 @@ private void createToDeleteBlocks(ContainerSet containerSet, metadata.getStore().put(OzoneConsts.DB_CONTAINER_BYTES_USED_KEY, Longs.toByteArray(blockLength * numOfBlocksPerContainer)); metadata.getStore().put(DB_PENDING_DELETE_BLOCK_COUNT_KEY, - Ints.toByteArray(numOfBlocksPerContainer)); + Longs.toByteArray(numOfBlocksPerContainer)); } } } @@ -251,6 +250,8 @@ public void testBlockDeletion() throws Exception { // Ensure there are 3 blocks under deletion and 0 deleted blocks Assert.assertEquals(3, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(3, Longs.fromByteArray( + meta.getStore().get(DB_PENDING_DELETE_BLOCK_COUNT_KEY))); Assert.assertEquals(0, getDeletedBlocksCount(meta)); // An interval will delete 1 * 2 blocks @@ -269,7 +270,7 @@ public void testBlockDeletion() throws Exception { // Check finally DB counters. // Not checking bytes used, as handler is a mock call. - Assert.assertEquals(0, Ints.fromByteArray( + Assert.assertEquals(0, Longs.fromByteArray( meta.getStore().get(DB_PENDING_DELETE_BLOCK_COUNT_KEY))); Assert.assertEquals(0, Longs.fromByteArray( meta.getStore().get(DB_BLOCK_COUNT_KEY))); From 44bc5158745529467eb23d7ff98a11dfb1f1cf5e Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 14 Jul 2020 09:23:57 -0700 Subject: [PATCH 016/165] HDDS-3930. Fix OMKeyDeletesRequest. 
(#1195) --- .../org/apache/hadoop/ozone/OzoneConsts.java | 2 + .../hadoop/ozone/client/rpc/RpcClient.java | 14 +- .../apache/hadoop/ozone/audit/OMAction.java | 1 + .../ozone/om/exceptions/OMException.java | 2 + .../hadoop/ozone/om/helpers/OmDeleteKeys.java | 51 +++++ .../om/protocol/OzoneManagerProtocol.java | 5 +- ...ManagerProtocolClientSideTranslatorPB.java | 24 +- .../hadoop/fs/ozone/TestOzoneFileSystem.java | 16 +- .../ozone/om/TestOzoneManagerHAWithData.java | 7 +- .../src/main/proto/OmClientProtocol.proto | 20 +- .../src/main/proto/proto.lock | 60 +++-- .../org/apache/hadoop/ozone/om/OMMetrics.java | 4 + .../apache/hadoop/ozone/om/OzoneManager.java | 18 +- .../ozone/om/request/OMClientRequest.java | 50 +---- .../om/request/key/OMKeysDeleteRequest.java | 206 +++++++++++------- .../om/response/key/OMKeysDeleteResponse.java | 100 ++++----- .../request/key/TestOMKeysDeleteRequest.java | 155 +++++++++++++ .../key/TestOMKeysDeleteResponse.java | 125 +++++++++++ 18 files changed, 606 insertions(+), 254 deletions(-) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteKeys.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index ea0466f6cea5..a473948dcc17 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -296,6 +296,8 @@ private OzoneConsts() { public static final String MULTIPART_UPLOAD_PART_NUMBER = "partNumber"; public static final String MULTIPART_UPLOAD_PART_NAME = "partName"; public static final String BUCKET_ENCRYPTION_KEY = "bucketEncryptionKey"; + public static final String DELETED_KEYS_LIST = "deletedKeysList"; + public static final String UNDELETED_KEYS_LIST = "unDeletedKeysList"; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index a44aa53954fa..1d69b0ae33c5 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -76,6 +76,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDeleteKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; @@ -730,16 +731,9 @@ public void deleteKeys( throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); Preconditions.checkNotNull(keyNameList); - List keyArgsList = new ArrayList<>(); - for (String keyName: keyNameList) { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .build(); - keyArgsList.add(keyArgs); - } - ozoneManagerClient.deleteKeys(keyArgsList); + OmDeleteKeys omDeleteKeys = new OmDeleteKeys(volumeName, bucketName, + keyNameList); + 
ozoneManagerClient.deleteKeys(omDeleteKeys); } @Override diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index cd8b12614d79..31cccacb0c7a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -38,6 +38,7 @@ public enum OMAction implements AuditAction { UPDATE_BUCKET, UPDATE_KEY, PURGE_KEYS, + DELETE_KEYS, // S3 Bucket CREATE_S3_BUCKET, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index 1eed619dc321..e2b341884318 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -222,5 +222,7 @@ public enum ResultCodes { DIRECTORY_ALREADY_EXISTS, INVALID_VOLUME_NAME, + + PARTIAL_DELETE } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteKeys.java new file mode 100644 index 000000000000..4274078f24d5 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteKeys.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +import java.util.List; + +/** + * Represent class which has info of Keys to be deleted from Client. 
+ */ +public class OmDeleteKeys { + + private String volume; + private String bucket; + + private List keyNames; + + + public OmDeleteKeys(String volume, String bucket, List keyNames) { + this.volume = volume; + this.bucket = bucket; + this.keyNames = keyNames; + } + + public String getVolume() { + return volume; + } + + public String getBucket() { + return bucket; + } + + public List< String > getKeyNames() { + return keyNames; + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index b342ef21b890..9ae107b071ed 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.DBUpdates; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDeleteKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -229,10 +230,10 @@ OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, * multiple keys and a single key. Used by deleting files * through OzoneFileSystem. * - * @param args the list args of the key. + * @param deleteKeys * @throws IOException */ - void deleteKeys(List args) throws IOException; + void deleteKeys(OmDeleteKeys deleteKeys) throws IOException; /** * Deletes an existing empty bucket from volume. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 647d545f645e..6afb28887dd7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDeleteKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -71,8 +72,9 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclResponse; @@ -141,6 +143,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.protobuf.ByteString; + import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.ACCESS_DENIED; @@ -717,22 +720,17 @@ public void deleteKey(OmKeyArgs args) throws IOException { * Deletes existing key/keys. This interface supports delete * multiple keys and a single key. * - * @param args the list args of the key. + * @param deleteKeys * @throws IOException */ @Override - public void deleteKeys(List args) throws IOException { + public void deleteKeys(OmDeleteKeys deleteKeys) throws IOException { DeleteKeysRequest.Builder req = DeleteKeysRequest.newBuilder(); - List keyArgsList = new ArrayList(); - for (OmKeyArgs omKeyArgs : args) { - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(omKeyArgs.getVolumeName()) - .setBucketName(omKeyArgs.getBucketName()) - .setKeyName(omKeyArgs.getKeyName()).build(); - keyArgsList.add(keyArgs); - } - req.addAllKeyArgs(keyArgsList); - + DeleteKeyArgs deletedKeys = DeleteKeyArgs.newBuilder() + .setBucketName(deleteKeys.getBucket()) + .setVolumeName(deleteKeys.getVolume()) + .addAllKeys(deleteKeys.getKeyNames()).build(); + req.setDeleteKeys(deletedKeys); OMRequest omRequest = createOMRequest(Type.DeleteKeys) .setDeleteKeysRequest(req) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index 75107d0c5f22..700506a5484c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -72,8 +72,8 @@ public class TestOzoneFileSystem { /** - * Set a timeout for each test. - */ + * Set a timeout for each test. + */ @Rule public Timeout timeout = new Timeout(300000); @@ -89,7 +89,7 @@ public class TestOzoneFileSystem { @Test(timeout = 300_000) public void testCreateFileShouldCheckExistenceOfDirWithSameName() - throws Exception { + throws Exception { /* * Op 1. create file -> /d1/d2/d3/d4/key2 * Op 2. 
create dir -> /d1/d2/d3/d4/key2 @@ -195,11 +195,11 @@ public void tearDown() { } private void setupOzoneFileSystem() - throws IOException, TimeoutException, InterruptedException { + throws IOException, TimeoutException, InterruptedException { OzoneConfiguration conf = new OzoneConfiguration(); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); + .setNumDatanodes(3) + .build(); cluster.waitForClusterToBeReady(); // create a volume and a bucket to be used by OzoneFileSystem OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); @@ -207,8 +207,8 @@ private void setupOzoneFileSystem() bucketName = bucket.getName(); String rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), - bucket.getVolumeName()); + OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), + bucket.getVolumeName()); // Set the fs.defaultFS and start the filesystem conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java index 646b91571260..aed84f5dd604 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java @@ -50,7 +50,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_DELETE; import static org.junit.Assert.fail; /** @@ -187,8 +187,9 @@ public void testKeysDelete() throws Exception { ozoneBucket.deleteKeys(keyList2); fail("testFilesDelete"); } catch (OMException ex) { - // The expected exception KEY_NOT_FOUND. - Assert.assertEquals(KEY_NOT_FOUND, ex.getResult()); + // The expected exception is PARTIAL_DELETE, as when not able to delete, we + // return error code PARTIAL_DELETE.
+ Assert.assertEquals(PARTIAL_DELETE, ex.getResult()); } } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index f4cf79a6c567..f6eaf3859eef 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -302,6 +302,8 @@ enum Status { DIRECTORY_ALREADY_EXISTS = 60; INVALID_VOLUME_NAME = 61; + + PARTIAL_DELETE = 62; } /** @@ -845,7 +847,18 @@ message DeleteKeyRequest { } message DeleteKeysRequest { - repeated KeyArgs keyArgs = 1; + optional DeleteKeyArgs deleteKeys = 1; +} + +message DeleteKeyArgs { + required string volumeName = 1; + required string bucketName = 2; + repeated string keys = 3; +} + +message DeleteKeysResponse { + optional DeleteKeyArgs unDeletedKeys = 1; + optional bool status = 2; } message DeleteKeyResponse { @@ -863,10 +876,7 @@ message DeletedKeys { repeated string keys = 3; } -message DeleteKeysResponse { - repeated KeyInfo deletedKeys = 1; - repeated KeyInfo unDeletedKeys = 2; -} + message PurgeKeysRequest { repeated DeletedKeys deletedKeys = 1; diff --git a/hadoop-ozone/interface-client/src/main/proto/proto.lock b/hadoop-ozone/interface-client/src/main/proto/proto.lock index f591ad1b2da6..bef3a0df1e8d 100644 --- a/hadoop-ozone/interface-client/src/main/proto/proto.lock +++ b/hadoop-ozone/interface-client/src/main/proto/proto.lock @@ -415,6 +415,10 @@ { "name": "INVALID_VOLUME_NAME", "integer": 61 + }, + { + "name": "PARTIAL_DELETE", + "integer": 62 } ] }, @@ -2430,12 +2434,47 @@ "fields": [ { "id": 1, - "name": "keyArgs", - "type": "KeyArgs", + "name": "deleteKeys", + "type": "DeleteKeyArgs" + } + ] + }, + { + "name": "DeleteKeyArgs", + "fields": [ + { + "id": 1, + "name": "volumeName", + "type": "string" + }, + { + "id": 2, + "name": "bucketName", + "type": "string" + }, + { + "id": 3, + "name": "keys", + "type": "string", "is_repeated": true } ] }, + { + "name": "DeleteKeysResponse", + "fields": [ + { + "id": 1, + "name": "unDeletedKeys", + "type": "DeleteKeyArgs" + }, + { + "id": 2, + "name": "status", + "type": "bool" + } + ] + }, { "name": "DeleteKeyResponse", "fields": [ @@ -2477,23 +2516,6 @@ } ] }, - { - "name": "DeleteKeysResponse", - "fields": [ - { - "id": 1, - "name": "deletedKeys", - "type": "KeyInfo", - "is_repeated": true - }, - { - "id": 2, - "name": "unDeletedKeys", - "type": "KeyInfo", - "is_repeated": true - } - ] - }, { "name": "PurgeKeysRequest", "fields": [ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index f2292779f5ee..cd6566292412 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -214,6 +214,10 @@ public void setNumKeys(long val) { this.numKeys.incr(val- oldVal); } + public void decNumKeys(long val) { + this.numKeys.incr(-val); + } + public long getNumVolumes() { return numVolumes.value(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 96d81f5943e7..85895630eab0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -42,8 +42,8 @@ import java.util.Objects; import java.util.Timer; import java.util.TimerTask; -import java.util.concurrent.TimeUnit; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProvider; @@ -107,6 +107,7 @@ import org.apache.hadoop.ozone.om.helpers.DBUpdates; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDeleteKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -161,7 +162,6 @@ import org.apache.hadoop.util.KMSUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; - import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; @@ -170,6 +170,8 @@ import com.google.protobuf.BlockingService; import com.google.protobuf.ProtocolMessageEnum; import org.apache.commons.lang3.StringUtils; + + import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; @@ -207,6 +209,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneManagerService.newReflectiveBlockingService; + import org.apache.ratis.proto.RaftProtos.RaftPeerRole; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.FileUtils; @@ -2220,16 +2223,13 @@ public void deleteKey(OmKeyArgs args) throws IOException { /** * Deletes an existing key. * - * @param args - List attributes of the key. + * @param deleteKeys - List of keys to be deleted from volume and a bucket. * @throws IOException */ @Override - public void deleteKeys(List args) throws IOException { - if (args != null) { - for (OmKeyArgs keyArgs : args) { - deleteKey(keyArgs); - } - } + public void deleteKeys(OmDeleteKeys deleteKeys) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented. 
As write requests use a new approach"); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 3ce059fcb6ef..4ced9fdfdba3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -18,15 +18,8 @@ package org.apache.hadoop.ozone.om.request; -import java.io.IOException; -import java.net.InetAddress; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Set; - import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ozone.OzoneConsts; @@ -36,22 +29,21 @@ import org.apache.hadoop.ozone.audit.AuditMessage; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.UserGroupInformation; import javax.annotation.Nonnull; +import java.io.IOException; +import java.net.InetAddress; +import java.util.LinkedHashMap; +import java.util.Map; /** @@ -220,36 +212,6 @@ protected OMResponse createErrorOMResponse( return omResponse.build(); } - /** - * Set parameters needed for return error response to client. - * - * @param omResponse - * @param ex - IOException - * @param unDeletedKeys - Set - * @return error response need to be returned to client - OMResponse. - */ - protected OMResponse createOperationKeysErrorOMResponse( - @Nonnull OMResponse.Builder omResponse, - @Nonnull IOException ex, @Nonnull Set unDeletedKeys) { - omResponse.setSuccess(false); - StringBuffer errorMsg = new StringBuffer(); - DeleteKeysResponse.Builder resp = DeleteKeysResponse.newBuilder(); - for (OmKeyInfo key : unDeletedKeys) { - if(key != null) { - resp.addUnDeletedKeys(key.getProtobuf()); - } - } - if (errorMsg != null) { - omResponse.setMessage(errorMsg.toString()); - } - // TODO: Currently all delete operations in OzoneBucket.java are void. Here - // we put the List of unDeletedKeys into Response. These KeyInfo can be - // used to continue deletion if client support delete retry. - omResponse.setDeleteKeysResponse(resp.build()); - omResponse.setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex)); - return omResponse.build(); - } - /** * Add the client response to double buffer and set the flush future. 
* @param trxIndex diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index 9a7d9935ab3c..adc42d8dc201 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -18,44 +18,42 @@ package org.apache.hadoop.ozone.om.request.key; -import com.google.common.base.Preconditions; +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse; import org.apache.hadoop.ozone.om.response.key.OMKeysDeleteResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeysRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; -import java.util.HashSet; +import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.OzoneConsts.BUCKET; +import static org.apache.hadoop.ozone.OzoneConsts.DELETED_KEYS_LIST; +import static org.apache.hadoop.ozone.OzoneConsts.UNDELETED_KEYS_LIST; +import static org.apache.hadoop.ozone.OzoneConsts.VOLUME; +import static org.apache.hadoop.ozone.audit.OMAction.DELETE_KEYS; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; /** * Handles DeleteKey request. 
@@ -69,24 +67,6 @@ public OMKeysDeleteRequest(OMRequest omRequest) { super(omRequest); } - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - DeleteKeysRequest deleteKeyRequest = - getOmRequest().getDeleteKeysRequest(); - Preconditions.checkNotNull(deleteKeyRequest); - List newKeyArgsList = new ArrayList<>(); - for (KeyArgs keyArgs : deleteKeyRequest.getKeyArgsList()) { - newKeyArgsList.add( - keyArgs.toBuilder().setModificationTime(Time.now()).build()); - } - DeleteKeysRequest newDeleteKeyRequest = DeleteKeysRequest - .newBuilder().addAllKeyArgs(newKeyArgsList).build(); - - return getOmRequest().toBuilder() - .setDeleteKeysRequest(newDeleteKeyRequest) - .setUserInfo(getUserInfo()).build(); - } - @Override @SuppressWarnings("methodlength") public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, @@ -94,8 +74,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, DeleteKeysRequest deleteKeyRequest = getOmRequest().getDeleteKeysRequest(); - List deleteKeyArgsList = deleteKeyRequest.getKeyArgsList(); - Set unDeletedKeys = new HashSet<>(); + OzoneManagerProtocolProtos.DeleteKeyArgs deleteKeyArgs = + deleteKeyRequest.getDeleteKeys(); + + List deleteKeys = new ArrayList<>(deleteKeyArgs.getKeysList()); + IOException exception = null; OMClientResponse omClientResponse = null; Result result = null; @@ -103,8 +86,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumKeyDeletes(); Map auditMap = null; - String volumeName = ""; - String bucketName = ""; + String volumeName = deleteKeyArgs.getVolumeName(); + String bucketName = deleteKeyArgs.getBucketName(); String keyName = ""; List omKeyInfoList = new ArrayList<>(); @@ -115,79 +98,144 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - try { - for (KeyArgs deleteKeyArgs : deleteKeyArgsList) { - volumeName = deleteKeyArgs.getVolumeName(); - bucketName = deleteKeyArgs.getBucketName(); - keyName = deleteKeyArgs.getKeyName(); - String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey); - omKeyInfoList.add(omKeyInfo); - unDeletedKeys.add(omKeyInfo); - } - // Check if any of the key in the batch cannot be deleted. If exists the - // batch will delete failed. - for (KeyArgs deleteKeyArgs : deleteKeyArgsList) { - volumeName = deleteKeyArgs.getVolumeName(); - bucketName = deleteKeyArgs.getBucketName(); - keyName = deleteKeyArgs.getKeyName(); - auditMap = buildKeyArgsAuditMap(deleteKeyArgs); - // check Acl - checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, - IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); - String objectKey = omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName); + boolean acquiredLock = + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, + bucketName); + + int indexFailed = 0; + int length = deleteKeys.size(); + OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys = + OzoneManagerProtocolProtos.DeleteKeyArgs.newBuilder() + .setVolumeName(volumeName).setBucketName(bucketName); + + boolean deleteStatus = true; + try { - // Validate bucket and volume exists or not. 
- validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + // Validate bucket and volume exists or not. + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + for (indexFailed = 0; indexFailed < length; indexFailed++) { + keyName = deleteKeyArgs.getKeys(indexFailed); + String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey); if (omKeyInfo == null) { - throw new OMException("Key not found: " + keyName, KEY_NOT_FOUND); + deleteStatus = false; + LOG.error("Received a request to delete a Key does not exist {}", + objectKey); + deleteKeys.remove(keyName); + unDeletedKeys.addKeys(keyName); + continue; + } + + try { + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, + IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); + omKeyInfoList.add(omKeyInfo); + } catch (Exception ex) { + deleteStatus = false; + LOG.error("Acl check failed for Key: {}", objectKey, ex); + deleteKeys.remove(keyName); + unDeletedKeys.addKeys(keyName); } + } + // Mark all keys which can be deleted, in cache as deleted. + for (OmKeyInfo omKeyInfo : omKeyInfoList) { + omMetadataManager.getKeyTable().addCacheEntry( + new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, + omKeyInfo.getKeyName())), + new CacheValue<>(Optional.absent(), trxnLogIndex)); } omClientResponse = new OMKeysDeleteResponse(omResponse - .setDeleteKeysResponse(DeleteKeysResponse.newBuilder()).build(), - omKeyInfoList, trxnLogIndex, ozoneManager.isRatisEnabled()); + .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() + .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys)) + .setStatus(deleteStatus ? OK : PARTIAL_DELETE) + .setSuccess(deleteStatus).build(), + omKeyInfoList, trxnLogIndex, + ozoneManager.isRatisEnabled()); + result = Result.SUCCESS; + } catch (IOException ex) { result = Result.FAILURE; exception = ex; + createErrorOMResponse(omResponse, ex); + + // reset deleteKeys as request failed. + deleteKeys = new ArrayList<>(); + // Add all keys which are failed due to any other exception . + for (int i = indexFailed; i < length; i++) { + unDeletedKeys.addKeys(deleteKeyArgs.getKeys(i)); + } - omClientResponse = new OMKeyDeleteResponse( - createOperationKeysErrorOMResponse(omResponse, exception, - unDeletedKeys)); + omResponse.setDeleteKeysResponse(DeleteKeysResponse.newBuilder() + .setStatus(false).setUnDeletedKeys(unDeletedKeys).build()).build(); + omClientResponse = new OMKeysDeleteResponse(omResponse.build()); } finally { + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); } - auditLog(auditLogger, buildAuditMessage( - OMAction.DELETE_KEY, auditMap, exception, userInfo)); + auditMap = buildDeleteKeysAuditMap(volumeName, bucketName, deleteKeys, + unDeletedKeys.getKeysList()); + + auditLog(auditLogger, buildAuditMessage(DELETE_KEYS, auditMap, exception, + userInfo)); + switch (result) { case SUCCESS: - omMetrics.decNumKeys(); - LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName, - bucketName, keyName); + omMetrics.decNumKeys(deleteKeys.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("Keys delete success. 
Volume:{}, Bucket:{}, Keys:{}", + volumeName, bucketName, auditMap.get(DELETED_KEYS_LIST)); + } break; case FAILURE: + omMetrics.decNumKeys(deleteKeys.size()); omMetrics.incNumKeyDeleteFails(); - LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key{}." + - " Exception:{}", volumeName, bucketName, keyName, exception); + if (LOG.isDebugEnabled()) { + LOG.debug("Keys delete failed. Volume:{}, Bucket:{}, DeletedKeys:{}, " + + "UnDeletedKeys:{}", volumeName, bucketName, + auditMap.get(DELETED_KEYS_LIST), auditMap.get(UNDELETED_KEYS_LIST), + exception); + } break; default: - LOG.error("Unrecognized Result for OMKeyDeleteRequest: {}", + LOG.error("Unrecognized Result for OMKeysDeleteRequest: {}", deleteKeyRequest); } return omClientResponse; } + + /** + * Build audit map for DeleteKeys request. + * @param volumeName + * @param bucketName + * @param deletedKeys + * @param unDeletedKeys + * @return + */ + private Map buildDeleteKeysAuditMap(String volumeName, + String bucketName, List deletedKeys, List unDeletedKeys) { + Map< String, String > auditMap = new HashMap<>(); + auditMap.put(VOLUME, volumeName); + auditMap.put(BUCKET, bucketName); + auditMap.put(DELETED_KEYS_LIST, String.join(",", deletedKeys)); + auditMap.put(UNDELETED_KEYS_LIST, String.join(",", + unDeletedKeys)); + return auditMap; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java index 597841ca3d75..9d2cd539fbd5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java @@ -18,17 +18,13 @@ package org.apache.hadoop.ozone.om.response.key; -import com.google.common.base.Optional; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import javax.annotation.Nonnull; @@ -36,7 +32,8 @@ import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; /** * Response for DeleteKey request. 
@@ -48,10 +45,10 @@ public class OMKeysDeleteResponse extends OMClientResponse { private long trxnLogIndex; public OMKeysDeleteResponse(@Nonnull OMResponse omResponse, - @Nonnull List omKeyInfoList, + @Nonnull List keyDeleteList, long trxnLogIndex, boolean isRatisEnabled) { super(omResponse); - this.omKeyInfoList = omKeyInfoList; + this.omKeyInfoList = keyDeleteList; this.isRatisEnabled = isRatisEnabled; this.trxnLogIndex = trxnLogIndex; } @@ -65,69 +62,48 @@ public OMKeysDeleteResponse(@Nonnull OMResponse omResponse) { checkStatusNotOK(); } + public void checkAndUpdateDB(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + if (getOMResponse().getStatus() == OK || + getOMResponse().getStatus() == PARTIAL_DELETE) { + addToDBBatch(omMetadataManager, batchOperation); + } + } + @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { + String volumeName = ""; + String bucketName = ""; + String keyName = ""; for (OmKeyInfo omKeyInfo : omKeyInfoList) { - // Set the UpdateID to current transactionLogIndex - omKeyInfo.setUpdateID(trxnLogIndex, isRatisEnabled); + volumeName = omKeyInfo.getVolumeName(); + bucketName = omKeyInfo.getBucketName(); + keyName = omKeyInfo.getKeyName(); - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - boolean acquiredLock = false; - String volumeName = ""; - String bucketName = ""; + String deleteKey = omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); - try { - volumeName = omKeyInfo.getVolumeName(); - bucketName = omKeyInfo.getBucketName(); - String keyName = omKeyInfo.getKeyName(); - acquiredLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - // Update table cache. - omMetadataManager.getKeyTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName)), - new CacheValue<>(Optional.absent(), trxnLogIndex)); + omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, + deleteKey); - String ozoneKey = omMetadataManager.getOzoneKey( - omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), - omKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - ozoneKey); - // If a deleted key is put in the table where a key with the same - // name already exists, then the old deleted key information would - // be lost. To avoid this, first check if a key with same name - // exists. deletedTable in OM Metadata stores . The RepeatedOmKeyInfo is the structure that - // allows us to store a list of OmKeyInfo that can be tied to same - // key name. For a keyName if RepeatedOMKeyInfo structure is null, - // we create a new instance, if it is not null, then we simply add - // to the list and store this instance in deletedTable. 
- RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(ozoneKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(), - isRatisEnabled); - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - ozoneKey, repeatedOmKeyInfo); - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock( - BUCKET_LOCK, volumeName, bucketName); - acquiredLock = false; - } - } finally { - if (acquiredLock) { - omMetadataManager.getLock() - .releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - } + // If a deleted key is put in the table where a key with the same + // name already exists, then the old deleted key information would + // be lost. To avoid this, first check if a key with same name + // exists. deletedTable in OM Metadata stores . The RepeatedOmKeyInfo is the structure that + // allows us to store a list of OmKeyInfo that can be tied to same + // key name. For a keyName if RepeatedOMKeyInfo structure is null, + // we create a new instance, if it is not null, then we simply add + // to the list and store this instance in deletedTable. + RepeatedOmKeyInfo repeatedOmKeyInfo = + omMetadataManager.getDeletedTable().get(deleteKey); + repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + omKeyInfo, repeatedOmKeyInfo, trxnLogIndex, + isRatisEnabled); + omMetadataManager.getDeletedTable().putWithBatch(batchOperation, + deleteKey, repeatedOmKeyInfo); } } - } \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java new file mode 100644 index 000000000000..ac50af8bd6be --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; + +/** + * Class tests OMKeysDeleteRequest. + */ +public class TestOMKeysDeleteRequest extends TestOMKeyRequest { + + + private List deleteKeyList; + private OMRequest omRequest; + + @Test + public void testKeysDeleteRequest() throws Exception { + + createPreRequisites(); + + OMKeysDeleteRequest omKeysDeleteRequest = + new OMKeysDeleteRequest(omRequest); + + OMClientResponse omClientResponse = + omKeysDeleteRequest.validateAndUpdateCache(ozoneManager, 0L, + ozoneManagerDoubleBufferHelper); + + Assert.assertTrue(omClientResponse.getOMResponse().getSuccess()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + + Assert.assertTrue(omClientResponse.getOMResponse().getDeleteKeysResponse() + .getStatus()); + DeleteKeyArgs unDeletedKeys = + omClientResponse.getOMResponse().getDeleteKeysResponse() + .getUnDeletedKeys(); + Assert.assertEquals(0, + unDeletedKeys.getKeysCount()); + + // Check all keys are deleted. + for (String deleteKey : deleteKeyList) { + Assert.assertNull(omMetadataManager.getKeyTable() + .get(omMetadataManager.getOzoneKey(volumeName, bucketName, + deleteKey))); + } + + } + + @Test + public void testKeysDeleteRequestFail() throws Exception { + + createPreRequisites(); + + // Add a key which not exist, which causes batch delete to fail. + + omRequest = omRequest.toBuilder() + .setDeleteKeysRequest(DeleteKeysRequest.newBuilder() + .setDeleteKeys(DeleteKeyArgs.newBuilder() + .setBucketName(bucketName).setVolumeName(volumeName) + .addAllKeys(deleteKeyList).addKeys("dummy"))).build(); + + OMKeysDeleteRequest omKeysDeleteRequest = + new OMKeysDeleteRequest(omRequest); + + OMClientResponse omClientResponse = + omKeysDeleteRequest.validateAndUpdateCache(ozoneManager, 0L, + ozoneManagerDoubleBufferHelper); + + Assert.assertFalse(omClientResponse.getOMResponse().getSuccess()); + Assert.assertEquals(PARTIAL_DELETE, + omClientResponse.getOMResponse().getStatus()); + + Assert.assertFalse(omClientResponse.getOMResponse().getDeleteKeysResponse() + .getStatus()); + + // Check keys are deleted and in response check unDeletedKey. 
+ for (String deleteKey : deleteKeyList) { + Assert.assertNull(omMetadataManager.getKeyTable() + .get(omMetadataManager.getOzoneKey(volumeName, bucketName, + deleteKey))); + } + + DeleteKeyArgs unDeletedKeys = omClientResponse.getOMResponse() + .getDeleteKeysResponse().getUnDeletedKeys(); + Assert.assertEquals(1, + unDeletedKeys.getKeysCount()); + Assert.assertEquals("dummy", unDeletedKeys.getKeys(0)); + + } + + private void createPreRequisites() throws Exception { + + deleteKeyList = new ArrayList<>(); + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + int count = 10; + + DeleteKeyArgs.Builder deleteKeyArgs = DeleteKeyArgs.newBuilder() + .setBucketName(bucketName).setVolumeName(volumeName); + + // Create 10 keys + String parentDir = "/user"; + String key = ""; + + + for (int i = 0; i < count; i++) { + key = parentDir.concat("/key" + i); + TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, + parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, omMetadataManager); + deleteKeyArgs.addKeys(key); + deleteKeyList.add(key); + } + + omRequest = + OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) + .setCmdType(DeleteKeys) + .setDeleteKeysRequest(DeleteKeysRequest.newBuilder() + .setDeleteKeys(deleteKeyArgs).build()).build(); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java new file mode 100644 index 000000000000..c5dd96b05931 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; + +/** + * Class to test OMKeysDeleteResponse. + */ +public class TestOMKeysDeleteResponse extends TestOMKeyResponse { + + + private List omKeyInfoList; + private List ozoneKeys; + + + private void createPreRequisities() throws Exception { + String parent = "/user"; + String key = "key"; + + omKeyInfoList = new ArrayList<>(); + ozoneKeys = new ArrayList<>(); + String ozoneKey = ""; + for (int i = 0; i < 10; i++) { + keyName = parent.concat(key + i); + TestOMRequestUtils.addKeyToTable(false, volumeName, + bucketName, keyName, 0L, RATIS, THREE, omMetadataManager); + ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + omKeyInfoList.add(omMetadataManager.getKeyTable().get(ozoneKey)); + ozoneKeys.add(ozoneKey); + } + } + + @Test + public void testKeysDeleteResponse() throws Exception { + + createPreRequisities(); + + OMResponse omResponse = + OMResponse.newBuilder().setCmdType(DeleteKeys).setStatus(OK) + .setSuccess(true) + .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() + .setStatus(true)).build(); + OMClientResponse omKeysDeleteResponse = + new OMKeysDeleteResponse(omResponse, omKeyInfoList, 10L, true); + + omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + for (String ozKey : ozoneKeys) { + Assert.assertNull(omMetadataManager.getKeyTable().get(ozKey)); + + RepeatedOmKeyInfo repeatedOmKeyInfo = + omMetadataManager.getDeletedTable().get(ozKey); + Assert.assertNotNull(repeatedOmKeyInfo); + + Assert.assertEquals(1, repeatedOmKeyInfo.getOmKeyInfoList().size()); + Assert.assertEquals(10L, + repeatedOmKeyInfo.getOmKeyInfoList().get(0).getUpdateID()); + + } + + } + + @Test + public void testKeysDeleteResponseFail() throws Exception { + createPreRequisities(); + + OMResponse omResponse = + OMResponse.newBuilder().setCmdType(DeleteKeys).setStatus(KEY_NOT_FOUND) + .setSuccess(false) + .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() + .setStatus(false)).build(); + + + OMClientResponse omKeysDeleteResponse = + new OMKeysDeleteResponse(omResponse, omKeyInfoList, 10L, true); + + 
omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + + + for (String ozKey : ozoneKeys) { + Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozKey)); + + RepeatedOmKeyInfo repeatedOmKeyInfo = + omMetadataManager.getDeletedTable().get(ozKey); + Assert.assertNull(repeatedOmKeyInfo); + + } + + } +} From aa8e1ba443000f240c659a11bdf016f56f729785 Mon Sep 17 00:00:00 2001 From: HuangTao Date: Wed, 15 Jul 2020 02:50:20 +0800 Subject: [PATCH 017/165] HDDS-3798. Display version and setupTime of DN in recon web (#1136) --- .gitignore | 2 + .../hadoop/hdds/protocol/DatanodeDetails.java | 89 +++++++++++++++- .../hadoop/ozone/HddsDatanodeService.java | 5 + .../src/main/proto/hdds.proto | 2 + .../src/main/proto/proto.lock | 12 ++- .../hadoop/ozone/recon/api/NodeEndpoint.java | 2 + .../recon/api/types/DatanodeMetadata.java | 28 +++++ .../webapps/recon/ozone-recon-web/api/db.json | 48 ++++++--- .../components/multiSelect/multiSelect.tsx | 5 +- .../src/views/datanodes/datanodes.less | 14 +++ .../src/views/datanodes/datanodes.tsx | 100 +++++++++++++++++- 11 files changed, 285 insertions(+), 22 deletions(-) diff --git a/.gitignore b/.gitignore index 551b1b5361ce..e09c2eb819c0 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,7 @@ .classpath .project .settings +*.factorypath target build dependency-reduced-pom.xml @@ -61,5 +62,6 @@ output.xml report.html hadoop-hdds/docs/public +hadoop-ozone/recon/node_modules .mvn diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 1b6a2141e0f0..96f19a6b87a8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -49,6 +49,8 @@ public class DatanodeDetails extends NodeImpl implements private String hostName; private List ports; private String certSerialId; + private String version; + private long setupTime; /** * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used @@ -59,15 +61,21 @@ public class DatanodeDetails extends NodeImpl implements * @param networkLocation DataNode's network location path * @param ports Ports used by the DataNode * @param certSerialId serial id from SCM issued certificate. 
+ * @param version DataNode's version + * @param setupTime the setup time of DataNode */ + @SuppressWarnings("parameternumber") private DatanodeDetails(UUID uuid, String ipAddress, String hostName, - String networkLocation, List ports, String certSerialId) { + String networkLocation, List ports, String certSerialId, + String version, long setupTime) { super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); this.uuid = uuid; this.ipAddress = ipAddress; this.hostName = hostName; this.ports = ports; this.certSerialId = certSerialId; + this.version = version; + this.setupTime = setupTime; } public DatanodeDetails(DatanodeDetails datanodeDetails) { @@ -79,6 +87,8 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.ports = datanodeDetails.ports; this.setNetworkName(datanodeDetails.getNetworkName()); this.setParent(datanodeDetails.getParent()); + this.version = datanodeDetails.version; + this.setupTime = datanodeDetails.setupTime; } /** @@ -207,6 +217,12 @@ public static DatanodeDetails getFromProtoBuf( if (datanodeDetailsProto.hasNetworkLocation()) { builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation()); } + if (datanodeDetailsProto.hasVersion()) { + builder.setVersion(datanodeDetailsProto.getVersion()); + } + if (datanodeDetailsProto.hasSetupTime()) { + builder.setSetupTime(datanodeDetailsProto.getSetupTime()); + } return builder.build(); } @@ -248,6 +264,13 @@ public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { .setValue(port.getValue()) .build()); } + + if (!Strings.isNullOrEmpty(getVersion())) { + builder.setVersion(getVersion()); + } + + builder.setSetupTime(getSetupTime()); + return builder.build(); } @@ -300,6 +323,8 @@ public static final class Builder { private String networkLocation; private List ports; private String certSerialId; + private String version; + private long setupTime; /** * Default private constructor. To create Builder instance use @@ -388,6 +413,30 @@ public Builder setCertSerialId(String certId) { return this; } + /** + * Sets the DataNode version. + * + * @param ver the version of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setVersion(String ver) { + this.version = ver; + return this; + } + + /** + * Sets the DataNode setup time. + * + * @param time the setup time of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setSetupTime(long time) { + this.setupTime = time; + return this; + } + /** * Builds and returns DatanodeDetails instance. * @@ -399,7 +448,7 @@ public DatanodeDetails build() { networkLocation = NetConstants.DEFAULT_RACK; } DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName, - networkLocation, ports, certSerialId); + networkLocation, ports, certSerialId, version, setupTime); if (networkName != null) { dn.setNetworkName(networkName); } @@ -505,4 +554,40 @@ public String getCertSerialId() { public void setCertSerialId(String certSerialId) { this.certSerialId = certSerialId; } + + /** + * Returns the DataNode version. + * + * @return DataNode version + */ + public String getVersion() { + return version; + } + + /** + * Set DataNode version. + * + * @param version DataNode version + */ + public void setVersion(String version) { + this.version = version; + } + + /** + * Returns the DataNode setup time. + * + * @return DataNode setup time + */ + public long getSetupTime() { + return setupTime; + } + + /** + * Set DataNode setup time. 
+ * + * @param setupTime DataNode setup time + */ + public void setSetupTime(long setupTime) { + this.setupTime = setupTime; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 7e896e715598..08eef6f88eac 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -65,6 +65,8 @@ import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString; import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY; import static org.apache.hadoop.util.ExitUtil.terminate; + +import org.apache.hadoop.util.Time; import org.bouncycastle.pkcs.PKCS10CertificationRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -191,6 +193,9 @@ public void start() { datanodeDetails = initializeDatanodeDetails(); datanodeDetails.setHostName(hostname); datanodeDetails.setIpAddress(ip); + datanodeDetails.setVersion( + HddsVersionInfo.HDDS_VERSION_INFO.getVersion()); + datanodeDetails.setSetupTime(Time.now()); TracingUtil.initTracing( "HddsDatanodeService." + datanodeDetails.getUuidString() .substring(0, 8), conf); diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 23cc9cbf6ced..243e8ecaced7 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -43,6 +43,8 @@ message DatanodeDetailsProto { // network name, can be Ip address or host name, depends optional string networkName = 6; optional string networkLocation = 7; // Network topology location + optional string version = 8; // Datanode version + optional int64 setupTime = 9; // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode. 
} diff --git a/hadoop-hdds/interface-client/src/main/proto/proto.lock b/hadoop-hdds/interface-client/src/main/proto/proto.lock index afdaf9618260..b27896c655e3 100644 --- a/hadoop-hdds/interface-client/src/main/proto/proto.lock +++ b/hadoop-hdds/interface-client/src/main/proto/proto.lock @@ -1530,6 +1530,16 @@ "name": "networkLocation", "type": "string" }, + { + "id": 8, + "name": "version", + "type": "string" + }, + { + "id": 9, + "name": "setupTime", + "type": "int64" + }, { "id": 100, "name": "uuid128", @@ -1925,4 +1935,4 @@ } } ] -} \ No newline at end of file +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 2c017491f59d..42832debfee1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -121,6 +121,8 @@ public Response getDatanodes() { .withPipelines(pipelines) .withLeaderCount(leaderCount.get()) .withUUid(datanode.getUuidString()) + .withVersion(datanode.getVersion()) + .withSetupTime(datanode.getSetupTime()) .build()); }); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 02d9ae811829..542654e96e2e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -55,6 +55,12 @@ public final class DatanodeMetadata { @XmlElement(name = "leaderCount") private int leaderCount; + @XmlElement(name = "version") + private String version; + + @XmlElement(name = "setupTime") + private long setupTime; + private DatanodeMetadata(Builder builder) { this.hostname = builder.hostname; this.uuid = builder.uuid; @@ -64,6 +70,8 @@ private DatanodeMetadata(Builder builder) { this.pipelines = builder.pipelines; this.containers = builder.containers; this.leaderCount = builder.leaderCount; + this.version = builder.version; + this.setupTime = builder.setupTime; } public String getHostname() { @@ -98,6 +106,14 @@ public String getUuid() { return uuid; } + public String getVersion() { + return version; + } + + public long getSetupTime() { + return setupTime; + } + /** * Returns new builder class that builds a DatanodeMetadata. * @@ -120,6 +136,8 @@ public static final class Builder { private List pipelines; private int containers; private int leaderCount; + private String version; + private long setupTime; public Builder() { this.containers = 0; @@ -167,6 +185,16 @@ public Builder withUUid(String uuid) { return this; } + public Builder withVersion(String version) { + this.version = version; + return this; + } + + public Builder withSetupTime(long setupTime) { + this.setupTime = setupTime; + return this; + } + /** * Constructs DatanodeMetadata. 
* diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 82fae3735b12..d8d6eac55a9f 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -41,7 +41,9 @@ } ], "containers": 80, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574728775759 }, { "hostname": "localhost2.storage.enterprise.com", @@ -68,7 +70,9 @@ } ], "containers": 8192, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724805059 }, { "hostname": "localhost3.storage.enterprise.com", @@ -101,7 +105,9 @@ } ], "containers": 43, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1343544679543 }, { "hostname": "localhost4.storage.enterprise.com", @@ -115,7 +121,9 @@ }, "pipelines": [], "containers": 0, - "leaderCount": 0 + "leaderCount": 0, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1074724802059 }, { "hostname": "localhost5.storage.enterprise.com", @@ -142,7 +150,9 @@ } ], "containers": 643, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724816029 }, { "hostname": "localhost6.storage.enterprise.com", @@ -169,7 +179,9 @@ } ], "containers": 5, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724802059 }, { "hostname": "localhost7.storage.enterprise.com", @@ -202,7 +214,9 @@ } ], "containers": 64, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724676009 }, { "hostname": "localhost8.storage.enterprise.com", @@ -229,7 +243,9 @@ } ], "containers": 21, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724276050 }, { "hostname": "localhost9.storage.enterprise.com", @@ -256,7 +272,9 @@ } ], "containers": 897, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724573011 }, { "hostname": "localhost10.storage.enterprise.com", @@ -289,7 +307,9 @@ } ], "containers": 6754, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574723756059 }, { "hostname": "localhost11.storage.enterprise.com", @@ -316,7 +336,9 @@ } ], "containers": 78, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1474724705783 }, { "hostname": "localhost12.storage.enterprise.com", @@ -343,7 +365,9 @@ } ], "containers": 543, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724706232 } ] }, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/multiSelect/multiSelect.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/multiSelect/multiSelect.tsx index 19005dddd11e..417c2efdcb7a 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/multiSelect/multiSelect.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/multiSelect/multiSelect.tsx @@ -36,6 +36,7 @@ interface IMultiSelectProps extends ReactSelectProps { options: IOption[]; allowSelectAll: boolean; allOption?: IOption; + maxShowValues?: number; } const defaultProps = { @@ -48,7 +49,7 @@ const defaultProps = { export class MultiSelect extends PureComponent { static defaultProps = defaultProps; render() { - 
const {allowSelectAll, allOption, options, onChange} = this.props; + const {allowSelectAll, allOption, options, maxShowValues = 5, onChange} = this.props; if (allowSelectAll) { const Option = (props: OptionProps) => { return ( @@ -70,7 +71,7 @@ export class MultiSelect extends PureComponent { let toBeRendered = children; if (currentValues.some(val => val.value === allOption!.value) && children) { toBeRendered = allOption!.label; - } else if (currentValues.length >= 5) { + } else if (currentValues.length > maxShowValues) { toBeRendered = `${currentValues.length} selected`; } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less index 4a3cdf5accc9..10ec907a7334 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less @@ -22,4 +22,18 @@ margin-bottom: 5px; } } + + .filter-block { + font-size: 14px; + font-weight: normal; + display: inline-block; + margin-left: 20px; + } + + .multi-select-container { + padding-left: 5px; + margin-right: 5px; + display: inline-block; + min-width: 200px; + } } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index bfba82a5bebb..877ebf9f2c75 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -27,6 +27,8 @@ import {DatanodeStatus, IStorageReport} from 'types/datanode.types'; import './datanodes.less'; import {AutoReloadHelper} from 'utils/autoReloadHelper'; import AutoReloadPanel from 'components/autoReloadPanel/autoReloadPanel'; +import {MultiSelect, IOption} from 'components/multiSelect/multiSelect'; +import {ActionMeta, ValueType} from 'react-select'; import {showDataFetchError} from 'utils/common'; interface IDatanodeResponse { @@ -38,6 +40,8 @@ interface IDatanodeResponse { containers: number; leaderCount: number; uuid: string; + version: string; + setupTime: number; } interface IDatanodesResponse { @@ -56,6 +60,8 @@ interface IDatanode { containers: number; leaderCount: number; uuid: string; + version: string; + setupTime: number; } interface IPipeline { @@ -70,6 +76,8 @@ interface IDatanodesState { dataSource: IDatanode[]; totalCount: number; lastUpdated: number; + selectedColumns: IOption[]; + columnOptions: IOption[]; } const renderDatanodeStatus = (status: DatanodeStatus) => { @@ -89,6 +97,7 @@ const COLUMNS = [ title: 'Status', dataIndex: 'state', key: 'state', + isVisible: true, render: (text: DatanodeStatus) => renderDatanodeStatus(text), sorter: (a: IDatanode, b: IDatanode) => a.state.localeCompare(b.state) }, @@ -96,6 +105,7 @@ const COLUMNS = [ title: 'Uuid', dataIndex: 'uuid', key: 'uuid', + isVisible: true, sorter: (a: IDatanode, b: IDatanode) => a.uuid.localeCompare(b.uuid), defaultSortOrder: 'ascend' as const }, @@ -103,6 +113,7 @@ const COLUMNS = [ title: 'Hostname', dataIndex: 'hostname', key: 'hostname', + isVisible: true, sorter: (a: IDatanode, b: IDatanode) => a.hostname.localeCompare(b.hostname), defaultSortOrder: 'ascend' as const }, @@ -110,6 +121,7 @@ const COLUMNS = [ 
title: 'Storage Capacity', dataIndex: 'storageUsed', key: 'storageUsed', + isVisible: true, sorter: (a: IDatanode, b: IDatanode) => a.storageRemaining - b.storageRemaining, render: (text: string, record: IDatanode) => ( a.lastHeartbeat - b.lastHeartbeat, render: (heartbeat: number) => { return heartbeat > 0 ? moment(heartbeat).format('lll') : 'NA'; @@ -129,6 +142,7 @@ const COLUMNS = [ title: 'Pipeline ID(s)', dataIndex: 'pipelines', key: 'pipelines', + isVisible: true, render: (pipelines: IPipeline[], record: IDatanode) => { return (
@@ -158,16 +172,46 @@ const COLUMNS = [ , dataIndex: 'leaderCount', key: 'leaderCount', + isVisible: true, sorter: (a: IDatanode, b: IDatanode) => a.leaderCount - b.leaderCount }, { title: 'Containers', dataIndex: 'containers', key: 'containers', + isVisible: true, sorter: (a: IDatanode, b: IDatanode) => a.containers - b.containers + }, + { + title: 'Version', + dataIndex: 'version', + key: 'version', + isVisible: false, + sorter: (a: IDatanode, b: IDatanode) => a.version.localeCompare(b.version), + defaultSortOrder: 'ascend' as const + }, + { + title: 'SetupTime', + dataIndex: 'setupTime', + key: 'setupTime', + isVisible: false, + sorter: (a: IDatanode, b: IDatanode) => a.setupTime - b.setupTime, + render: (uptime: number) => { + return uptime > 0 ? moment(uptime).format('lll') : 'NA'; + } } ]; +const allColumnsOption: IOption = { + label: 'Select all', + value: '*' +}; + +const defaultColumns: IOption[] = COLUMNS.map(column => ({ + label: column.key, + value: column.key +})); + export class Datanodes extends React.Component, IDatanodesState> { autoReload: AutoReloadHelper; @@ -177,11 +221,20 @@ export class Datanodes extends React.Component, IDatanode loading: false, dataSource: [], totalCount: 0, - lastUpdated: 0 + lastUpdated: 0, + selectedColumns: [], + columnOptions: defaultColumns }; this.autoReload = new AutoReloadHelper(this._loadData); } + _handleColumnChange = (selected: ValueType, _action: ActionMeta) => { + const selectedColumns = (selected as IOption[]); + this.setState({ + selectedColumns + }); + }; + _loadData = () => { this.setState({ loading: true @@ -201,14 +254,23 @@ export class Datanodes extends React.Component, IDatanode storageRemaining: datanode.storageReport.remaining, pipelines: datanode.pipelines, containers: datanode.containers, - leaderCount: datanode.leaderCount + leaderCount: datanode.leaderCount, + version: datanode.version, + setupTime: datanode.setupTime }; }); + const selectedColumns: IOption[] = COLUMNS.filter(column => column.isVisible).map(column => ({ + label: column.key, + value: column.key + })); + this.setState({ loading: false, dataSource, totalCount, lastUpdated: Number(moment()) + }, () => { + this._handleColumnChange(selectedColumns, {action: 'select-option'}); }); }).catch(error => { this.setState({ @@ -233,7 +295,7 @@ export class Datanodes extends React.Component, IDatanode }; render() { - const {dataSource, loading, totalCount, lastUpdated} = this.state; + const {dataSource, loading, totalCount, lastUpdated, selectedColumns, columnOptions} = this.state; const paginationConfig: PaginationConfig = { showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} datanodes`, showSizeChanger: true, @@ -243,10 +305,38 @@ export class Datanodes extends React.Component, IDatanode
Datanodes ({totalCount}) - +
+ Columns +
+
+
- +
+ selectedColumns.some(e => e.value === column.key) + )} + loading={loading} + pagination={paginationConfig} + rowKey='hostname' + /> ); From 3f23c1bef5d064b8277d45093d717dd11d12d718 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 14 Jul 2020 20:56:01 +0200 Subject: [PATCH 018/165] HDDS-3060. /retest github comment does not work (#1137) --- .github/comment-commands/retest.sh | 66 ++++++++++++++++++++++++------ 1 file changed, 54 insertions(+), 12 deletions(-) diff --git a/.github/comment-commands/retest.sh b/.github/comment-commands/retest.sh index b9e063260cac..2214264e9b28 100755 --- a/.github/comment-commands/retest.sh +++ b/.github/comment-commands/retest.sh @@ -14,20 +14,62 @@ # See the License for the specific language governing permissions and # limitations under the License. -#doc: add new empty commit to trigger new CI build -set +x #GITHUB_TOKEN +#doc: provide help on how to trigger new CI build -PR_URL=$(jq -r '.issue.pull_request.url' "$GITHUB_EVENT_PATH") -read -r REPO_URL BRANCH <<<"$(curl "$PR_URL" | jq -r '.head.repo.clone_url + " " + .head.ref' | sed "s/github.com/$GITHUB_ACTOR:$GITHUB_TOKEN@github.com/g")" +# posting a new commit from this script does not trigger CI checks +# https://help.github.com/en/actions/reference/events-that-trigger-workflows#triggering-new-workflows-using-a-personal-access-token -git fetch "$REPO_URL" "$BRANCH" -git checkout FETCH_HEAD +set -eu + +code='```' + +pr_url="$(jq -r '.issue.pull_request.url' "${GITHUB_EVENT_PATH}")" +commenter="$(jq -r '.comment.user.login' "${GITHUB_EVENT_PATH}")" +assoc="$(jq -r '.comment.author_association' "${GITHUB_EVENT_PATH}")" -export GIT_COMMITTER_EMAIL="noreply@github.com" -export GIT_COMMITTER_NAME="GitHub actions" +curl -LSs "${pr_url}" -o pull.tmp +source_repo="$(jq -r '.head.repo.ssh_url' pull.tmp)" +branch="$(jq -r '.head.ref' pull.tmp)" +pr_owner="$(jq -r '.head.user.login' pull.tmp)" +maintainer_can_modify="$(jq -r '.maintainer_can_modify' pull.tmp)" -export GIT_AUTHOR_EMAIL="noreply@github.com" -export GIT_AUTHOR_NAME="GitHub actions" +# PR owner +# => +# has local branch, can simply push +if [[ "${commenter}" == "${pr_owner}" ]]; then + cat <<-EOF +To re-run CI checks, please follow these steps with the source branch checked out: +${code} +git commit --allow-empty -m 'trigger new CI check' +git push +${code} +EOF + +# member AND modification allowed by PR author +# OR +# repo owner +# => +# include steps to fetch branch +elif [[ "${maintainer_can_modify}" == "true" ]] && [[ "${assoc}" == "MEMBER" ]] || [[ "${assoc}" == "OWNER" ]]; then + cat <<-EOF +To re-run CI checks, please follow these steps: +${code} +git fetch "${source_repo}" "${branch}" +git checkout FETCH_HEAD +git commit --allow-empty -m 'trigger new CI check' +git push "${source_repo}" HEAD:"${branch}" +${code} +EOF -git commit --allow-empty -m "empty commit to retest build" > /dev/null -git push $REPO_URL HEAD:$BRANCH +# other folks +# => +# ping author +else + cat <<-EOF +@${pr_owner} please trigger new CI check by following these steps: +${code} +git commit --allow-empty -m 'trigger new CI check' +git push +${code} +EOF +fi From 39557889372794e0d5cf0d14d269972d31fe8aa8 Mon Sep 17 00:00:00 2001 From: Istvan Fajth Date: Tue, 14 Jul 2020 23:30:45 +0200 Subject: [PATCH 019/165] HDDS-3925. SCM Pipeline DB should directly use UUID bytes for key rather than rely on proto serialization for key. 
(#1197) --- .../hdds/utils/db/RDBStoreIterator.java | 16 ++ .../apache/hadoop/hdds/utils/db/RDBTable.java | 2 +- .../hadoop/hdds/utils/db/TableIterator.java | 8 + .../hadoop/hdds/utils/db/TypedTable.java | 5 + .../hdds/utils/db/TestRDBStoreIterator.java | 224 ++++++++++++++++++ .../hdds/scm/metadata/PipelineIDCodec.java | 38 ++- .../hdds/scm/pipeline/SCMPipelineManager.java | 54 ++++- .../scm/metadata/TestPipelineIDCodec.java | 144 +++++++++++ .../scm/pipeline/TestSCMPipelineManager.java | 115 +++++++++ 9 files changed, 602 insertions(+), 4 deletions(-) create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java index 784738b0cec2..5902486ec6ee 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java @@ -32,12 +32,18 @@ public class RDBStoreIterator implements TableIterator { private RocksIterator rocksDBIterator; + private RDBTable rocksDBTable; public RDBStoreIterator(RocksIterator iterator) { this.rocksDBIterator = iterator; rocksDBIterator.seekToFirst(); } + public RDBStoreIterator(RocksIterator iterator, RDBTable table) { + this(iterator); + this.rocksDBTable = table; + } + @Override public void forEachRemaining( Consumer action) { @@ -100,6 +106,16 @@ public ByteArrayKeyValue value() { return null; } + @Override + public void removeFromDB() throws IOException { + if (rocksDBTable == null) { + throw new UnsupportedOperationException("remove"); + } + if (rocksDBIterator.isValid()) { + rocksDBTable.delete(rocksDBIterator.key()); + } + } + @Override public void close() throws IOException { rocksDBIterator.close(); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java index 2e390e2362d1..4dbb59ad4412 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java @@ -206,7 +206,7 @@ public void deleteWithBatch(BatchOperation batch, byte[] key) public TableIterator iterator() { ReadOptions readOptions = new ReadOptions(); readOptions.setFillCache(false); - return new RDBStoreIterator(db.newIterator(handle, readOptions)); + return new RDBStoreIterator(db.newIterator(handle, readOptions), this); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java index a684157a43b1..c9bc045b1df1 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java @@ -60,4 +60,12 @@ public interface TableIterator extends Iterator, Closeable { */ T value(); + /** + * Remove the actual value of the iterator from the database table on + * which the iterator is working on. + * + * @throws IOException when there is an error occured during deletion. 
+ */ + void removeFromDB() throws IOException; + } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java index 86d23afb9318..1451946f30dc 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java @@ -420,5 +420,10 @@ public TypedKeyValue next() { return new TypedKeyValue(rawIterator.next(), keyType, valueType); } + + @Override + public void removeFromDB() throws IOException { + rawIterator.removeFromDB(); + } } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java new file mode 100644 index 000000000000..6e85977843ac --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.hdds.utils.db; + +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.InOrder; +import org.rocksdb.RocksIterator; + +import java.util.NoSuchElementException; +import java.util.function.Consumer; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * This test prescribe expected behaviour from the RDBStoreIterator which wraps + * RocksDB's own iterator. Ozone internally in TypedTableIterator uses, the + * RDBStoreIterator to provide iteration over table elements in a typed manner. + * The tests are to ensure we access RocksDB via the iterator properly. 
+ */ +public class TestRDBStoreIterator { + + private RocksIterator rocksDBIteratorMock; + private RDBTable rocksTableMock; + + @Before + public void setup() { + rocksDBIteratorMock = mock(RocksIterator.class); + rocksTableMock = mock(RDBTable.class); + } + + @Test + public void testForeachRemainingCallsConsumerWithAllElements() { + when(rocksDBIteratorMock.isValid()) + .thenReturn(true, true, true, true, true, true, false); + when(rocksDBIteratorMock.key()) + .thenReturn(new byte[]{0x00}, new byte[]{0x01}, new byte[]{0x02}) + .thenThrow(new NoSuchElementException()); + when(rocksDBIteratorMock.value()) + .thenReturn(new byte[]{0x7f}, new byte[]{0x7e}, new byte[]{0x7d}) + .thenThrow(new NoSuchElementException()); + + + Consumer consumerStub = mock(Consumer.class); + + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + iter.forEachRemaining(consumerStub); + + ArgumentCaptor capture = + ArgumentCaptor.forClass(ByteArrayKeyValue.class); + verify(consumerStub, times(3)).accept(capture.capture()); + assertArrayEquals( + new byte[]{0x00}, capture.getAllValues().get(0).getKey()); + assertArrayEquals( + new byte[]{0x7f}, capture.getAllValues().get(0).getValue()); + assertArrayEquals( + new byte[]{0x01}, capture.getAllValues().get(1).getKey()); + assertArrayEquals( + new byte[]{0x7e}, capture.getAllValues().get(1).getValue()); + assertArrayEquals( + new byte[]{0x02}, capture.getAllValues().get(2).getKey()); + assertArrayEquals( + new byte[]{0x7d}, capture.getAllValues().get(2).getValue()); + } + + @Test + public void testHasNextDependsOnIsvalid(){ + when(rocksDBIteratorMock.isValid()).thenReturn(true, false); + + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + + assertTrue(iter.hasNext()); + assertFalse(iter.hasNext()); + } + + @Test + public void testNextCallsIsValidThenGetsTheValueAndStepsToNext() { + when(rocksDBIteratorMock.isValid()).thenReturn(true); + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + + InOrder verifier = inOrder(rocksDBIteratorMock); + + iter.next(); + + verifier.verify(rocksDBIteratorMock).isValid(); + verifier.verify(rocksDBIteratorMock).key(); + verifier.verify(rocksDBIteratorMock).value(); + verifier.verify(rocksDBIteratorMock).next(); + } + + @Test + public void testConstructorSeeksToFirstElement() { + new RDBStoreIterator(rocksDBIteratorMock); + + verify(rocksDBIteratorMock, times(1)).seekToFirst(); + } + + @Test + public void testSeekToFirstSeeks() { + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + + iter.seekToFirst(); + + verify(rocksDBIteratorMock, times(2)).seekToFirst(); + } + + @Test + public void testSeekToLastSeeks() { + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + + iter.seekToLast(); + + verify(rocksDBIteratorMock, times(1)).seekToLast(); + } + + @Test + public void testSeekReturnsTheActualKey() { + when(rocksDBIteratorMock.isValid()).thenReturn(true); + when(rocksDBIteratorMock.key()).thenReturn(new byte[]{0x00}); + when(rocksDBIteratorMock.value()).thenReturn(new byte[]{0x7f}); + + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + ByteArrayKeyValue val = iter.seek(new byte[]{0x55}); + + InOrder verifier = inOrder(rocksDBIteratorMock); + + verify(rocksDBIteratorMock, times(1)).seekToFirst(); //at construct time + verify(rocksDBIteratorMock, never()).seekToLast(); + verifier.verify(rocksDBIteratorMock, times(1)).seek(any(byte[].class)); + verifier.verify(rocksDBIteratorMock, times(1)).isValid(); + verifier.verify(rocksDBIteratorMock, 
times(1)).key(); + verifier.verify(rocksDBIteratorMock, times(1)).value(); + assertArrayEquals(new byte[]{0x00}, val.getKey()); + assertArrayEquals(new byte[]{0x7f}, val.getValue()); + } + + @Test + public void testGettingTheKeyIfIteratorIsValid() { + when(rocksDBIteratorMock.isValid()).thenReturn(true); + when(rocksDBIteratorMock.key()).thenReturn(new byte[]{0x00}); + + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + byte[] key = iter.key(); + + InOrder verifier = inOrder(rocksDBIteratorMock); + + verifier.verify(rocksDBIteratorMock, times(1)).isValid(); + verifier.verify(rocksDBIteratorMock, times(1)).key(); + assertArrayEquals(new byte[]{0x00}, key); + } + + @Test + public void testGettingTheValueIfIteratorIsValid() { + when(rocksDBIteratorMock.isValid()).thenReturn(true); + when(rocksDBIteratorMock.key()).thenReturn(new byte[]{0x00}); + when(rocksDBIteratorMock.value()).thenReturn(new byte[]{0x7f}); + + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + ByteArrayKeyValue val = iter.value(); + + InOrder verifier = inOrder(rocksDBIteratorMock); + + verifier.verify(rocksDBIteratorMock, times(1)).isValid(); + verifier.verify(rocksDBIteratorMock, times(1)).key(); + assertArrayEquals(new byte[]{0x00}, val.getKey()); + assertArrayEquals(new byte[]{0x7f}, val.getValue()); + } + + @Test + public void testRemovingFromDBActuallyDeletesFromTable() throws Exception { + byte[] testKey = new byte[]{0x00}; + when(rocksDBIteratorMock.isValid()).thenReturn(true); + when(rocksDBIteratorMock.key()).thenReturn(testKey); + + RDBStoreIterator iter = + new RDBStoreIterator(rocksDBIteratorMock, rocksTableMock); + iter.removeFromDB(); + + InOrder verifier = inOrder(rocksDBIteratorMock, rocksTableMock); + + verifier.verify(rocksDBIteratorMock, times(1)).isValid(); + verifier.verify(rocksTableMock, times(1)).delete(testKey); + } + + @Test(expected = UnsupportedOperationException.class) + public void testRemoveFromDBWithoutDBTableSet() throws Exception { + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + iter.removeFromDB(); + } + + @Test + public void testCloseCloses() throws Exception { + RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); + iter.close(); + + verify(rocksDBIteratorMock, times(1)).close(); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/PipelineIDCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/PipelineIDCodec.java index d661e3467b2c..e73539f70fc6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/PipelineIDCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/PipelineIDCodec.java @@ -19,6 +19,9 @@ package org.apache.hadoop.hdds.scm.metadata; import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.UUID; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.utils.db.Codec; @@ -30,12 +33,43 @@ public class PipelineIDCodec implements Codec { @Override public byte[] toPersistedFormat(PipelineID object) throws IOException { - return object.getProtobuf().toByteArray(); + byte[] bytes = new byte[16]; + System.arraycopy( + asByteArray(object.getId().getMostSignificantBits()), 0, bytes, 0, 8); + System.arraycopy( + asByteArray(object.getId().getLeastSignificantBits()), 0, bytes, 8, 8); + return bytes; + } + + private byte[] asByteArray(long bits) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + 
buffer.putLong(bits); + return buffer.array(); } @Override public PipelineID fromPersistedFormat(byte[] rawData) throws IOException { - return null; + long mostSiginificantBits = toLong(rawData, 0); + long leastSignificantBits = toLong(rawData, 8); + + UUID id = new UUID(mostSiginificantBits, leastSignificantBits); + return PipelineID.valueOf(id); + } + + private long toLong(byte[] arr, int startIdx) throws IOException { + if (arr.length < startIdx + 8) { + throw new IOException("Key conversion error.", + new ArrayIndexOutOfBoundsException( + "Key does not have the least expected amount of bytes," + + "and does not contain a UUID. Key: " + + Arrays.toString(arr) + ) + ); + } + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.put(arr, startIdx, 8); + buffer.flip(); + return buffer.getLong(); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index e8223ca50455..fda937134c1c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -161,12 +161,64 @@ protected void initializePipelineState() throws IOException { TableIterator> iterator = pipelineStore.iterator(); while (iterator.hasNext()) { - Pipeline pipeline = iterator.next().getValue(); + Pipeline pipeline = nextPipelineFromIterator(iterator); stateManager.addPipeline(pipeline); nodeManager.addPipeline(pipeline); } } + private Pipeline nextPipelineFromIterator( + TableIterator> it + ) throws IOException { + KeyValue actual = it.next(); + Pipeline pipeline = actual.getValue(); + PipelineID pipelineID = actual.getKey(); + checkKeyAndReplaceIfObsolete(it, pipeline, pipelineID); + return pipeline; + } + + /** + * This method is part of the change that happens in HDDS-3925, and we can + * and should remove this on later on. + * The purpose of the change is to get rid of protobuf serialization in the + * SCM database Pipeline table keys. The keys are not used anywhere, and the + * PipelineID that is used as a key is in the value as well, so we can detect + * a change in the key translation to byte[] and if we have the old format + * we refresh the table contents during SCM startup. + * + * If this fails in the remove, then there is an IOException coming from + * RocksDB itself, in this case in memory structures will still be fine and + * SCM should be operational, however we will attempt to replace the old key + * at next startup. In this case removing of the pipeline will leave the + * pipeline in RocksDB, and during next startup we will attempt to delete it + * again. This does not affect any runtime operations. + * If a Pipeline should have been deleted but remained in RocksDB, then at + * next startup it will be replaced and added with the new key, then SCM will + * detect that it is an invalid Pipeline and successfully delete it with the + * new key. + * For further info check the JIRA. 
+ * + * @param it the iterator used to iterate the Pipeline table + * @param pipeline the pipeline read already from the iterator + * @param pipelineID the pipeline ID read from the raw data via the iterator + */ + private void checkKeyAndReplaceIfObsolete( + TableIterator> it, + Pipeline pipeline, + PipelineID pipelineID + ) { + if (!pipelineID.equals(pipeline.getId())) { + try { + it.removeFromDB(); + pipelineStore.put(pipeline.getId(), pipeline); + } catch (IOException e) { + LOG.info("Pipeline table in RocksDB has an old key format, and " + + "removing the pipeline with the old key was unsuccessful." + + "Pipeline: {}", pipeline); + } + } + } + private void recordMetricsForPipeline(Pipeline pipeline) { metrics.incNumPipelineAllocated(); if (pipeline.isOpen()) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java new file mode 100644 index 000000000000..5543be5832b1 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.metadata; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.junit.Test; + +import java.util.UUID; + +/** + * Testing serialization of PipelineID objects to/from RocksDB. 
+ */ +public class TestPipelineIDCodec { + + @Test + public void testPersistingZeroAsUUID() throws Exception { + long leastSigBits = 0x0000_0000_0000_0000L; + long mostSigBits = 0x0000_0000_0000_0000L; + byte[] expected = new byte[] { + b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), + b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00) + }; + + checkPersisting(leastSigBits, mostSigBits, expected); + } + + @Test + public void testPersistingFFAsUUID() throws Exception { + long leastSigBits = 0xFFFF_FFFF_FFFF_FFFFL; + long mostSigBits = 0xFFFF_FFFF_FFFF_FFFFL; + byte[] expected = new byte[] { + b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), + b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF) + }; + + checkPersisting(leastSigBits, mostSigBits, expected); + } + + @Test + public void testPersistingARandomUUID() throws Exception { + for (int i=0; i<100; i++) { + UUID uuid = UUID.randomUUID(); + + long mask = 0x0000_0000_0000_00FFL; + + byte[] expected = new byte[] { + b(((int) (uuid.getMostSignificantBits() >> 56 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 48 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 40 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 32 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 24 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 16 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 8 & mask))), + b(((int) (uuid.getMostSignificantBits() & mask))), + + b(((int) (uuid.getLeastSignificantBits() >> 56 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 48 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 40 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 32 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 24 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 16 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 8 & mask))), + b(((int) (uuid.getLeastSignificantBits() & mask))), + }; + + checkPersisting( + uuid.getMostSignificantBits(), + uuid.getLeastSignificantBits(), + expected + ); + } + } + + @Test + public void testConvertAndReadBackZeroAsUUID() throws Exception { + long mostSigBits = 0x0000_0000_0000_0000L; + long leastSigBits = 0x0000_0000_0000_0000L; + UUID uuid = new UUID(mostSigBits, leastSigBits); + PipelineID pid = PipelineID.valueOf(uuid); + + byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid); + PipelineID decoded = new PipelineIDCodec().fromPersistedFormat(encoded); + + assertEquals(pid, decoded); + } + + @Test + public void testConvertAndReadBackFFAsUUID() throws Exception { + long mostSigBits = 0xFFFF_FFFF_FFFF_FFFFL; + long leastSigBits = 0xFFFF_FFFF_FFFF_FFFFL; + UUID uuid = new UUID(mostSigBits, leastSigBits); + PipelineID pid = PipelineID.valueOf(uuid); + + byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid); + PipelineID decoded = new PipelineIDCodec().fromPersistedFormat(encoded); + + assertEquals(pid, decoded); + } + + @Test + public void testConvertAndReadBackRandomUUID() throws Exception { + UUID uuid = UUID.randomUUID(); + PipelineID pid = PipelineID.valueOf(uuid); + + byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid); + PipelineID decoded = new PipelineIDCodec().fromPersistedFormat(encoded); + + assertEquals(pid, decoded); + } + + private void checkPersisting( + long mostSigBits, long leastSigBits, byte[] expected + ) throws Exception { + UUID uuid = new UUID(mostSigBits, leastSigBits); + PipelineID pid = PipelineID.valueOf(uuid); + + 
byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid); + + assertArrayEquals(expected, encoded); + } + + private byte b(int i) { + return (byte) (i & 0x0000_00FF); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java index 7c2f17e85840..fc8f61a7dbf1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java @@ -21,8 +21,10 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashSet; import java.util.List; +import java.util.NoSuchElementException; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -37,11 +39,15 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.metadata.PipelineIDCodec; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.Table.KeyValue; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; @@ -56,7 +62,14 @@ import static org.junit.Assert.fail; import org.junit.Before; import org.junit.Test; +import org.mockito.InOrder; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; /** * Test cases to verify PipelineManager. @@ -539,6 +552,108 @@ public void testSafeModeUpdatedOnSafemodeExit() pipelineManager.close(); } + /** + * This test was created for HDDS-3925 to check whether the db handling is + * proper at the SCMPipelineManager level. We should remove this test + * when we remove the key swap from the SCMPipelineManager code. + * + * The test emulates internally the values that the iterator will provide + * back to the check-fix code path. The iterator internally deserialize the + * key stored in RocksDB using the PipelineIDCodec. The older version of the + * codec serialized the PipelineIDs by taking the byte[] representation of + * the protobuf representation of the PipelineID, and deserialization was not + * implemented. + * + * In order to be able to check and fix the change, the deserialization was + * introduced, and deserialisation of the old protobuf byte representation + * with the new deserialization logic of the keys are + * checked against the PipelineID serialized in the value as well via + * protobuf. + * The DB is storing the keys now based on a byte[] serialized from the UUID + * inside the PipelineID. 
+ * For this we emulate the getKey of the KeyValue returned by the + * iterator to return a PipelineID that is deserialized from the byte[] + * representation of the protobuf representation of the PipelineID in the + * test, as that would be the value we get from the iterator when iterating + * through a table with the old key format. + * + * @throws Exception when something goes wrong + */ + @Test + public void testPipelineDBKeyFormatChange() throws Exception { + Pipeline p1 = pipelineStub(); + Pipeline p2 = pipelineStub(); + Pipeline p3 = pipelineStub(); + + TableIterator> iteratorMock = + mock(TableIterator.class); + + KeyValue kv1 = + mockKeyValueToProvideOldKeyFormat(p1); + KeyValue kv2 = + mockKeyValueToProvideNormalFormat(p2); + KeyValue kv3 = + mockKeyValueToProvideOldKeyFormat(p3); + + when(iteratorMock.next()) + .thenReturn(kv1, kv2, kv3) + .thenThrow(new NoSuchElementException()); + when(iteratorMock.hasNext()) + .thenReturn(true, true, true, false); + + Table pipelineStore = mock(Table.class); + doReturn(iteratorMock).when(pipelineStore).iterator(); + when(pipelineStore.isEmpty()).thenReturn(false); + + InOrder inorderVerifier = inOrder(pipelineStore, iteratorMock); + + new SCMPipelineManager(conf, nodeManager, pipelineStore, new EventQueue()); + + inorderVerifier.verify(iteratorMock).removeFromDB(); + inorderVerifier.verify(pipelineStore).put(p1.getId(), p1); + inorderVerifier.verify(iteratorMock).removeFromDB(); + inorderVerifier.verify(pipelineStore).put(p3.getId(), p3); + + verify(pipelineStore, never()).put(p2.getId(), p2); + } + + private Pipeline pipelineStub() { + return Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setType(HddsProtos.ReplicationType.RATIS) + .setFactor(HddsProtos.ReplicationFactor.ONE) + .setState(Pipeline.PipelineState.OPEN) + .setNodes( + Arrays.asList( + nodeManager.getNodes(HddsProtos.NodeState.HEALTHY).get(0) + ) + ) + .setNodesInOrder(Arrays.asList(0)) + .build(); + } + + private KeyValue + mockKeyValueToProvideOldKeyFormat(Pipeline pipeline) + throws IOException { + KeyValue kv = mock(KeyValue.class); + when(kv.getValue()).thenReturn(pipeline); + when(kv.getKey()) + .thenReturn( + new PipelineIDCodec().fromPersistedFormat( + pipeline.getId().getProtobuf().toByteArray() + )); + return kv; + } + + private KeyValue + mockKeyValueToProvideNormalFormat(Pipeline pipeline) + throws IOException { + KeyValue kv = mock(KeyValue.class); + when(kv.getValue()).thenReturn(pipeline); + when(kv.getKey()).thenReturn(pipeline.getId()); + return kv; + } + private void sendPipelineReport(DatanodeDetails dn, Pipeline pipeline, PipelineReportHandler pipelineReportHandler, boolean isLeader, EventQueue eventQueue) { From 88ed42ccf802e2779bbc643c18fdc6416b3ee651 Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Wed, 15 Jul 2020 11:12:43 +0800 Subject: [PATCH 020/165] HDDS-3885. 
Create Datanode home page (#1139) --- hadoop-hdds/container-service/pom.xml | 31 ++++++++ .../org/apache/hadoop/ozone/DNMXBean.java} | 14 +++- .../org/apache/hadoop/ozone/DNMXBeanImpl.java | 32 ++++++++ .../hadoop/ozone/HddsDatanodeService.java | 30 +++++++- .../webapps/hddsDatanode/dn-overview.html | 21 +++++ .../main/resources/webapps/hddsDatanode/dn.js | 35 +++++++++ .../resources/webapps/hddsDatanode/index.html | 76 +++++++++++++++++++ .../resources/webapps/hddsDatanode/main.html | 20 +++++ 8 files changed, 257 insertions(+), 2 deletions(-) rename hadoop-hdds/container-service/src/main/{resources/webapps/hddsDatanode/.gitkeep => java/org/apache/hadoop/ozone/DNMXBean.java} (72%) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java create mode 100644 hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html create mode 100644 hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js create mode 100644 hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html create mode 100644 hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/main.html diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index d10d2a358ce1..392cc44c72a1 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -107,6 +107,37 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/dev-support/findbugsExcludeFile.xml + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-common-html + prepare-package + + unpack + + + + + org.apache.hadoop + hadoop-hdds-server-framework + ${project.build.outputDirectory} + + webapps/static/**/*.* + + + org.apache.hadoop + hadoop-hdds-docs + ${project.build.outputDirectory}/webapps/hddsDatanode + docs/**/*.* + + + true + + + + diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java similarity index 72% rename from hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java index ff1232e5fcaa..d36fcdb6fc70 100644 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java @@ -14,4 +14,16 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - */ \ No newline at end of file + */ + +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.server.ServiceRuntimeInfo; + +/** + * This is the JMX management interface for DN information. + */ +@InterfaceAudience.Private +public interface DNMXBean extends ServiceRuntimeInfo { +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java new file mode 100644 index 000000000000..18ad66ce5a69 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; +import org.apache.hadoop.hdds.utils.VersionInfo; + +/** + * This is the JMX management class for DN information. + */ +public class DNMXBeanImpl extends ServiceRuntimeInfoImpl implements DNMXBean { + public DNMXBeanImpl( + VersionInfo versionInfo) { + super(versionInfo); + } +} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 08eef6f88eac..a4ff67ed86fc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -23,12 +23,14 @@ import java.security.KeyPair; import java.security.cert.CertificateException; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.ConcurrentHashMap; +import com.sun.jmx.mbeanserver.Introspector; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsUtils; @@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.server.http.RatisDropwizardExports; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.HddsVersionInfo; +import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; @@ -72,6 +75,8 @@ import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; +import javax.management.ObjectName; + /** * Datanode service plugin to start the HDDS container services. */ @@ -97,6 +102,10 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin { private volatile AtomicBoolean isStopped = new AtomicBoolean(false); private final Map ratisMetricsMap = new ConcurrentHashMap<>(); + private DNMXBeanImpl serviceRuntimeInfo = + new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) {}; + private ObjectName dnInfoBeanName; + //Constructor for DataNode PluginService public HddsDatanodeService(){} @@ -134,6 +143,7 @@ private static HddsDatanodeService createHddsDatanodeService( public static void main(String[] args) { try { + Introspector.checkCompliance(DNMXBeanImpl.class); HddsDatanodeService hddsDatanodeService = createHddsDatanodeService(args, true); hddsDatanodeService.run(args); @@ -182,6 +192,8 @@ public void start(OzoneConfiguration configuration) { } public void start() { + serviceRuntimeInfo.setStartTime(); + RatisDropwizardExports. 
registerRatisMetricReporters(ratisMetricsMap); @@ -250,7 +262,7 @@ public void start() { .equalsIgnoreCase(System.getenv("OZONE_DATANODE_STANDALONE_TEST"))) { startRatisForTest(); } - + registerMXBean(); } catch (IOException e) { throw new RuntimeException("Can't start the HDDS datanode plugin", e); } catch (AuthenticationException ex) { @@ -350,6 +362,21 @@ private void getSCMSignedCert(OzoneConfiguration config) { } } + private void registerMXBean() { + Map jmxProperties = new HashMap<>(); + jmxProperties.put("component", "ServerRuntime"); + this.dnInfoBeanName = HddsUtils.registerWithJmxProperties( + "HddsDatanodeService", + "HddsDatanodeServiceInfo", jmxProperties, this.serviceRuntimeInfo); + } + + private void unregisterMXBean() { + if (this.dnInfoBeanName != null) { + MBeans.unregister(this.dnInfoBeanName); + this.dnInfoBeanName = null; + } + } + /** * Creates CSR for DN. * @param config @@ -517,6 +544,7 @@ public void stop() { LOG.error("Stopping HttpServer is failed.", e); } } + unregisterMXBean(); } } diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html new file mode 100644 index 000000000000..d4f7a17c0b76 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html @@ -0,0 +1,21 @@ + + +

+ + +
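Besides the new static web content, the HddsDatanodeService.java changes in this commit register a DN MXBean at startup and unregister it on stop. A compact sketch of that lifecycle, assembled only from calls the patch itself makes (the class name here is illustrative):

```java
import java.util.HashMap;
import java.util.Map;

import javax.management.ObjectName;

import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.utils.HddsVersionInfo;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.ozone.DNMXBeanImpl;

// Sketch of the JMX register/unregister lifecycle used by HddsDatanodeService.
final class DnJmxLifecycleSketch {
  private final DNMXBeanImpl serviceRuntimeInfo =
      new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) { };
  private ObjectName dnInfoBeanName;

  void register() {
    Map<String, String> jmxProperties = new HashMap<>();
    jmxProperties.put("component", "ServerRuntime");
    // Registered under the HddsDatanodeService / HddsDatanodeServiceInfo names
    // with an extra "component" property, as in the patch above.
    dnInfoBeanName = HddsUtils.registerWithJmxProperties(
        "HddsDatanodeService", "HddsDatanodeServiceInfo",
        jmxProperties, serviceRuntimeInfo);
  }

  void unregister() {
    if (dnInfoBeanName != null) {
      MBeans.unregister(dnInfoBeanName);
      dnInfoBeanName = null;
    }
  }
}
```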
diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js new file mode 100644 index 000000000000..c43eb42bdc25 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +(function () { + "use strict"; + angular.module('dn', ['ozone', 'nvd3']); + + angular.module('dn').component('dnOverview', { + templateUrl: 'dn-overview.html', + require: { + overview: "^overview" + }, + controller: function ($http) { + var ctrl = this; + $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=StorageContainerMetrics") + .then(function (result) { + ctrl.dnmetrics = result.data.beans[0]; + }); + } + }); +})(); diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html new file mode 100644 index 000000000000..b1f703c0d659 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html @@ -0,0 +1,76 @@ + + + + + + + + + + + HDDS Datanode Service + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + + + + + diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/main.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/main.html new file mode 100644 index 000000000000..c639b0bb2b50 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/main.html @@ -0,0 +1,20 @@ + + + + + From c7e50e9dad941176f5513dfdd10d95717909f9d0 Mon Sep 17 00:00:00 2001 From: Xiang Zhang Date: Wed, 15 Jul 2020 14:37:47 +0800 Subject: [PATCH 021/165] HDDS-2768. security/SecuringS3.md (#1188) --- .../docs/content/security/SecuringS3.md | 6 +-- .../docs/content/security/SecuringS3.zh.md | 50 +++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) create mode 100644 hadoop-hdds/docs/content/security/SecuringS3.zh.md diff --git a/hadoop-hdds/docs/content/security/SecuringS3.md b/hadoop-hdds/docs/content/security/SecuringS3.md index 1cb0c809e611..578aa33a34c8 100644 --- a/hadoop-hdds/docs/content/security/SecuringS3.md +++ b/hadoop-hdds/docs/content/security/SecuringS3.md @@ -38,14 +38,14 @@ The user needs to `kinit` first and once they have authenticated via kerberos ```bash ozone s3 getsecret ``` -This command will talk to ozone, validate the user via kerberos and generate +This command will talk to ozone, validate the user via Kerberos and generate the AWS credentials. The values will be printed out on the screen. You can set these values up in your _.aws_ file for automatic access while working against Ozone S3 buckets. diff --git a/hadoop-hdds/docs/content/security/SecuringS3.zh.md b/hadoop-hdds/docs/content/security/SecuringS3.zh.md new file mode 100644 index 000000000000..b7d52b78c072 --- /dev/null +++ b/hadoop-hdds/docs/content/security/SecuringS3.zh.md @@ -0,0 +1,50 @@ +--- +title: "安全化 S3" +date: "2019-April-03" +summary: Ozone 支持 S3 协议,并使用 AWS Signature Version 4 protocol which allows a seamless S3 + experience. +weight: 4 +icon: cloud +--- + + +用户需要由 AWS 网站生成的 AWS access key ID 和 AWS secret 来访问 AWS S3 的桶,当你使用 Ozone 的 S3 协议时,你也需要同样的 AWS access key 和 secret。 + +在 Ozone 中,用户可以直接下载 access key。用户需要先执行 `kinit` 命令进行 Kerberos 认证,认证通过后就可以下载 S3 access key 和 secret。和 AWS S3 一样,access key 和 secret 具有 S3 桶的全部权限,用户需要保管好 key 和 secret。 + +* S3 客户端可以从 OM 获取 access key id 和 secret。 + +```bash +ozone s3 getsecret +``` +这条命令会与 Ozone 进行通信,对用户进行 Kerberos 认证并生成 AWS 凭据,结果会直接打印在屏幕上,你可以将其配置在 _.aws._ 文件中,这样可以在操作 Ozone S3 桶时自动进行认证。 + + + + +* 在 aws 配置中添加上述凭据: + +```bash +aws configure set default.s3.signature_version s3v4 +aws configure set aws_access_key_id ${accessId} +aws configure set aws_secret_access_key ${secret} +aws configure set region us-west-1 +``` +关于通过命令行和 S3 API 使用 S3,请参考 AWS S3 的文档。 From b9e1418dd4ac75d108195b9b396aeb38f3cd14dc Mon Sep 17 00:00:00 2001 From: Xiang Zhang Date: Wed, 15 Jul 2020 14:54:02 +0800 Subject: [PATCH 022/165] HDDS-2769. 
security/SecurityWithRanger.md (#1189) --- .../content/security/SecuityWithRanger.zh.md | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 hadoop-hdds/docs/content/security/SecuityWithRanger.zh.md diff --git a/hadoop-hdds/docs/content/security/SecuityWithRanger.zh.md b/hadoop-hdds/docs/content/security/SecuityWithRanger.zh.md new file mode 100644 index 000000000000..b879e9347c08 --- /dev/null +++ b/hadoop-hdds/docs/content/security/SecuityWithRanger.zh.md @@ -0,0 +1,35 @@ +--- +title: "Apache Ranger" +date: "2019-April-03" +weight: 5 +summary: Apache Ranger 是一个用于管理和监控 Hadoop 平台复杂数据权限的框架。 +icon: user +--- + + + +Apache Ranger™ 是一个用于管理和监控 Hadoop 平台复杂数据权限的框架。版本大于 1.20 的 Apache Ranger 都可以用于管理 Ozone 集群。 + +你需要先在你的 Hadoop 集群上安装 Apache Ranger,安装指南可以参考 [Apache Ranger 官网](https://ranger.apache.org/index.html). + +如果你已经安装好了 Apache Ranger,那么 Ozone 的配置十分简单,你只需要启用 ACL 支持并且将 ACL 授权类设置为 Ranger 授权类,在 ozone-site.xml 中添加下面的参数: + +参数名|参数值 +--------|------------------------------------------------------------ +ozone.acl.enabled | true +ozone.acl.authorizer.class| org.apache.ranger.authorization.ozone.authorizer.RangerOzoneAuthorizer From 5348c57dd3998c689c5a3b4e198fdf89a2f57c88 Mon Sep 17 00:00:00 2001 From: HuangTao Date: Wed, 15 Jul 2020 23:24:02 +0800 Subject: [PATCH 023/165] HDDS-3798. Display more accurate timestamp in recon Web (#1201) --- .../src/components/autoReloadPanel/autoReloadPanel.tsx | 4 ++-- .../recon/ozone-recon-web/src/views/datanodes/datanodes.tsx | 4 ++-- .../recon/ozone-recon-web/src/views/pipelines/pipelines.tsx | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx index fee9a883a0dc..773828dcd0ed 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx @@ -42,9 +42,9 @@ class AutoReloadPanel extends React.Component { const lastUpdatedText = lastUpdated === 0 ? 'NA' : ( - {moment(lastUpdated).format('LT')} + {moment(lastUpdated).format('LTS')} ); return ( diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index 877ebf9f2c75..fada1f43c35a 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -135,7 +135,7 @@ const COLUMNS = [ isVisible: true, sorter: (a: IDatanode, b: IDatanode) => a.lastHeartbeat - b.lastHeartbeat, render: (heartbeat: number) => { - return heartbeat > 0 ? moment(heartbeat).format('lll') : 'NA'; + return heartbeat > 0 ? moment(heartbeat).format('ll LTS') : 'NA'; } }, { @@ -197,7 +197,7 @@ const COLUMNS = [ isVisible: false, sorter: (a: IDatanode, b: IDatanode) => a.setupTime - b.setupTime, render: (uptime: number) => { - return uptime > 0 ? moment(uptime).format('lll') : 'NA'; + return uptime > 0 ? 
moment(uptime).format('ll LTS') : 'NA'; } } ]; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx index 15b6858bbc4a..b898818cd234 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx @@ -114,7 +114,7 @@ const COLUMNS = [ dataIndex: 'lastLeaderElection', key: 'lastLeaderElection', render: (lastLeaderElection: number) => lastLeaderElection > 0 ? - moment(lastLeaderElection).format('lll') : 'NA', + moment(lastLeaderElection).format('ll LTS') : 'NA', sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.lastLeaderElection - b.lastLeaderElection }, { From b009a9f706f1783cae79b4f7e45e6ce4a9e4a40b Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Wed, 15 Jul 2020 10:56:22 -0700 Subject: [PATCH 024/165] HDDS-3765. Fluentd writing to secure Ozone S3 API fails with 500 Error. (#1179) --- .../main/smoketest/security/ozone-secure-s3.robot | 15 +++++++++++++++ .../hadoop/ozone/s3/AWSV4SignatureProcessor.java | 8 +------- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot index 5103e80279c2..70bade5f1e85 100644 --- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot +++ b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot @@ -24,6 +24,8 @@ Test Timeout 5 minutes *** Variables *** ${ENDPOINT_URL} http://s3g:9878 +${TEMPDIR} /tmp +${TEST_FILE} NOTICE.txt *** Keywords *** Setup volume names @@ -38,6 +40,19 @@ Secure S3 test Success ${output} = Execute aws s3api --endpoint-url ${ENDPOINT_URL} list-buckets Should contain ${output} bucket-test123 +Secure S3 put-object test + ${testFilePath} = Set Variable ${TEMPDIR}/${TEST_FILE} + Copy File ${TEST_FILE} ${testFilePath} + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} put-object --bucket=bucket-test123 --key=tmp1/tmp2/NOTICE.txt --body=${testFilePath} + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} list-objects --bucket=bucket-test123 + Should contain ${output} tmp1/tmp2/NOTICE.txt + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} put-object --bucket=bucket-test123 --key=tmp3//tmp4/NOTICE.txt --body=${testFilePath} + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} list-objects --bucket=bucket-test123 + Should contain ${output} tmp3//tmp4/NOTICE.txt + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} put-object --bucket=bucket-test123 --key=//tmp5/tmp6/NOTICE.txt --body=${testFilePath} + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} list-objects --bucket=bucket-test123 + Should contain ${output} //tmp5/tmp6/NOTICE.txt + Secure S3 test Failure Run Keyword Setup dummy credentials for S3 ${rc} ${result} = Run And Return Rc And Output aws s3api --endpoint-url ${ENDPOINT_URL} create-bucket --bucket bucket-test123 diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java index 099221daa380..1ff1a72575b7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java 
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java @@ -104,13 +104,7 @@ public void init() this.queryMap = context.getUriInfo().getQueryParameters(); - try { - this.uri = new URI(context.getUriInfo().getRequestUri() - .getPath().replaceAll("\\/+", - "/")).normalize().getPath(); - } catch (URISyntaxException e) { - throw S3_AUTHINFO_CREATION_ERROR; - } + this.uri = context.getUriInfo().getRequestUri().getPath(); this.method = context.getMethod(); if (v4Header == null) { From 9f67f28eb46989197f427ed1ebe65718ab3d5f72 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 16 Jul 2020 01:26:15 +0200 Subject: [PATCH 025/165] HDDS-3967. Remove leftover debug setting (#1202) --- .../integration-test/src/test/resources/log4j.properties | 3 --- 1 file changed, 3 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/resources/log4j.properties b/hadoop-ozone/integration-test/src/test/resources/log4j.properties index e5381ad212fc..b8ad21d6c7fa 100644 --- a/hadoop-ozone/integration-test/src/test/resources/log4j.properties +++ b/hadoop-ozone/integration-test/src/test/resources/log4j.properties @@ -19,6 +19,3 @@ log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:% log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR - -log4j.logger.org.apache.ratis.grpc.server.GrpcLogAppender=DEBUG -log4j.logger.org.apache.ratis.server.impl.RaftServerImpl=DEBUG From bbc93a6e5446b535cabd121b5ea64bb7f7bf25a9 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 15 Jul 2020 17:16:22 -0700 Subject: [PATCH 026/165] HDDS-3968. LDB scan fails to read from transactionInfoTable. 
(#1205) --- .../hadoop/ozone/om/codec/OMDBDefinition.java | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java index f46afde8d983..24f33418b0b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.db.LongCodec; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -31,6 +32,7 @@ import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; +import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; @@ -139,6 +141,15 @@ public class OMDBDefinition implements DBDefinition { S3SecretValue.class, new S3SecretValueCodec()); + public static final DBColumnFamilyDefinition + TRANSACTION_INFO_TABLE = + new DBColumnFamilyDefinition<>( + OmMetadataManagerImpl.TRANSACTION_INFO_TABLE, + String.class, + new StringCodec(), + OMTransactionInfo.class, + new OMTransactionInfoCodec()); + @Override public String getName() { @@ -155,7 +166,7 @@ public DBColumnFamilyDefinition[] getColumnFamilies() { return new DBColumnFamilyDefinition[] {DELETED_TABLE, USER_TABLE, VOLUME_TABLE, S3_TABLE, OPEN_KEY_TABLE, KEY_TABLE, BUCKET_TABLE, MULTIPART_INFO_TABLE, PREFIX_TABLE, DTOKEN_TABLE, - S3_SECRET_TABLE}; + S3_SECRET_TABLE, TRANSACTION_INFO_TABLE}; } } From 03a6020e26232cc898a1e3a16be9f758fc8d89ab Mon Sep 17 00:00:00 2001 From: Xiang Zhang Date: Thu, 16 Jul 2020 09:03:35 +0800 Subject: [PATCH 027/165] HDDS-2767. security/SecuringTDE.md (#1184) --- .../docs/content/security/SecuringTDE.md | 4 +- .../docs/content/security/SecuringTDE.zh.md | 52 +++++++++++++++++++ 2 files changed, 54 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdds/docs/content/security/SecuringTDE.zh.md diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md index 3e8f2d16819f..0e21b62b7c85 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.md @@ -1,7 +1,7 @@ --- title: "Transparent Data Encryption" date: "2019-April-03" -summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. You can enable this per key or per bucket. +summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. weight: 3 icon: lock --- @@ -28,7 +28,7 @@ when a bucket is created. ### Setting up the Key Management Server -To use TDE, clients must setup a Key Management Server and provide that URI to +To use TDE, admin must setup a Key Management Server and provide that URI to Ozone/HDFS. Since Ozone and HDFS can use the same Key Management Server, this configuration can be provided via *hdfs-site.xml*. 
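The English page above ends by noting that the KMS address is supplied through the cluster configuration. As a quick illustration, the same provider path listed in the table of the translated page that follows can also be set programmatically; this is a sketch only, and the kms-host:9600 URI is the documentation placeholder, not a real endpoint.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

// Sketch: the KMS provider setting the TDE docs describe, set in code
// instead of hdfs-site.xml. The kms-host address is a placeholder.
final class TdeKmsConfigSketch {
  static OzoneConfiguration withKmsProvider() {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("hadoop.security.key.provider.path",
        "kms://http@kms-host:9600/kms");
    return conf;
  }
}
```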
diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md new file mode 100644 index 000000000000..35baaa0164e8 --- /dev/null +++ b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md @@ -0,0 +1,52 @@ +--- +title: "透明数据加密" +date: "2019-April-03" +summary: 透明数据加密(Transparent Data Encryption,TDE)以密文形式在磁盘上保存数据,但可以在用户访问的时候自动进行解密。 +weight: 3 +icon: lock +--- + + +Ozone TDE 的配置和使用和 HDFS TDE 十分相似,主要的区别是,Ozone 中桶级别的 TDE 必须在创建桶时启用。 + +### 配置密钥管理服务器 + +使用 TDE 之前,管理员必须要提前配置密钥管理服务 KMS,并且把 KMS 的 URI 通过 core-site.xml 提供给 Ozone。 + +参数名 | 值 +-----------------------------------|----------------------------------------- +hadoop.security.key.provider.path | KMS uri.
比如 kms://http@kms-host:9600/kms + +### 使用 TDE +如果你的集群已经配置好了 TDE,那么你只需要创建加密密钥并启用桶加密即可。 + +创建加密密钥的方法为: + * 使用 hadoop key 命令创建桶加密密钥,和 HDFS 加密区域的使用方法类似。 + + ```bash + hadoop key create encKey + ``` + 上面这个命令会创建一个用于保护桶数据的密钥。创建完成之后,你可以告诉 Ozone 在读写某个桶中的数据时使用这个密钥。 + + * 将加密密钥分配给桶 + + ```bash + ozone sh bucket create -k encKey /vol/encryptedBucket + ``` + +这条命令执行后,所以写往 _encryptedBucket_ 的数据都会用 encKey 进行加密,当读取里面的数据时,客户端通过 KMS 获取密钥进行解密。换句话说,Ozone 中存储的数据一直是加密的,但用户和客户端对此完全无感知。 From d5bebd5f5dc81840e1f55373ac42921fa9306c95 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 16 Jul 2020 03:40:50 +0200 Subject: [PATCH 028/165] HDDS-3958. Intermittent failure in Recon acceptance test due to mixed stdout and stderr (#1200) --- .../src/main/smoketest/recon/recon-api.robot | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot index 707384902b75..ada2dd8daf6a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot +++ b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot @@ -28,10 +28,10 @@ ${API_ENDPOINT_URL} http://recon:9888/api/v1 *** Keywords *** Check if Recon picks up container from OM Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/containers + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/containers Should contain ${result} \"ContainerID\":1 - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/utilization/fileCount + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/utilization/fileCount Should contain ${result} \"fileSize\":2048,\"count\":10 *** Test Cases *** @@ -43,13 +43,13 @@ Check if Recon picks up OM data Wait Until Keyword Succeeds 90sec 10sec Check if Recon picks up container from OM Check if Recon picks up DN heartbeats - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/datanodes + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/datanodes Should contain ${result} datanodes Should contain ${result} datanode_1 Should contain ${result} datanode_2 Should contain ${result} datanode_3 - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/pipelines + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/pipelines Should contain ${result} pipelines Should contain ${result} RATIS Should contain ${result} OPEN @@ -57,15 +57,15 @@ Check if Recon picks up DN heartbeats Should contain ${result} datanode_2 Should contain ${result} datanode_3 - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/clusterState + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/clusterState Should contain ${result} \"totalDatanodes\":3 Should contain ${result} \"healthyDatanodes\":3 Should contain ${result} \"pipelines\":4 - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/containers/1/replicaHistory + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/containers/1/replicaHistory Should contain ${result} \"containerId\":1 Check if Recon Web UI is up Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user - ${result} = Execute curl --negotiate -u : -v ${ENDPOINT_URL} - Should contain ${result} Ozone Recon \ No newline at end of file + ${result} = Execute curl --negotiate -u : -LSs ${ENDPOINT_URL} + Should contain ${result} Ozone Recon From 
deb3a17d60dca2ee539e53b933d49ab834ced839 Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Thu, 16 Jul 2020 11:13:32 +0800 Subject: [PATCH 029/165] HDDS-3923. Display the safemode status on scm page (#1165) --- .../org/apache/hadoop/hdds/scm/server/SCMMXBean.java | 2 ++ .../hdds/scm/server/StorageContainerManager.java | 10 ++++++++++ .../src/main/resources/webapps/scm/scm-overview.html | 11 +++++++++++ 3 files changed, 23 insertions(+) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java index 2c6387522241..f0a497ad375d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java @@ -64,4 +64,6 @@ public interface SCMMXBean extends ServiceRuntimeInfo { * Returns the container count in all states. */ Map getContainerStateCount(); + + Map getRuleStatusMetrics(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 0ed843c87bbd..621b126a1e8c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -1122,4 +1122,14 @@ public NetworkTopology getClusterMap() { public Map> getRuleStatus() { return scmSafeModeManager.getRuleStatus(); } + + @Override + public Map getRuleStatusMetrics() { + Map map = new HashMap<>(); + for (Map.Entry> entry : + scmSafeModeManager.getRuleStatus().entrySet()) { + map.put(entry.getKey(), entry.getValue().getRight()); + } + return map; + } } diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index 38ce638f4d11..ebb117077d92 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -42,3 +42,14 @@

Status

+ +

Safemode rules statuses

+ + + + + + + + +
{{typestat.key}}{{typestat.value}}
\ No newline at end of file From dc7dd86d92f02e7cf3cf149698c263da8cca4e75 Mon Sep 17 00:00:00 2001 From: bshashikant Date: Thu, 16 Jul 2020 16:08:56 +0530 Subject: [PATCH 030/165] HDDS-3807. Propagate raft log disks info to SCM from datanode. (#1107) --- .../common/impl/StorageLocationReport.java | 11 ++-- .../transport/server/XceiverServerSpi.java | 12 +++- .../server/ratis/XceiverServerRatis.java | 58 +++++++++++++++++-- .../common/volume/MutableVolumeSet.java | 10 +--- .../container/ozoneimpl/OzoneContainer.java | 18 +++++- .../ozoneimpl/TestOzoneContainer.java | 51 ++++++++++++++-- .../hadoop/hdds/utils/HddsServerUtil.java | 16 ++--- .../ScmServerDatanodeHeartbeatProtocol.proto | 6 ++ 8 files changed, 152 insertions(+), 30 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java index 061d09bd4a5e..2ad7f0d49817 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java @@ -88,11 +88,14 @@ public StorageType getStorageType() { return storageType; } + private StorageTypeProto getStorageTypeProto() throws IOException { + return getStorageTypeProto(getStorageType()); + } - private StorageTypeProto getStorageTypeProto() throws - IOException { + public static StorageTypeProto getStorageTypeProto(StorageType type) + throws IOException { StorageTypeProto storageTypeProto; - switch (getStorageType()) { + switch (type) { case SSD: storageTypeProto = StorageTypeProto.SSD; break; @@ -145,7 +148,7 @@ private static StorageType getStorageType(StorageTypeProto proto) throws * @return SCMStorageReport * @throws IOException In case, the storage type specified is invalid. */ - public StorageReportProto getProtoBufMessage() throws IOException{ + public StorageReportProto getProtoBufMessage() throws IOException { StorageReportProto.Builder srb = StorageReportProto.newBuilder(); return srb.setStorageUuid(getId()) .setCapacity(getCapacity()) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java index 01f463c5cddd..d8dfefdb5f64 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java @@ -24,7 +24,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineReport; - +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import java.io.IOException; import java.util.Collection; import java.util.List; @@ -83,4 +84,13 @@ default void removeGroup(HddsProtos.PipelineID pipelineId) * @return list of report for each pipeline. */ List getPipelineReport(); + + /** + * Get storage report for the XceiverServer instance. + * @return list of report for each storage location. 
+ */ + default List getStorageReport() throws + IOException { + return null; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 3e6ac10e7e1a..c751c5b5b9d8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -30,6 +29,7 @@ import java.util.Objects; import java.util.Set; import java.util.UUID; +import java.util.EnumMap; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; @@ -46,6 +46,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; @@ -59,11 +60,14 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; @@ -136,6 +140,11 @@ private static long nextCallId() { // Timeout used while calling submitRequest directly. private long requestTimeout; + /** + * Maintains a list of active volumes per StorageType. 
+ */ + private EnumMap> ratisVolumeMap; + private XceiverServerRatis(DatanodeDetails dd, int port, ContainerDispatcher dispatcher, ContainerController containerController, StateContext context, GrpcTlsConfig tlsConfig, ConfigurationSource conf) @@ -163,6 +172,7 @@ private XceiverServerRatis(DatanodeDetails dd, int port, HddsConfigKeys.HDDS_DATANODE_RATIS_SERVER_REQUEST_TIMEOUT, HddsConfigKeys.HDDS_DATANODE_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); + initializeRatisVolumeMap(); } private ContainerStateMachine getStateMachine(RaftGroupId gid) { @@ -213,9 +223,12 @@ private RaftProperties newRaftProperties() { setNodeFailureTimeout(properties); // Set the ratis storage directory - String storageDir = HddsServerUtil.getOzoneDatanodeRatisDirectory(conf); - RaftServerConfigKeys.setStorageDir(properties, - Collections.singletonList(new File(storageDir))); + Collection storageDirPaths = + HddsServerUtil.getOzoneDatanodeRatisDirectory(conf); + List storageDirs= new ArrayList<>(storageDirPaths.size()); + storageDirPaths.stream().forEach(d -> storageDirs.add(new File(d))); + + RaftServerConfigKeys.setStorageDir(properties, storageDirs); // For grpc set the maximum message size GrpcConfigKeys.setMessageSizeMax(properties, @@ -526,6 +539,43 @@ public void submitRequest(ContainerCommandRequestProto request, } } + private void initializeRatisVolumeMap() throws IOException { + ratisVolumeMap = new EnumMap<>(StorageType.class); + Collection rawLocations = HddsServerUtil. + getOzoneDatanodeRatisDirectory(conf); + + for (String locationString : rawLocations) { + try { + StorageLocation location = StorageLocation.parse(locationString); + StorageType type = location.getStorageType(); + ratisVolumeMap.computeIfAbsent(type, k -> new ArrayList(1)); + ratisVolumeMap.get(location.getStorageType()). + add(location.getUri().getPath()); + + } catch (IOException e) { + LOG.error("Failed to parse the storage location: " + + locationString, e); + } + } + } + + @Override + public List getStorageReport() + throws IOException { + List reportProto = new ArrayList<>(); + for (StorageType storageType : ratisVolumeMap.keySet()) { + for (String path : ratisVolumeMap.get(storageType)) { + MetadataStorageReportProto.Builder builder = MetadataStorageReportProto. + newBuilder(); + builder.setStorageLocation(path); + builder.setStorageType(StorageLocationReport. 
+ getStorageTypeProto(storageType)); + reportProto.add(builder.build()); + } + } + return reportProto; + } + private RaftClientRequest createRaftClientRequest( ContainerCommandRequestProto request, HddsProtos.PipelineID pipelineID, RaftClientRequest.Type type) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index bc61811c868b..b8c606738ef1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -36,8 +36,6 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.common.InconsistentStorageStateException; import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; @@ -487,7 +485,7 @@ public Map> getVolumeStateMap() { return ImmutableMap.copyOf(volumeStateMap); } - public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() + public StorageLocationReport[] getStorageReport() throws IOException { boolean failed; this.readLock(); @@ -540,11 +538,7 @@ public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() StorageLocationReport r = builder.build(); reports[counter++] = r; } - NodeReportProto.Builder nrb = NodeReportProto.newBuilder(); - for (int i = 0; i < reports.length; i++) { - nrb.addStorageReport(reports[i].getProtoBufMessage()); - } - return nrb.build(); + return reports; } finally { this.readUnlock(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index bbbec25af783..62fd5a4e85e0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -39,6 +39,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; +import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -294,8 +295,21 @@ public ContainerController getController() { * Returns node report of container storage usage. */ public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() - throws IOException { - return volumeSet.getNodeReport(); + throws IOException { + StorageLocationReport[] reports = volumeSet.getStorageReport(); + StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb + = StorageContainerDatanodeProtocolProtos. 
+ NodeReportProto.newBuilder(); + for (int i = 0; i < reports.length; i++) { + nrb.addStorageReport(reports[i].getProtoBufMessage()); + } + List metadataReport = + writeChannel.getStorageReport(); + if (metadataReport != null) { + nrb.addAllMetadataStorageReport(metadataReport); + } + return nrb.build(); } @VisibleForTesting diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 1056a0d727b4..2bb52f65dc8f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; @@ -45,10 +46,7 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; +import org.junit.*; import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -169,6 +167,51 @@ public void testBuildContainerMap() throws Exception { verifyCommittedSpace(ozoneContainer); } + @Test + public void testBuildNodeReport() throws Exception { + String path = folder.getRoot() + .getAbsolutePath(); + conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + String.join(",", + path + "/ratis1", path + "/ratis2", path + "ratis3")); + DatanodeStateMachine stateMachine = Mockito.mock( + DatanodeStateMachine.class); + StateContext context = Mockito.mock(StateContext.class); + Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); + Mockito.when(context.getParent()).thenReturn(stateMachine); + // When OzoneContainer is started, the containers from disk should be + // loaded into the containerSet. + // Also expected to initialize committed space for each volume. + OzoneContainer ozoneContainer = new + OzoneContainer(datanodeDetails, conf, context, null); + Assert.assertEquals(volumeSet.getVolumesList().size(), + ozoneContainer.getNodeReport().getStorageReportList().size()); + Assert.assertEquals(3, + ozoneContainer.getNodeReport().getMetadataStorageReportList() + .size()); + + } + + @Test + public void testBuildNodeReportWithDefaultRatisLogDir() throws Exception { + DatanodeStateMachine stateMachine = Mockito.mock( + DatanodeStateMachine.class); + StateContext context = Mockito.mock(StateContext.class); + Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); + Mockito.when(context.getParent()).thenReturn(stateMachine); + // When OzoneContainer is started, the containers from disk should be + // loaded into the containerSet. + // Also expected to initialize committed space for each volume. 
+ OzoneContainer ozoneContainer = new + OzoneContainer(datanodeDetails, conf, context, null); + Assert.assertEquals(volumeSet.getVolumesList().size(), + ozoneContainer.getNodeReport().getStorageReportList().size()); + Assert.assertEquals(1, + ozoneContainer.getNodeReport().getMetadataStorageReportList() + .size()); + } + + @Test public void testContainerCreateDiskFull() throws Exception { long containerSize = (long) StorageUnit.MB.toBytes(100); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index ee5114275d06..8e7f3263fb63 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -20,6 +20,8 @@ import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; import java.util.Optional; import java.util.OptionalInt; import java.util.concurrent.TimeUnit; @@ -51,7 +53,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.security.UserGroupInformation; -import com.google.common.base.Strings; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL_DEFAULT; import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; @@ -344,15 +345,16 @@ public static int getContainerPort(ConfigurationSource conf) { OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); } - public static String getOzoneDatanodeRatisDirectory( + public static Collection getOzoneDatanodeRatisDirectory( ConfigurationSource conf) { - String storageDir = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + Collection rawLocations = conf.getTrimmedStringCollection( + OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); - if (Strings.isNullOrEmpty(storageDir)) { - storageDir = ServerUtils.getDefaultRatisDirectory(conf); + if (rawLocations.isEmpty()) { + rawLocations = new ArrayList<>(1); + rawLocations.add(ServerUtils.getDefaultRatisDirectory(conf)); } - return storageDir; + return rawLocations; } /** diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto index 191a81af7ff0..00c8fdbf3fb4 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto @@ -146,6 +146,7 @@ message SCMNodeAddressList { */ message NodeReportProto { repeated StorageReportProto storageReport = 1; + repeated MetadataStorageReportProto metadataStorageReport = 2; } message StorageReportProto { @@ -158,6 +159,11 @@ message StorageReportProto { optional bool failed = 7 [default = false]; } +message MetadataStorageReportProto { + required string storageLocation = 1; + optional StorageTypeProto storageType = 2 [default = DISK]; +} + /** * Types of recognized storage media. */ From 98ff5493cf76e10e8af8b23edbbb77d02bf25afc Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Thu, 16 Jul 2020 17:19:49 +0200 Subject: [PATCH 031/165] HDDS-3966. 
Disable flaky TestOMRatisSnapshots --- .../java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index ea422e1d731e..0cfbea4ef9c2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -40,6 +40,7 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -48,6 +49,7 @@ /** * Tests the Ratis snaphsots feature in OM. */ +@Ignore("HDDS-3966") public class TestOMRatisSnapshots { private MiniOzoneHAClusterImpl cluster = null; From a753c5487bcf93b323d752a2c130a42c0115ff8b Mon Sep 17 00:00:00 2001 From: Rakesh Radhakrishnan Date: Fri, 17 Jul 2020 02:25:07 +0530 Subject: [PATCH 032/165] HDDS-3824: OM read requests should make SCM#refreshPipeline outside BUCKET_LOCK (#1164) --- .../hadoop/ozone/om/KeyManagerImpl.java | 161 ++++++++++-------- 1 file changed, 88 insertions(+), 73 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index c0230ab37975..1fca32f688c0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -638,48 +638,53 @@ public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) String keyName = args.getKeyName(); metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName); + OmKeyInfo value = null; try { String keyBytes = metadataManager.getOzoneKey( volumeName, bucketName, keyName); - OmKeyInfo value = metadataManager.getKeyTable().get(keyBytes); - if (value == null) { - LOG.debug("volume:{} bucket:{} Key:{} not found", - volumeName, bucketName, keyName); - throw new OMException("Key not found", - KEY_NOT_FOUND); - } - if (grpcBlockTokenEnabled) { - String remoteUser = getRemoteUser().getShortUserName(); - for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) { - key.getLocationList().forEach(k -> { - k.setToken(secretManager.generateToken(remoteUser, - k.getBlockID().getContainerBlockID().toString(), - getAclForUser(remoteUser), - k.getLength())); - }); - } - } - // Refresh container pipeline info from SCM - // based on OmKeyArgs.refreshPipeline flag - if (args.getRefreshPipeline()) { - refreshPipeline(value); - } - if (args.getSortDatanodes()) { - sortDatanodeInPipeline(value, clientAddress); - } - return value; + value = metadataManager.getKeyTable().get(keyBytes); } catch (IOException ex) { if (ex instanceof OMException) { throw ex; } - LOG.debug("Get key failed for volume:{} bucket:{} key:{}", - volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), - KEY_NOT_FOUND); + if (LOG.isDebugEnabled()) { + LOG.debug("Get key failed for volume:{} bucket:{} key:{}", volumeName, + bucketName, keyName, ex); + } + throw new OMException(ex.getMessage(), KEY_NOT_FOUND); } finally { metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } + + if (value == null) { + if 
(LOG.isDebugEnabled()) { + LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName, + bucketName, keyName); + } + throw new OMException("Key not found", KEY_NOT_FOUND); + } + if (grpcBlockTokenEnabled) { + String remoteUser = getRemoteUser().getShortUserName(); + for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) { + key.getLocationList().forEach(k -> { + k.setToken(secretManager.generateToken(remoteUser, + k.getBlockID().getContainerBlockID().toString(), + getAclForUser(remoteUser), k.getLength())); + }); + } + } + + // Refresh container pipeline info from SCM + // based on OmKeyArgs.refreshPipeline flag + // value won't be null as the check is done inside try/catch block. + if (args.getRefreshPipeline()) { + refreshPipeline(value); + } + if (args.getSortDatanodes()) { + sortDatanodeInPipeline(value, clientAddress); + } + return value; } /** @@ -1696,6 +1701,18 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args, String clientAddress) String bucketName = args.getBucketName(); String keyName = args.getKeyName(); + return getOzoneFileStatus(volumeName, bucketName, keyName, + args.getRefreshPipeline(), false, null); + } + + private OzoneFileStatus getOzoneFileStatus(String volumeName, + String bucketName, + String keyName, + boolean refreshPipeline, + boolean sortDatanodes, + String clientAddress) + throws IOException { + OmKeyInfo fileKeyInfo = null; metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName); try { @@ -1707,39 +1724,45 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args, String clientAddress) // Check if the key is a file. String fileKeyBytes = metadataManager.getOzoneKey( - volumeName, bucketName, keyName); - OmKeyInfo fileKeyInfo = metadataManager.getKeyTable().get(fileKeyBytes); + volumeName, bucketName, keyName); + fileKeyInfo = metadataManager.getKeyTable().get(fileKeyBytes); + + // Check if the key is a directory. 
+ if (fileKeyInfo == null) { + String dirKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); + String dirKeyBytes = metadataManager.getOzoneKey( + volumeName, bucketName, dirKey); + OmKeyInfo dirKeyInfo = metadataManager.getKeyTable().get(dirKeyBytes); + if (dirKeyInfo != null) { + return new OzoneFileStatus(dirKeyInfo, scmBlockSize, true); + } + } + } finally { + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, + bucketName); + + // if the key is a file then do refresh pipeline info in OM by asking SCM if (fileKeyInfo != null) { - if (args.getRefreshPipeline()) { + if (refreshPipeline) { refreshPipeline(fileKeyInfo); } - if (args.getSortDatanodes()) { + if (sortDatanodes) { sortDatanodeInPipeline(fileKeyInfo, clientAddress); } // this is a file return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false); } + } - String dirKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - String dirKeyBytes = metadataManager.getOzoneKey( - volumeName, bucketName, dirKey); - OmKeyInfo dirKeyInfo = metadataManager.getKeyTable().get(dirKeyBytes); - if (dirKeyInfo != null) { - return new OzoneFileStatus(dirKeyInfo, scmBlockSize, true); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Unable to get file status for the key: volume: {}, bucket:" + - " {}, key: {}, with error: No such file exists.", volumeName, - bucketName, keyName); - } - throw new OMException("Unable to get file status: volume: " + - volumeName + " bucket: " + bucketName + " key: " + keyName, - FILE_NOT_FOUND); - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); + // Key is not found, throws exception + if (LOG.isDebugEnabled()) { + LOG.debug("Unable to get file status for the key: volume: {}, bucket:" + + " {}, key: {}, with error: No such file exists.", + volumeName, bucketName, keyName); } + throw new OMException("Unable to get file status: volume: " + + volumeName + " bucket: " + bucketName + " key: " + keyName, + FILE_NOT_FOUND); } /** @@ -1881,26 +1904,13 @@ public OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); String keyName = args.getKeyName(); - - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - OzoneFileStatus fileStatus = getFileStatus(args, null); - if (fileStatus.isFile()) { - if (args.getRefreshPipeline()) { - refreshPipeline(fileStatus.getKeyInfo()); - } - if (args.getSortDatanodes()) { - sortDatanodeInPipeline(fileStatus.getKeyInfo(), clientAddress); - } - return fileStatus.getKeyInfo(); - } - //if key is not of type file or if key is not found we throw an exception - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); + OzoneFileStatus fileStatus = getOzoneFileStatus(volumeName, bucketName, + keyName, args.getRefreshPipeline(), args.getSortDatanodes(), + clientAddress); + //if key is not of type file or if key is not found we throw an exception + if (fileStatus.isFile()) { + return fileStatus.getKeyInfo(); } - throw new OMException("Can not write to directory: " + keyName, ResultCodes.NOT_A_FILE); } @@ -2091,6 +2101,11 @@ public List listStatus(OmKeyArgs args, boolean recursive, metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } + if (args.getRefreshPipeline()) { + for(OzoneFileStatus fileStatus : fileStatusList){ + refreshPipeline(fileStatus.getKeyInfo()); + } + } return fileStatusList; } From e1a687fff7181546d863e6f08e9f3b8bc7f8c8f5 Mon Sep 17 
00:00:00 2001 From: prashantpogde Date: Thu, 16 Jul 2020 13:56:29 -0700 Subject: [PATCH 033/165] HDDS-3926. OM Token Identifier table should use in-house serialization. (#1182) --- .../ozone/security/OzoneTokenIdentifier.java | 56 ++++++++++++++++++- .../ozone/om/codec/TokenIdentifierCodec.java | 16 +++++- .../security/TestOzoneTokenIdentifier.java | 19 +++++++ 3 files changed, 86 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java index 290dd1d4f95f..c0b1ddbd1dd9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java @@ -26,13 +26,17 @@ import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3AUTHINFO; + /** * The token identifier for Ozone Master. */ @@ -77,6 +81,55 @@ public Text getKind() { return KIND_NAME; } + /** Instead of relying on proto serialization, this + * provides explicit serialization for OzoneTokenIdentifier. + * @return byte[] + */ + public byte[] toUniqueSerializedKey() { + DataOutputBuffer buf = new DataOutputBuffer(); + try { + super.write(buf); + WritableUtils.writeVInt(buf, getTokenType().getNumber()); + // Set s3 specific fields. + if (getTokenType().equals(S3AUTHINFO)) { + WritableUtils.writeString(buf, getAwsAccessId()); + WritableUtils.writeString(buf, getSignature()); + WritableUtils.writeString(buf, getStrToSign()); + } else { + WritableUtils.writeString(buf, getOmCertSerialId()); + WritableUtils.writeString(buf, getOmServiceId()); + } + } catch (java.io.IOException e) { + throw new IllegalArgumentException( + "Can't encode the the raw data ", e); + } + return buf.getData(); + } + + /** Instead of relying on proto deserialization, this + * provides explicit deserialization for OzoneTokenIdentifier. + * @return byte[] + */ + public OzoneTokenIdentifier fromUniqueSerializedKey(byte[] rawData) + throws IOException { + DataInputBuffer in = new DataInputBuffer(); + in.reset(rawData, rawData.length); + super.readFields(in); + int type = WritableUtils.readVInt(in); + // Set s3 specific fields. + if (type == S3AUTHINFO.getNumber()) { + this.tokenType = Type.S3AUTHINFO; + setAwsAccessId(WritableUtils.readString(in)); + setSignature(WritableUtils.readString(in)); + setStrToSign(WritableUtils.readString(in)); + } else { + this.tokenType = Type.DELEGATION_TOKEN; + setOmCertSerialId(WritableUtils.readString(in)); + setOmServiceId(WritableUtils.readString(in)); + } + return this; + } + /** * Overrides default implementation to write using Protobuf. 
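For reference, a minimal round-trip sketch of the new explicit format (illustrative only, not part of the patch; it assumes the identifier's fields are already populated and that the caller handles IOException):

    // Hypothetical helper showing the intended round trip of the new format.
    static OzoneTokenIdentifier roundTrip(OzoneTokenIdentifier id)
        throws IOException {
      byte[] raw = id.toUniqueSerializedKey();   // explicit, non-proto encoding
      return OzoneTokenIdentifier.newInstance().fromUniqueSerializedKey(raw);
    }

On read, TokenIdentifierCodec first tries this format and only falls back to the old protobuf decoding for rows persisted before the change, as the codec hunk below shows.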
* @@ -92,7 +145,6 @@ public void write(DataOutput out) throws IOException { .setRealUser(getRealUser().toString()) .setRenewer(getRenewer().toString()) .setIssueDate(getIssueDate()) - .setMaxDate(getMaxDate()) .setSequenceNumber(getSequenceNumber()) .setMasterKeyId(getMasterKeyId()); @@ -332,4 +384,4 @@ public String toString() { .append(", omServiceId=").append(getOmServiceId()); return buffer.toString(); } -} \ No newline at end of file +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java index 22656d887b66..592cae3a366c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java @@ -19,10 +19,12 @@ import com.google.common.base.Preconditions; import com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.hdds.utils.db.Codec; import java.io.IOException; +import java.nio.BufferUnderflowException; /** * Codec to encode TokenIdentifierCodec as byte array. @@ -33,7 +35,7 @@ public class TokenIdentifierCodec implements Codec { public byte[] toPersistedFormat(OzoneTokenIdentifier object) { Preconditions .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getBytes(); + return object.toUniqueSerializedKey(); } @Override @@ -42,8 +44,16 @@ public OzoneTokenIdentifier fromPersistedFormat(byte[] rawData) Preconditions.checkNotNull(rawData, "Null byte array can't converted to real object."); try { - return OzoneTokenIdentifier.readProtoBuf(rawData); - } catch (InvalidProtocolBufferException e) { + OzoneTokenIdentifier object = OzoneTokenIdentifier.newInstance(); + return object.fromUniqueSerializedKey(rawData); + } catch (IOException ex) { + try { + return OzoneTokenIdentifier.readProtoBuf(rawData); + } catch (InvalidProtocolBufferException e) { + throw new IllegalArgumentException( + "Can't encode the the raw data from the byte array", e); + } + } catch (BufferUnderflowException e) { throw new IllegalArgumentException( "Can't encode the the raw data from the byte array", e); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java index 518953f91c62..391759a8df54 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.io.Text; +import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.TestSSLFactory; import org.apache.hadoop.security.token.Token; @@ -327,4 +328,22 @@ public void testTokenSerialization() throws IOException { idDecode.readFields(in); Assert.assertEquals(idEncode, idDecode); } + + @Test + public void testTokenPersistence() throws IOException { + OzoneTokenIdentifier idWrite = getIdentifierInst(); + idWrite.setOmServiceId("defaultServiceId"); + + byte[] oldIdBytes = 
idWrite.getBytes(); + TokenIdentifierCodec idCodec = new TokenIdentifierCodec(); + + OzoneTokenIdentifier idRead = null; + try { + idRead = idCodec.fromPersistedFormat(oldIdBytes); + } catch (IOException ex) { + Assert.fail("Should not fail to load old token format"); + } + Assert.assertEquals("Deserialize Serialized Token should equal.", + idWrite, idRead); + } } \ No newline at end of file From 81c6d4df617d7e46a4fc1511b347813ff79d87b6 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 17 Jul 2020 06:02:31 +0200 Subject: [PATCH 034/165] HDDS-3612. Allow mounting bucket under other volume (#1104) --- .../org/apache/hadoop/ozone/OzoneConsts.java | 2 + .../hadoop/ozone/client/BucketArgs.java | 33 +- .../hadoop/ozone/client/OzoneBucket.java | 23 +- .../hadoop/ozone/client/rpc/RpcClient.java | 11 +- .../apache/hadoop/ozone/audit/OMAction.java | 5 - .../ozone/om/exceptions/OMException.java | 4 +- .../om/helpers/BucketEncryptionKeyInfo.java | 4 + .../hadoop/ozone/om/helpers/OmBucketInfo.java | 142 ++++++-- .../hadoop/ozone/om/helpers/OmKeyArgs.java | 18 + .../ozone/om/helpers/TestOmBucketInfo.java | 22 +- .../dist/src/main/compose/ozone/test.sh | 18 +- .../dist/src/main/compose/ozonesecure/test.sh | 13 +- .../dist/src/main/smoketest/basic/links.robot | 152 ++++++++ .../dist/src/main/smoketest/commonlib.robot | 34 +- .../dist/src/main/smoketest/lib/os.robot | 49 +++ .../src/main/smoketest/lib/os_tests.robot | 38 ++ .../src/main/smoketest/ozone-lib/shell.robot | 48 +++ .../smoketest/ozone-lib/shell_tests.robot | 58 +++ .../src/main/smoketest/ozonefs/ozonefs.robot | 2 +- .../src/main/smoketest/ozonefs/setup.robot | 16 +- .../dist/src/main/smoketest/robot.robot | 81 +++++ .../main/smoketest/s3/MultipartUpload.robot | 7 +- .../src/main/smoketest/s3/bucketdelete.robot | 12 +- .../src/main/smoketest/s3/commonawslib.robot | 33 +- .../apache/hadoop/ozone/om/TestOmMetrics.java | 100 +++--- .../src/main/proto/OmClientProtocol.proto | 4 + .../hadoop/ozone/om/BucketManagerImpl.java | 112 +++--- .../apache/hadoop/ozone/om/OzoneManager.java | 334 +++++++++++++----- .../hadoop/ozone/om/ResolvedBucket.java | 111 ++++++ .../request/bucket/OMBucketCreateRequest.java | 14 + .../file/OMDirectoryCreateRequest.java | 4 + .../om/request/file/OMFileCreateRequest.java | 6 +- .../request/key/OMAllocateBlockRequest.java | 9 +- .../om/request/key/OMKeyCommitRequest.java | 27 +- .../om/request/key/OMKeyCreateRequest.java | 8 +- .../om/request/key/OMKeyDeleteRequest.java | 15 +- .../om/request/key/OMKeyRenameRequest.java | 19 +- .../ozone/om/request/key/OMKeyRequest.java | 11 + .../om/request/key/OMKeysDeleteRequest.java | 48 ++- .../om/request/key/OMTrashRecoverRequest.java | 7 + .../S3InitiateMultipartUploadRequest.java | 33 +- .../S3MultipartUploadAbortRequest.java | 25 +- .../S3MultipartUploadCommitPartRequest.java | 42 +-- .../S3MultipartUploadCompleteRequest.java | 71 ++-- .../file/TestOMDirectoryCreateRequest.java | 4 + .../om/request/key/TestOMKeyRequest.java | 8 + .../s3/multipart/TestS3MultipartRequest.java | 10 + .../hadoop/fs/ozone/BasicOzoneFileSystem.java | 4 +- .../ozone/shell/bucket/BucketCommands.java | 1 + .../ozone/shell/bucket/LinkBucketHandler.java | 79 +++++ 50 files changed, 1508 insertions(+), 423 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/smoketest/basic/links.robot create mode 100644 hadoop-ozone/dist/src/main/smoketest/lib/os.robot create mode 100644 hadoop-ozone/dist/src/main/smoketest/lib/os_tests.robot create mode 100644 
hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot create mode 100644 hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot create mode 100644 hadoop-ozone/dist/src/main/smoketest/robot.robot create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index a473948dcc17..e340b3231491 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -298,6 +298,8 @@ private OzoneConsts() { public static final String BUCKET_ENCRYPTION_KEY = "bucketEncryptionKey"; public static final String DELETED_KEYS_LIST = "deletedKeysList"; public static final String UNDELETED_KEYS_LIST = "unDeletedKeysList"; + public static final String SOURCE_VOLUME = "sourceVolume"; + public static final String SOURCE_BUCKET = "sourceBucket"; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java index 5bae15ddfe11..6c5d1dd909d3 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java @@ -54,6 +54,8 @@ public final class BucketArgs { * Bucket encryption key name. */ private String bucketEncryptionKey; + private final String sourceVolume; + private final String sourceBucket; /** * Private constructor, constructed via builder. @@ -62,15 +64,19 @@ public final class BucketArgs { * @param acls list of ACLs. * @param metadata map of bucket metadata * @param bucketEncryptionKey bucket encryption key name + * @param sourceVolume + * @param sourceBucket */ private BucketArgs(Boolean versioning, StorageType storageType, - List acls, Map metadata, - String bucketEncryptionKey) { + List acls, Map metadata, + String bucketEncryptionKey, String sourceVolume, String sourceBucket) { this.acls = acls; this.versioning = versioning; this.storageType = storageType; this.metadata = metadata; this.bucketEncryptionKey = bucketEncryptionKey; + this.sourceVolume = sourceVolume; + this.sourceBucket = sourceBucket; } /** @@ -123,6 +129,14 @@ public static BucketArgs.Builder newBuilder() { return new BucketArgs.Builder(); } + public String getSourceVolume() { + return sourceVolume; + } + + public String getSourceBucket() { + return sourceBucket; + } + /** * Builder for OmBucketInfo. */ @@ -132,6 +146,8 @@ public static class Builder { private List acls; private Map metadata; private String bucketEncryptionKey; + private String sourceVolume; + private String sourceBucket; public Builder() { metadata = new HashMap<>(); @@ -161,13 +177,24 @@ public BucketArgs.Builder setBucketEncryptionKey(String bek) { this.bucketEncryptionKey = bek; return this; } + + public BucketArgs.Builder setSourceVolume(String volume) { + sourceVolume = volume; + return this; + } + + public BucketArgs.Builder setSourceBucket(String bucket) { + sourceBucket = bucket; + return this; + } + /** * Constructs the BucketArgs. * @return instance of BucketArgs. 
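As a rough client-side sketch of what the new source fields enable (names are hypothetical and an OzoneClient instance is assumed; the CLI equivalent added by this patch is `ozone sh bucket link`):

    // Create "vol2/link" as a link bucket that resolves to the existing "vol1/bucket1".
    BucketArgs linkArgs = BucketArgs.newBuilder()
        .setSourceVolume("vol1")
        .setSourceBucket("bucket1")
        .build();
    client.getObjectStore().getVolume("vol2").createBucket("link", linkArgs);

Reads and writes against "vol2/link" then pass through to the source bucket, which is what the links.robot smoke tests below exercise.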
*/ public BucketArgs build() { return new BucketArgs(versioning, storageType, acls, metadata, - bucketEncryptionKey); + bucketEncryptionKey, sourceVolume, sourceBucket); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index d22b846e1c85..79712bbfddb2 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -109,6 +109,8 @@ public class OzoneBucket extends WithMetadata { private OzoneObj ozoneObj; + private String sourceVolume; + private String sourceBucket; private OzoneBucket(ConfigurationSource conf, String volumeName, String bucketName, ReplicationFactor defaultReplication, @@ -138,11 +140,13 @@ private OzoneBucket(ConfigurationSource conf, String volumeName, .setResType(OzoneObj.ResourceType.BUCKET) .setStoreType(OzoneObj.StoreType.OZONE).build(); } + @SuppressWarnings("parameternumber") public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, String volumeName, String bucketName, StorageType storageType, Boolean versioning, long creationTime, Map metadata, - String encryptionKeyName) { + String encryptionKeyName, + String sourceVolume, String sourceBucket) { this(conf, volumeName, bucketName, null, null, proxy); this.storageType = storageType; this.versioning = versioning; @@ -150,6 +154,8 @@ public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, this.creationTime = Instant.ofEpochMilli(creationTime); this.metadata = metadata; this.encryptionKeyName = encryptionKeyName; + this.sourceVolume = sourceVolume; + this.sourceBucket = sourceBucket; modificationTime = Instant.now(); if (modificationTime.isBefore(this.creationTime)) { modificationTime = Instant.ofEpochSecond( @@ -161,9 +167,10 @@ public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, String volumeName, String bucketName, StorageType storageType, Boolean versioning, long creationTime, long modificationTime, - Map metadata, String encryptionKeyName) { + Map metadata, String encryptionKeyName, + String sourceVolume, String sourceBucket) { this(conf, proxy, volumeName, bucketName, storageType, versioning, - creationTime, metadata, encryptionKeyName); + creationTime, metadata, encryptionKeyName, sourceVolume, sourceBucket); this.modificationTime = Instant.ofEpochMilli(modificationTime); } @@ -306,6 +313,16 @@ public String getEncryptionKeyName() { return encryptionKeyName; } + public String getSourceVolume() { + return sourceVolume; + } + + public String getSourceBucket() { + return sourceBucket; + } + + /** + * Builder for OmBucketInfo. /** * Adds ACLs to the Bucket. 
* @param addAcl ACL to be added diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 1d69b0ae33c5..46df61a4d5e8 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -448,6 +448,8 @@ public void createBucket( .setIsVersionEnabled(isVersionEnabled) .addAllMetadata(bucketArgs.getMetadata()) .setStorageType(storageType) + .setSourceVolume(bucketArgs.getSourceVolume()) + .setSourceBucket(bucketArgs.getSourceBucket()) .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())); if (bek != null) { @@ -614,7 +616,10 @@ public OzoneBucket getBucketDetails( bucketInfo.getModificationTime(), bucketInfo.getMetadata(), bucketInfo.getEncryptionKeyInfo() != null ? bucketInfo - .getEncryptionKeyInfo().getKeyName() : null); + .getEncryptionKeyInfo().getKeyName() : null, + bucketInfo.getSourceVolume(), + bucketInfo.getSourceBucket() + ); } @Override @@ -635,7 +640,9 @@ public List listBuckets(String volumeName, String bucketPrefix, bucket.getModificationTime(), bucket.getMetadata(), bucket.getEncryptionKeyInfo() != null ? bucket - .getEncryptionKeyInfo().getKeyName() : null)) + .getEncryptionKeyInfo().getKeyName() : null, + bucket.getSourceVolume(), + bucket.getSourceBucket())) .collect(Collectors.toList()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index 31cccacb0c7a..6b34e8180026 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -40,10 +40,6 @@ public enum OMAction implements AuditAction { PURGE_KEYS, DELETE_KEYS, - // S3 Bucket - CREATE_S3_BUCKET, - DELETE_S3_BUCKET, - // READ Actions CHECK_VOLUME_ACCESS, LIST_BUCKETS, @@ -53,7 +49,6 @@ public enum OMAction implements AuditAction { READ_VOLUME, READ_BUCKET, READ_KEY, - LIST_S3BUCKETS, INITIATE_MULTIPART_UPLOAD, COMMIT_MULTIPART_UPLOAD_PARTKEY, COMPLETE_MULTIPART_UPLOAD, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index e2b341884318..bab8d94fc410 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -223,6 +223,8 @@ public enum ResultCodes { INVALID_VOLUME_NAME, - PARTIAL_DELETE + PARTIAL_DELETE, + + DETECTED_LOOP_IN_BUCKET_LINKS, } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java index e1ae0bbfbd86..c1801388bfe7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java @@ -49,6 +49,10 @@ public CryptoProtocolVersion getVersion() { return version; } + public BucketEncryptionKeyInfo copy() { + return new BucketEncryptionKeyInfo(version, suite, keyName); + } + /** * Builder for BucketEncryptionKeyInfo. 
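The new DETECTED_LOOP_IN_BUCKET_LINKS result code exists because the OM follows sourceVolume/sourceBucket chains when a request lands on a link bucket. A simplified sketch of the idea only (not the actual resolution code, which this patch adds via OzoneManager and ResolvedBucket; error handling and locking are omitted):

    // Follow link buckets to the real source, failing if a bucket repeats.
    Set<String> visited = new HashSet<>();
    OmBucketInfo info = metadataManager.getBucketTable()
        .get(metadataManager.getBucketKey(volumeName, bucketName));
    while (info != null && info.isLink()) {
      if (!visited.add(info.getVolumeName() + "/" + info.getBucketName())) {
        throw new OMException("Detected loop in bucket links",
            OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS);
      }
      info = metadataManager.getBucketTable().get(metadataManager.getBucketKey(
          info.getSourceVolume(), info.getSourceBucket()));
    }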
*/ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index e9a8cbcd6fb0..abbe3955f6b1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -76,6 +76,10 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { */ private BucketEncryptionKeyInfo bekInfo; + private final String sourceVolume; + + private final String sourceBucket; + /** * Private constructor, constructed via builder. * @param volumeName - Volume name. @@ -87,19 +91,23 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { * @param modificationTime - Bucket modification time. * @param metadata - metadata. * @param bekInfo - bucket encryption key info. + * @param sourceVolume - source volume for bucket links, null otherwise + * @param sourceBucket - source bucket for bucket links, null otherwise */ @SuppressWarnings("checkstyle:ParameterNumber") private OmBucketInfo(String volumeName, - String bucketName, - List acls, - boolean isVersionEnabled, - StorageType storageType, - long creationTime, - long modificationTime, - long objectID, - long updateID, - Map metadata, - BucketEncryptionKeyInfo bekInfo) { + String bucketName, + List acls, + boolean isVersionEnabled, + StorageType storageType, + long creationTime, + long modificationTime, + long objectID, + long updateID, + Map metadata, + BucketEncryptionKeyInfo bekInfo, + String sourceVolume, + String sourceBucket) { this.volumeName = volumeName; this.bucketName = bucketName; this.acls = acls; @@ -111,6 +119,8 @@ private OmBucketInfo(String volumeName, this.updateID = updateID; this.metadata = metadata; this.bekInfo = bekInfo; + this.sourceVolume = sourceVolume; + this.sourceBucket = sourceBucket; } /** @@ -208,6 +218,18 @@ public BucketEncryptionKeyInfo getEncryptionKeyInfo() { return bekInfo; } + public String getSourceVolume() { + return sourceVolume; + } + + public String getSourceBucket() { + return sourceBucket; + } + + public boolean isLink() { + return sourceVolume != null && sourceBucket != null; + } + /** * Returns new builder class that builds a OmBucketInfo. * @@ -235,6 +257,10 @@ public Map toAuditMap() { (bekInfo != null) ? bekInfo.getKeyName() : null); auditMap.put(OzoneConsts.MODIFICATION_TIME, String.valueOf(this.modificationTime)); + if (isLink()) { + auditMap.put(OzoneConsts.SOURCE_VOLUME, sourceVolume); + auditMap.put(OzoneConsts.SOURCE_BUCKET, sourceBucket); + } return auditMap; } @@ -242,7 +268,22 @@ public Map toAuditMap() { * Return a new copy of the object. */ public OmBucketInfo copyObject() { - OmBucketInfo.Builder builder = new OmBucketInfo.Builder() + Builder builder = toBuilder(); + + if (bekInfo != null) { + builder.setBucketEncryptionKey(bekInfo.copy()); + } + + builder.acls.clear(); + acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), + acl.getName(), (BitSet) acl.getAclBitSet().clone(), + acl.getAclScope()))); + + return builder.build(); + } + + public Builder toBuilder() { + return new Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setStorageType(storageType) @@ -251,19 +292,11 @@ public OmBucketInfo copyObject() { .setModificationTime(modificationTime) .setObjectID(objectID) .setUpdateID(updateID) - .setBucketEncryptionKey(bekInfo != null ? 
- new BucketEncryptionKeyInfo(bekInfo.getVersion(), - bekInfo.getSuite(), bekInfo.getKeyName()) : null); - - acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); - - if (metadata != null) { - metadata.forEach((k, v) -> builder.addMetadata(k, v)); - } - return builder.build(); - + .setBucketEncryptionKey(bekInfo) + .setSourceVolume(sourceVolume) + .setSourceBucket(sourceBucket) + .setAcls(acls) + .addAllMetadata(metadata); } /** @@ -281,6 +314,8 @@ public static class Builder { private long updateID; private Map metadata; private BucketEncryptionKeyInfo bekInfo; + private String sourceVolume; + private String sourceBucket; public Builder() { //Default values @@ -362,6 +397,16 @@ public Builder setBucketEncryptionKey( return this; } + public Builder setSourceVolume(String volume) { + this.sourceVolume = volume; + return this; + } + + public Builder setSourceBucket(String bucket) { + this.sourceBucket = bucket; + return this; + } + /** * Constructs the OmBucketInfo. * @return instance of OmBucketInfo. @@ -375,7 +420,7 @@ public OmBucketInfo build() { return new OmBucketInfo(volumeName, bucketName, acls, isVersionEnabled, storageType, creationTime, modificationTime, objectID, updateID, - metadata, bekInfo); + metadata, bekInfo, sourceVolume, sourceBucket); } } @@ -397,6 +442,12 @@ public BucketInfo getProtobuf() { if (bekInfo != null && bekInfo.getKeyName() != null) { bib.setBeinfo(OMPBHelper.convert(bekInfo)); } + if (sourceVolume != null) { + bib.setSourceVolume(sourceVolume); + } + if (sourceBucket != null) { + bib.setSourceBucket(sourceBucket); + } return bib.build(); } @@ -428,17 +479,28 @@ public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) { if (bucketInfo.hasBeinfo()) { obib.setBucketEncryptionKey(OMPBHelper.convert(bucketInfo.getBeinfo())); } + if (bucketInfo.hasSourceVolume()) { + obib.setSourceVolume(bucketInfo.getSourceVolume()); + } + if (bucketInfo.hasSourceBucket()) { + obib.setSourceBucket(bucketInfo.getSourceBucket()); + } return obib.build(); } @Override public String getObjectInfo() { + String sourceInfo = sourceVolume != null && sourceBucket != null + ? 
", source='" + sourceVolume + "/" + sourceBucket + "'" + : ""; + return "OMBucketInfo{" + - "volume='" + volumeName + '\'' + - ", bucket='" + bucketName + '\'' + - ", isVersionEnabled='" + isVersionEnabled + '\'' + - ", storageType='" + storageType + '\'' + - ", creationTime='" + creationTime + '\'' + + "volume='" + volumeName + "'" + + ", bucket='" + bucketName + "'" + + ", isVersionEnabled='" + isVersionEnabled + "'" + + ", storageType='" + storageType + "'" + + ", creationTime='" + creationTime + "'" + + sourceInfo + '}'; } @@ -460,6 +522,8 @@ public boolean equals(Object o) { storageType == that.storageType && objectID == that.objectID && updateID == that.updateID && + Objects.equals(sourceVolume, that.sourceVolume) && + Objects.equals(sourceBucket, that.sourceBucket) && Objects.equals(metadata, that.metadata) && Objects.equals(bekInfo, that.bekInfo); } @@ -468,4 +532,22 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(volumeName, bucketName); } + + @Override + public String toString() { + return "OmBucketInfo{" + + "volumeName='" + volumeName + "'" + + ", bucketName='" + bucketName + "'" + + ", acls=" + acls + + ", isVersionEnabled=" + isVersionEnabled + + ", storageType=" + storageType + + ", creationTime=" + creationTime + + ", bekInfo=" + bekInfo + + ", sourceVolume='" + sourceVolume + "'" + + ", sourceBucket='" + sourceBucket + "'" + + ", objectID=" + objectID + + ", updateID=" + updateID + + ", metadata=" + metadata + + '}'; + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index 2a882a43a926..c08c988fc7e3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -162,6 +162,24 @@ public void addLocationInfo(OmKeyLocationInfo locationInfo) { locationInfoList.add(locationInfo); } + public OmKeyArgs.Builder toBuilder() { + return new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setDataSize(dataSize) + .setType(type) + .setFactor(factor) + .setLocationInfoList(locationInfoList) + .setIsMultipartKey(isMultipartKey) + .setMultipartUploadID(multipartUploadID) + .setMultipartUploadPartNumber(multipartUploadPartNumber) + .addAllMetadata(metadata) + .setRefreshPipeline(refreshPipeline) + .setSortDatanodesInPipeline(sortDatanodesInPipeline) + .setAcls(acls); + } + /** * Builder class of OmKeyArgs. 
*/ diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java index 15468c7b2f62..650fc910289d 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java @@ -42,10 +42,21 @@ public void protobufConversion() { .setStorageType(StorageType.ARCHIVE) .build(); - OmBucketInfo afterSerialization = - OmBucketInfo.getFromProtobuf(bucket.getProtobuf()); + Assert.assertEquals(bucket, + OmBucketInfo.getFromProtobuf(bucket.getProtobuf())); + } + + @Test + public void protobufConversionOfBucketLink() { + OmBucketInfo bucket = OmBucketInfo.newBuilder() + .setBucketName("bucket") + .setVolumeName("vol1") + .setSourceVolume("otherVol") + .setSourceBucket("someBucket") + .build(); - Assert.assertEquals(bucket, afterSerialization); + Assert.assertEquals(bucket, + OmBucketInfo.getFromProtobuf(bucket.getProtobuf())); } @Test @@ -66,7 +77,10 @@ public void testClone() { /* Clone an omBucketInfo. */ OmBucketInfo cloneBucketInfo = omBucketInfo.copyObject(); - Assert.assertEquals(omBucketInfo, cloneBucketInfo); + Assert.assertNotSame(omBucketInfo, cloneBucketInfo); + Assert.assertEquals("Expected " + omBucketInfo + " and " + cloneBucketInfo + + " to be equal", + omBucketInfo, cloneBucketInfo); /* Reset acl & check not equal. */ omBucketInfo.setAcls(Collections.singletonList(new OzoneAcl( diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index e0b1d62ade08..c40339ec6b0e 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -26,24 +26,24 @@ source "$COMPOSE_DIR/../testlib.sh" start_docker_env -#Due to the limitation of the current auditparser test, it should be the -#first test in a clean cluster. - -#Disabling for now, audit parser tool during parse getting exception. 
-#execute_robot_test om auditparser - execute_robot_test scm lib +execute_robot_test scm ozone-lib execute_robot_test scm basic execute_robot_test scm gdpr -execute_robot_test scm -v SCHEME:ofs ozonefs/ozonefs.robot -execute_robot_test scm -v SCHEME:o3fs ozonefs/ozonefs.robot +for scheme in ofs o3fs; do + for bucket in link bucket; do + execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} ozonefs/ozonefs.robot + done +done execute_robot_test scm security/ozone-secure-token.robot -execute_robot_test scm s3 +for bucket in link generated; do + execute_robot_test scm -v BUCKET:${bucket} s3 +done execute_robot_test scm recon diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh index 9c3f3ab83cc7..ce50fa02fc0e 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh @@ -31,10 +31,15 @@ execute_robot_test scm basic execute_robot_test scm security -execute_robot_test scm -v SCHEME:ofs ozonefs/ozonefs.robot -execute_robot_test scm -v SCHEME:o3fs ozonefs/ozonefs.robot - -execute_robot_test s3g s3 +for scheme in ofs o3fs; do + for bucket in link bucket; do + execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} ozonefs/ozonefs.robot + done +done + +for bucket in link generated; do + execute_robot_test s3g -v BUCKET:${bucket} s3 +done #expects 4 pipelines, should be run before #admincli which creates STANDALONE pipeline diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/links.robot b/hadoop-ozone/dist/src/main/smoketest/basic/links.robot new file mode 100644 index 000000000000..71c046e18a25 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/basic/links.robot @@ -0,0 +1,152 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test bucket links via Ozone CLI +Library OperatingSystem +Resource ../commonlib.robot +Resource ../ozone-lib/shell.robot +Test Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab +Test Timeout 2 minute +Suite Setup Create volumes + +*** Variables *** +${prefix} generated + +*** Keywords *** +Create volumes + ${random} = Generate Random String 5 [NUMBERS] + Set Suite Variable ${source} ${random}-source + Set Suite Variable ${target} ${random}-target + Execute ozone sh volume create ${source} + Execute ozone sh volume create ${target} + Run Keyword if '${SECURITY_ENABLED}' == 'true' Setup ACL tests + +Setup ACL tests + Execute ozone sh bucket create ${source}/readable-bucket + Execute ozone sh key put ${source}/readable-bucket/key-in-readable-bucket /etc/passwd + Execute ozone sh bucket create ${source}/unreadable-bucket + Execute ozone sh bucket link ${source}/readable-bucket ${target}/readable-link + Execute ozone sh bucket link ${source}/readable-bucket ${target}/unreadable-link + Execute ozone sh bucket link ${source}/unreadable-bucket ${target}/link-to-unreadable-bucket + Execute ozone sh volume addacl --acl user:testuser2/scm@EXAMPLE.COM:r ${target} + Execute ozone sh volume addacl --acl user:testuser2/scm@EXAMPLE.COM:rl ${source} + Execute ozone sh bucket addacl --acl user:testuser2/scm@EXAMPLE.COM:rl ${source}/readable-bucket + Execute ozone sh bucket addacl --acl user:testuser2/scm@EXAMPLE.COM:r ${target}/readable-link + Execute ozone sh bucket addacl --acl user:testuser2/scm@EXAMPLE.COM:r ${target}/link-to-unreadable-bucket + +Can follow link with read access + Execute kdestroy + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute And Ignore Error ozone sh key list ${target}/readable-link + Should Contain ${result} key-in-readable-bucket + +Cannot follow link without read access + Execute kdestroy + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute And Ignore Error ozone sh key list ${target}/unreadable-link + Should Contain ${result} PERMISSION_DENIED + +ACL verified on source bucket + Execute kdestroy + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute ozone sh bucket info ${target}/link-to-unreadable-bucket + Should Contain ${result} link-to-unreadable-bucket + Should Not Contain ${result} PERMISSION_DENIED + ${result} = Execute And Ignore Error ozone sh key list ${target}/link-to-unreadable-bucket + Should Contain ${result} PERMISSION_DENIED + +*** Test Cases *** +Link to non-existent bucket + Execute ozone sh bucket link ${source}/no-such-bucket ${target}/dangling-link + ${result} = Execute And Ignore Error ozone sh key list ${target}/dangling-link + Should Contain ${result} BUCKET_NOT_FOUND + +Key create passthrough + Execute ozone sh bucket link ${source}/bucket1 ${target}/link1 + Execute ozone sh bucket create ${source}/bucket1 + Execute ozone sh key put ${target}/link1/key1 /etc/passwd + Key Should Match Local File ${target}/link1/key1 /etc/passwd + +Key read passthrough + Execute ozone sh key put ${source}/bucket1/key2 /opt/hadoop/NOTICE.txt + Key Should Match Local File ${source}/bucket1/key2 /opt/hadoop/NOTICE.txt + +Key list passthrough + ${target_list} = Execute ozone sh key list ${target}/link1 | jq -r '.name' + ${source_list} = Execute ozone sh key list ${source}/bucket1 | jq -r '.name' + Should Be Equal ${target_list} ${source_list} + Should Contain ${source_list} key1 + Should Contain ${source_list} key2 + +Key 
delete passthrough + Execute ozone sh key delete ${target}/link1/key2 + ${source_list} = Execute ozone sh key list ${source}/bucket1 | jq -r '.name' + Should Not Contain ${source_list} key2 + +Bucket list contains links + ${result} = Execute ozone sh bucket list ${target} + Should Contain ${result} link1 + Should Contain ${result} dangling-link + +Bucket info shows source + ${result} = Execute ozone sh bucket info ${target}/link1 | jq -r '.sourceVolume, .sourceBucket' | xargs + Should Be Equal ${result} ${source} bucket1 + +Source and target have separate ACLs + Execute ozone sh bucket addacl --acl user:user1:rwxy ${target}/link1 + Verify ACL bucket ${target}/link1 USER user1 READ WRITE READ_ACL WRITE_ACL + Verify ACL bucket ${source}/bucket1 USER user1 ${EMPTY} + + Execute ozone sh bucket addacl --acl group:group2:r ${source}/bucket1 + Verify ACL bucket ${target}/link1 GROUP group2 ${EMPTY} + Verify ACL bucket ${source}/bucket1 GROUP group2 READ + +Buckets and links share namespace + Execute ozone sh bucket link ${source}/bucket2 ${target}/link2 + ${result} = Execute And Ignore Error ozone sh bucket create ${target}/link2 + Should Contain ${result} BUCKET_ALREADY_EXISTS + + Execute ozone sh bucket create ${target}/bucket3 + ${result} = Execute And Ignore Error ozone sh bucket link ${source}/bucket1 ${target}/bucket3 + Should Contain ${result} BUCKET_ALREADY_EXISTS + +Can follow link with read access + Run Keyword if '${SECURITY_ENABLED}' == 'true' Can follow link with read access + +Cannot follow link without read access + Run Keyword if '${SECURITY_ENABLED}' == 'true' Cannot follow link without read access + +ACL verified on source bucket + Run Keyword if '${SECURITY_ENABLED}' == 'true' ACL verified on source bucket + +Loop in link chain is detected + Execute ozone sh bucket link ${target}/loop1 ${target}/loop2 + Execute ozone sh bucket link ${target}/loop2 ${target}/loop3 + Execute ozone sh bucket link ${target}/loop3 ${target}/loop1 + ${result} = Execute And Ignore Error ozone sh key list ${target}/loop2 + Should Contain ${result} DETECTED_LOOP + +Multiple links to same bucket are allowed + Execute ozone sh bucket link ${source}/bucket1 ${target}/link3 + Execute ozone sh key put ${target}/link3/key3 /etc/group + Key Should Match Local File ${target}/link1/key3 /etc/group + +Source bucket not affected by deleting link + Execute ozone sh bucket delete ${target}/link1 + ${bucket_list} = Execute ozone sh bucket list ${target} + Should Not Contain ${bucket_list} link1 + ${source_list} = Execute ozone sh key list ${source}/bucket1 | jq -r '.name' + Should Contain ${source_list} key1 diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot index 407111a8030c..bf3b3e92d708 100644 --- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot @@ -18,44 +18,14 @@ Library OperatingSystem Library String Library BuiltIn +Resource lib/os.robot + *** Variables *** ${SECURITY_ENABLED} false ${OM_HA_PARAM} ${EMPTY} ${OM_SERVICE_ID} om *** Keywords *** -Execute - [arguments] ${command} - ${rc} ${output} = Run And Return Rc And Output ${command} - Log ${output} - Should Be Equal As Integers ${rc} 0 - [return] ${output} - -Execute And Ignore Error - [arguments] ${command} - ${rc} ${output} = Run And Return Rc And Output ${command} - Log ${output} - [return] ${output} - -Execute and checkrc - [arguments] ${command} ${expected_error_code} - ${rc} ${output} = Run And Return Rc And Output 
${command} - Log ${output} - Should Be Equal As Integers ${rc} ${expected_error_code} - [return] ${output} - -Compare files - [arguments] ${file1} ${file2} - ${checksumbefore} = Execute md5sum ${file1} | awk '{print $1}' - ${checksumafter} = Execute md5sum ${file2} | awk '{print $1}' - Should Be Equal ${checksumbefore} ${checksumafter} - -Install aws cli - ${rc} ${output} = Run And Return Rc And Output which apt-get - Run Keyword if '${rc}' == '0' Install aws cli s3 debian - ${rc} ${output} = Run And Return Rc And Output yum --help - Run Keyword if '${rc}' == '0' Install aws cli s3 centos - Kinit HTTP user ${hostname} = Execute hostname Wait Until Keyword Succeeds 2min 10sec Execute kinit -k HTTP/${hostname}@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab diff --git a/hadoop-ozone/dist/src/main/smoketest/lib/os.robot b/hadoop-ozone/dist/src/main/smoketest/lib/os.robot new file mode 100644 index 000000000000..af927f9af7c0 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/lib/os.robot @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Library OperatingSystem + +*** Keywords *** +Execute + [arguments] ${command} + Run Keyword And Return Execute and checkrc ${command} 0 + +Execute And Ignore Error + [arguments] ${command} + ${rc} ${output} = Run And Return Rc And Output ${command} + Log ${output} + [return] ${output} + +Execute and checkrc + [arguments] ${command} ${expected_error_code} + ${rc} ${output} = Run And Return Rc And Output ${command} + Log ${output} + Should Be Equal As Integers ${rc} ${expected_error_code} + [return] ${output} + +Compare files + [arguments] ${file1} ${file2} + ${checksumbefore} = Execute md5sum ${file1} | awk '{print $1}' + ${checksumafter} = Execute md5sum ${file2} | awk '{print $1}' + Should Be Equal ${checksumbefore} ${checksumafter} + +Create Random File + ${postfix} = Generate Random String 5 [NUMBERS] + ${tmpfile} = Set Variable /tmp/tempfile-${postfix} + File Should Not Exist ${tmpfile} + ${content} = Set Variable "Random string" + Create File ${tmpfile} ${content} + [Return] ${tmpfile} diff --git a/hadoop-ozone/dist/src/main/smoketest/lib/os_tests.robot b/hadoop-ozone/dist/src/main/smoketest/lib/os_tests.robot new file mode 100644 index 000000000000..dd4beaf3c161 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/lib/os_tests.robot @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Resource os.robot + + +*** Test Cases *** + +Execute + ${output} = Execute echo 42 + Should Be Equal ${output} 42 + +Execute failing command + Run Keyword And Expect Error * Execute false + +Execute And Ignore Error + ${output} = Execute And Ignore Error echo 123 && false + Should Be Equal ${output} 123 + +Execute and checkrc + ${output} = Execute and checkrc echo failure && exit 1 1 + Should Be Equal ${output} failure + +Execute and checkrc RC mismatch + Run Keyword And Expect Error * Execute and checkrc echo failure && exit 3 1 diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot new file mode 100644 index 000000000000..2e56ae40eeb5 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot @@ -0,0 +1,48 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Resource ../lib/os.robot +Library String + + +*** Keywords *** +Bucket Exists + [arguments] ${bucket} + ${rc} ${output} = Run And Return Rc And Output timeout 15 ozone sh bucket info ${bucket} + Return From Keyword If ${rc} != 0 ${FALSE} + Return From Keyword If 'VOLUME_NOT_FOUND' in '''${output}''' ${FALSE} + Return From Keyword If 'BUCKET_NOT_FOUND' in '''${output}''' ${FALSE} + [Return] ${TRUE} + +Compare Key With Local File + [arguments] ${key} ${file} + ${postfix} = Generate Random String 5 [NUMBERS] + ${tmpfile} = Set Variable /tmp/tempkey-${postfix} + Execute ozone sh key get -f ${key} ${tmpfile} + ${rc} = Run And Return Rc diff -q ${file} ${tmpfile} + Execute rm -f ${tmpfile} + ${result} = Set Variable If ${rc} == 0 ${TRUE} ${FALSE} + [Return] ${result} + +Key Should Match Local File + [arguments] ${key} ${file} + ${matches} = Compare Key With Local File ${key} ${file} + Should Be True ${matches} + +Verify ACL + [arguments] ${object_type} ${object} ${type} ${name} ${acls} + ${actual_acls} = Execute ozone sh ${object_type} getacl ${object} | jq -r '.[] | select(.type == "${type}") | select(.name == "${name}") | .aclList[]' | xargs + Should Be Equal ${acls} ${actual_acls} diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot new file mode 100644 index 000000000000..56fbcf8b61f0 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot @@ -0,0 +1,58 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Resource ../lib/os.robot +Resource shell.robot + + +*** Variables *** +${OM_SERVICE_ID} om + + +*** Test Cases *** + +Bucket Exists should not if No Such Volume + ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/no-such-volume/any-bucket + Should Be Equal ${exists} ${FALSE} + +Bucket Exists should not if No Such Bucket + Execute And Ignore Error ozone sh volume create o3://${OM_SERVICE_ID}/vol1 + ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/vol1/no-such-bucket + Should Be Equal ${exists} ${FALSE} + +Bucket Exists + Execute And Ignore Error ozone sh bucket create o3://${OM_SERVICE_ID}/vol1/bucket + ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/vol1/bucket + Should Be Equal ${exists} ${TRUE} + +Bucket Exists should not if No Such OM service + ${exists} = Bucket Exists o3://no-such-host/any-volume/any-bucket + Should Be Equal ${exists} ${FALSE} + + +Key Should Match Local File + [Setup] Execute ozone sh key put o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd + Key Should Match Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd + +Compare Key With Local File with Different File + ${random_file} = Create Random File + ${matches} = Compare Key With Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd ${random_file} + Should Be Equal ${matches} ${FALSE} + [Teardown] Remove File ${random_file} + +Compare Key With Local File if File Does Not Exist + ${matches} = Compare Key With Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd /no-such-file + Should Be Equal ${matches} ${FALSE} diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot index 6d0042b30496..450f1b6d9efc 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot @@ -19,7 +19,7 @@ Library OperatingSystem Resource ../commonlib.robot Resource setup.robot Test Timeout 5 minutes -Suite Setup Setup ${BUCKET_TYPE}s for FS test +Suite Setup Setup for FS test *** Test Cases *** List root diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot index 16e059ede721..441822d7fb3b 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot @@ -29,12 +29,12 @@ ${BUCKET_IN_VOL2} ${BUCKET_TYPE}3-${SCHEME} ${DEEP_DIR} test/${SCHEME}/dir *** Keywords *** -Setup buckets for FS test +Setup for FS test Create volumes for FS test - Create buckets for FS test + Run Keyword Create ${BUCKET_TYPE}s for FS test Sanity check for FS test Assign suite vars for FS test - Log Completed setup for ${SCHEME} tests in ${VOLUME}/${BUCKET} using FS base URL: ${BASE_URL} + Log Completed setup for ${SCHEME} tests with ${BUCKET_TYPE}s in ${VOLUME}/${BUCKET} using FS base URL: ${BASE_URL} Create volumes for FS test Execute And Ignore Error ozone sh volume create ${VOLUME} --quota 100TB @@ -45,6 +45,16 @@ Create buckets for FS test Execute ozone sh bucket create ${VOLUME}/${BUCKET2} Execute ozone sh bucket create ${VOL2}/${BUCKET_IN_VOL2} +Create links for FS test + Execute And Ignore Error ozone sh volume create ${VOLUME}-src --quota 100TB + Execute And Ignore Error ozone sh volume create ${VOL2}-src --quota 100TB + Execute ozone sh bucket create ${VOLUME}-src/${BUCKET}-src + Execute ozone sh bucket create ${VOLUME}-src/${BUCKET2}-src + Execute ozone sh bucket create ${VOL2}-src/${BUCKET_IN_VOL2}-src + Execute ozone sh bucket link 
${VOLUME}-src/${BUCKET}-src ${VOLUME}/${BUCKET} + Execute ozone sh bucket link ${VOLUME}-src/${BUCKET2}-src ${VOLUME}/${BUCKET2} + Execute ozone sh bucket link ${VOL2}-src/${BUCKET_IN_VOL2}-src ${VOL2}/${BUCKET_IN_VOL2} + Sanity check for FS test ${result} = Execute ozone sh volume list Should contain ${result} ${VOLUME} diff --git a/hadoop-ozone/dist/src/main/smoketest/robot.robot b/hadoop-ozone/dist/src/main/smoketest/robot.robot new file mode 100644 index 000000000000..d677ef3c743c --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/robot.robot @@ -0,0 +1,81 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Smoketest for Robot functions +Resource commonlib.robot +Test Timeout 5 minutes + +*** Test Cases *** + +Ensure Leading without Leading + ${result} = Ensure Leading / a/b + Should Be Equal ${result} /a/b + +Ensure Leading with Leading + ${result} = Ensure Leading _ _a_b_c + Should Be Equal ${result} _a_b_c + +Ensure Leading for empty + ${result} = Ensure Leading | ${EMPTY} + Should Be Equal ${result} | + + +Ensure Trailing without Trailing + ${result} = Ensure Trailing . x.y.z + Should Be Equal ${result} x.y.z. 
+ +Ensure Trailing with Trailing + ${result} = Ensure Trailing x axbxcx + Should Be Equal ${result} axbxcx + +Ensure Trailing for empty + ${result} = Ensure Trailing = ${EMPTY} + Should Be Equal ${result} = + + +Format o3fs URL without path + ${result} = Format o3fs URL vol1 bucket1 + Should Be Equal ${result} o3fs://bucket1.vol1/ + +Format o3fs URL with path + ${result} = Format o3fs URL vol1 bucket1 dir/file + Should Be Equal ${result} o3fs://bucket1.vol1/dir/file + + +Format ofs URL without path + ${result} = Format ofs URL vol1 bucket1 + Should Be Equal ${result} ofs://vol1/bucket1 + +Format ofs URL with path + ${result} = Format ofs URL vol1 bucket1 dir/file + Should Be Equal ${result} ofs://vol1/bucket1/dir/file + + +Format FS URL with ofs scheme + ${result} = Format FS URL ofs vol1 bucket1 + ${expected} = Format ofs URL vol1 bucket1 + Should Be Equal ${result} ${expected} + +Format FS URL with o3fs scheme + ${result} = Format FS URL o3fs vol1 bucket1 + ${expected} = Format o3fs URL vol1 bucket1 + Should Be Equal ${result} ${expected} + +Format FS URL with unsupported scheme + ${result} = Run Keyword And Expect Error * Format FS URL http org apache + Should Contain ${result} http + Should Contain ${result} nsupported + diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index 004a49645918..1c6827a16560 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -88,7 +88,7 @@ Test Multipart Upload Complete #read file and check the key ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key multipartKey1 /tmp/multipartKey1.result - Execute cat /tmp/part1 /tmp/part2 >> /tmp/multipartKey1 + Execute cat /tmp/part1 /tmp/part2 > /tmp/multipartKey1 Compare files /tmp/multipartKey1 /tmp/multipartKey1.result Test Multipart Upload Complete Entity too small @@ -156,7 +156,7 @@ Test Multipart Upload Complete Invalid part errors and complete mpu with few par Should contain ${result} ETag ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key multipartKey3 /tmp/multipartKey3.result - Execute cat /tmp/part1 /tmp/part3 >> /tmp/multipartKey3 + Execute cat /tmp/part1 /tmp/part3 > /tmp/multipartKey3 Compare files /tmp/multipartKey3 /tmp/multipartKey3.result Test abort Multipart upload @@ -237,7 +237,6 @@ Test Multipart Upload Put With Copy Should contain ${result} UploadId ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key copytest/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copytest/source - Should contain ${result} ${BUCKET} Should contain ${result} ETag Should contain ${result} LastModified ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 @@ -260,13 +259,11 @@ Test Multipart Upload Put With Copy and range Should contain ${result} UploadId ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=0-10485758 - Should contain ${result} ${BUCKET} Should contain ${result} ETag Should contain ${result} LastModified ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 2 --copy-source ${BUCKET}/copyrange/source 
--copy-source-range bytes=10485758-10485760 - Should contain ${result} ${BUCKET} Should contain ${result} ETag Should contain ${result} LastModified ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot index bcba30db94e3..ce7b8254b0d0 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot @@ -23,14 +23,20 @@ Test Timeout 5 minutes Suite Setup Setup s3 tests *** Variables *** -${ENDPOINT_URL} http://s3g:9878 ${BUCKET} generated +${ENDPOINT_URL} http://s3g:9878 + +*** Keywords *** +Create bucket to be deleted + ${bucket} = Run Keyword if '${BUCKET}' == 'link' Create link to-be-deleted + ... ELSE Run Keyword Create bucket + [return] ${bucket} *** Test Cases *** Delete existing bucket -# Bucket already is created in Test Setup. - Execute AWSS3APICli delete-bucket --bucket ${BUCKET} + ${bucket} = Create bucket to be deleted + Execute AWSS3APICli delete-bucket --bucket ${bucket} Delete non-existent bucket ${result} = Execute AWSS3APICli and checkrc delete-bucket --bucket nosuchbucket 255 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index 4595587c91af..c263988281b4 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -15,12 +15,13 @@ *** Settings *** Resource ../commonlib.robot -Resource ../commonlib.robot +Resource ../ozone-lib/shell.robot *** Variables *** +${ENDPOINT_URL} http://s3g:9878 ${OZONE_S3_HEADER_VERSION} v4 ${OZONE_S3_SET_CREDENTIALS} true -${BUCKET} bucket-999 +${BUCKET} generated *** Keywords *** Execute AWSS3APICli @@ -38,6 +39,12 @@ Execute AWSS3Cli ${output} = Execute aws s3 --endpoint-url ${ENDPOINT_URL} ${command} [return] ${output} +Install aws cli + ${rc} ${output} = Run And Return Rc And Output which apt-get + Run Keyword if '${rc}' == '0' Install aws cli s3 debian + ${rc} ${output} = Run And Return Rc And Output yum --help + Run Keyword if '${rc}' == '0' Install aws cli s3 centos + Install aws cli s3 centos Execute sudo -E yum install -y awscli @@ -73,8 +80,9 @@ Setup dummy credentials for S3 Create bucket ${postfix} = Generate Random String 5 [NUMBERS] - Set Suite Variable ${BUCKET} bucket-${postfix} - Create bucket with name ${BUCKET} + ${bucket} = Set Variable bucket-${postfix} + Create bucket with name ${bucket} + [Return] ${bucket} Create bucket with name [Arguments] ${bucket} @@ -87,4 +95,19 @@ Setup s3 tests Run Keyword if '${OZONE_S3_SET_CREDENTIALS}' == 'true' Setup v4 headers ${result} = Execute And Ignore Error ozone sh volume create o3://${OM_SERVICE_ID}/s3v Should not contain ${result} Failed - Run Keyword if '${BUCKET}' == 'generated' Create bucket + ${BUCKET} = Run Keyword if '${BUCKET}' == 'generated' Create bucket + ... 
ELSE Set Variable ${BUCKET} + Set Suite Variable ${BUCKET} + Run Keyword if '${BUCKET}' == 'link' Setup links for S3 tests + +Setup links for S3 tests + ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/link + Return From Keyword If ${exists} + Execute ozone sh volume create o3://${OM_SERVICE_ID}/legacy + Execute ozone sh bucket create o3://${OM_SERVICE_ID}/legacy/source-bucket + Create link link + +Create link + [arguments] ${bucket} + Execute ozone sh bucket link o3://${OM_SERVICE_ID}/legacy/source-bucket o3://${OM_SERVICE_ID}/s3v/${bucket} + [return] ${bucket} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index d4594ef69498..b80e35793748 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -19,11 +19,11 @@ import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyLong; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; +import java.util.Collections; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.client.BlockID; @@ -32,6 +32,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.junit.After; @@ -44,7 +45,6 @@ /** * Test for OM metrics. */ -@SuppressWarnings("deprecation") public class TestOmMetrics { /** @@ -62,8 +62,6 @@ public class TestOmMetrics { /** * Create a MiniDFSCluster for testing. 
- * - * @throws IOException */ @Before public void setup() throws Exception { @@ -233,20 +231,28 @@ public void testKeyOps() throws IOException { KeyManager keyManager = (KeyManager) HddsWhiteboxTestUtils .getInternalState(ozoneManager, "keyManager"); KeyManager mockKm = Mockito.spy(keyManager); - - Mockito.doReturn(null).when(mockKm).openKey(null); - Mockito.doNothing().when(mockKm).deleteKey(null); - Mockito.doReturn(null).when(mockKm).lookupKey(null, ""); - Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0); - Mockito.doReturn(null).when(mockKm).listTrash( - null, null, null, null, 0); - Mockito.doNothing().when(mockKm).commitKey(any(OmKeyArgs.class), anyLong()); - Mockito.doReturn(null).when(mockKm).initiateMultipartUpload( - any(OmKeyArgs.class)); + BucketManager mockBm = Mockito.mock(BucketManager.class); + + OmBucketInfo mockBucket = OmBucketInfo.newBuilder() + .setVolumeName("").setBucketName("") + .build(); + Mockito.when(mockBm.getBucketInfo(any(), any())).thenReturn(mockBucket); + Mockito.doReturn(null).when(mockKm).openKey(any()); + Mockito.doNothing().when(mockKm).deleteKey(any()); + Mockito.doReturn(null).when(mockKm).lookupKey(any(), any()); + Mockito.doReturn(null).when(mockKm).listKeys(any(), any(), any(), any(), + anyInt()); + Mockito.doReturn(null).when(mockKm).listTrash(any(), any(), any(), any(), + anyInt()); + Mockito.doNothing().when(mockKm).commitKey(any(), anyLong()); + Mockito.doReturn(null).when(mockKm).initiateMultipartUpload(any()); + HddsWhiteboxTestUtils.setInternalState( + ozoneManager, "bucketManager", mockBm); HddsWhiteboxTestUtils.setInternalState( ozoneManager, "keyManager", mockKm); - doKeyOps(); + OmKeyArgs keyArgs = createKeyArgs(); + doKeyOps(keyArgs); MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); assertCounter("NumKeyOps", 7L, omMetrics); @@ -259,34 +265,32 @@ public void testKeyOps() throws IOException { assertCounter("NumInitiateMultipartUploads", 1L, omMetrics); - ozoneManager.openKey(null); - ozoneManager.commitKey(createKeyArgs(), 0); - ozoneManager.openKey(null); - ozoneManager.commitKey(createKeyArgs(), 0); - ozoneManager.openKey(null); - ozoneManager.commitKey(createKeyArgs(), 0); - ozoneManager.deleteKey(null); + ozoneManager.openKey(keyArgs); + ozoneManager.commitKey(keyArgs, 0); + ozoneManager.openKey(keyArgs); + ozoneManager.commitKey(keyArgs, 0); + ozoneManager.openKey(keyArgs); + ozoneManager.commitKey(keyArgs, 0); + ozoneManager.deleteKey(keyArgs); omMetrics = getMetrics("OMMetrics"); assertCounter("NumKeys", 2L, omMetrics); // inject exception to test for Failure Metrics - Mockito.doThrow(exception).when(mockKm).openKey(null); - Mockito.doThrow(exception).when(mockKm).deleteKey(null); - Mockito.doThrow(exception).when(mockKm).lookupKey(null, ""); + Mockito.doThrow(exception).when(mockKm).openKey(any()); + Mockito.doThrow(exception).when(mockKm).deleteKey(any()); + Mockito.doThrow(exception).when(mockKm).lookupKey(any(), any()); Mockito.doThrow(exception).when(mockKm).listKeys( - null, null, null, null, 0); + any(), any(), any(), any(), anyInt()); Mockito.doThrow(exception).when(mockKm).listTrash( - null, null, null, null, 0); - Mockito.doThrow(exception).when(mockKm).commitKey(any(OmKeyArgs.class), - anyLong()); - Mockito.doThrow(exception).when(mockKm).initiateMultipartUpload( - any(OmKeyArgs.class)); + any(), any(), any(), any(), anyInt()); + Mockito.doThrow(exception).when(mockKm).commitKey(any(), anyLong()); + Mockito.doThrow(exception).when(mockKm).initiateMultipartUpload(any()); 
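Background on the stubbing change above: the mocks switch from literal null arguments to Mockito matchers (any(), anyInt()) because doKeyOps now passes a real OmKeyArgs, and a stub recorded with a literal argument only matches that exact value, so calls made with a non-null argument would fall through to the real (spied) KeyManager. A minimal standalone sketch of the difference, assuming a hypothetical Manager class in place of KeyManager (not part of the patch):

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

public class StubMatcherSketch {

  // Hypothetical stand-in for KeyManager.
  static class Manager {
    String lookup(Object args, int max) {
      return "real";
    }
  }

  public static void main(String[] args) {
    Manager km = spy(new Manager());

    // Literal-argument stub: applies only when lookup is called with (null, 0).
    doReturn("stubbed").when(km).lookup(null, 0);
    System.out.println(km.lookup(new Object(), 5));   // prints "real" - stub not hit

    // Matcher-based stub: applies to any argument values, including real ones.
    doReturn("stubbed").when(km).lookup(any(), anyInt());
    System.out.println(km.lookup(new Object(), 5));   // prints "stubbed"
  }
}

This is also why the failure pass below re-stubs the same methods with doThrow and matchers before invoking doKeyOps a second time.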
HddsWhiteboxTestUtils.setInternalState( ozoneManager, "keyManager", mockKm); - doKeyOps(); + doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); assertCounter("NumKeyOps", 21L, omMetrics); @@ -380,39 +384,39 @@ private void doBucketOps() { /** * Test key operations with ignoring thrown exception. */ - private void doKeyOps() { + private void doKeyOps(OmKeyArgs keyArgs) { try { - ozoneManager.openKey(null); + ozoneManager.openKey(keyArgs); } catch (IOException ignored) { } try { - ozoneManager.deleteKey(null); + ozoneManager.deleteKey(keyArgs); } catch (IOException ignored) { } try { - ozoneManager.lookupKey(null); + ozoneManager.lookupKey(keyArgs); } catch (IOException ignored) { } try { - ozoneManager.listKeys(null, null, null, null, 0); + ozoneManager.listKeys("", "", null, null, 0); } catch (IOException ignored) { } try { - ozoneManager.listTrash(null, null, null, null, 0); + ozoneManager.listTrash("", "", null, null, 0); } catch (IOException ignored) { } try { - ozoneManager.commitKey(createKeyArgs(), 0); + ozoneManager.commitKey(keyArgs, 0); } catch (IOException ignored) { } try { - ozoneManager.initiateMultipartUpload(null); + ozoneManager.initiateMultipartUpload(keyArgs); } catch (IOException ignored) { } @@ -420,12 +424,12 @@ private void doKeyOps() { private OmKeyArgs createKeyArgs() { OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() - .setBlockID(new BlockID(new ContainerBlockID(1, 1))).build(); + .setBlockID(new BlockID(new ContainerBlockID(1, 1))) + .build(); keyLocationInfo.setCreateVersion(0); - List omKeyLocationInfoList = new ArrayList<>(); - omKeyLocationInfoList.add(keyLocationInfo); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setLocationInfoList( - omKeyLocationInfoList).build(); - return keyArgs; + + return new OmKeyArgs.Builder() + .setLocationInfoList(Collections.singletonList(keyLocationInfo)) + .build(); } } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index f6eaf3859eef..68598179adf0 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -304,6 +304,8 @@ enum Status { INVALID_VOLUME_NAME = 61; PARTIAL_DELETE = 62; + + DETECTED_LOOP_IN_BUCKET_LINKS = 63; } /** @@ -483,6 +485,8 @@ message BucketInfo { optional uint64 objectID = 9; optional uint64 updateID = 10; optional uint64 modificationTime = 11; + optional string sourceVolume = 12; + optional string sourceBucket = 13; } enum StorageTypeProto { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 2e0c6cfa56ea..4349d7c185ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.om; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -25,7 +24,7 @@ import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdds.protocol.StorageType; import 
org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -34,6 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.util.StringUtils; @@ -41,6 +41,7 @@ import com.google.common.base.Preconditions; import org.iq80.leveldb.DBException; +import org.jetbrains.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -136,46 +137,34 @@ public void createBucket(OmBucketInfo bucketInfo) throws IOException { throw new OMException("Bucket already exist", OMException.ResultCodes.BUCKET_ALREADY_EXISTS); } + BucketEncryptionKeyInfo bek = bucketInfo.getEncryptionKeyInfo(); - BucketEncryptionKeyInfo.Builder bekb = null; - if (bek != null) { - if (kmsProvider == null) { - throw new OMException("Invalid KMS provider, check configuration " + - CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, - OMException.ResultCodes.INVALID_KMS_PROVIDER); - } - if (bek.getKeyName() == null) { - throw new OMException("Bucket encryption key needed.", OMException - .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // Talk to KMS to retrieve the bucket encryption key info. - KeyProvider.Metadata metadata = getKMSProvider().getMetadata( - bek.getKeyName()); - if (metadata == null) { - throw new OMException("Bucket encryption key " + bek.getKeyName() - + " doesn't exist.", - OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // If the provider supports pool for EDEKs, this will fill in the pool - kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); - bekb = new BucketEncryptionKeyInfo.Builder() - .setKeyName(bek.getKeyName()) - .setVersion(CryptoProtocolVersion.ENCRYPTION_ZONES) - .setSuite(CipherSuite.convert(metadata.getCipher())); - } - List acls = new ArrayList<>(); - acls.addAll(bucketInfo.getAcls()); - volumeArgs.getAclMap().getDefaultAclList().forEach( - a -> acls.add(OzoneAcl.fromProtobufWithAccessType(a))); - - OmBucketInfo.Builder omBucketInfoBuilder = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setAcls(acls) - .setStorageType(bucketInfo.getStorageType()) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setCreationTime(Time.now()) - .addAllMetadata(bucketInfo.getMetadata()); + + boolean hasSourceVolume = bucketInfo.getSourceVolume() != null; + boolean hasSourceBucket = bucketInfo.getSourceBucket() != null; + + if (hasSourceBucket != hasSourceVolume) { + throw new OMException("Both source volume and source bucket are " + + "required for bucket links", + OMException.ResultCodes.INVALID_REQUEST); + } + + if (bek != null && hasSourceBucket) { + throw new OMException("Encryption cannot be set for bucket links", + OMException.ResultCodes.INVALID_REQUEST); + } + + BucketEncryptionKeyInfo.Builder bekb = + createBucketEncryptionKeyInfoBuilder(bek); + + OmBucketInfo.Builder omBucketInfoBuilder = bucketInfo.toBuilder() + .setCreationTime(Time.now()); + + List defaultAclList = + volumeArgs.getAclMap().getDefaultAclList(); + for (OzoneManagerProtocolProtos.OzoneAclInfo a : defaultAclList) { + omBucketInfoBuilder.addAcl(OzoneAcl.fromProtobufWithAccessType(a)); + } if (bekb != null) { 
omBucketInfoBuilder.setBucketEncryptionKey(bekb.build()); @@ -183,7 +172,14 @@ public void createBucket(OmBucketInfo bucketInfo) throws IOException { OmBucketInfo omBucketInfo = omBucketInfoBuilder.build(); commitBucketInfoToDB(omBucketInfo); - LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName); + if (hasSourceBucket) { + LOG.debug("created link {}/{} to bucket: {}/{}", + volumeName, bucketName, + omBucketInfo.getSourceVolume(), omBucketInfo.getSourceBucket()); + } else { + LOG.debug("created bucket: {} in volume: {}", bucketName, + volumeName); + } } catch (IOException | DBException ex) { if (!(ex instanceof OMException)) { LOG.error("Bucket creation failed for bucket:{} in volume:{}", @@ -199,6 +195,38 @@ public void createBucket(OmBucketInfo bucketInfo) throws IOException { } } + @Nullable + public BucketEncryptionKeyInfo.Builder createBucketEncryptionKeyInfoBuilder( + BucketEncryptionKeyInfo bek) throws IOException { + BucketEncryptionKeyInfo.Builder bekb = null; + if (bek != null) { + if (kmsProvider == null) { + throw new OMException("Invalid KMS provider, check configuration " + + CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, + OMException.ResultCodes.INVALID_KMS_PROVIDER); + } + if (bek.getKeyName() == null) { + throw new OMException("Bucket encryption key needed.", OMException + .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // Talk to KMS to retrieve the bucket encryption key info. + KeyProvider.Metadata metadata = getKMSProvider().getMetadata( + bek.getKeyName()); + if (metadata == null) { + throw new OMException("Bucket encryption key " + bek.getKeyName() + + " doesn't exist.", + OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // If the provider supports pool for EDEKs, this will fill in the pool + kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); + bekb = new BucketEncryptionKeyInfo.Builder() + .setKeyName(bek.getKeyName()) + .setVersion(CryptoProtocolVersion.ENCRYPTION_ZONES) + .setSuite(CipherSuite.convert(metadata.getCipher())); + } + return bekb; + } + private void commitBucketInfoToDB(OmBucketInfo omBucketInfo) throws IOException { String dbBucketKey = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 85895630eab0..53808f9e23ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -36,15 +36,18 @@ import java.util.Arrays; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -133,6 +136,7 @@ import org.apache.hadoop.ozone.om.snapshot.OzoneManagerSnapshotProvider; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; @@ -204,6 +208,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_VOLUME_LISTALL_ALLOWED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_VOLUME_LISTALL_ALLOWED_DEFAULT; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; @@ -231,6 +236,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl AuditLoggerType.OMLOGGER); private static final String OM_DAEMON = "om"; + private static boolean securityEnabled = false; private OzoneDelegationTokenSecretManager delegationTokenMgr; private OzoneBlockTokenSecretManager blockTokenMgr; @@ -2025,22 +2031,29 @@ public OmBucketInfo getBucketInfo(String volume, String bucket) */ @Override public OpenKeySession openKey(OmKeyArgs args) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { try { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } catch (OMException ex) { // For new keys key checkAccess call will fail as key doesn't exist. // Check user access for bucket. if (ex.getResult().equals(KEY_NOT_FOUND)) { checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } else { throw ex; } } } + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumKeyAllocates(); return keyManager.openKey(args); @@ -2048,12 +2061,12 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { metrics.incNumKeyAllocateFails(); auditSuccess = false; AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.ALLOCATE_KEY, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.ALLOCATE_KEY, (args == null) ? null : args.toAuditMap())); + OMAction.ALLOCATE_KEY, auditMap)); } } } @@ -2061,24 +2074,29 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { @Override public void commitKey(OmKeyArgs args, long clientID) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { try { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } catch (OMException ex) { // For new keys key checkAccess call will fail as key doesn't exist. // Check user access for bucket. 
if (ex.getResult().equals(KEY_NOT_FOUND)) { checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } else { throw ex; } } } - Map auditMap = (args == null) ? new LinkedHashMap<>() : - args.toAuditMap(); + + Map auditMap = bucket.audit(args.toAuditMap()); auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID)); + + args = bucket.update(args); + try { metrics.incNumKeyCommits(); keyManager.commitKey(args, clientID); @@ -2088,7 +2106,7 @@ public void commitKey(OmKeyArgs args, long clientID) // As key also can have multiple versions, we need to increment keys // only if version is 0. Currently we have not complete support of // versioning of keys. So, this can be revisited later. - if (args != null && args.getLocationInfoList() != null && + if (args.getLocationInfoList() != null && args.getLocationInfoList().size() > 0 && args.getLocationInfoList().get(0) != null && args.getLocationInfoList().get(0).getCreateVersion() == 0) { @@ -2105,25 +2123,30 @@ public void commitKey(OmKeyArgs args, long clientID) @Override public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, ExcludeList excludeList) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { try { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } catch (OMException ex) { // For new keys key checkAccess call will fail as key doesn't exist. // Check user access for bucket. if (ex.getResult().equals(KEY_NOT_FOUND)) { checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } else { throw ex; } } } + boolean auditSuccess = true; - Map auditMap = (args == null) ? new LinkedHashMap<>() : - args.toAuditMap(); + Map auditMap = bucket.audit(args.toAuditMap()); auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID)); + + args = bucket.update(args); + try { metrics.incNumBlockAllocateCalls(); return keyManager.allocateBlock(args, clientID, excludeList); @@ -2150,11 +2173,18 @@ public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, */ @Override public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumKeyLookups(); return keyManager.lookupKey(args, getClientAddress()); @@ -2162,25 +2192,32 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { metrics.incNumKeyLookupFails(); auditSuccess = false; AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_KEY, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_KEY, - (args == null) ? 
null : args.toAuditMap())); + auditMap)); } } } @Override public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { + Preconditions.checkNotNull(args); + + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } - Map auditMap = (args == null) ? new LinkedHashMap<>() : - args.toAuditMap(); + + Map auditMap = bucket.audit(args.toAuditMap()); auditMap.put(OzoneConsts.TO_KEY_NAME, toKeyName); + + args = bucket.update(args); + try { metrics.incNumKeyRenames(); keyManager.renameKey(args, toKeyName); @@ -2202,20 +2239,25 @@ public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { */ @Override public void deleteKey(OmKeyArgs args) throws IOException { + Map auditMap = args.toAuditMap(); try { + ResolvedBucket bucket = resolveBucketLink(args); + args = bucket.update(args); + if (isAclEnabled) { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.DELETE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } + metrics.incNumKeyDeletes(); keyManager.deleteKey(args); AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_KEY, - (args == null) ? null : args.toAuditMap())); + auditMap)); metrics.decNumKeys(); } catch (Exception ex) { metrics.incNumKeyDeleteFails(); AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_KEY, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } } @@ -2235,19 +2277,23 @@ public void deleteKeys(OmDeleteKeys deleteKeys) throws IOException { @Override public List listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException { + + ResolvedBucket bucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + if (isAclEnabled) { - checkAcls(ResourceType.BUCKET, - StoreType.OZONE, ACLType.LIST, volumeName, bucketName, keyPrefix); + checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.LIST, + bucket.realVolume(), bucket.realBucket(), keyPrefix); } + boolean auditSuccess = true; - Map auditMap = buildAuditMap(volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); + Map auditMap = bucket.audit(); auditMap.put(OzoneConsts.START_KEY, startKey); auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys)); auditMap.put(OzoneConsts.KEY_PREFIX, keyPrefix); + try { metrics.incNumKeyLists(); - return keyManager.listKeys(volumeName, bucketName, + return keyManager.listKeys(bucket.realVolume(), bucket.realBucket(), startKey, keyPrefix, maxKeys); } catch (IOException ex) { metrics.incNumKeyListFails(); @@ -2268,6 +2314,8 @@ public List listTrash(String volumeName, String bucketName, String startKeyName, String keyPrefix, int maxKeys) throws IOException { + // bucket links not supported + if (isAclEnabled) { checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.LIST, volumeName, bucketName, keyPrefix); @@ -2528,66 +2576,75 @@ public S3SecretValue getS3Secret(String kerberosID) throws IOException { @Override public OmMultipartInfo initiateMultipartUpload(OmKeyArgs keyArgs) throws IOException { - OmMultipartInfo multipartInfo; + + Preconditions.checkNotNull(keyArgs); + ResolvedBucket bucket = resolveBucketLink(keyArgs); + + Map auditMap = bucket.audit(keyArgs.toAuditMap()); + + keyArgs = bucket.update(keyArgs); + metrics.incNumInitiateMultipartUploads(); try { - 
multipartInfo = keyManager.initiateMultipartUpload(keyArgs); + OmMultipartInfo result = keyManager.initiateMultipartUpload(keyArgs); AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.INITIATE_MULTIPART_UPLOAD, (keyArgs == null) ? null : - keyArgs.toAuditMap())); + OMAction.INITIATE_MULTIPART_UPLOAD, auditMap)); + return result; } catch (IOException ex) { AUDIT.logWriteFailure(buildAuditMessageForFailure( - OMAction.INITIATE_MULTIPART_UPLOAD, - (keyArgs == null) ? null : keyArgs.toAuditMap(), ex)); + OMAction.INITIATE_MULTIPART_UPLOAD, auditMap, ex)); metrics.incNumInitiateMultipartUploadFails(); throw ex; } - return multipartInfo; } @Override public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( OmKeyArgs keyArgs, long clientID) throws IOException { - boolean auditSuccess = false; - OmMultipartCommitUploadPartInfo commitUploadPartInfo; + + Preconditions.checkNotNull(keyArgs); + ResolvedBucket bucket = resolveBucketLink(keyArgs); + + Map auditMap = bucket.audit(keyArgs.toAuditMap()); + + keyArgs = bucket.update(keyArgs); + metrics.incNumCommitMultipartUploadParts(); try { - commitUploadPartInfo = keyManager.commitMultipartUploadPart(keyArgs, - clientID); - auditSuccess = true; + OmMultipartCommitUploadPartInfo result = + keyManager.commitMultipartUploadPart(keyArgs, clientID); + AUDIT.logWriteSuccess(buildAuditMessageForSuccess( + OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, auditMap)); + return result; } catch (IOException ex) { - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction - .INITIATE_MULTIPART_UPLOAD, (keyArgs == null) ? null : keyArgs - .toAuditMap(), ex)); + AUDIT.logWriteFailure(buildAuditMessageForFailure( + OMAction.INITIATE_MULTIPART_UPLOAD, auditMap, ex)); metrics.incNumCommitMultipartUploadPartFails(); throw ex; - } finally { - if (auditSuccess) { - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, (keyArgs == null) ? null : - keyArgs.toAuditMap())); - } } - return commitUploadPartInfo; } @Override public OmMultipartUploadCompleteInfo completeMultipartUpload( OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList) throws IOException { - OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; - metrics.incNumCompleteMultipartUploads(); - Map auditMap = (omKeyArgs == null) ? new LinkedHashMap<>() : - omKeyArgs.toAuditMap(); + Preconditions.checkNotNull(omKeyArgs); + ResolvedBucket bucket = resolveBucketLink(omKeyArgs); + + Map auditMap = bucket.audit(omKeyArgs.toAuditMap()); auditMap.put(OzoneConsts.MULTIPART_LIST, multipartUploadList .getMultipartMap().toString()); + + omKeyArgs = bucket.update(omKeyArgs); + + metrics.incNumCompleteMultipartUploads(); try { - omMultipartUploadCompleteInfo = keyManager.completeMultipartUpload( - omKeyArgs, multipartUploadList); + OmMultipartUploadCompleteInfo result = keyManager.completeMultipartUpload( + omKeyArgs, multipartUploadList); AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction .COMPLETE_MULTIPART_UPLOAD, auditMap)); - return omMultipartUploadCompleteInfo; + return result; } catch (IOException ex) { metrics.incNumCompleteMultipartUploadFails(); AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction @@ -2599,8 +2656,13 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( @Override public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { - Map auditMap = (omKeyArgs == null) ? 
new LinkedHashMap<>() : - omKeyArgs.toAuditMap(); + Preconditions.checkNotNull(omKeyArgs); + ResolvedBucket bucket = resolveBucketLink(omKeyArgs); + + Map auditMap = bucket.audit(omKeyArgs.toAuditMap()); + + omKeyArgs = bucket.update(omKeyArgs); + metrics.incNumAbortMultipartUploads(); try { keyManager.abortMultipartUpload(omKeyArgs); @@ -2616,22 +2678,24 @@ public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { } @Override - public OmMultipartUploadListParts listParts(String volumeName, - String bucketName, String keyName, String uploadID, int partNumberMarker, - int maxParts) throws IOException { - Map auditMap = new HashMap<>(); - auditMap.put(OzoneConsts.VOLUME, volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); + public OmMultipartUploadListParts listParts(final String volumeName, + final String bucketName, String keyName, String uploadID, + int partNumberMarker, int maxParts) throws IOException { + + ResolvedBucket bucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + + Map auditMap = bucket.audit(); auditMap.put(OzoneConsts.KEY, keyName); auditMap.put(OzoneConsts.UPLOAD_ID, uploadID); auditMap.put(OzoneConsts.PART_NUMBER_MARKER, Integer.toString(partNumberMarker)); auditMap.put(OzoneConsts.MAX_PARTS, Integer.toString(maxParts)); + metrics.incNumListMultipartUploadParts(); try { OmMultipartUploadListParts omMultipartUploadListParts = - keyManager.listParts(volumeName, bucketName, keyName, uploadID, - partNumberMarker, maxParts); + keyManager.listParts(bucket.realVolume(), bucket.realBucket(), + keyName, uploadID, partNumberMarker, maxParts); AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction .LIST_MULTIPART_UPLOAD_PARTS, auditMap)); return omMultipartUploadListParts; @@ -2647,15 +2711,16 @@ public OmMultipartUploadListParts listParts(String volumeName, public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws IOException { - Map auditMap = new HashMap<>(); - auditMap.put(OzoneConsts.VOLUME, volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); + ResolvedBucket bucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + + Map auditMap = bucket.audit(); auditMap.put(OzoneConsts.PREFIX, prefix); metrics.incNumListMultipartUploads(); try { OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads(volumeName, bucketName, prefix); + keyManager.listMultipartUploads(bucket.realVolume(), + bucket.realBucket(), prefix); AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction .LIST_MULTIPART_UPLOADS, auditMap)); return omMultipartUploadList; @@ -2671,11 +2736,13 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, @Override public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { - if (isAclEnabled) { - checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } + ResolvedBucket bucket = resolveBucketLink(args); + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumGetFileStatus(); return keyManager.getFileStatus(args, getClientAddress()); @@ -2683,14 +2750,12 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { metrics.incNumGetFileStatusFails(); auditSuccess = false; AUDIT.logReadFailure( - buildAuditMessageForFailure(OMAction.GET_FILE_STATUS, - (args == null) ? 
null : args.toAuditMap(), ex)); + buildAuditMessageForFailure(OMAction.GET_FILE_STATUS, auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logReadSuccess( - buildAuditMessageForSuccess(OMAction.GET_FILE_STATUS, - (args == null) ? null : args.toAuditMap())); + buildAuditMessageForSuccess(OMAction.GET_FILE_STATUS, auditMap)); } } } @@ -2704,11 +2769,13 @@ private ResourceType getResourceType(OmKeyArgs args) { @Override public void createDirectory(OmKeyArgs args) throws IOException { - if (isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } + ResolvedBucket bucket = resolveBucketLink(args); + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumCreateDirectory(); keyManager.createDirectory(args); @@ -2716,14 +2783,12 @@ public void createDirectory(OmKeyArgs args) throws IOException { metrics.incNumCreateDirectoryFails(); auditSuccess = false; AUDIT.logWriteFailure( - buildAuditMessageForFailure(OMAction.CREATE_DIRECTORY, - (args == null) ? null : args.toAuditMap(), ex)); + buildAuditMessageForFailure(OMAction.CREATE_DIRECTORY, auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(OMAction.CREATE_DIRECTORY, - (args == null) ? null : args.toAuditMap())); + buildAuditMessageForSuccess(OMAction.CREATE_DIRECTORY, auditMap)); } } } @@ -2731,11 +2796,13 @@ public void createDirectory(OmKeyArgs args) throws IOException { @Override public OpenKeySession createFile(OmKeyArgs args, boolean overWrite, boolean recursive) throws IOException { - if (isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), null); - } + ResolvedBucket bucket = resolveBucketLink(args); + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumCreateFile(); return keyManager.createFile(args, overWrite, recursive); @@ -2743,23 +2810,30 @@ public OpenKeySession createFile(OmKeyArgs args, boolean overWrite, metrics.incNumCreateFileFails(); auditSuccess = false; AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.CREATE_FILE, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.CREATE_FILE, (args == null) ? null : args.toAuditMap())); + OMAction.CREATE_FILE, auditMap)); } } } @Override public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumLookupFile(); return keyManager.lookupFile(args, getClientAddress()); @@ -2767,12 +2841,12 @@ public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { metrics.incNumLookupFileFails(); auditSuccess = false; AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.LOOKUP_FILE, - (args == null) ? 
null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.LOOKUP_FILE, (args == null) ? null : args.toAuditMap())); + OMAction.LOOKUP_FILE, auditMap)); } } } @@ -2780,11 +2854,19 @@ public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { @Override public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries) throws IOException { + + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumListStatus(); return keyManager.listStatus(args, recursive, startKey, numEntries, @@ -2793,12 +2875,12 @@ public List listStatus(OmKeyArgs args, boolean recursive, metrics.incNumListStatusFails(); auditSuccess = false; AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_STATUS, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logReadSuccess(buildAuditMessageForSuccess( - OMAction.LIST_STATUS, (args == null) ? null : args.toAuditMap())); + OMAction.LIST_STATUS, auditMap)); } } } @@ -3315,4 +3397,60 @@ private void startJVMPauseMonitor() { jvmPauseMonitor.init(configuration); jvmPauseMonitor.start(); } + + public ResolvedBucket resolveBucketLink(KeyArgs args) throws IOException { + return resolveBucketLink( + Pair.of(args.getVolumeName(), args.getBucketName())); + } + + public ResolvedBucket resolveBucketLink(OmKeyArgs args) + throws IOException { + return resolveBucketLink( + Pair.of(args.getVolumeName(), args.getBucketName())); + } + + public ResolvedBucket resolveBucketLink(Pair requested) + throws IOException { + Pair resolved = + resolveBucketLink(requested, new HashSet<>()); + return new ResolvedBucket(requested, resolved); + } + + /** + * Resolves bucket symlinks. Read permission is required for following links. + * + * @param volumeAndBucket the bucket to be resolved (if it is a link) + * @param visited collects link buckets visited during the resolution to + * avoid infinite loops + * @return bucket location possibly updated with its actual volume and bucket + * after following bucket links + * @throws IOException (most likely OMException) if ACL check fails, bucket is + * not found, loop is detected in the links, etc. 
+ */ + private Pair resolveBucketLink( + Pair volumeAndBucket, + Set> visited) throws IOException { + + String volumeName = volumeAndBucket.getLeft(); + String bucketName = volumeAndBucket.getRight(); + OmBucketInfo info = bucketManager.getBucketInfo(volumeName, bucketName); + if (!info.isLink()) { + return volumeAndBucket; + } + + if (!visited.add(volumeAndBucket)) { + throw new OMException("Detected loop in bucket links", + DETECTED_LOOP_IN_BUCKET_LINKS); + } + + if (isAclEnabled) { + checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, + volumeName, bucketName, null); + } + + return resolveBucketLink( + Pair.of(info.getSourceVolume(), info.getSourceBucket()), + visited); + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java new file mode 100644 index 000000000000..fef9b2e35a27 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Bundles information about a bucket, which is possibly a symlink, + * and the real bucket that it resolves to, if it is indeed a link. + * For regular buckets, both {@code requested} and {@code resolved} point to + * the same bucket. + */ +public class ResolvedBucket { + + private final Pair requested; + private final Pair resolved; + + public ResolvedBucket(Pair requested, + Pair resolved) { + this.requested = requested; + this.resolved = resolved; + } + + public Pair requested() { + return requested; + } + + public Pair resolved() { + return resolved; + } + + public String requestedVolume() { + return requested.getLeft(); + } + + public String requestedBucket() { + return requested.getRight(); + } + + public String realVolume() { + return resolved.getLeft(); + } + + public String realBucket() { + return resolved.getRight(); + } + + public OmKeyArgs update(OmKeyArgs args) { + return isLink() + ? args.toBuilder() + .setVolumeName(realVolume()) + .setBucketName(realBucket()) + .build() + : args; + } + + public KeyArgs update(KeyArgs args) { + return isLink() + ? args.toBuilder() + .setVolumeName(realVolume()) + .setBucketName(realBucket()) + .build() + : args; + } + + public boolean isLink() { + return !Objects.equals(requested, resolved); + } + + public Map audit() { + return audit(new LinkedHashMap<>()); + } + + /** + * Adds audit information about the bucket (and if it's a link, then the + * real bucket, too) to {@code auditMap}. 
+ * @return the same map for convenience + */ + public Map audit(Map auditMap) { + auditMap.putIfAbsent(OzoneConsts.VOLUME, requestedVolume()); + auditMap.putIfAbsent(OzoneConsts.BUCKET, requestedBucket()); + if (isLink()) { + auditMap.put(OzoneConsts.SOURCE_VOLUME, realVolume()); + auditMap.put(OzoneConsts.SOURCE_BUCKET, realBucket()); + } + return auditMap; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 9d7d133eca54..71d5458c84e4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -115,6 +115,20 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { newBucketInfo.setBeinfo(getBeinfo(kmsProvider, bucketInfo)); } + boolean hasSourceVolume = bucketInfo.getSourceVolume() != null; + boolean hasSourceBucket = bucketInfo.getSourceBucket() != null; + + if (hasSourceBucket != hasSourceVolume) { + throw new OMException("Both source volume and source bucket are " + + "required for bucket links", + OMException.ResultCodes.INVALID_REQUEST); + } + + if (hasSourceBucket && bucketInfo.hasBeinfo()) { + throw new OMException("Encryption cannot be set for bucket links", + OMException.ResultCodes.INVALID_REQUEST); + } + newCreateBucketRequest.setBucketInfo(newBucketInfo.build()); return getOmRequest().toBuilder().setUserInfo(getUserInfo()) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index ec51333a5710..7b2ab51f0c17 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -146,6 +146,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, List missingParentInfos; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 3b0b02bf549d..7327626427e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -166,6 +166,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); KeyArgs keyArgs = createFileRequest.getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -199,6 +200,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, IOException exception = null; 
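The request handlers that follow (OMFileCreateRequest, OMAllocateBlockRequest, OMKeyCommitRequest, and the rest) now resolve a possible bucket link first and then operate on the real volume and bucket, relying on the loop added in OzoneManager#resolveBucketLink and the ResolvedBucket helper above. A simplified standalone model of that resolution, using a hypothetical string-keyed link table in place of the bucket table and OmBucketInfo; only the visited-set loop detection and the stop-at-real-bucket behaviour mirror the patch:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class LinkResolutionSketch {

  // Hypothetical link table: "vol/bucket" -> "sourceVol/sourceBucket".
  static final Map<String, String> LINKS = new HashMap<>();

  static String resolve(String volumeAndBucket, Set<String> visited) {
    String source = LINKS.get(volumeAndBucket);
    if (source == null) {
      return volumeAndBucket;          // a regular bucket, nothing to follow
    }
    if (!visited.add(volumeAndBucket)) {
      // Corresponds to DETECTED_LOOP_IN_BUCKET_LINKS in the patch.
      throw new IllegalStateException("Detected loop in bucket links");
    }
    return resolve(source, visited);   // follow the link one more hop
  }

  public static void main(String[] args) {
    LINKS.put("s3v/link", "legacy/source-bucket");
    System.out.println(resolve("s3v/link", new HashSet<>()));   // legacy/source-bucket

    LINKS.put("vol/a", "vol/b");
    LINKS.put("vol/b", "vol/a");
    try {
      resolve("vol/a", new HashSet<>());
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());                        // loop reported
    }
  }
}

The visited set lets a chain of links resolve in a single pass, while a cycle fails fast instead of recursing indefinitely.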
Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); @@ -310,7 +315,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } // Audit Log outside the lock - Map auditMap = buildKeyArgsAuditMap(keyArgs); auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( OMAction.CREATE_FILE, auditMap, exception, getOmRequest().getUserInfo())); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index 1a39e0b19b80..9e82888be457 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -161,8 +161,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID)); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - String openKeyName = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); + String openKeyName = null; OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); @@ -172,6 +171,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, IOException exception = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, allocateBlockRequest.getClientID()); @@ -182,6 +185,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // Here we don't acquire bucket/volume lock because for a single client // allocateBlock is called in serial fashion. 
+ openKeyName = omMetadataManager.getOpenKey(volumeName, bucketName, + keyName, clientID); openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKeyName); if (openKeyInfo == null) { throw new OMException("Open Key not found " + openKeyName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index edeea3d2d449..eb3769b70ddd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -125,37 +125,42 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo omKeyInfo = null; OMClientResponse omClientResponse = null; boolean bucketLockAcquired = false; - Result result = null; + Result result; OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, commitKeyRequest.getClientID()); try { + commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap); + volumeName = commitKeyArgs.getVolumeName(); + bucketName = commitKeyArgs.getBucketName(); + // check Acl checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, commitKeyRequest.getClientID()); + String dbOzoneKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, + keyName, commitKeyRequest.getClientID()); + List locationInfoList = new ArrayList<>(); for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) { locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation)); } - bucketLockAcquired = omMetadataManager.getLock().acquireWriteLock( - BUCKET_LOCK, volumeName, bucketName); + bucketLockAcquired = + omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey); - if (omKeyInfo == null) { throw new OMException("Failed to commit key, as " + dbOpenKey + "entry is not found in the OpenKey table", KEY_NOT_FOUND); } - omKeyInfo.setDataSize(commitKeyArgs.getDataSize()); omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime()); @@ -183,7 +188,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.FAILURE; exception = ex; omClientResponse = new OMKeyCommitResponse(createErrorOMResponse( - omResponse, exception)); + omResponse, exception)); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -207,7 +212,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, if (omKeyInfo.getKeyLocationVersions().size() == 1) { omMetrics.incNumKeys(); } - LOG.debug("Key commited. Volume:{}, Bucket:{}, Key:{}", volumeName, + LOG.debug("Key committed. 
Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); break; case FAILURE: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index c6a7e52f744e..f7f08dc75c09 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -165,6 +165,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest(); KeyArgs keyArgs = createKeyRequest.getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -184,6 +185,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, IOException exception = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); @@ -253,13 +258,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } // Audit Log outside the lock - - Map auditMap = buildKeyArgsAuditMap(keyArgs); auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( OMAction.ALLOCATE_KEY, auditMap, exception, getOmRequest().getUserInfo())); - switch (result) { case SUCCESS: LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}", volumeName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index b0eb6fd0be7b..8b7541734206 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -88,12 +88,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); - OzoneManagerProtocolProtos.KeyArgs deleteKeyArgs = + OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); - String volumeName = deleteKeyArgs.getVolumeName(); - String bucketName = deleteKeyArgs.getBucketName(); - String keyName = deleteKeyArgs.getKeyName(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumKeyDeletes(); @@ -101,8 +102,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, AuditLogger auditLogger = ozoneManager.getAuditLogger(); OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); - Map auditMap = buildKeyArgsAuditMap(deleteKeyArgs); - OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); @@ -111,6 +110,10 @@ public OMClientResponse 
validateAndUpdateCache(OzoneManager ozoneManager, OMClientResponse omClientResponse = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index dc83ff633f79..91db347c1470 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -101,12 +101,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest(); - OzoneManagerProtocolProtos.KeyArgs renameKeyArgs = + OzoneManagerProtocolProtos.KeyArgs keyArgs = renameKeyRequest.getKeyArgs(); + Map auditMap = buildAuditMap(keyArgs, renameKeyRequest); - String volumeName = renameKeyArgs.getVolumeName(); - String bucketName = renameKeyArgs.getBucketName(); - String fromKeyName = renameKeyArgs.getKeyName(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String fromKeyName = keyArgs.getKeyName(); String toKeyName = renameKeyRequest.getToKeyName(); OMMetrics omMetrics = ozoneManager.getMetrics(); @@ -114,9 +115,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, AuditLogger auditLogger = ozoneManager.getAuditLogger(); - Map auditMap = - buildAuditMap(renameKeyArgs, renameKeyRequest); - OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); @@ -132,6 +130,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, throw new OMException("Key name is empty", OMException.ResultCodes.INVALID_KEY_NAME); } + + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acls to see if user has access to perform delete operation on // old key and create operation on new key checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName, @@ -168,7 +171,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, fromKeyValue.setKeyName(toKeyName); //Set modification time - fromKeyValue.setModificationTime(renameKeyArgs.getModificationTime()); + fromKeyValue.setModificationTime(keyArgs.getModificationTime()); // Add to cache. 
// fromKey should be deleted, toKey should be added with newly updated diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 0aec04dc608b..e3f0a69cb767 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -27,11 +27,13 @@ import java.util.Collections; import java.util.EnumSet; import java.util.List; +import java.util.Map; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.PrefixManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -88,6 +90,15 @@ public OMKeyRequest(OMRequest omRequest) { super(omRequest); } + protected static KeyArgs resolveBucketLink( + OzoneManager ozoneManager, KeyArgs keyArgs, + Map auditMap) throws IOException { + ResolvedBucket bucket = ozoneManager.resolveBucketLink(keyArgs); + keyArgs = bucket.update(keyArgs); + bucket.audit(auditMap); + return keyArgs; + } + /** * This methods avoids multiple rpc calls to SCM by allocating multiple blocks * in one rpc call. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index adc42d8dc201..012df4960e42 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -19,12 +19,14 @@ package org.apache.hadoop.ozone.om.request.key; import com.google.common.base.Optional; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -42,7 +44,7 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -85,10 +87,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumKeyDeletes(); - Map auditMap = null; String volumeName = deleteKeyArgs.getVolumeName(); String bucketName = deleteKeyArgs.getBucketName(); - String keyName = ""; + Map auditMap = new LinkedHashMap<>(); + auditMap.put(VOLUME, volumeName); + auditMap.put(BUCKET, bucketName); List omKeyInfoList = new ArrayList<>(); AuditLogger auditLogger = ozoneManager.getAuditLogger(); @@ -99,10 +102,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getOmRequest()); OMMetadataManager 
omMetadataManager = ozoneManager.getMetadataManager(); - - boolean acquiredLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); + boolean acquiredLock = false; int indexFailed = 0; int length = deleteKeys.size(); @@ -112,12 +112,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, boolean deleteStatus = true; try { - + ResolvedBucket bucket = ozoneManager.resolveBucketLink( + Pair.of(volumeName, bucketName)); + bucket.audit(auditMap); + volumeName = bucket.realVolume(); + bucketName = bucket.realBucket(); + + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); // Validate bucket and volume exists or not. validateBucketAndVolume(omMetadataManager, volumeName, bucketName); for (indexFailed = 0; indexFailed < length; indexFailed++) { - keyName = deleteKeyArgs.getKeys(indexFailed); + String keyName = deleteKeyArgs.getKeys(indexFailed); String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey); @@ -187,8 +194,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omDoubleBufferHelper); } - auditMap = buildDeleteKeysAuditMap(volumeName, bucketName, deleteKeys, - unDeletedKeys.getKeysList()); + addDeletedKeys(auditMap, deleteKeys, unDeletedKeys.getKeysList()); auditLog(auditLogger, buildAuditMessage(DELETE_KEYS, auditMap, exception, userInfo)); @@ -221,21 +227,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } /** - * Build audit map for DeleteKeys request. - * @param volumeName - * @param bucketName - * @param deletedKeys - * @param unDeletedKeys - * @return + * Add key info to audit map for DeleteKeys request. 
*/ - private Map buildDeleteKeysAuditMap(String volumeName, - String bucketName, List deletedKeys, List unDeletedKeys) { - Map< String, String > auditMap = new HashMap<>(); - auditMap.put(VOLUME, volumeName); - auditMap.put(BUCKET, bucketName); + private static void addDeletedKeys( + Map auditMap, List deletedKeys, + List unDeletedKeys) { auditMap.put(DELETED_KEYS_LIST, String.join(",", deletedKeys)); - auditMap.put(UNDELETED_KEYS_LIST, String.join(",", - unDeletedKeys)); - return auditMap; + auditMap.put(UNDELETED_KEYS_LIST, String.join(",", unDeletedKeys)); } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java index eac7842f84e2..232a0fb6c0e4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java @@ -21,6 +21,8 @@ import java.io.IOException; import com.google.common.base.Preconditions; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.response.key.OMTrashRecoverResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -86,6 +88,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, boolean acquireLock = false; OMClientResponse omClientResponse = null; try { + ResolvedBucket bucket = ozoneManager.resolveBucketLink( + Pair.of(volumeName, destinationBucket)); + volumeName = bucket.realVolume(); + destinationBucket = bucket.realBucket(); + // Check acl for the destination bucket. 
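For reference, the shape shared by all of these write requests is the same: build the audit map from the client-supplied names, resolve a possible bucket link, and only then run ACL checks and take the bucket lock against the resolved volume/bucket. A minimal sketch of that flow — SampleWriteRequest and doWork are made-up names, while the ResolvedBucket calls mirror the helper added to OMKeyRequest above:

    import java.io.IOException;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.ozone.om.OzoneManager;
    import org.apache.hadoop.ozone.om.ResolvedBucket;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;

    final class SampleWriteRequest {
      static KeyArgs doWork(OzoneManager ozoneManager, KeyArgs keyArgs)
          throws IOException {
        // The audit map starts from the names the client actually sent.
        Map<String, String> auditMap = new LinkedHashMap<>();
        auditMap.put("volume", keyArgs.getVolumeName());
        auditMap.put("bucket", keyArgs.getBucketName());

        // Follow a possible bucket link; ACL checks, BUCKET_LOCK and table
        // keys all operate on the resolved volume/bucket from here on.
        ResolvedBucket bucket = ozoneManager.resolveBucketLink(keyArgs);
        bucket.audit(auditMap);          // resolved bucket details for the audit log
        return bucket.update(keyArgs);   // KeyArgs rewritten to the real bucket
      }
    }
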
checkBucketAcls(ozoneManager, volumeName, destinationBucket, keyName, IAccessAuthorizer.ACLType.WRITE); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index 4f95fe445654..aa96ba995ab9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -48,6 +48,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Map; import java.util.UUID; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -96,8 +97,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Preconditions.checkNotNull(keyArgs.getMultipartUploadID()); + Map auditMap = buildKeyArgsAuditMap(keyArgs); + String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); + final String requestedVolume = volumeName; + final String requestedBucket = bucketName; String keyName = keyArgs.getKeyName(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); @@ -114,10 +119,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getOmRequest()); OMClientResponse omClientResponse = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // TODO to support S3 ACL later. acquiredBucketLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); @@ -136,8 +145,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // multipart upload request is received, it returns multipart upload id // for the key. - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, keyArgs.getMultipartUploadID()); + String multipartKey = omMetadataManager.getMultipartKey( + volumeName, bucketName, keyName, + keyArgs.getMultipartUploadID()); // Even if this key already exists in the KeyTable, it would be taken // care of in the final complete multipart upload. 
AWS S3 behavior is @@ -154,8 +164,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .build(); omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) + .setVolumeName(volumeName) + .setBucketName(bucketName) .setKeyName(keyArgs.getKeyName()) .setCreationTime(keyArgs.getModificationTime()) .setModificationTime(keyArgs.getModificationTime()) @@ -180,8 +190,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new S3InitiateMultipartUploadResponse( omResponse.setInitiateMultiPartUploadResponse( MultipartInfoInitiateResponse.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) + .setVolumeName(requestedVolume) + .setBucketName(requestedBucket) .setKeyName(keyName) .setMultipartUploadID(keyArgs.getMultipartUploadID())) .build(), multipartKeyInfo, omKeyInfo); @@ -196,14 +206,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper); if (acquiredBucketLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, + volumeName, bucketName); } } // audit log auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.INITIATE_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), + OMAction.INITIATE_MULTIPART_UPLOAD, auditMap, exception, getOmRequest().getUserInfo())); switch (result) { @@ -217,6 +227,7 @@ OMAction.INITIATE_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), LOG.error("S3 InitiateMultipart Upload request for Key {} in " + "Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName, exception); + break; default: LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}", multipartInfoInitiateRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 4518a3b9b6cf..0726fe4a9c7e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import java.io.IOException; +import java.util.Map; import com.google.common.base.Optional; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; @@ -85,9 +86,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .getAbortMultiPartUploadRequest(); OzoneManagerProtocolProtos.KeyArgs keyArgs = multipartUploadAbortRequest .getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); + final String requestedVolume = volumeName; + final String requestedBucket = bucketName; String keyName = keyArgs.getKeyName(); ozoneManager.getMetrics().incNumAbortMultipartUploads(); @@ -101,15 +105,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMClientResponse omClientResponse = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = 
keyArgs.getBucketName(); + // TODO to support S3 ACL later. acquiredLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, keyArgs.getMultipartUploadID()); + multipartKey = omMetadataManager.getMultipartKey( + volumeName, bucketName, keyName, keyArgs.getMultipartUploadID()); OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(multipartKey); @@ -118,7 +126,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // upload initiated for this key. if (omKeyInfo == null) { throw new OMException("Abort Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, + requestedVolume + "bucket: " + requestedBucket + "key: " + keyName, OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } @@ -152,14 +160,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, + volumeName, bucketName); } } // audit log auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.ABORT_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), + OMAction.ABORT_MULTIPART_UPLOAD, auditMap, exception, getOmRequest().getUserInfo())); switch (result) { @@ -173,6 +181,7 @@ OMAction.ABORT_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), LOG.error("Abort Multipart request is failed for KeyName {} in " + "VolumeName/Bucket {}/{}", keyName, volumeName, bucketName, exception); + break; default: LOG.error("Unrecognized Result for S3MultipartUploadAbortRequest: {}", multipartUploadAbortRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index 346ff87ff186..283a22dc37b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -89,6 +89,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getOmRequest().getCommitMultiPartUploadRequest(); KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -111,6 +112,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmMultipartKeyInfo multipartKeyInfo = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // TODO to support S3 ACL later. 
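The multipart requests additionally keep the originally requested volume/bucket around, because responses and error messages should echo what the S3 client asked for even when the data lives behind a bucket link, while locks and table keys use the resolved names. A hedged sketch of that split (class and field names here are hypothetical):

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.ozone.om.OzoneManager;
    import org.apache.hadoop.ozone.om.ResolvedBucket;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;

    final class RequestedAndResolved {
      final String requestedVolume;   // echoed back to the client
      final String requestedBucket;
      final KeyArgs resolved;         // used for BUCKET_LOCK and table keys

      private RequestedAndResolved(String vol, String bucket, KeyArgs args) {
        this.requestedVolume = vol;
        this.requestedBucket = bucket;
        this.resolved = args;
      }

      static RequestedAndResolved of(OzoneManager om, KeyArgs keyArgs,
          Map<String, String> auditMap) throws IOException {
        String vol = keyArgs.getVolumeName();
        String bucket = keyArgs.getBucketName();
        ResolvedBucket resolvedBucket = om.resolveBucketLink(keyArgs);
        resolvedBucket.audit(auditMap);
        return new RequestedAndResolved(vol, bucket,
            resolvedBucket.update(keyArgs));
      }
    }
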
acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); @@ -118,16 +123,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, validateBucketAndVolume(omMetadataManager, volumeName, bucketName); String uploadID = keyArgs.getMultipartUploadID(); - multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); + multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, + keyName, uploadID); multipartKeyInfo = omMetadataManager.getMultipartInfoTable() .get(multipartKey); long clientID = multipartCommitUploadPartRequest.getClientID(); - openKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, - clientID); + openKey = omMetadataManager.getOpenKey( + volumeName, bucketName, keyName, clientID); + + String ozoneKey = omMetadataManager.getOzoneKey( + volumeName, bucketName, keyName); omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); @@ -147,8 +155,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // Set the UpdateID to current transactionLogIndex omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); - partName = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName) + clientID; + partName = ozoneKey + clientID; if (multipartKeyInfo == null) { // This can occur when user started uploading part by the time commit @@ -217,15 +224,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, + volumeName, bucketName); } } // audit log + // Add MPU related information. + auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER, + String.valueOf(keyArgs.getMultipartNumber())); + auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NAME, partName); auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, - buildAuditMap(keyArgs, partName), exception, + auditMap, exception, getOmRequest().getUserInfo())); switch (result) { @@ -236,7 +247,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, case FAILURE: ozoneManager.getMetrics().incNumCommitMultipartUploadPartFails(); LOG.error("MultipartUpload Commit is failed for Key:{} in " + - "Volume/Bucket {}/{}", keyName, volumeName, bucketName, exception); + "Volume/Bucket {}/{}", keyName, volumeName, bucketName, + exception); break; default: LOG.error("Unrecognized Result for S3MultipartUploadCommitPartRequest: " + @@ -246,15 +258,5 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, return omClientResponse; } - private Map buildAuditMap(KeyArgs keyArgs, String partName) { - Map auditMap = buildKeyArgsAuditMap(keyArgs); - - // Add MPU related information. 
- auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER, - String.valueOf(keyArgs.getMultipartNumber())); - auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NAME, partName); - - return auditMap; - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index c4e315cddace..a9aefa08a5b8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -96,19 +96,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, List partsList = multipartUploadCompleteRequest.getPartsListList(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); + final String requestedVolume = volumeName; + final String requestedBucket = bucketName; String keyName = keyArgs.getKeyName(); String uploadID = keyArgs.getMultipartUploadID(); + String multipartKey = null; ozoneManager.getMetrics().incNumCompleteMultipartUploads(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); boolean acquiredLock = false; OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( @@ -117,6 +117,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, IOException exception = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + multipartKey = omMetadataManager.getMultipartKey(volumeName, + bucketName, keyName, uploadID); + // TODO to support S3 ACL later. 
acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, @@ -124,12 +131,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + String ozoneKey = omMetadataManager.getOzoneKey( + volumeName, bucketName, keyName); + OmMultipartKeyInfo multipartKeyInfo = omMetadataManager .getMultipartInfoTable().get(multipartKey); if (multipartKeyInfo == null) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName), OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } TreeMap partKeyInfoMap = @@ -140,8 +150,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, LOG.error("Complete MultipartUpload failed for key {} , MPU Key has" + " no parts in OM, parts given to upload are {}", ozoneKey, partsList); - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName), OMException.ResultCodes.INVALID_PART); } @@ -157,9 +167,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, "partNumber at index {} is {} for ozonekey is " + "{}", i, currentPartNumber, i - 1, prevPartNumber, ozoneKey); - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName + - "because parts are in Invalid order.", + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName) + + " because parts are in Invalid order.", OMException.ResultCodes.INVALID_PART_ORDER); } prevPartNumber = currentPartNumber; @@ -182,10 +192,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, !partName.equals(partKeyInfo.getPartName())) { String omPartName = partKeyInfo == null ? null : partKeyInfo.getPartName(); - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName + + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName) + ". Provided Part info is { " + partName + ", " + partNumber + - "}, where as OM has partName " + omPartName, + "}, whereas OM has partName " + omPartName, OMException.ResultCodes.INVALID_PART); } @@ -200,9 +210,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(), OzoneConsts.OM_MULTIPART_MIN_SIZE); - throw new OMException("Complete Multipart Upload Failed: " + - "Entity too small: volume: " + volumeName + "bucket: " + - bucketName + "key: " + keyName, + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName) + + ". 
Entity too small.", OMException.ResultCodes.ENTITY_TOO_SMALL); } } @@ -275,8 +285,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omResponse.setCompleteMultiPartUploadResponse( MultipartUploadCompleteResponse.newBuilder() - .setVolume(volumeName) - .setBucket(bucketName) + .setVolume(requestedVolume) + .setBucket(requestedBucket) .setKey(keyName) .setHash(DigestUtils.sha256Hex(keyName))); @@ -285,9 +295,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } else { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName + - "because of empty part list", + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName) + + " because of empty part list", OMException.ResultCodes.INVALID_REQUEST); } @@ -300,12 +310,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, + volumeName, bucketName); } } - Map auditMap = buildKeyArgsAuditMap(keyArgs); auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString()); // audit log @@ -315,13 +324,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, switch (result) { case SUCCESS: - LOG.debug("MultipartUpload Complete request is successfull for Key: {} " + + LOG.debug("MultipartUpload Complete request is successful for Key: {} " + "in Volume/Bucket {}/{}", keyName, volumeName, bucketName); break; case FAILURE: ozoneManager.getMetrics().incNumCompleteMultipartUploadFails(); LOG.error("MultipartUpload Complete request failed for Key: {} " + - "in Volume/Bucket {}/{}", keyName, volumeName, bucketName, exception); + "in Volume/Bucket {}/{}", keyName, volumeName, bucketName, + exception); + break; default: LOG.error("Unrecognized Result for S3MultipartUploadCommitRequest: {}", multipartUploadCompleteRequest); @@ -330,6 +341,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, return omClientResponse; } + private static String failureMessage(String volume, String bucket, + String keyName) { + return "Complete Multipart Upload Failed: volume: " + + volume + " bucket: " + bucket + " key: " + keyName; + } + private void updateCache(OMMetadataManager omMetadataManager, String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo, long transactionLogIndex) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index b714375249e2..c09bf8651e70 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -21,7 +21,9 @@ import java.util.UUID; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import 
org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -86,6 +88,8 @@ public void setup() throws Exception { auditLogger = Mockito.mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + when(ozoneManager.resolveBucketLink(any(KeyArgs.class))) + .thenReturn(new ResolvedBucket(Pair.of("", ""), Pair.of("", ""))); } @After diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 49794a1ed6f9..dd6caf46857f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -22,7 +22,10 @@ import java.util.List; import java.util.UUID; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -150,6 +153,11 @@ public void setup() throws Exception { clientID = Time.now(); dataSize = 1000L; + Pair volumeAndBucket = Pair.of(volumeName, bucketName); + when(ozoneManager.resolveBucketLink(any(KeyArgs.class))) + .thenReturn(new ResolvedBucket(volumeAndBucket, volumeAndBucket)); + when(ozoneManager.resolveBucketLink(any(Pair.class))) + .thenReturn(new ResolvedBucket(volumeAndBucket, volumeAndBucket)); } @After diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index 99500274628f..0271a7a400b1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -29,6 +29,7 @@ import org.junit.rules.TemporaryFolder; import org.mockito.Mockito; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.AuditMessage; @@ -37,6 +38,8 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; @@ -79,6 +82,13 @@ public void setup() throws Exception { auditLogger = Mockito.mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + when(ozoneManager.resolveBucketLink(any(KeyArgs.class))) + .thenAnswer(inv -> { + KeyArgs args = (KeyArgs) inv.getArguments()[0]; + return new ResolvedBucket( + Pair.of(args.getVolumeName(), args.getBucketName()), + Pair.of(args.getVolumeName(), args.getBucketName())); + }); } diff --git 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index e723df7fdae5..25dea3de9aa3 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -477,9 +477,7 @@ public boolean delete(Path f, boolean recursive) throws IOException { result = innerDelete(f, recursive); } else { LOG.debug("delete: Path is a file: {}", f); - List keyList = new ArrayList<>(); - keyList.add(key); - result = adapter.deleteObjects(keyList); + result = adapter.deleteObject(key); } if (result) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java index ea4ec7096232..2d800605622a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java @@ -38,6 +38,7 @@ InfoBucketHandler.class, ListBucketHandler.class, CreateBucketHandler.class, + LinkBucketHandler.class, DeleteBucketHandler.class, AddAclBucketHandler.class, RemoveAclBucketHandler.class, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java new file mode 100644 index 000000000000..6671f2da6fb8 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell.bucket; + +import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.shell.Handler; +import org.apache.hadoop.ozone.shell.OzoneAddress; + +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.io.IOException; + +/** + * Creates a symlink to another bucket. 
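The handler below boils down to an ordinary createBucket call whose BucketArgs carry a source volume and bucket; from the shell it would presumably be invoked as ozone sh bucket link <source-bucket-uri> <link-bucket-uri>. A self-contained sketch of the same call, with made-up volume and bucket names:

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.StorageType;
    import org.apache.hadoop.ozone.client.BucketArgs;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneVolume;

    final class LinkBucketExample {
      // Creates "link1" in vol1 pointing at the existing bucket vol1/bucket1.
      static void createLink(OzoneClient client) throws IOException {
        BucketArgs args = new BucketArgs.Builder()
            .setStorageType(StorageType.DEFAULT)
            .setVersioning(false)
            .setSourceVolume("vol1")      // bucket the link points to
            .setSourceBucket("bucket1")
            .build();
        OzoneVolume vol = client.getObjectStore().getVolume("vol1");
        vol.createBucket("link1", args);  // the link bucket itself
      }
    }
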
+ */ +@Command(name = "link", + description = "creates a symlink to another bucket") +public class LinkBucketHandler extends Handler { + + @Parameters(index = "0", arity = "1..1", + description = "The bucket which the link should point to.", + converter = BucketUri.class) + private OzoneAddress source; + + @Parameters(index = "1", arity = "1..1", + description = "Address of the link bucket", + converter = BucketUri.class) + private OzoneAddress target; + + @Override + protected OzoneAddress getAddress() { + return source; + } + + /** + * Executes create bucket. + */ + @Override + public void execute(OzoneClient client, OzoneAddress address) + throws IOException { + + BucketArgs.Builder bb = new BucketArgs.Builder() + .setStorageType(StorageType.DEFAULT) + .setVersioning(false) + .setSourceVolume(source.getVolumeName()) + .setSourceBucket(source.getBucketName()); + + String volumeName = target.getVolumeName(); + String bucketName = target.getBucketName(); + + OzoneVolume vol = client.getObjectStore().getVolume(volumeName); + vol.createBucket(bucketName, bb.build()); + + if (isVerbose()) { + OzoneBucket bucket = vol.getBucket(bucketName); + printObjectAsJson(bucket); + } + } +} From cb4a9701cc3930016e8430fdb1dfbcbf616b538f Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 17 Jul 2020 08:20:19 +0200 Subject: [PATCH 035/165] HDDS-3964. Ratis config key mismatch (#1204) --- .../apache/hadoop/hdds/ratis/RatisHelper.java | 24 ++++++-------- .../hdds/ratis/conf/RatisClientConfig.java | 6 ++-- .../hdds/conf/DatanodeRatisGrpcConfig.java | 7 ++-- .../hdds/conf/DatanodeRatisServerConfig.java | 32 ++++++------------- .../fs/ozone/contract/OzoneContract.java | 17 +++++----- .../contract/rooted/RootedOzoneContract.java | 17 +++++----- .../hdds/scm/pipeline/TestNodeFailure.java | 2 +- .../client/rpc/Test2WayCommitInRatis.java | 16 ++++------ .../TestBlockOutputStreamWithFailures.java | 28 ++++++++-------- ...ockOutputStreamWithFailuresFlushDelay.java | 28 ++++++++-------- .../ozone/client/rpc/TestCommitWatcher.java | 27 ++++++++-------- ...estContainerStateMachineFailureOnRead.java | 27 ++++++++-------- .../TestContainerStateMachineFailures.java | 27 ++++++++-------- .../rpc/TestDeleteWithSlowFollower.java | 28 ++++++++-------- .../rpc/TestFailureHandlingByClient.java | 28 ++++++++-------- ...TestFailureHandlingByClientFlushDelay.java | 28 ++++++++-------- .../TestMultiBlockWritesWithDnFailures.java | 28 ++++++++-------- .../rpc/TestValidateBCSIDOnRestart.java | 28 ++++++++-------- .../ozone/client/rpc/TestWatchForCommit.java | 28 ++++++++-------- .../hadoop/ozone/freon/TestDataValidate.java | 17 +++++----- .../freon/TestFreonWithDatanodeRestart.java | 17 +++++----- .../freon/TestFreonWithPipelineDestroy.java | 17 +++++----- .../ozone/freon/TestRandomKeyGenerator.java | 17 +++++----- 23 files changed, 246 insertions(+), 248 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 47523bc79a8f..505b6c9e8092 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -75,13 +75,6 @@ public final class RatisHelper { // Prefix for Ratis Server GRPC and Ratis client conf. 
public static final String HDDS_DATANODE_RATIS_PREFIX_KEY = "hdds.ratis"; - private static final String RAFT_SERVER_PREFIX_KEY = "raft.server"; - public static final String HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY = - HDDS_DATANODE_RATIS_PREFIX_KEY + "." + RAFT_SERVER_PREFIX_KEY; - public static final String HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY = - HDDS_DATANODE_RATIS_PREFIX_KEY + "." + RaftClientConfigKeys.PREFIX; - public static final String HDDS_DATANODE_RATIS_GRPC_PREFIX_KEY = - HDDS_DATANODE_RATIS_PREFIX_KEY + "." + GrpcConfigKeys.PREFIX; private static final Class[] NO_RETRY_EXCEPTIONS = new Class[] {NotReplicatedException.class, GroupMismatchException.class, @@ -227,7 +220,7 @@ private static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader, } /** - * Set all the properties matching with regex + * Set all client properties matching with regex * {@link RatisHelper#HDDS_DATANODE_RATIS_PREFIX_KEY} in * ozone configuration object and configure it to RaftProperties. * @param ozoneConf @@ -237,23 +230,26 @@ public static void createRaftClientProperties(ConfigurationSource ozoneConf, RaftProperties raftProperties) { // As for client we do not require server and grpc server/tls. exclude them. - Map ratisClientConf = ozoneConf.getPropsWithPrefix( - StringUtils.appendIfNotPresent(HDDS_DATANODE_RATIS_PREFIX_KEY, '.')); + Map ratisClientConf = + getDatanodeRatisPrefixProps(ozoneConf); ratisClientConf.forEach((key, val) -> { - if (key.startsWith(RaftClientConfigKeys.PREFIX) || isGrpcClientConfig( - key)) { + if (isClientConfig(key) || isGrpcClientConfig(key)) { raftProperties.set(key, val); } }); } + private static boolean isClientConfig(String key) { + return key.startsWith(RaftClientConfigKeys.PREFIX); + } + private static boolean isGrpcClientConfig(String key) { return key.startsWith(GrpcConfigKeys.PREFIX) && !key .startsWith(GrpcConfigKeys.TLS.PREFIX) && !key .startsWith(GrpcConfigKeys.Server.PREFIX); } /** - * Set all the properties matching with prefix + * Set all server properties matching with prefix * {@link RatisHelper#HDDS_DATANODE_RATIS_PREFIX_KEY} in * ozone configuration object and configure it to RaftProperties. * @param ozoneConf @@ -266,7 +262,7 @@ public static void createRaftServerProperties(ConfigurationSource ozoneConf, getDatanodeRatisPrefixProps(ozoneConf); ratisServerConf.forEach((key, val) -> { // Exclude ratis client configuration. - if (!key.startsWith(RaftClientConfigKeys.PREFIX)) { + if (!isClientConfig(key)) { raftProperties.set(key, val); } }); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java index 18d174d193b8..b88583dc25bc 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java @@ -29,7 +29,7 @@ /** * Configuration related to Ratis Client. This is the config used in creating - * RaftClient creation. + * RaftClient. */ @ConfigGroup(prefix = RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY) public class RatisClientConfig { @@ -38,7 +38,9 @@ public class RatisClientConfig { * Configurations which will be set in RaftProperties. RaftProperties is a * configuration object for Ratis client. */ - @ConfigGroup(prefix = RaftClientConfigKeys.PREFIX) + @ConfigGroup(prefix = + RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY + "." 
+ + RaftClientConfigKeys.PREFIX) public static class RaftConfig { @Config(key = "async.outstanding-requests.max", defaultValue = "32", diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java index ed036fd70812..443b879f060e 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java @@ -18,15 +18,18 @@ package org.apache.hadoop.hdds.conf; +import org.apache.ratis.grpc.GrpcConfigKeys; + import static org.apache.hadoop.hdds.conf.ConfigTag.CLIENT; import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE; -import static org.apache.hadoop.hdds.ratis.RatisHelper.HDDS_DATANODE_RATIS_GRPC_PREFIX_KEY; +import static org.apache.hadoop.hdds.ratis.RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY; /** * Ratis Grpc Config Keys. */ -@ConfigGroup(prefix = HDDS_DATANODE_RATIS_GRPC_PREFIX_KEY) +@ConfigGroup(prefix = HDDS_DATANODE_RATIS_PREFIX_KEY + "." + + GrpcConfigKeys.PREFIX) public class DatanodeRatisGrpcConfig { @Config(key = "message.size.max", defaultValue = "32MB", diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java index 39f58bb56acc..b0034eee02df 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java @@ -18,36 +18,24 @@ package org.apache.hadoop.hdds.conf; +import org.apache.ratis.server.RaftServerConfigKeys; + import java.time.Duration; import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE; import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE; import static org.apache.hadoop.hdds.conf.ConfigTag.RATIS; -import static org.apache.hadoop.hdds.ratis.RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY; +import static org.apache.hadoop.hdds.ratis.RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY; /** * Datanode Ratis server Configuration. */ -@ConfigGroup(prefix = HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY) +@ConfigGroup(prefix = HDDS_DATANODE_RATIS_PREFIX_KEY + "." 
+ + RaftServerConfigKeys.PREFIX) public class DatanodeRatisServerConfig { - private static final String RATIS_SERVER_REQUEST_TIMEOUT_KEY = - "rpc.request.timeout"; - - private static final String RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY = - "watch.timeout"; - - private static final String RATIS_SERVER_NO_LEADER_TIMEOUT_KEY = - "Notification.no-leader.timeout"; - - private static final String RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY = - "rpcslowness.timeout"; - - private static final String RATIS_LEADER_NUM_PENDING_REQUESTS_KEY = - "write.element-limit"; - - @Config(key = RATIS_SERVER_REQUEST_TIMEOUT_KEY, + @Config(key = "rpc.request.timeout", defaultValue = "60s", type = ConfigType.TIME, tags = {OZONE, DATANODE, RATIS}, @@ -64,7 +52,7 @@ public void setRequestTimeOut(Duration duration) { this.requestTimeOut = duration.toMillis(); } - @Config(key = RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY, + @Config(key = "watch.timeout", defaultValue = "180s", type = ConfigType.TIME, tags = {OZONE, DATANODE, RATIS}, @@ -82,7 +70,7 @@ public void setWatchTimeOut(Duration duration) { this.watchTimeOut = duration.toMillis(); } - @Config(key = RATIS_SERVER_NO_LEADER_TIMEOUT_KEY, + @Config(key = "notification.no-leader.timeout", defaultValue = "300s", type = ConfigType.TIME, tags = {OZONE, DATANODE, RATIS}, @@ -100,7 +88,7 @@ public void setNoLeaderTimeout(Duration duration) { this.noLeaderTimeout = duration.toMillis(); } - @Config(key = RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY, + @Config(key = "rpcslowness.timeout", defaultValue = "300s", type = ConfigType.TIME, tags = {OZONE, DATANODE, RATIS}, @@ -118,7 +106,7 @@ public void setFollowerSlownessTimeout(Duration duration) { this.followerSlownessTimeout = duration.toMillis(); } - @Config(key = RATIS_LEADER_NUM_PENDING_REQUESTS_KEY, + @Config(key = "write.element-limit", defaultValue = "1024", type = ConfigType.INT, tags = {OZONE, DATANODE, RATIS, PERFORMANCE}, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index 7335a933b467..e832dd78df96 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; @@ -71,14 +71,13 @@ public static void createCluster() throws IOException { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(raftClientConfig); + conf.addResource(CONTRACT_XML); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java index c90a7ba9ac04..cd6892921314 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; @@ -68,14 +68,13 @@ public static void createCluster() throws IOException { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(raftClientConfig); + conf.addResource(CONTRACT_XML); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java index 2bb1fb18ad67..6f58eaea2ced 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java @@ -63,7 +63,7 @@ public static void init() throws Exception { DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(10)); - ratisServerConfig.setNoLeaderTimeout(Duration.ofSeconds(10)); + ratisServerConfig.setNoLeaderTimeout(Duration.ofMinutes(5)); conf.setFromObject(ratisServerConfig); conf.set(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, "2s"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java index 087376b2d89e..7da9d1125ce3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.XceiverClientSpi; @@ -97,14 +97,12 @@ private void startCluster(OzoneConfiguration conf) throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(raftClientConfig); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index 40756b0f4d13..37094d5d0982 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; @@ -108,19 +108,21 @@ public void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + conf.setBoolean(OZONE_CLIENT_STREAM_BUFFER_FLUSH_DELAY, false); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + conf.setFromObject(ratisClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7) .setTotalPipelineNumLimit(10).setBlockSize(blockSize) .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java index 6fa54ed8faac..3462e50b09bd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; @@ -102,18 +102,20 @@ public void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + conf.setFromObject(ratisClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7) .setTotalPipelineNumLimit(10).setBlockSize(blockSize) .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java index b9b7e5ed1a6b..c3eca6af0031 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.XceiverClientReply; @@ -112,18 +112,19 @@ public void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + conf.setFromObject(ratisClientConfig); + conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE"); conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index 7af57debd32e..d390ca381e37 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; @@ -97,18 +97,19 @@ public void setup() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + conf.setFromObject(ratisClientConfig); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index fba3ba1289d1..e266ff4b7c5b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; @@ -121,23 +121,24 @@ public static void init() throws Exception { conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 10, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 10, TimeUnit.SECONDS); + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(10)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(ratisClientConfig); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(raftClientConfig); + conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java index cac16f12cc8d..3dc46b91cc14 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; @@ -120,20 +120,22 @@ public static void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(raftClientConfig); + conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + conf.setFromObject(ratisClientConfig); + conf.setQuietMode(false); int numOfDatanodes = 3; cluster = MiniOzoneCluster.newBuilder(conf) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index e35a39304829..887acbb6119c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -97,10 +97,13 @@ private void init() throws Exception { chunkSize = (int) OzoneConsts.MB; blockSize = 4 * chunkSize; conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + conf.setFromObject(ratisClientConfig); + conf.setTimeDuration( OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); @@ -112,14 +115,13 @@ private void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + conf.setBoolean( OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_DELAY, false); conf.setQuietMode(false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index 8f145b48ebfd..a7bf5145a77d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -95,10 +95,13 @@ private void init() throws Exception { maxFlushSize = 2 * flushSize; blockSize = 4 * chunkSize; conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + conf.setFromObject(ratisClientConfig); + conf.setTimeDuration( OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); @@ -110,14 +113,13 @@ private void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + conf.setQuietMode(false); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index 1ca073d868cd..1329cc3898c5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -94,18 +94,20 @@ private void startCluster(int datanodes) throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + conf.setFromObject(ratisClientConfig); + conf.setTimeDuration( OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java index 63e63af6e1a0..315a1dfde764 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -106,24 +106,24 @@ public static void init() throws Exception { conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 10, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 10, TimeUnit.SECONDS); + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(10)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(ratisClientConfig); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); - conf.setQuietMode(false); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2). setHbInterval(200) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index 131d2890b8b6..ed9642aff46b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.*; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -100,23 +100,25 @@ public void init() throws Exception { conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 10, TimeUnit.SECONDS); conf.setQuietMode(false); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 10, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 10, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(10)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(ratisClientConfig); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(raftClientConfig); + conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(9) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java index 154bd559a485..86b004d9d20b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.Assert; import org.junit.Test; @@ -44,14 +44,13 @@ static void startCluster(OzoneConfiguration conf) throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(10)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5).setTotalPipelineNumLimit(8).build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java index 8984b66d0cde..1207bab8be0d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.AfterClass; @@ -74,14 +74,13 @@ public static void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf) .setHbProcessorInterval(1000) .setHbInterval(1000) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java index 61ac6af738e6..615444fd7f5a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -67,14 +67,13 @@ public static void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf) .setHbProcessorInterval(1000) .setHbInterval(1000) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java index ddb27cead77e..f05ce078364d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.AfterClass; @@ -57,14 +57,13 @@ public static void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + raftClientConfig.setRpcWatchRequestTimeout(TimeUnit.SECONDS.toMillis(3)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); cluster.waitForClusterToBeReady(); } From 3d6ac6703520150da389814c006f609c99c1ceea Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 17 Jul 2020 12:45:03 +0200 Subject: [PATCH 036/165] HDDS-3855. 
Add upgrade smoketest (#1142) --- .github/workflows/post-commit.yml | 1 + .../dev-support/bin/dist-layout-stitching | 1 + hadoop-ozone/dist/pom.xml | 2 +- hadoop-ozone/dist/src/main/compose/testlib.sh | 39 +++--- .../dist/src/main/compose/upgrade/.env | 21 +++ .../dist/src/main/compose/upgrade/README.md | 29 ++++ .../main/compose/upgrade/docker-compose.yaml | 127 ++++++++++++++++++ .../src/main/compose/upgrade/docker-config | 33 +++++ .../dist/src/main/compose/upgrade/test.sh | 70 ++++++++++ .../main/compose/upgrade/versions/README.md | 15 +++ .../compose/upgrade/versions/ozone-0.5.0.sh | 18 +++ .../compose/upgrade/versions/ozone-0.6.0.sh | 18 +++ hadoop-ozone/dist/src/shell/upgrade/0.6.0.sh | 23 ++++ .../shell/upgrade/0.6.0/01-migrate-scm-db.sh | 24 ++++ 14 files changed, 400 insertions(+), 21 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/.env create mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/README.md create mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/docker-compose.yaml create mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/docker-config create mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/test.sh create mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md create mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh create mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.6.0.sh create mode 100755 hadoop-ozone/dist/src/shell/upgrade/0.6.0.sh create mode 100755 hadoop-ozone/dist/src/shell/upgrade/0.6.0/01-migrate-scm-db.sh diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index e00018ac15f1..bac27e467182 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -150,6 +150,7 @@ jobs: env: KEEP_IMAGE: false OZONE_WITH_COVERAGE: true + OZONE_VOLUME_OWNER: 1000 - uses: actions/upload-artifact@master if: always() with: diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index e1f5c7e55c5e..80455a62953a 100755 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -108,6 +108,7 @@ run cp "${ROOT}/hadoop-ozone/dist/src/shell/hdds/hadoop-config.cmd" "libexec/" run cp "${ROOT}/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh" "libexec/" run cp "${ROOT}/hadoop-ozone/dist/src/shell/ozone/ozone-config.sh" "libexec/" run cp -r "${ROOT}/hadoop-ozone/dist/src/shell/shellprofile.d" "libexec/" +run cp -r "${ROOT}/hadoop-ozone/dist/src/shell/upgrade" "libexec/" run cp "${ROOT}/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh" "sbin/" diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 840f628899b2..a766c0a18023 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -28,7 +28,7 @@ UTF-8 true - 20200420-1 + 20200625-1 diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 15d1664ed80c..56c35c186243 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -17,7 +17,6 @@ set -e COMPOSE_ENV_NAME=$(basename "$COMPOSE_DIR") -COMPOSE_FILE=$COMPOSE_DIR/docker-compose.yaml RESULT_DIR=${RESULT_DIR:-"$COMPOSE_DIR/result"} RESULT_DIR_INSIDE="/tmp/smoketest/$(basename "$COMPOSE_ENV_NAME")/result" SMOKETEST_DIR_INSIDE="${OZONE_DIR:-/opt/hadoop}/smoketest" @@ -32,7 +31,7 @@ fi ## @description create results directory, 
purging any prior data create_results_dir() { #delete previous results - rm -rf "$RESULT_DIR" + [[ "${OZONE_KEEP_RESULTS:-}" == "true" ]] || rm -rf "$RESULT_DIR" mkdir -p "$RESULT_DIR" #Should be writeable from the docker containers where user is different. chmod ogu+w "$RESULT_DIR" @@ -40,9 +39,9 @@ create_results_dir() { ## @description wait until safemode exit (or 180 seconds) -## @param the docker-compose file wait_for_safemode_exit(){ - local compose_file=$1 + # version-dependent + : ${OZONE_ADMIN_COMMAND:=admin} #Reset the timer SECONDS=0 @@ -51,11 +50,11 @@ wait_for_safemode_exit(){ while [[ $SECONDS -lt 180 ]]; do #This line checks the safemode status in scm - local command="ozone admin safemode status" + local command="ozone ${OZONE_ADMIN_COMMAND} safemode status" if [[ "${SECURITY_ENABLED}" == 'true' ]]; then - status=$(docker-compose -f "${compose_file}" exec -T scm bash -c "kinit -k HTTP/scm@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab && $command" || true) + status=$(docker-compose exec -T scm bash -c "kinit -k HTTP/scm@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab && $command" || true) else - status=$(docker-compose -f "${compose_file}" exec -T scm bash -c "$command") + status=$(docker-compose exec -T scm bash -c "$command") fi echo $status @@ -80,9 +79,9 @@ start_docker_env(){ create_results_dir export OZONE_SAFEMODE_MIN_DATANODES="${datanode_count}" - docker-compose -f "$COMPOSE_FILE" --no-ansi down - if ! { docker-compose -f "$COMPOSE_FILE" --no-ansi up -d --scale datanode="${datanode_count}" \ - && wait_for_safemode_exit "$COMPOSE_FILE"; }; then + docker-compose --no-ansi down + if ! { docker-compose --no-ansi up -d --scale datanode="${datanode_count}" \ + && wait_for_safemode_exit ; }; then OUTPUT_NAME="$COMPOSE_ENV_NAME" stop_docker_env return 1 @@ -114,11 +113,11 @@ execute_robot_test(){ OUTPUT_PATH="$RESULT_DIR_INSIDE/${OUTPUT_FILE}" # shellcheck disable=SC2068 - docker-compose -f "$COMPOSE_FILE" exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE" \ - && docker-compose -f "$COMPOSE_FILE" exec -T "$CONTAINER" robot -v OM_SERVICE_ID:"${OM_SERVICE_ID}" -v SECURITY_ENABLED:"${SECURITY_ENABLED}" -v OM_HA_PARAM:"${OM_HA_PARAM}" ${ARGUMENTS[@]} --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST" + docker-compose exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE" \ + && docker-compose exec -T "$CONTAINER" robot -v OM_SERVICE_ID:"${OM_SERVICE_ID}" -v SECURITY_ENABLED:"${SECURITY_ENABLED}" -v OM_HA_PARAM:"${OM_HA_PARAM}" ${ARGUMENTS[@]} --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST" local -i rc=$? 
- FULL_CONTAINER_NAME=$(docker-compose -f "$COMPOSE_FILE" ps | grep "_${CONTAINER}_" | head -n 1 | awk '{print $1}') + FULL_CONTAINER_NAME=$(docker-compose ps | grep "_${CONTAINER}_" | head -n 1 | awk '{print $1}') docker cp "$FULL_CONTAINER_NAME:$OUTPUT_PATH" "$RESULT_DIR/" copy_daemon_logs @@ -135,7 +134,7 @@ execute_robot_test(){ ## @description Copy any 'out' files for daemon processes to the result dir copy_daemon_logs() { local c f - for c in $(docker-compose -f "$COMPOSE_FILE" ps | grep "^${COMPOSE_ENV_NAME}_" | awk '{print $1}'); do + for c in $(docker-compose ps | grep "^${COMPOSE_ENV_NAME}_" | awk '{print $1}'); do for f in $(docker exec "${c}" ls -1 /var/log/hadoop | grep -F '.out'); do docker cp "${c}:/var/log/hadoop/${f}" "$RESULT_DIR/" done @@ -149,7 +148,7 @@ copy_daemon_logs() { execute_command_in_container(){ set -e # shellcheck disable=SC2068 - docker-compose -f "$COMPOSE_FILE" exec -T "$@" + docker-compose exec -T "$@" set +e } @@ -157,7 +156,7 @@ execute_command_in_container(){ ## @param List of container names, eg datanode_1 datanode_2 stop_containers() { set -e - docker-compose -f "$COMPOSE_FILE" --no-ansi stop $@ + docker-compose --no-ansi stop $@ set +e } @@ -166,7 +165,7 @@ stop_containers() { ## @param List of container names, eg datanode_1 datanode_2 start_containers() { set -e - docker-compose -f "$COMPOSE_FILE" --no-ansi start $@ + docker-compose --no-ansi start $@ set +e } @@ -185,7 +184,7 @@ wait_for_port(){ while [[ $SECONDS -lt $timeout ]]; do set +e - docker-compose -f "${COMPOSE_FILE}" exec -T scm /bin/bash -c "nc -z $host $port" + docker-compose exec -T scm /bin/bash -c "nc -z $host $port" status=$? set -e if [ $status -eq 0 ] ; then @@ -202,9 +201,9 @@ wait_for_port(){ ## @description Stops a docker-compose based test environment (with saving the logs) stop_docker_env(){ - docker-compose -f "$COMPOSE_FILE" --no-ansi logs > "$RESULT_DIR/docker-$OUTPUT_NAME.log" + docker-compose --no-ansi logs > "$RESULT_DIR/docker-$OUTPUT_NAME.log" if [ "${KEEP_RUNNING:-false}" = false ]; then - docker-compose -f "$COMPOSE_FILE" --no-ansi down + docker-compose --no-ansi down fi } diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/.env b/hadoop-ozone/dist/src/main/compose/upgrade/.env new file mode 100644 index 000000000000..6f757c552b2e --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/.env @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +HDDS_VERSION=${hdds.version} +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} +OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} +OZONE_DIR=/opt/hadoop +OZONE_VOLUME=. 
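The `.env` defaults above are only a starting point: `test.sh` (added later in this patch) reads its knobs from the environment, so a run can override the upgrade versions, data directory, and replication factor. A minimal sketch of a local invocation, assuming a built distribution whose `compose/upgrade` directory contains these files (the exact dist path below is an assumption, not part of this change):

# change into the upgrade compose directory of a built Ozone distribution
# (path is an assumption; adjust to wherever the dist was stitched)
cd hadoop-ozone/dist/target/ozone-*/compose/upgrade
# optional overrides; test.sh defaults to 0.5.0 -> 0.6.0 with data under ./data
export OZONE_UPGRADE_FROM=0.5.0
export OZONE_UPGRADE_TO=0.6.0
export OZONE_VOLUME="$(pwd)/data"
export OZONE_REPLICATION_FACTOR=3
bash test.sh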
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/README.md b/hadoop-ozone/dist/src/main/compose/upgrade/README.md new file mode 100644 index 000000000000..2a832f4b9e29 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/README.md @@ -0,0 +1,29 @@ + + +# Compose file for upgrade + +This directory contains a sample cluster definition and script for +testing upgrade from previous version to the current one. + +Data for each container is persisted in mounted volume (by default it's +`data` under the `compose/upgrade` directory, but can be overridden via +`OZONE_VOLUME` environment variable). + +Prior version is run using an official `apache/ozone` image, while the +current version is run with the `ozone-runner` image using locally built +source code. + +Currently the test script only supports a single version upgrade (eg. +from 0.5.0 to 0.6.0). diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/docker-compose.yaml new file mode 100644 index 000000000000..ae527593dcd4 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/docker-compose.yaml @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +version: "3.4" + +# reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) +x-common-config: + &common-config + env_file: + - docker-config + image: ${OZONE_IMAGE} + +x-replication: + &replication + OZONE-SITE.XML_ozone.replication: ${OZONE_REPLICATION_FACTOR:-1} + +x-datanode: + &datanode + command: ["ozone","datanode"] + <<: *common-config + environment: + <<: *replication + ports: + - 9864 + - 9882 + +services: + dn1: + <<: *datanode + networks: + net: + ipv4_address: 10.9.0.11 + volumes: + - ${OZONE_VOLUME}/dn1:/data + - ../..:${OZONE_DIR} + dn2: + <<: *datanode + networks: + net: + ipv4_address: 10.9.0.12 + volumes: + - ${OZONE_VOLUME}/dn2:/data + - ../..:${OZONE_DIR} + dn3: + <<: *datanode + networks: + net: + ipv4_address: 10.9.0.13 + volumes: + - ${OZONE_VOLUME}/dn3:/data + - ../..:${OZONE_DIR} + om: + command: ["ozone","om"] + <<: *common-config + environment: + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + <<: *replication + networks: + net: + ipv4_address: 10.9.0.14 + ports: + - 9874:9874 + - 9862:9862 + volumes: + - ${OZONE_VOLUME}/om:/data + - ../..:${OZONE_DIR} + recon: + command: ["ozone","recon"] + <<: *common-config + environment: + <<: *replication + networks: + net: + ipv4_address: 10.9.0.15 + ports: + - 9888:9888 + volumes: + - ${OZONE_VOLUME}/recon:/data + - ../..:${OZONE_DIR} + s3g: + command: ["ozone","s3g"] + <<: *common-config + environment: + <<: *replication + networks: + net: + ipv4_address: 10.9.0.16 + ports: + - 9878:9878 + volumes: + - ${OZONE_VOLUME}/s3g:/data + - ../..:${OZONE_DIR} + scm: + command: ["ozone","scm"] + <<: *common-config + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1} + <<: *replication + networks: + net: + ipv4_address: 10.9.0.17 + ports: + - 9876:9876 + volumes: + - ${OZONE_VOLUME}/scm:/data + - ../..:${OZONE_DIR} + +networks: + net: + driver: bridge + ipam: + config: + - subnet: 10.9.0.0/16 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/docker-config new file mode 100644 index 000000000000..ecc994b4ac26 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/docker-config @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem +CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem +OZONE-SITE.XML_ozone.om.address=om +OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.container.size=1GB +OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 +OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.block.client.address=scm +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon +OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_hdds.datanode.dir=/data/hdds +OZONE-SITE.XML_ozone.recon.address=recon:9891 +OZONE-SITE.XML_recon.om.snapshot.task.interval.delay=1m + +no_proxy=om,scm,s3g,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh new file mode 100644 index 000000000000..0babd17676d5 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +: "${OZONE_REPLICATION_FACTOR:=3}" +: "${OZONE_UPGRADE_FROM:="0.5.0"}" +: "${OZONE_UPGRADE_TO:="0.6.0"}" +: "${OZONE_VOLUME:="${COMPOSE_DIR}/data"}" + +export OZONE_VOLUME + +mkdir -p "${OZONE_VOLUME}"/{dn1,dn2,dn3,om,recon,s3g,scm} +mkdir -p "${OZONE_VOLUME}/debug" + +if [[ -n "${OZONE_VOLUME_OWNER}" ]]; then + current_user=$(whoami) + if [[ "${OZONE_VOLUME_OWNER}" != "${current_user}" ]]; then + chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}" \ + || sudo chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}" + fi +fi + +# define version-specifics +export OZONE_DIR=/opt/ozone +export OZONE_IMAGE="apache/ozone:${OZONE_UPGRADE_FROM}" +# shellcheck source=/dev/null +source "${COMPOSE_DIR}/versions/ozone-${OZONE_UPGRADE_FROM}.sh" +# shellcheck source=/dev/null +source "${COMPOSE_DIR}/../testlib.sh" + +# prepare pre-upgrade cluster +start_docker_env +execute_robot_test scm topology/loaddata.robot +stop_docker_env + +# run upgrade scripts +SCRIPT_DIR=../../libexec/upgrade +[[ -f "${SCRIPT_DIR}/${OZONE_UPGRADE_TO}.sh" ]] && "${SCRIPT_DIR}/${OZONE_UPGRADE_TO}.sh" + +# update version-specifics +export OZONE_DIR=/opt/hadoop +unset OZONE_IMAGE # use apache/ozone-runner defined in docker-compose.yaml +# shellcheck source=/dev/null +source "${COMPOSE_DIR}/versions/ozone-${OZONE_UPGRADE_TO}.sh" +# shellcheck source=/dev/null +source "${COMPOSE_DIR}/../testlib.sh" + +# re-start cluster with new version and check after upgrade +export OZONE_KEEP_RESULTS=true +start_docker_env +execute_robot_test scm topology/readdata.robot +stop_docker_env + +generate_report diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md b/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md new file mode 100644 index 000000000000..c662c2f286be --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md @@ -0,0 +1,15 @@ + + +The scripts in this directory define version-specific behavior required for [`testlib.sh`](../../testlib.sh). For example the `ozone admin` command was renamed from `ozone scmcli` in 0.6.0. diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh new file mode 100644 index 000000000000..68f6c75764df --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +export OZONE_ADMIN_COMMAND=scmcli diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.6.0.sh b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.6.0.sh new file mode 100644 index 000000000000..bab1dba91baf --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.6.0.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export OZONE_ADMIN_COMMAND=admin diff --git a/hadoop-ozone/dist/src/shell/upgrade/0.6.0.sh b/hadoop-ozone/dist/src/shell/upgrade/0.6.0.sh new file mode 100755 index 000000000000..58c78dbdfb64 --- /dev/null +++ b/hadoop-ozone/dist/src/shell/upgrade/0.6.0.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +: "${SCM_DIR:="${OZONE_VOLUME}/scm"}" +: "${OZONE_RUNNER_VERSION:="20200625-1"}" + +docker run --rm -v "${SCM_DIR}":/scm -v "${SCRIPT_DIR}/0.6.0":/upgrade -w /scm/metadata apache/ozone-runner:"${OZONE_RUNNER_VERSION}" /upgrade/01-migrate-scm-db.sh diff --git a/hadoop-ozone/dist/src/shell/upgrade/0.6.0/01-migrate-scm-db.sh b/hadoop-ozone/dist/src/shell/upgrade/0.6.0/01-migrate-scm-db.sh new file mode 100755 index 000000000000..dee676841972 --- /dev/null +++ b/hadoop-ozone/dist/src/shell/upgrade/0.6.0/01-migrate-scm-db.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo Running upgrade script for HDDS-3499 + +ldb --db=scm.db create_column_family containers +ldb --db=scm.db create_column_family pipelines + +ldb --db=scm-container.db --key_hex --value_hex dump | ldb --db=scm.db --key_hex --value_hex --column_family=containers load +ldb --db=scm-pipeline.db --key_hex --value_hex dump | ldb --db=scm.db --key_hex --value_hex --column_family=pipelines load From 3050f0bda4ee08d4aa29816a55c5b13a25e3daa6 Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Fri, 17 Jul 2020 11:51:44 -0700 Subject: [PATCH 037/165] HDDS-3965. SCM failed to start up for duplicated pipeline detected. (#1210) --- .../hdds/utils/db/RDBStoreIterator.java | 40 ++-- .../hdds/utils/db/TestRDBStoreIterator.java | 10 +- .../hdds/utils/db/TestRDBTableStore.java | 61 ++++++ .../hdds/scm/pipeline/SCMPipelineManager.java | 6 + .../scm/pipeline/TestSCMPipelineManager.java | 80 ++++++++ ...CMStoreImplWithOldPipelineIDKeyFormat.java | 180 ++++++++++++++++++ 6 files changed, 360 insertions(+), 17 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java index 5902486ec6ee..ffe5f96baa79 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java @@ -24,6 +24,8 @@ import java.util.function.Consumer; import org.rocksdb.RocksIterator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * RocksDB store iterator. 
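The RDBStoreIterator hunks that follow make the iterator remember the entry it last handed out (`currentEntry`), so that `removeFromDB()` deletes that entry rather than whatever key the underlying RocksIterator is positioned on after `next()` has advanced it. The block below is a self-contained toy analogue of that contract, not Ozone code: the class name and the TreeMap backing are invented purely for illustration.

```java
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.TreeMap;

/** Toy analogue of the patched iterator semantics; illustration only. */
class CurrentEntryIteratorSketch {
  private final TreeMap<String, String> store;    // stands in for the RocksDB table
  private String cursor;                          // key the underlying cursor points at
  private Map.Entry<String, String> currentEntry; // last entry handed back to the caller

  CurrentEntryIteratorSketch(TreeMap<String, String> store) {
    this.store = store;
    seekToFirst();
  }

  void seekToFirst() {
    cursor = store.isEmpty() ? null : store.firstKey();
    setCurrentEntry();
  }

  Map.Entry<String, String> seek(String key) {
    cursor = store.ceilingKey(key);      // like RocksIterator#seek
    setCurrentEntry();
    return currentEntry;
  }

  boolean hasNext() {
    return cursor != null;
  }

  Map.Entry<String, String> next() {
    setCurrentEntry();                   // remember what is about to be returned
    if (currentEntry == null) {
      throw new NoSuchElementException("no more elements");
    }
    cursor = store.higherKey(cursor);    // advance the underlying cursor
    return currentEntry;
  }

  /** Deletes the entry last returned by next()/seek(), not the advanced cursor position. */
  void removeFromDB() {
    if (currentEntry != null) {
      store.remove(currentEntry.getKey());
    }
  }

  private void setCurrentEntry() {
    String value = cursor == null ? null : store.get(cursor);
    currentEntry = value == null ? null : Map.entry(cursor, value);
  }

  public static void main(String[] args) {
    TreeMap<String, String> db = new TreeMap<>(Map.of("1", "a", "2", "b", "3", "c"));
    CurrentEntryIteratorSketch it = new CurrentEntryIteratorSketch(db);
    it.seek("2");
    it.next();         // returns ("2", "b") and advances the cursor to "3"
    it.removeFromDB(); // deletes "2", the entry that was returned, not "3"
    System.out.println(db.keySet()); // [1, 3]
  }
}
```

This is the behaviour the new `TestRDBTableStore#testIteratorRemoveFromDB` cases assert, and it is what lets `SCMPipelineManager` replace an old-format pipeline key in place (`removeFromDB()` followed by `put()`) without deleting the neighbouring entry.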
@@ -31,12 +33,16 @@ public class RDBStoreIterator implements TableIterator { + private static final Logger LOG = + LoggerFactory.getLogger(RDBStoreIterator.class); + private RocksIterator rocksDBIterator; private RDBTable rocksDBTable; + private ByteArrayKeyValue currentEntry; public RDBStoreIterator(RocksIterator iterator) { this.rocksDBIterator = iterator; - rocksDBIterator.seekToFirst(); + seekToFirst(); } public RDBStoreIterator(RocksIterator iterator, RDBTable table) { @@ -52,6 +58,15 @@ public void forEachRemaining( } } + private void setCurrentEntry() { + if (rocksDBIterator.isValid()) { + currentEntry = ByteArrayKeyValue.create(rocksDBIterator.key(), + rocksDBIterator.value()); + } else { + currentEntry = null; + } + } + @Override public boolean hasNext() { return rocksDBIterator.isValid(); @@ -59,12 +74,10 @@ public boolean hasNext() { @Override public ByteArrayKeyValue next() { - if (rocksDBIterator.isValid()) { - ByteArrayKeyValue value = - ByteArrayKeyValue.create(rocksDBIterator.key(), rocksDBIterator - .value()); + setCurrentEntry(); + if (currentEntry != null) { rocksDBIterator.next(); - return value; + return currentEntry; } throw new NoSuchElementException("RocksDB Store has no more elements"); } @@ -72,21 +85,20 @@ public ByteArrayKeyValue next() { @Override public void seekToFirst() { rocksDBIterator.seekToFirst(); + setCurrentEntry(); } @Override public void seekToLast() { rocksDBIterator.seekToLast(); + setCurrentEntry(); } @Override public ByteArrayKeyValue seek(byte[] key) { rocksDBIterator.seek(key); - if (rocksDBIterator.isValid()) { - return ByteArrayKeyValue.create(rocksDBIterator.key(), - rocksDBIterator.value()); - } - return null; + setCurrentEntry(); + return currentEntry; } @Override @@ -111,8 +123,10 @@ public void removeFromDB() throws IOException { if (rocksDBTable == null) { throw new UnsupportedOperationException("remove"); } - if (rocksDBIterator.isValid()) { - rocksDBTable.delete(rocksDBIterator.key()); + if (currentEntry != null) { + rocksDBTable.delete(currentEntry.getKey()); + } else { + LOG.info("Unable to delete currentEntry as it does not exist."); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java index 6e85977843ac..fcb7dd2175a2 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java @@ -58,12 +58,14 @@ public void setup() { @Test public void testForeachRemainingCallsConsumerWithAllElements() { when(rocksDBIteratorMock.isValid()) - .thenReturn(true, true, true, true, true, true, false); + .thenReturn(true, true, true, true, true, true, true, false); when(rocksDBIteratorMock.key()) - .thenReturn(new byte[]{0x00}, new byte[]{0x01}, new byte[]{0x02}) + .thenReturn(new byte[]{0x00}, new byte[]{0x00}, new byte[]{0x01}, + new byte[]{0x02}) .thenThrow(new NoSuchElementException()); when(rocksDBIteratorMock.value()) - .thenReturn(new byte[]{0x7f}, new byte[]{0x7e}, new byte[]{0x7d}) + .thenReturn(new byte[]{0x7f}, new byte[]{0x7f}, new byte[]{0x7e}, + new byte[]{0x7d}) .thenThrow(new NoSuchElementException()); @@ -91,7 +93,7 @@ public void testForeachRemainingCallsConsumerWithAllElements() { @Test public void testHasNextDependsOnIsvalid(){ - when(rocksDBIteratorMock.isValid()).thenReturn(true, false); + 
when(rocksDBIteratorMock.isValid()).thenReturn(true, true, false); RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java index 00d05a14045c..5d007630e54e 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java @@ -364,4 +364,65 @@ public void testCountEstimatedRowsInTable() throws Exception { Assert.assertTrue(keyCount > 0 && keyCount <= numKeys); } } + + @Test + public void testIteratorRemoveFromDB() throws Exception { + + // Remove without next removes first entry. + try (Table testTable = rdbStore.getTable("Fifth")) { + writeToTable(testTable, 3); + TableIterator> iterator = + testTable.iterator(); + iterator.removeFromDB(); + Assert.assertNull(testTable.get("1".getBytes(StandardCharsets.UTF_8))); + Assert.assertNotNull(testTable.get("2".getBytes(StandardCharsets.UTF_8))); + Assert.assertNotNull(testTable.get("3".getBytes(StandardCharsets.UTF_8))); + } + + // Remove after seekToLast removes lastEntry + try (Table testTable = rdbStore.getTable("Sixth")) { + writeToTable(testTable, 3); + TableIterator> iterator = + testTable.iterator(); + iterator.seekToLast(); + iterator.removeFromDB(); + Assert.assertNotNull(testTable.get("1".getBytes(StandardCharsets.UTF_8))); + Assert.assertNotNull(testTable.get("2".getBytes(StandardCharsets.UTF_8))); + Assert.assertNull(testTable.get("3".getBytes(StandardCharsets.UTF_8))); + } + + // Remove after seek deletes that entry. + try (Table testTable = rdbStore.getTable("Sixth")) { + writeToTable(testTable, 3); + TableIterator> iterator = + testTable.iterator(); + iterator.seek("3".getBytes(StandardCharsets.UTF_8)); + iterator.removeFromDB(); + Assert.assertNotNull(testTable.get("1".getBytes(StandardCharsets.UTF_8))); + Assert.assertNotNull(testTable.get("2".getBytes(StandardCharsets.UTF_8))); + Assert.assertNull(testTable.get("3".getBytes(StandardCharsets.UTF_8))); + } + + // Remove after next() deletes entry that was returned by next. 
+ try (Table testTable = rdbStore.getTable("Sixth")) { + writeToTable(testTable, 3); + TableIterator> iterator = + testTable.iterator(); + iterator.seek("2".getBytes(StandardCharsets.UTF_8)); + iterator.next(); + iterator.removeFromDB(); + Assert.assertNotNull(testTable.get("1".getBytes(StandardCharsets.UTF_8))); + Assert.assertNull(testTable.get("2".getBytes(StandardCharsets.UTF_8))); + Assert.assertNotNull(testTable.get("3".getBytes(StandardCharsets.UTF_8))); + } + } + + private void writeToTable(Table testTable, int num) throws IOException { + for (int i = 1; i <= num; i++) { + byte[] key = (i + "").getBytes(StandardCharsets.UTF_8); + byte[] value = + RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); + testTable.put(key, value); + } + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index fda937134c1c..6fce895185f7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -209,6 +209,7 @@ private void checkKeyAndReplaceIfObsolete( ) { if (!pipelineID.equals(pipeline.getId())) { try { + LOG.info("Found pipeline in old format key : {}", pipeline.getId()); it.removeFromDB(); pipelineStore.put(pipeline.getId(), pipeline); } catch (IOException e) { @@ -701,4 +702,9 @@ public void onMessage(SafeModeStatus status, startPipelineCreator(); } } + + @VisibleForTesting + protected static Logger getLog() { + return LOG; + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java index fc8f61a7dbf1..62289b95a9a3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java @@ -22,10 +22,13 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; @@ -56,12 +59,15 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; + +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.junit.After; import org.junit.Assert; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import org.junit.Before; import org.junit.Test; +import org.junit.rules.TemporaryFolder; import org.mockito.InOrder; import static org.mockito.Mockito.doReturn; @@ -617,6 +623,80 @@ public void testPipelineDBKeyFormatChange() throws Exception { verify(pipelineStore, never()).put(p2.getId(), p2); } + @Test + public void testScmWithPipelineDBKeyFormatChange() throws Exception { + TemporaryFolder tempDir = new TemporaryFolder(); + tempDir.create(); + File dir = tempDir.newFolder(); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); + + 
SCMMetadataStore scmDbWithOldKeyFormat = null; + Map oldPipelines = new HashMap<>(); + try { + scmDbWithOldKeyFormat = + new TestSCMStoreImplWithOldPipelineIDKeyFormat(conf); + // Create 3 pipelines. + for (int i = 0; i < 3; i++) { + Pipeline pipeline = pipelineStub(); + scmDbWithOldKeyFormat.getPipelineTable() + .put(pipeline.getId(), pipeline); + oldPipelines.put(pipeline.getId().getId(), pipeline); + } + } finally { + if (scmDbWithOldKeyFormat != null) { + scmDbWithOldKeyFormat.stop(); + } + } + + LogCapturer logCapturer = + LogCapturer.captureLogs(SCMPipelineManager.getLog()); + + // Create SCMPipelineManager with new DBDefinition. + SCMMetadataStore newScmMetadataStore = null; + try { + newScmMetadataStore = new SCMMetadataStoreImpl(conf); + SCMPipelineManager pipelineManager = new SCMPipelineManager(conf, + nodeManager, + newScmMetadataStore.getPipelineTable(), + new EventQueue()); + + waitForLog(logCapturer); + assertEquals(3, pipelineManager.getPipelines().size()); + oldPipelines.values().forEach(p -> + pipelineManager.containsPipeline(p.getId())); + } finally { + newScmMetadataStore.stop(); + } + + // Mimicking another restart. + try { + logCapturer.clearOutput(); + newScmMetadataStore = new SCMMetadataStoreImpl(conf); + SCMPipelineManager pipelineManager = new SCMPipelineManager(conf, + nodeManager, + newScmMetadataStore.getPipelineTable(), + new EventQueue()); + try { + waitForLog(logCapturer); + Assert.fail("Unexpected log: " + logCapturer.getOutput()); + } catch (TimeoutException ex) { + Assert.assertTrue(ex.getMessage().contains("Timed out")); + } + assertEquals(3, pipelineManager.getPipelines().size()); + oldPipelines.values().forEach(p -> + pipelineManager.containsPipeline(p.getId())); + } finally { + newScmMetadataStore.stop(); + } + } + + private static void waitForLog(LogCapturer logCapturer) + throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor(() -> logCapturer.getOutput() + .contains("Found pipeline in old format key"), + 1000, 5000); + } + private Pipeline pipelineStub() { return Pipeline.newBuilder() .setId(PipelineID.randomId()) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java new file mode 100644 index 000000000000..a04ecea75041 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.pipeline; + +import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.PIPELINES; + +import java.io.IOException; +import java.math.BigInteger; +import java.security.cert.X509Certificate; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.metadata.PipelineCodec; +import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; +import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore; +import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; +import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; + +/** + * Test SCM Metadata Store that has ONLY the pipeline table whose key uses the + * old codec format. + */ +public class TestSCMStoreImplWithOldPipelineIDKeyFormat + implements SCMMetadataStore { + + private DBStore store; + private final OzoneConfiguration configuration; + private Table pipelineTable; + + public TestSCMStoreImplWithOldPipelineIDKeyFormat( + OzoneConfiguration config) throws IOException { + this.configuration = config; + start(configuration); + } + + @Override + public void start(OzoneConfiguration config) + throws IOException { + if (this.store == null) { + this.store = DBStoreBuilder.createDBStore(config, + new SCMDBTestDefinition()); + pipelineTable = PIPELINES.getTable(store); + } + } + + @Override + public void stop() throws Exception { + if (store != null) { + store.close(); + store = null; + } + } + + @Override + public DBStore getStore() { + return null; + } + + @Override + public Table getDeletedBlocksTXTable() { + return null; + } + + @Override + public Long getCurrentTXID() { + return null; + } + + @Override + public Long getNextDeleteBlockTXID() { + return null; + } + + @Override + public Table getValidCertsTable() { + return null; + } + + @Override + public Table getRevokedCertsTable() { + return null; + } + + @Override + public TableIterator getAllCerts(CertificateStore.CertType certType) { + return null; + } + + @Override + public Table getPipelineTable() { + return pipelineTable; + } + + @Override + public BatchOperationHandler getBatchHandler() { + return null; + } + + @Override + public Table getContainerTable() { + return null; + } + + /** + * Test SCM DB Definition for the above class. 
+ */ + public static class SCMDBTestDefinition implements DBDefinition { + + public static final DBColumnFamilyDefinition + PIPELINES = + new DBColumnFamilyDefinition<>( + "pipelines", + PipelineID.class, + new OldPipelineIDCodec(), + Pipeline.class, + new PipelineCodec()); + + @Override + public String getName() { + return "scm.db"; + } + + @Override + public String getLocationConfigKey() { + return ScmConfigKeys.OZONE_SCM_DB_DIRS; + } + + @Override + public DBColumnFamilyDefinition[] getColumnFamilies() { + return new DBColumnFamilyDefinition[] {PIPELINES}; + } + } + + /** + * Old Pipeline ID codec that relies on protobuf serialization. + */ + public static class OldPipelineIDCodec implements Codec { + @Override + public byte[] toPersistedFormat(PipelineID object) throws IOException { + return object.getProtobuf().toByteArray(); + } + + @Override + public PipelineID fromPersistedFormat(byte[] rawData) throws IOException { + return null; + } + + @Override + public PipelineID copyObject(PipelineID object) { + throw new UnsupportedOperationException(); + } + } + +} + From 3b930ab199022ebed60f5c250d246363889a5c0f Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Fri, 17 Jul 2020 13:54:08 -0700 Subject: [PATCH 038/165] HDDS-3741. Reload old OM state if Install Snapshot from Leader fails (#1129) --- .../org/apache/hadoop/ozone/OzoneConsts.java | 3 + .../apache/hadoop/ozone/util/ExitManager.java | 33 +++ .../hadoop/ozone/om/TestOMRatisSnapshots.java | 215 ++++++++++++++---- .../TestOzoneManagerSnapshotProvider.java | 26 +-- .../ozone/om/OmMetadataManagerImpl.java | 23 +- .../apache/hadoop/ozone/om/OzoneManager.java | 176 ++++++++------ .../ozone/om/ratis/OMTransactionInfo.java | 21 +- .../om/ratis/OzoneManagerStateMachine.java | 6 +- .../ratis/utils/OzoneManagerRatisUtils.java | 37 ++- .../OzoneManagerSnapshotProvider.java | 10 +- .../ozone/om/TestOmMetadataManager.java | 2 +- ...eManagerDoubleBufferWithDummyResponse.java | 2 +- ...zoneManagerDoubleBufferWithOMResponse.java | 2 +- 13 files changed, 399 insertions(+), 157 deletions(-) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/ExitManager.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index e340b3231491..4b380948abd9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -366,4 +366,7 @@ private OzoneConsts() { public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB"; public static final String CONTAINER_DB_TYPE_LEVELDB = "LevelDB"; + + // An on-disk transient marker file used when replacing DB with checkpoint + public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker"; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/ExitManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/ExitManager.java new file mode 100644 index 000000000000..4a83c1d8c239 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/ExitManager.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.util; + +import org.apache.ratis.util.ExitUtils; +import org.slf4j.Logger; + +/** + * An Exit Manager used to shutdown service in case of unrecoverable error. + * This class will be helpful to test exit functionality. + */ +public class ExitManager { + + public void exitSystem(int status, String message, Throwable throwable, + Logger log) { + ExitUtils.terminate(1, message, throwable, log); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index 0cfbea4ef9c2..ef08abd89096 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -18,10 +18,12 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.UUID; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -33,10 +35,14 @@ import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.util.ExitManager; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.ratis.server.protocol.TermIndex; -import org.apache.commons.lang3.RandomStringUtils; import static org.apache.hadoop.ozone.om.TestOzoneManagerHAWithData.createKey; -import org.apache.ratis.server.protocol.TermIndex; +import static org.junit.Assert.assertTrue; + import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -45,6 +51,8 @@ import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.event.Level; /** * Tests the Ratis snaphsots feature in OM. 
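The test hunks below exercise the new install-snapshot flow end to end, including the stale-checkpoint and corrupted-checkpoint failure paths. Later hunks in this patch (OmMetadataManagerImpl and OzoneManager#replaceOMDBWithCheckpoint) also introduce a transient marker file that guards the window in which the OM DB directory is swapped with a downloaded checkpoint. The sketch below shows that swap-with-marker pattern in isolation; the paths and class name are made up, and plain java.nio calls stand in for the Ozone helpers.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

/** Minimal sketch of the backup + transient-marker swap; illustration only. */
final class DbSwapSketch {

  static void swapDbWithCheckpoint(Path currentDb, Path checkpoint, Path backup,
      Path markerFile) throws IOException {
    // Keep the old DB around so the swap can be rolled back.
    Files.move(currentDb, backup);
    // The marker exists only while the DB directory is in a half-swapped state.
    Files.createFile(markerFile);
    try {
      Files.move(checkpoint, currentDb);
      Files.deleteIfExists(markerFile);   // swap finished cleanly
    } catch (IOException e) {
      Files.move(backup, currentDb);      // roll back to the original DB
      Files.deleteIfExists(markerFile);   // directory is consistent again
      throw e;
    }
  }

  static void checkOnStartup(Path dbDir, Path markerFile) {
    // Mirrors the start-up check: refuse to load a half-replaced DB.
    if (Files.exists(markerFile)) {
      throw new IllegalStateException("DB at " + dbDir + " is in an inconsistent state");
    }
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("swap-demo");
    Path db = Files.createDirectory(dir.resolve("om.db"));
    Path checkpoint = Files.createDirectory(dir.resolve("checkpoint"));
    Path marker = dir.resolve("dbInconsistentMarker");
    swapDbWithCheckpoint(db, checkpoint, dir.resolve("om.db.backup"), marker);
    checkOnStartup(db, marker);           // no marker left behind -> OK
    System.out.println("swapped cleanly: " + Files.exists(db));
  }
}
```

The point of the marker is crash-safety: if the process dies between the two moves, the next start-up finds the marker and terminates instead of loading an inconsistent DB, which is exactly what the new check in OmMetadataManagerImpl#start does.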
@@ -59,6 +67,10 @@ public class TestOMRatisSnapshots { private String scmId; private String omServiceId; private int numOfOMs = 3; + private OzoneBucket ozoneBucket; + private String volumeName; + private String bucketName; + private static final long SNAPSHOT_THRESHOLD = 50; private static final int LOG_PURGE_GAP = 50; @@ -95,6 +107,20 @@ public void init() throws Exception { cluster.waitForClusterToBeReady(); objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf) .getObjectStore(); + + volumeName = "volume" + RandomStringUtils.randomNumeric(5); + bucketName = "bucket" + RandomStringUtils.randomNumeric(5); + + VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() + .setOwner("user" + RandomStringUtils.randomNumeric(5)) + .setAdmin("admin" + RandomStringUtils.randomNumeric(5)) + .build(); + + objectStore.createVolume(volumeName, createVolumeArgs); + OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); + + retVolumeinfo.createBucket(bucketName); + ozoneBucket = retVolumeinfo.getBucket(bucketName); } /** @@ -125,37 +151,13 @@ public void testInstallSnapshot() throws Exception { OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); // Do some transactions so that the log index increases - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - objectStore.createVolume(volumeName, createVolumeArgs); - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - - retVolumeinfo.createBucket(bucketName); - OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); - - long leaderOMappliedLogIndex = - leaderRatisServer.getLastAppliedTermIndex().getIndex(); - - List keys = new ArrayList<>(); - while (leaderOMappliedLogIndex < 2000) { - keys.add(createKey(ozoneBucket)); - leaderOMappliedLogIndex = - leaderRatisServer.getLastAppliedTermIndex().getIndex(); - } + List keys = writeKeysToIncreaseLogIndex(leaderRatisServer, 200); // Get the latest db checkpoint from the leader OM. OMTransactionInfo omTransactionInfo = OMTransactionInfo.readTransactionInfo(leaderOM.getMetadataManager()); TermIndex leaderOMTermIndex = - TermIndex.newTermIndex(omTransactionInfo.getCurrentTerm(), + TermIndex.newTermIndex(omTransactionInfo.getTerm(), omTransactionInfo.getTransactionIndex()); long leaderOMSnaphsotIndex = leaderOMTermIndex.getIndex(); long leaderOMSnapshotTermIndex = leaderOMTermIndex.getTerm(); @@ -169,30 +171,20 @@ public void testInstallSnapshot() throws Exception { // The recently started OM should be lagging behind the leader OM. long followerOMLastAppliedIndex = followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex(); - Assert.assertTrue( + assertTrue( followerOMLastAppliedIndex < leaderOMSnaphsotIndex); // Install leader OM's db checkpoint on the lagging OM. - File oldDbLocation = followerOM.getMetadataManager().getStore() - .getDbLocation(); - followerOM.getOmRatisServer().getOmStateMachine().pause(); - followerOM.getMetadataManager().getStore().close(); - followerOM.replaceOMDBWithCheckpoint(leaderOMSnaphsotIndex, oldDbLocation, - leaderDbCheckpoint.getCheckpointLocation()); - - // Reload the follower OM with new DB checkpoint from the leader OM. 
- followerOM.reloadOMState(leaderOMSnaphsotIndex, leaderOMSnapshotTermIndex); - followerOM.getOmRatisServer().getOmStateMachine().unpause( - leaderOMSnaphsotIndex, leaderOMSnapshotTermIndex); - - // After the new checkpoint is loaded and state machine is unpaused, the - // follower OM lastAppliedIndex must match the snapshot index of the - // checkpoint. + followerOM.installCheckpoint(leaderOMNodeId, leaderDbCheckpoint); + + // After the new checkpoint is installed, the follower OM + // lastAppliedIndex must >= the snapshot index of the checkpoint. It + // could be great than snapshot index if there is any conf entry from ratis. followerOMLastAppliedIndex = followerOM.getOmRatisServer() .getLastAppliedTermIndex().getIndex(); - Assert.assertEquals(leaderOMSnaphsotIndex, followerOMLastAppliedIndex); - Assert.assertEquals(leaderOMSnapshotTermIndex, - followerOM.getOmRatisServer().getLastAppliedTermIndex().getTerm()); + assertTrue(followerOMLastAppliedIndex >= leaderOMSnaphsotIndex); + assertTrue(followerOM.getOmRatisServer().getLastAppliedTermIndex() + .getTerm() >= leaderOMSnapshotTermIndex); // Verify that the follower OM's DB contains the transactions which were // made while it was inactive. @@ -206,4 +198,133 @@ public void testInstallSnapshot() throws Exception { followerOMMetaMngr.getOzoneKey(volumeName, bucketName, key))); } } + + @Test + public void testInstallOldCheckpointFailure() throws Exception { + // Get the leader OM + String leaderOMNodeId = OmFailoverProxyUtil + .getFailoverProxyProvider(objectStore.getClientProxy()) + .getCurrentProxyOMNodeId(); + + OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId); + + // Find the inactive OM and start it + String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId(); + if (cluster.isOMActive(followerNodeId)) { + followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId(); + } + cluster.startInactiveOM(followerNodeId); + + OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); + OzoneManagerRatisServer followerRatisServer = followerOM.getOmRatisServer(); + + // Do some transactions so that the log index increases on follower OM + writeKeysToIncreaseLogIndex(followerRatisServer, 100); + + TermIndex leaderCheckpointTermIndex = leaderOM.getOmRatisServer() + .getLastAppliedTermIndex(); + DBCheckpoint leaderDbCheckpoint = leaderOM.getMetadataManager().getStore() + .getCheckpoint(false); + + // Do some more transactions to increase the log index further on + // follower OM such that it is more than the checkpoint index taken on + // leader OM. + writeKeysToIncreaseLogIndex(followerOM.getOmRatisServer(), + leaderCheckpointTermIndex.getIndex() + 100); + + GenericTestUtils.setLogLevel(OzoneManager.LOG, Level.INFO); + GenericTestUtils.LogCapturer logCapture = + GenericTestUtils.LogCapturer.captureLogs(OzoneManager.LOG); + + // Install the old checkpoint on the follower OM. This should fail as the + // followerOM is already ahead of that transactionLogIndex and the OM + // state should be reloaded. 
+ TermIndex followerTermIndex = followerRatisServer.getLastAppliedTermIndex(); + TermIndex newTermIndex = followerOM.installCheckpoint( + leaderOMNodeId, leaderDbCheckpoint); + + String errorMsg = "Cannot proceed with InstallSnapshot as OM is at " + + "TermIndex " + followerTermIndex + " and checkpoint has lower " + + "TermIndex"; + Assert.assertTrue(logCapture.getOutput().contains(errorMsg)); + Assert.assertNull("OM installed checkpoint even though checkpoint " + + "logIndex is less than it's lastAppliedIndex", newTermIndex); + Assert.assertEquals(followerTermIndex, + followerRatisServer.getLastAppliedTermIndex()); + } + + @Test + public void testInstallCorruptedCheckpointFailure() throws Exception { + // Get the leader OM + String leaderOMNodeId = OmFailoverProxyUtil + .getFailoverProxyProvider(objectStore.getClientProxy()) + .getCurrentProxyOMNodeId(); + + OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId); + OzoneManagerRatisServer leaderRatisServer = leaderOM.getOmRatisServer(); + + // Find the inactive OM + String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId(); + if (cluster.isOMActive(followerNodeId)) { + followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId(); + } + OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); + OzoneManagerRatisServer followerRatisServer = followerOM.getOmRatisServer(); + + // Do some transactions so that the log index increases + writeKeysToIncreaseLogIndex(leaderRatisServer, 100); + + DBCheckpoint leaderDbCheckpoint = leaderOM.getMetadataManager().getStore() + .getCheckpoint(false); + Path leaderCheckpointLocation = leaderDbCheckpoint.getCheckpointLocation(); + OMTransactionInfo leaderCheckpointTrxnInfo = OzoneManagerRatisUtils + .getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation); + + // Corrupt the leader checkpoint and install that on the OM. The + // operation should fail and OM should shutdown. 
+ boolean delete = true; + for (File file : leaderCheckpointLocation.toFile() + .listFiles()) { + if (file.getName().contains(".sst")) { + if (delete) { + file.delete(); + delete = false; + } else { + delete = true; + } + } + } + + GenericTestUtils.setLogLevel(OzoneManager.LOG, Level.ERROR); + GenericTestUtils.LogCapturer logCapture = + GenericTestUtils.LogCapturer.captureLogs(OzoneManager.LOG); + followerOM.setExitManagerForTesting(new DummyExitManager()); + + followerOM.installCheckpoint(leaderOMNodeId, leaderCheckpointLocation, + leaderCheckpointTrxnInfo); + + Assert.assertTrue(logCapture.getOutput().contains("System Exit: " + + "Failed to reload OM state and instantiate services.")); + } + + private List writeKeysToIncreaseLogIndex( + OzoneManagerRatisServer omRatisServer, long targetLogIndex) + throws IOException, InterruptedException { + List keys = new ArrayList<>(); + long logIndex = omRatisServer.getLastAppliedTermIndex().getIndex(); + while (logIndex < targetLogIndex) { + keys.add(createKey(ozoneBucket)); + Thread.sleep(100); + logIndex = omRatisServer.getLastAppliedTermIndex().getIndex(); + } + return keys; + } + + private class DummyExitManager extends ExitManager { + @Override + public void exitSystem(int status, String message, Throwable throwable, + Logger log) { + log.error("System Exit: " + message, throwable); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index d77f4d9d1341..844c859ac028 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -20,6 +20,7 @@ import java.util.UUID; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -31,11 +32,10 @@ import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmFailoverProxyUtil; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; - -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; + import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -124,7 +124,7 @@ public void testDownloadCheckpoint() throws Exception { .getOzoneManagerDBSnapshot(leaderOMNodeId); long leaderSnapshotIndex = leaderOM.getRatisSnapshotIndex(); - long downloadedSnapshotIndex = getDownloadSnapshotIndex(omSnapshot); + long downloadedSnapshotIndex = getDownloadedSnapshotIndex(omSnapshot); // The snapshot index downloaded from leader OM should match the ratis // snapshot index on the leader OM @@ -133,21 +133,13 @@ public void testDownloadCheckpoint() throws Exception { leaderSnapshotIndex, downloadedSnapshotIndex); } - private long getDownloadSnapshotIndex(DBCheckpoint dbCheckpoint) + private long getDownloadedSnapshotIndex(DBCheckpoint dbCheckpoint) throws Exception { - OzoneConfiguration configuration = new OzoneConfiguration(conf); - configuration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - 
dbCheckpoint.getCheckpointLocation().getParent().toString()); - - OmMetadataManagerImpl omMetadataManager = - new OmMetadataManagerImpl(configuration); - - long transactionIndex = - OMTransactionInfo.readTransactionInfo(omMetadataManager) - .getTransactionIndex(); - omMetadataManager.stop(); - return transactionIndex; + OMTransactionInfo trxnInfoFromCheckpoint = + OzoneManagerRatisUtils.getTrxnInfoFromCheckpoint(conf, + dbCheckpoint.getCheckpointLocation()); + return trxnInfoFromCheckpoint.getTransactionIndex(); } } \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index d48c6fa9a36f..6c8b50595ca1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -77,9 +77,11 @@ import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import org.apache.ratis.util.ExitUtils; import org.eclipse.jetty.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -249,6 +251,20 @@ public void start(OzoneConfiguration configuration) throws IOException { if (store == null) { File metaDir = OMStorage.getOmDbDir(configuration); + // Check if there is a DB Inconsistent Marker in the metaDir. This + // marker indicates that the DB is in an inconsistent state and hence + // the OM process should be terminated. + File markerFile = new File(metaDir, DB_TRANSIENT_MARKER); + if (markerFile.exists()) { + LOG.error("File {} marks that OM DB is in an inconsistent state."); + // Note - The marker file should be deleted only after fixing the DB. + // In an HA setup, this can be done by replacing this DB with a + // checkpoint from another OM. 
+ String errorMsg = "Cannot load OM DB as it is in an inconsistent " + + "state."; + ExitUtils.terminate(1, errorMsg, LOG); + } + RocksDBConfiguration rocksDBConfiguration = configuration.getObject(RocksDBConfiguration.class); @@ -273,10 +289,15 @@ public void start(OzoneConfiguration configuration) throws IOException { public static DBStore loadDB(OzoneConfiguration configuration, File metaDir) throws IOException { + return loadDB(configuration, metaDir, OM_DB_NAME); + } + + public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, + String dbName) throws IOException { RocksDBConfiguration rocksDBConfiguration = configuration.getObject(RocksDBConfiguration.class); DBStoreBuilder dbStoreBuilder = DBStoreBuilder.newBuilder(configuration, - rocksDBConfiguration).setName(OM_DB_NAME) + rocksDBConfiguration).setName(dbName) .setPath(Paths.get(metaDir.getPath())); DBStore dbStore = addOMTablesAndCodecs(dbStoreBuilder).build(); return dbStore; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 53808f9e23ae..72da3344ff36 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -155,6 +155,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.ozone.security.acl.RequestContext; +import org.apache.hadoop.ozone.util.ExitManager; import org.apache.hadoop.ozone.util.OzoneVersionInfo; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -194,6 +195,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER; import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_FILE; import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE; import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT; @@ -217,6 +219,7 @@ import org.apache.ratis.proto.RaftProtos.RaftPeerRole; import org.apache.ratis.server.protocol.TermIndex; +import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.FileUtils; import org.apache.ratis.util.LifeCycle; import org.bouncycastle.pkcs.PKCS10CertificationRequest; @@ -308,6 +311,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private boolean isNativeAuthorizerEnabled; + private ExitManager exitManager; + private enum State { INITIALIZED, RUNNING, @@ -3067,51 +3072,52 @@ public List getAcl(OzoneObj obj) throws IOException { /** * Download and install latest checkpoint from leader OM. - * If the download checkpoints snapshot index is greater than this OM's - * last applied transaction index, then re-initialize the OM state via this - * checkpoint. Before re-initializing OM state, the OM Ratis server should - * be stopped so that no new transactions can be applied. * * @param leaderId peerNodeID of the leader OM - * @return If checkpoint is installed, return the corresponding termIndex. - * Otherwise, return null. + * @return If checkpoint is installed successfully, return the + * corresponding termIndex. Otherwise, return null. 
*/ - public TermIndex installSnapshot(String leaderId) { + public TermIndex installSnapshotFromLeader(String leaderId) { if (omSnapshotProvider == null) { LOG.error("OM Snapshot Provider is not configured as there are no peer " + "nodes."); return null; } - DBCheckpoint omDBcheckpoint = getDBCheckpointFromLeader(leaderId); - Path newDBlocation = omDBcheckpoint.getCheckpointLocation(); + DBCheckpoint omDBCheckpoint = getDBCheckpointFromLeader(leaderId); + LOG.info("Downloaded checkpoint from Leader {} to the location {}", + leaderId, omDBCheckpoint.getCheckpointLocation()); - LOG.info("Downloaded checkpoint from Leader {}, in to the location {}", - leaderId, newDBlocation); + TermIndex termIndex = null; + try { + termIndex = installCheckpoint(leaderId, omDBCheckpoint); + } catch (Exception ex) { + LOG.error("Failed to install snapshot from Leader OM: {}", ex); + } + return termIndex; + } - // Check if current ratis log index is smaller than the downloaded - // checkpoint transaction index. If yes, proceed by stopping the ratis - // server so that the OM state can be re-initialized. If no, then do not - // proceed with installSnapshot. + /** + * Install checkpoint. If the checkpoints snapshot index is greater than + * OM's last applied transaction index, then re-initialize the OM + * state via this checkpoint. Before re-initializing OM state, the OM Ratis + * server should be stopped so that no new transactions can be applied. + */ + TermIndex installCheckpoint(String leaderId, DBCheckpoint omDBCheckpoint) + throws Exception { - OMTransactionInfo omTransactionInfo = null; + Path checkpointLocation = omDBCheckpoint.getCheckpointLocation(); + OMTransactionInfo checkpointTrxnInfo = OzoneManagerRatisUtils + .getTrxnInfoFromCheckpoint(configuration, checkpointLocation); - Path dbDir = newDBlocation.getParent(); - if (dbDir == null) { - LOG.error("Incorrect DB location path {} received from checkpoint.", - newDBlocation); - return null; - } + LOG.info("Installing checkpoint with OMTransactionInfo {}", + checkpointTrxnInfo); - try { - omTransactionInfo = - OzoneManagerRatisUtils.getTransactionInfoFromDownloadedSnapshot( - configuration, dbDir); - } catch (Exception ex) { - LOG.error("Failed during opening downloaded snapshot from " + - "{} to obtain transaction index", newDBlocation, ex); - return null; - } + return installCheckpoint(leaderId, checkpointLocation, checkpointTrxnInfo); + } + + TermIndex installCheckpoint(String leaderId, Path checkpointLocation, + OMTransactionInfo checkpointTrxnInfo) throws Exception { File oldDBLocation = metadataManager.getStore().getDbLocation(); try { @@ -3124,58 +3130,74 @@ public TermIndex installSnapshot(String leaderId) { omRatisServer.getOmStateMachine().pause(); } catch (Exception e) { LOG.error("Failed to stop/ pause the services. Cannot proceed with " + - "installing the new checkpoint.", e); - return null; - } - - //TODO: un-pause SM if any failures and retry? - - long lastAppliedIndex = omRatisServer.getLastAppliedTermIndex().getIndex(); - - boolean canProceed = - OzoneManagerRatisUtils.verifyTransactionInfo(omTransactionInfo, - lastAppliedIndex, leaderId, newDBlocation); - - // If downloaded DB has transaction info less than current one, return. - if (!canProceed) { - return null; + "installing the new checkpoint."); + // During stopServices, if KeyManager was stopped successfully and + // OMMetadataManager stop failed, we should restart the KeyManager. 
+ keyManager.start(configuration); + throw e; } - long leaderIndex = omTransactionInfo.getTransactionIndex(); - long leaderTerm = omTransactionInfo.getCurrentTerm(); + File dbBackup = null; + TermIndex termIndex = omRatisServer.getLastAppliedTermIndex(); + long term = termIndex.getTerm(); + long lastAppliedIndex = termIndex.getIndex(); + // Check if current applied log index is smaller than the downloaded + // checkpoint transaction index. If yes, proceed by stopping the ratis + // server so that the OM state can be re-initialized. If no then do not + // proceed with installSnapshot. + boolean canProceed = OzoneManagerRatisUtils.verifyTransactionInfo( + checkpointTrxnInfo, lastAppliedIndex, leaderId, checkpointLocation); - File dbBackup; - try { - dbBackup = replaceOMDBWithCheckpoint(lastAppliedIndex, oldDBLocation, - newDBlocation); - } catch (Exception e) { - LOG.error("OM DB checkpoint replacement with new downloaded checkpoint " + - "failed.", e); - return null; + if (canProceed) { + try { + dbBackup = replaceOMDBWithCheckpoint(lastAppliedIndex, oldDBLocation, + checkpointLocation); + term = checkpointTrxnInfo.getTerm(); + lastAppliedIndex = checkpointTrxnInfo.getTransactionIndex(); + LOG.info("Replaced DB with checkpoint from OM: {}, term: {}, index: {}", + leaderId, term, lastAppliedIndex); + } catch (Exception e) { + LOG.error("Failed to install Snapshot from {} as OM failed to replace" + + " DB with downloaded checkpoint. Reloading old OM state.", e); + } + } else { + LOG.warn("Cannot proceed with InstallSnapshot as OM is at TermIndex {} " + + "and checkpoint has lower TermIndex {}. Reloading old state of OM.", + termIndex, checkpointTrxnInfo.getTermIndex()); } // Reload the OM DB store with the new checkpoint. // Restart (unpause) the state machine and update its last applied index // to the installed checkpoint's snapshot index. try { - reloadOMState(leaderIndex, leaderTerm); - omRatisServer.getOmStateMachine().unpause(leaderIndex, leaderTerm); - } catch (IOException e) { - LOG.error("Failed to reload OM state with new DB checkpoint.", e); - return null; + reloadOMState(lastAppliedIndex, term); + omRatisServer.getOmStateMachine().unpause(lastAppliedIndex, term); + LOG.info("Reloaded OM state with Term: {} and Index: {}", term, + lastAppliedIndex); + } catch (IOException ex) { + String errorMsg = "Failed to reload OM state and instantiate services."; + exitManager.exitSystem(1, errorMsg, ex, LOG); } // Delete the backup DB try { - FileUtils.deleteFully(dbBackup); + if (dbBackup != null) { + FileUtils.deleteFully(dbBackup); + } } catch (IOException e) { LOG.error("Failed to delete the backup of the original DB {}", dbBackup); } + if (lastAppliedIndex != checkpointTrxnInfo.getTransactionIndex()) { + // Install Snapshot failed and old state was reloaded. Return null to + // Ratis to indicate that installation failed. + return null; + } + // TODO: We should only return the snpashotIndex to the leader. // Should be fixed after RATIS-586 - TermIndex newTermIndex = TermIndex.newTermIndex(leaderTerm, leaderIndex); + TermIndex newTermIndex = TermIndex.newTermIndex(term, lastAppliedIndex); return newTermIndex; } @@ -3209,16 +3231,17 @@ void stopServices() throws Exception { * * @param lastAppliedIndex the last applied index in the current OM DB. 
* @param checkpointPath path to the new DB checkpoint - * @return location of the backup of the original DB + * @return location of backup of the original DB * @throws Exception */ File replaceOMDBWithCheckpoint(long lastAppliedIndex, File oldDB, - Path checkpointPath) throws Exception { + Path checkpointPath) throws IOException { // Take a backup of the current DB String dbBackupName = OzoneConsts.OM_DB_BACKUP_PREFIX + lastAppliedIndex + "_" + System.currentTimeMillis(); - File dbBackup = new File(oldDB.getParentFile(), dbBackupName); + File dbDir = oldDB.getParentFile(); + File dbBackup = new File(dbDir, dbBackupName); try { Files.move(oldDB.toPath(), dbBackup.toPath()); @@ -3229,13 +3252,28 @@ File replaceOMDBWithCheckpoint(long lastAppliedIndex, File oldDB, } // Move the new DB checkpoint into the om metadata dir + Path markerFile = new File(dbDir, DB_TRANSIENT_MARKER).toPath(); try { + // Create a Transient Marker file. This file will be deleted if the + // checkpoint DB is successfully moved to the old DB location or if the + // old DB backup is reset to its location. If not, then the OM DB is in + // an inconsistent state and this marker file will fail OM from + // starting up. + Files.createFile(markerFile); Files.move(checkpointPath, oldDB.toPath()); + Files.deleteIfExists(markerFile); } catch (IOException e) { LOG.error("Failed to move downloaded DB checkpoint {} to metadata " + "directory {}. Resetting to original DB.", checkpointPath, oldDB.toPath()); - Files.move(dbBackup.toPath(), oldDB.toPath()); + try { + Files.move(dbBackup.toPath(), oldDB.toPath()); + Files.deleteIfExists(markerFile); + } catch (IOException ex) { + String errorMsg = "Failed to reset to original DB. OM is in an " + + "inconsistent state."; + ExitUtils.terminate(1, errorMsg, ex, LOG); + } throw e; } return dbBackup; @@ -3453,4 +3491,8 @@ private Pair resolveBucketLink( visited); } + @VisibleForTesting + void setExitManagerForTesting(ExitManager exitManagerForTesting) { + this.exitManager = exitManagerForTesting; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java index 24417515ef13..28c8c3a91f27 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.Objects; +import org.apache.ratis.server.protocol.TermIndex; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_SPLIT_KEY; @@ -33,7 +34,7 @@ */ public final class OMTransactionInfo { - private long currentTerm; // term associated with the ratis log index. + private long term; // term associated with the ratis log index. 
// Transaction index corresponds to ratis log index private long transactionIndex; @@ -43,12 +44,12 @@ private OMTransactionInfo(String transactionInfo) { Preconditions.checkState(tInfo.length==2, "Incorrect TransactionInfo value"); - currentTerm = Long.parseLong(tInfo[0]); + term = Long.parseLong(tInfo[0]); transactionIndex = Long.parseLong(tInfo[1]); } private OMTransactionInfo(long currentTerm, long transactionIndex) { - this.currentTerm = currentTerm; + this.term = currentTerm; this.transactionIndex = transactionIndex; } @@ -56,8 +57,8 @@ private OMTransactionInfo(long currentTerm, long transactionIndex) { * Get current term. * @return currentTerm */ - public long getCurrentTerm() { - return currentTerm; + public long getTerm() { + return term; } /** @@ -68,6 +69,10 @@ public long getTransactionIndex() { return transactionIndex; } + public TermIndex getTermIndex() { + return TermIndex.newTermIndex(term, transactionIndex); + } + /** * Generate String form of transaction info which need to be persisted in OM * DB finally in byte array. @@ -75,7 +80,7 @@ public long getTransactionIndex() { */ private String generateTransactionInfo() { StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append(currentTerm); + stringBuilder.append(term); stringBuilder.append(TRANSACTION_INFO_SPLIT_KEY); stringBuilder.append(transactionIndex); @@ -109,13 +114,13 @@ public boolean equals(Object o) { return false; } OMTransactionInfo that = (OMTransactionInfo) o; - return currentTerm == that.currentTerm && + return term == that.term && transactionIndex == that.transactionIndex; } @Override public int hashCode() { - return Objects.hash(currentTerm, transactionIndex); + return Objects.hash(term, transactionIndex); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index c042fcb7eedd..3f7429ab7dd0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -379,7 +379,7 @@ public CompletableFuture notifyInstallSnapshotFromLeader( } CompletableFuture future = CompletableFuture.supplyAsync( - () -> ozoneManager.installSnapshot(leaderNodeId), + () -> ozoneManager.installSnapshotFromLeader(leaderNodeId), installSnapshotExecutor); return future; } @@ -521,9 +521,9 @@ public void loadSnapshotInfoFromDB() throws IOException { ozoneManager.getMetadataManager()); if (omTransactionInfo != null) { setLastAppliedTermIndex(TermIndex.newTermIndex( - omTransactionInfo.getCurrentTerm(), + omTransactionInfo.getTerm(), omTransactionInfo.getTransactionIndex())); - snapshotInfo.updateTermIndex(omTransactionInfo.getCurrentTerm(), + snapshotInfo.updateTermIndex(omTransactionInfo.getTerm(), omTransactionInfo.getTransactionIndex()); } LOG.info("LastAppliedIndex is set from TransactionInfo from OM DB as {}", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 4aaaf13f6a4d..ddb6841ae31e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -228,15 +228,35 @@ public static Status exceptionToResponseStatus(IOException exception) { } /** - * Obtain Transaction info from downloaded snapshot DB. + * Obtain OMTransactionInfo from Checkpoint. + */ + public static OMTransactionInfo getTrxnInfoFromCheckpoint( + OzoneConfiguration conf, Path dbPath) throws Exception { + + if (dbPath != null) { + Path dbDir = dbPath.getParent(); + Path dbFile = dbPath.getFileName(); + if (dbDir != null && dbFile != null) { + return getTransactionInfoFromDB(conf, dbDir, dbFile.toString()); + } + } + + throw new IOException("Checkpoint " + dbPath + " does not have proper " + + "DB location"); + } + + /** + * Obtain Transaction info from DB. * @param tempConfig + * @param dbDir path to DB * @return OMTransactionInfo * @throws Exception */ - public static OMTransactionInfo getTransactionInfoFromDownloadedSnapshot( - OzoneConfiguration tempConfig, Path dbDir) throws Exception { - DBStore dbStore = - OmMetadataManagerImpl.loadDB(tempConfig, dbDir.toFile()); + private static OMTransactionInfo getTransactionInfoFromDB( + OzoneConfiguration tempConfig, Path dbDir, String dbName) + throws Exception { + DBStore dbStore = OmMetadataManagerImpl.loadDB(tempConfig, dbDir.toFile(), + dbName); Table transactionInfoTable = dbStore.getTable(TRANSACTION_INFO_TABLE, @@ -245,8 +265,11 @@ public static OMTransactionInfo getTransactionInfoFromDownloadedSnapshot( OMTransactionInfo omTransactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY); dbStore.close(); - OzoneManager.LOG.info("Downloaded checkpoint with OMTransactionInfo {}", - omTransactionInfo); + + if (omTransactionInfo == null) { + throw new IOException("Failed to read OMTransactionInfo from DB " + + dbName + " at " + dbDir); + } return omTransactionInfo; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java index 1c78251abb92..a11c60b9435d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java @@ -113,8 +113,10 @@ public OzoneManagerSnapshotProvider(MutableConfigurationSource conf, public DBCheckpoint getOzoneManagerDBSnapshot(String leaderOMNodeID) throws IOException { String snapshotTime = Long.toString(System.currentTimeMillis()); - String snapshotFileName = Paths.get(omSnapshotDir.getAbsolutePath(), - snapshotTime, OM_DB_NAME).toFile().getAbsolutePath(); + String snapshotFileName = OM_DB_NAME + "-" + leaderOMNodeID + + "-" + snapshotTime; + String snapshotFilePath = Paths.get(omSnapshotDir.getAbsolutePath(), + snapshotFileName).toFile().getAbsolutePath(); File targetFile = new File(snapshotFileName + ".tar.gz"); String omCheckpointUrl = peerNodesMap.get(leaderOMNodeID) @@ -141,11 +143,11 @@ public DBCheckpoint getOzoneManagerDBSnapshot(String leaderOMNodeID) }); // Untar the checkpoint file. 
- Path untarredDbDir = Paths.get(snapshotFileName); + Path untarredDbDir = Paths.get(snapshotFilePath); FileUtil.unTar(targetFile, untarredDbDir.toFile()); FileUtils.deleteQuietly(targetFile); - LOG.info("Sucessfully downloaded latest checkpoint from leader OM: {}", + LOG.info("Successfully downloaded latest checkpoint from leader OM: {}", leaderOMNodeID); RocksDBCheckpoint omCheckpoint = new RocksDBCheckpoint(untarredDbDir); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index 054c97f396c7..6226c5bbc9f1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -76,7 +76,7 @@ public void testTransactionTable() throws Exception { OMTransactionInfo omTransactionInfo = omMetadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY); - Assert.assertEquals(3, omTransactionInfo.getCurrentTerm()); + Assert.assertEquals(3, omTransactionInfo.getTerm()); Assert.assertEquals(250, omTransactionInfo.getTransactionIndex()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index 7b86006b9379..372679b2b3eb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -139,7 +139,7 @@ public void testDoubleBufferWithDummyResponse() throws Exception { Assert.assertEquals(lastAppliedIndex, omTransactionInfo.getTransactionIndex()); - Assert.assertEquals(term, omTransactionInfo.getCurrentTerm()); + Assert.assertEquals(term, omTransactionInfo.getTerm()); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index b3693415b183..260e2cd17c10 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -202,7 +202,7 @@ public void testDoubleBufferWithMixOfTransactions() throws Exception { Assert.assertEquals(lastAppliedIndex, omTransactionInfo.getTransactionIndex()); - Assert.assertEquals(term, omTransactionInfo.getCurrentTerm()); + Assert.assertEquals(term, omTransactionInfo.getTerm()); } /** From cb349ec7f72e40d8e24d29c3f18f43c667aaadd7 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Sat, 18 Jul 2020 10:15:45 -0700 Subject: [PATCH 039/165] HDDS-3955. Unable to list intermediate paths on keys created using S3G. 
(#1196) --- .../src/main/resources/ozone-default.xml | 15 ++ .../apache/hadoop/ozone/om/OMConfigKeys.java | 7 + .../TestOzoneFSWithObjectStoreCreate.java | 132 +++++++++++++ .../apache/hadoop/ozone/om/OzoneManager.java | 8 + .../ozone/om/request/OMClientRequest.java | 76 ++++++++ .../ozone/om/request/file/OMFileRequest.java | 2 +- .../request/key/OMAllocateBlockRequest.java | 6 +- .../om/request/key/OMKeyCommitRequest.java | 4 +- .../om/request/key/OMKeyCreateRequest.java | 68 ++++++- .../om/request/key/OMKeyDeleteRequest.java | 4 +- .../om/request/key/OMKeyRenameRequest.java | 17 +- .../S3InitiateMultipartUploadRequest.java | 14 +- .../S3MultipartUploadAbortRequest.java | 5 +- .../S3MultipartUploadCommitPartRequest.java | 9 +- .../S3MultipartUploadCompleteRequest.java | 5 +- .../ozone/om/request/TestNormalizePaths.java | 109 +++++++++++ .../request/key/TestOMKeyCreateRequest.java | 182 +++++++++++++++++- .../TestS3InitiateMultipartUploadRequest.java | 2 +- .../s3/multipart/TestS3MultipartRequest.java | 5 +- ...estS3MultipartUploadCommitPartRequest.java | 2 +- 20 files changed, 643 insertions(+), 29 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index d8fc591a1795..b474ac38c2f1 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2478,4 +2478,19 @@ rules in Amazon S3's object key naming guide. + + + ozone.om.enable.filesystem.paths + OZONE, OM + false + If true, key names will be interpreted as file system paths. + "/" will be treated as a special character and paths will be normalized + and must follow Unix filesystem path naming conventions. This flag will + be helpful when objects created by S3G need to be accessed using OFS/O3Fs. + If false, it will fallback to default behavior of Key/MPU create + requests where key paths are not normalized and any intermediate + directories will not be created or any file checks happens to check + filesystem semantics. + + diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 4f512a55032d..f16679a681eb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -239,4 +239,11 @@ private OMConfigKeys() { "ozone.om.keyname.character.check.enabled"; public static final boolean OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT = false; + + // This config needs to be enabled, when S3G created objects used via + // FileSystem API. 
+ public static final String OZONE_OM_ENABLE_FILESYSTEM_PATHS = + "ozone.om.enable.filesystem.paths"; + public static final boolean OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT = + false; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java new file mode 100644 index 000000000000..b872a3d8694a --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; + +import java.net.URI; +import java.util.Arrays; + +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; + +/** + * Class tests create with object store and getFileStatus. + */ +public class TestOzoneFSWithObjectStoreCreate { + + @Rule + public Timeout timeout = new Timeout(300000); + + private String rootPath; + + private MiniOzoneCluster cluster = null; + + private OzoneFileSystem o3fs; + + private String volumeName; + + private String bucketName; + + + @Before + public void init() throws Exception { + volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); + bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); + + OzoneConfiguration conf = new OzoneConfiguration(); + + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .build(); + cluster.waitForClusterToBeReady(); + + // create a volume and a bucket to be used by OzoneFileSystem + OzoneBucket bucket = + TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); + + rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucketName, + volumeName); + o3fs = (OzoneFileSystem) FileSystem.get(new URI(rootPath), conf); + } + + + @Test + public void test() throws Exception { + + OzoneVolume ozoneVolume = + cluster.getRpcClient().getObjectStore().getVolume(volumeName); + + OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName); + + String key1 = "///dir1/dir2/file1"; + String key2 = "///dir1/dir2/file2"; + int length = 10; + OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key1, length); + byte[] b = new byte[10]; + Arrays.fill(b, (byte)96); + ozoneOutputStream.write(b); + ozoneOutputStream.close(); + + ozoneOutputStream = ozoneBucket.createKey(key2, length); + ozoneOutputStream.write(b); + ozoneOutputStream.close(); + + // Adding "/" here otherwise Path will be considered as relative path and + // workingDir will be added. 
+ key1 = "///dir1/dir2/file1"; + Path p = new Path(key1); + Assert.assertTrue(o3fs.getFileStatus(p).isFile()); + + p = p.getParent(); + checkAncestors(p); + + + key2 = "///dir1/dir2/file2"; + p = new Path(key2); + Assert.assertTrue(o3fs.getFileStatus(p).isFile()); + checkAncestors(p); + + } + + private void checkAncestors(Path p) throws Exception { + p = p.getParent(); + while(p.getParent() != null) { + FileStatus fileStatus = o3fs.getFileStatus(p); + Assert.assertTrue(fileStatus.isDirectory()); + p = p.getParent(); + } + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 72da3344ff36..f4b5cdd758dd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -200,6 +200,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE; import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; @@ -3495,4 +3497,10 @@ private Pair resolveBucketLink( void setExitManagerForTesting(ExitManager exitManagerForTesting) { this.exitManager = exitManagerForTesting; } + + + public boolean getEnableFileSystemPaths() { + return configuration.getBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, + OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 4ced9fdfdba3..0fa9ca1a8d2c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ozone.OzoneConsts; @@ -38,13 +39,18 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import javax.annotation.Nonnull; import java.io.IOException; import java.net.InetAddress; +import java.nio.file.Paths; import java.util.LinkedHashMap; import java.util.Map; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; /** * OMClientRequest provides methods which every write OM request should @@ -52,6 +58,8 @@ */ public abstract class OMClientRequest implements RequestAuditor { + private static final Logger LOG = + 
LoggerFactory.getLogger(OMClientRequest.class); private OMRequest omRequest; /** @@ -265,4 +273,72 @@ public Map buildVolumeAuditMap(String volume) { auditMap.put(OzoneConsts.VOLUME, volume); return auditMap; } + + + public static String validateAndNormalizeKey(boolean enableFileSystemPaths, + String keyName) throws OMException { + if (enableFileSystemPaths) { + return validateAndNormalizeKey(keyName); + } else { + return keyName; + } + } + + @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME") + public static String validateAndNormalizeKey(String keyName) + throws OMException { + String normalizedKeyName; + if (keyName.startsWith(OM_KEY_PREFIX)) { + normalizedKeyName = Paths.get(keyName).toUri().normalize().getPath(); + } else { + normalizedKeyName = Paths.get(OM_KEY_PREFIX, keyName).toUri() + .normalize().getPath(); + } + if (!keyName.equals(normalizedKeyName)) { + LOG.debug("Normalized key {} to {} ", keyName, + normalizedKeyName.substring(1)); + } + return isValidKeyPath(normalizedKeyName.substring(1)); + } + + /** + * Whether the pathname is valid. Check key names which contain a + * ":", ".", "..", "//", "". If it has any of these characters throws + * OMException, else return the path. + */ + private static String isValidKeyPath(String path) throws OMException { + boolean isValid = true; + + // If keyName is empty string throw error. + if (path.length() == 0) { + throw new OMException("Invalid KeyPath, empty keyName" + path, + INVALID_KEY_NAME); + } else if(path.startsWith("/")) { + isValid = false; + } else { + // Check for ".." "." ":" "/" + String[] components = StringUtils.split(path, '/'); + for (int i = 0; i < components.length; i++) { + String element = components[i]; + if (element.equals(".") || + (element.contains(":")) || + (element.contains("/") || element.equals(".."))) { + isValid = false; + break; + } + + // The string may end with a /, but not have + // "//" in the middle. + if (element.isEmpty() && i != components.length - 1) { + isValid = false; + } + } + } + + if (isValid) { + return path; + } else { + throw new OMException("Invalid KeyPath " + path, INVALID_KEY_NAME); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 3367ec7e4758..21ffff815e09 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -197,7 +197,7 @@ public boolean directParentExists() { /** * Return codes used by verifyFilesInPath method. */ - enum OMDirectoryResult { + public enum OMDirectoryResult { // In below examples path is assumed as "a/b/c" in volume volume1 and // bucket b1. 
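
The validateAndNormalizeKey/isValidKeyPath pair above first runs the raw key through java.nio.file path/URI normalization and only then rejects paths that still carry "..", ".", ":" or empty components. A minimal standalone sketch of that behavior follows; the class name, the use of IllegalArgumentException in place of OMException, and the main method are illustrative only and not part of the patch (Unix-style separators assumed):

import java.nio.file.Paths;

public final class KeyNormalizationSketch {

  private static final String OM_KEY_PREFIX = "/";

  // Normalize the key roughly the way OMClientRequest does above, then
  // reject components that survive normalization but are still invalid.
  public static String normalize(String keyName) {
    String normalized = keyName.startsWith(OM_KEY_PREFIX)
        ? Paths.get(keyName).toUri().normalize().getPath()
        : Paths.get(OM_KEY_PREFIX, keyName).toUri().normalize().getPath();
    String path = normalized.substring(1);
    if (path.isEmpty() || path.startsWith("/")) {
      throw new IllegalArgumentException("Invalid KeyPath " + path);
    }
    String[] components = path.split("/");
    for (int i = 0; i < components.length; i++) {
      String element = components[i];
      if (element.equals(".") || element.equals("..") || element.contains(":")
          || (element.isEmpty() && i != components.length - 1)) {
        throw new IllegalArgumentException("Invalid KeyPath " + path);
      }
    }
    return path;
  }

  public static void main(String[] args) {
    System.out.println(normalize("////a/b/////c/d")); // prints a/b/c/d
    System.out.println(normalize("/a/b/c/../d"));     // prints a/b/d
  }
}
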
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index 9e82888be457..94d700f271b0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -110,9 +110,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { ozoneManager.getPreallocateBlocksMax(), ozoneManager.isGrpcBlockTokenEnabled(), ozoneManager.getOMNodeId()); - // Set modification time + // Set modification time and normalize key if required. KeyArgs.Builder newKeyArgs = keyArgs.toBuilder() - .setModificationTime(Time.now()); + .setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName())); AllocateBlockRequest.Builder newAllocatedBlockRequest = AllocateBlockRequest.newBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index eb3769b70ddd..dccb93bb9da3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -91,7 +91,9 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } KeyArgs.Builder newKeyArgs = - keyArgs.toBuilder().setModificationTime(Time.now()); + keyArgs.toBuilder().setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName())); return getOmRequest().toBuilder() .setCommitKeyRequest(commitKeyRequest.toBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index f7f08dc75c09..8927c1931c0d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.key; import java.io.IOException; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -27,7 +28,11 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,7 +68,10 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.hdds.utils.UniqueId; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static 
org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; /** * Handles CreateKey request. @@ -91,6 +99,20 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { if(checkKeyNameEnabled){ OmUtils.validateKeyName(keyArgs.getKeyName()); } + + String keyPath = keyArgs.getKeyName(); + if (ozoneManager.getEnableFileSystemPaths()) { + // If enabled, disallow keys with trailing /. As in fs semantics + // directories end with trailing /. + keyPath = validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyPath); + if (keyPath.endsWith("/")) { + throw new OMException("Invalid KeyPath, key names with trailing / " + + "are not allowed." + keyPath, + OMException.ResultCodes.INVALID_KEY_NAME); + } + } + // We cannot allocate block for multipart upload part when // createMultipartKey is called, as we will not know type and factor with // which initiateMultipartUpload has started for this key. When @@ -131,7 +153,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { // As for a client for the first time this can be executed on any OM, // till leader is identified. - List< OmKeyLocationInfo > omKeyLocationInfoList = + List omKeyLocationInfoList = allocateBlock(ozoneManager.getScmClient(), ozoneManager.getBlockTokenSecretManager(), type, factor, new ExcludeList(), requestedSize, scmBlockSize, @@ -149,7 +171,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { newKeyArgs = keyArgs.toBuilder().setModificationTime(Time.now()); } + newKeyArgs.setKeyName(keyPath); + generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager); + newCreateKeyRequest = createKeyRequest.toBuilder().setKeyArgs(newKeyArgs) .setClientID(UniqueId.next()); @@ -160,6 +185,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override + @SuppressWarnings("methodlength") public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest(); @@ -184,6 +210,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getOmRequest()); IOException exception = null; Result result = null; + List missingParentInfos = null; try { keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); volumeName = keyArgs.getVolumeName(); @@ -209,8 +236,41 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)); + // If FILE_EXISTS we just override like how we used to do for Key Create. + List< OzoneAcl > inheritAcls; + if (ozoneManager.getEnableFileSystemPaths()) { + OMFileRequest.OMPathInfo pathInfo = + OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName, + bucketName, keyName, Paths.get(keyName)); + OMFileRequest.OMDirectoryResult omDirectoryResult = + pathInfo.getDirectoryResult(); + inheritAcls = pathInfo.getAcls(); + + // Check if a file or directory exists with same key name. + if (omDirectoryResult == DIRECTORY_EXISTS) { + throw new OMException("Cannot write to " + + "directory. 
createIntermediateDirs behavior is enabled and " + + "hence / has special interpretation: " + keyName, NOT_A_FILE); + } else + if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) { + throw new OMException("Can not create file: " + keyName + + " as there is already file in the given path", NOT_A_FILE); + } + + missingParentInfos = OMDirectoryCreateRequest + .getAllParentInfo(ozoneManager, keyArgs, + pathInfo.getMissingParents(), inheritAcls, trxnLogIndex); + + // Add cache entries for the prefix directories. + // Skip adding for the file key itself, until Key Commit. + OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName, + bucketName, Optional.absent(), Optional.of(missingParentInfos), + trxnLogIndex); + + } + omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs, dbKeyInfo, - keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), + keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -238,7 +298,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setOpenVersion(openVersion).build()) .setCmdType(Type.CreateKey); omClientResponse = new OMKeyCreateResponse(omResponse.build(), - omKeyInfo, null, clientID); + omKeyInfo, missingParentInfos, clientID); result = Result.SUCCESS; } catch (IOException ex) { @@ -269,7 +329,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, break; case FAILURE: LOG.error("Key creation failed. Volume:{}, Bucket:{}, Key{}. " + - "Exception:{}", volumeName, bucketName, keyName, exception); + "Exception:{}", volumeName, bucketName, keyName, exception); break; default: LOG.error("Unrecognized Result for OMKeyCreateRequest: {}", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 8b7541734206..4d8562c206d6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -75,7 +75,9 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs(); OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs = - keyArgs.toBuilder().setModificationTime(Time.now()); + keyArgs.toBuilder().setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName())); return getOmRequest().toBuilder() .setDeleteKeyRequest(deleteKeyRequest.toBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index 91db347c1470..e6e9839062b1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -85,13 +85,22 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { OmUtils.validateKeyName(renameKeyRequest.getToKeyName()); } - // Set modification time. 
- KeyArgs.Builder newKeyArgs = renameKeyRequest.getKeyArgs().toBuilder() - .setModificationTime(Time.now()); + KeyArgs renameKeyArgs = renameKeyRequest.getKeyArgs(); + + // Set modification time and normalize key if needed. + KeyArgs.Builder newKeyArgs = renameKeyArgs.toBuilder() + .setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), + renameKeyArgs.getKeyName())); return getOmRequest().toBuilder() .setRenameKeyRequest(renameKeyRequest.toBuilder() - .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build(); + .setKeyArgs(newKeyArgs) + .setToKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), + renameKeyRequest.getToKeyName()))) + .setUserInfo(getUserInfo()).build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index aa96ba995ab9..f7951a296807 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -67,15 +67,17 @@ public S3InitiateMultipartUploadRequest(OMRequest omRequest) { } @Override - public OMRequest preExecute(OzoneManager ozoneManager) { + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { MultipartInfoInitiateRequest multipartInfoInitiateRequest = getOmRequest().getInitiateMultiPartUploadRequest(); Preconditions.checkNotNull(multipartInfoInitiateRequest); - OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs = - multipartInfoInitiateRequest.getKeyArgs().toBuilder() + KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs(); + KeyArgs.Builder newKeyArgs = keyArgs.toBuilder() .setMultipartUploadID(UUID.randomUUID().toString() + "-" + - UniqueId.next()).setModificationTime(Time.now()); + UniqueId.next()).setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName())); return getOmRequest().toBuilder() .setUserInfo(getUserInfo()) @@ -92,7 +94,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, MultipartInfoInitiateRequest multipartInfoInitiateRequest = getOmRequest().getInitiateMultiPartUploadRequest(); - OzoneManagerProtocolProtos.KeyArgs keyArgs = + KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs(); Preconditions.checkNotNull(keyArgs.getMultipartUploadID()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 0726fe4a9c7e..c0ef8b378261 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -73,7 +73,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { return getOmRequest().toBuilder().setAbortMultiPartUploadRequest( getOmRequest().getAbortMultiPartUploadRequest().toBuilder() - .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now()))) + .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), + keyArgs.getKeyName())))) .setUserInfo(getUserInfo()).build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index 283a22dc37b7..1e29d5f3f212 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -70,14 +70,17 @@ public S3MultipartUploadCommitPartRequest(OMRequest omRequest) { } @Override - public OMRequest preExecute(OzoneManager ozoneManager) { + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = getOmRequest().getCommitMultiPartUploadRequest(); + KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs(); return getOmRequest().toBuilder().setCommitMultiPartUploadRequest( multipartCommitUploadPartRequest.toBuilder() - .setKeyArgs(multipartCommitUploadPartRequest.getKeyArgs() - .toBuilder().setModificationTime(Time.now()))) + .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), + keyArgs.getKeyName())))) .setUserInfo(getUserInfo()).build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index a9aefa08a5b8..83cc28b01070 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -81,7 +81,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { return getOmRequest().toBuilder() .setCompleteMultiPartUploadRequest(multipartUploadCompleteRequest .toBuilder().setKeyArgs(keyArgs.toBuilder() - .setModificationTime(Time.now()))) + .setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), + keyArgs.getKeyName())))) .setUserInfo(getUserInfo()).build(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java new file mode 100644 index 000000000000..6137444b2296 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.request; + +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import static org.apache.hadoop.ozone.om.request.OMClientRequest.validateAndNormalizeKey; +import static org.junit.Assert.fail; + +/** + * Class to test normalize paths. + */ +public class TestNormalizePaths { + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + @Test + public void testNormalizePathsEnabled() throws Exception { + + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "a/b/c/d")); + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "/a/b/c/d")); + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "////a/b/c/d")); + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "////a/b/////c/d")); + Assert.assertEquals("a/b/c/...../d", + validateAndNormalizeKey(true, "////a/b/////c/...../d")); + Assert.assertEquals("a/b/d", + validateAndNormalizeKey(true, "/a/b/c/../d")); + Assert.assertEquals("a", + validateAndNormalizeKey(true, "a")); + Assert.assertEquals("a/b", + validateAndNormalizeKey(true, "/a/./b")); + Assert.assertEquals("a/b", + validateAndNormalizeKey(true, ".//a/./b")); + Assert.assertEquals("a/", + validateAndNormalizeKey(true, "/a/.")); + Assert.assertEquals("b/c", + validateAndNormalizeKey(true, "//./b/c/")); + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "a/b/c/d/")); + Assert.assertEquals("a/b/c/...../d", + validateAndNormalizeKey(true, "////a/b/////c/...../d/")); + } + + @Test + public void testNormalizeKeyInvalidPaths() throws OMException { + checkInvalidPath("/a/b/c/../../../../../d"); + checkInvalidPath("../a/b/c/"); + checkInvalidPath("/../..a/b/c/"); + checkInvalidPath("//"); + checkInvalidPath("/////"); + checkInvalidPath(""); + checkInvalidPath("/"); + checkInvalidPath("/:/:"); + } + + private void checkInvalidPath(String keyName) { + try { + validateAndNormalizeKey(true, keyName); + fail("checkInvalidPath failed for path " + keyName); + } catch (OMException ex) { + Assert.assertTrue(ex.getMessage().contains("Invalid KeyPath")); + } + } + + + + @Test + public void testNormalizePathsDisable() throws OMException { + + Assert.assertEquals("/a/b/c/d", + validateAndNormalizeKey(false, "/a/b/c/d")); + Assert.assertEquals("////a/b/c/d", + 
validateAndNormalizeKey(false, "////a/b/c/d")); + Assert.assertEquals("////a/b/////c/d", + validateAndNormalizeKey(false, "////a/b/////c/d")); + Assert.assertEquals("////a/b/////c/...../d", + validateAndNormalizeKey(false, "////a/b/////c/...../d")); + Assert.assertEquals("/a/b/c/../d", + validateAndNormalizeKey(false, "/a/b/c/../d")); + Assert.assertEquals("/a/b/c/../../d", + validateAndNormalizeKey(false, "/a/b/c/../../d")); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index b26505b32d37..2b8ffce49958 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -18,10 +18,15 @@ package org.apache.hadoop.ozone.om.request.key; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.junit.Assert; import org.junit.Test; @@ -37,7 +42,14 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.when; /** * Tests OMCreateKeyRequest class. @@ -82,7 +94,7 @@ public void testValidateAndUpdateCache() throws Exception { omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L, ozoneManagerDoubleBufferHelper); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + Assert.assertEquals(OK, omKeyCreateResponse.getOMResponse().getStatus()); // Check open table whether key is added or not. 
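
With ozone.om.enable.filesystem.paths enabled, the test hunks that follow also verify that every missing parent of a created key shows up as a directory-style entry (a key ending in "/") in the key table. As a rough standalone illustration of which parent keys a single normalized key name implies, roughly the chain that checkIntermediatePaths walks below via keyPath.getParent(); the class and method names here are invented for the example and do not appear in the patch:

import java.util.ArrayList;
import java.util.List;

public final class ParentKeySketch {

  // Illustrative only: list the trailing-slash directory keys implied by a
  // normalized key name, outermost first,
  // e.g. "a/b/c/file1" -> ["a/", "a/b/", "a/b/c/"].
  public static List<String> impliedParentKeys(String normalizedKeyName) {
    List<String> parents = new ArrayList<>();
    int idx = normalizedKeyName.indexOf('/');
    while (idx != -1) {
      parents.add(normalizedKeyName.substring(0, idx + 1));
      idx = normalizedKeyName.indexOf('/', idx + 1);
    }
    return parents;
  }

  public static void main(String[] args) {
    System.out.println(impliedParentKeys("a/b/c/file1"));
    // prints [a/, a/b/, a/b/c/]
  }
}
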
@@ -310,6 +322,11 @@ private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { @SuppressWarnings("parameterNumber") private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) { + return createKeyRequest(isMultipartKey, partNumber, keyName); + } + + private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, + String keyName) { KeyArgs.Builder keyArgs = KeyArgs.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) @@ -327,7 +344,170 @@ private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) { .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) .setClientId(UUID.randomUUID().toString()) .setCreateKeyRequest(createKeyRequest).build(); + } + + @Test + public void testKeyCreateWithFileSystemPathsEnabled() throws Exception { + + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + when(ozoneManager.getConfiguration()).thenReturn(configuration); + when(ozoneManager.getEnableFileSystemPaths()).thenReturn(true); + + // Add volume and bucket entries to DB. + addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + + keyName = "dir1/dir2/dir3/file1"; + createAndCheck(keyName); + + // Key with leading '/'. + String keyName = "/a/b/c/file1"; + createAndCheck(keyName); + + // Commit openKey entry. + TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + keyName.substring(1), 0L, RATIS, THREE, omMetadataManager); + + // Now create another file in same dir path. + keyName = "/a/b/c/file2"; + createAndCheck(keyName); + + // Create key with multiple /'s + // converted to a/b/c/file5 + keyName = "///a/b///c///file5"; + createAndCheck(keyName); + + // converted to a/b/c/.../file3 + keyName = "///a/b///c//.../file3"; + createAndCheck(keyName); + + // converted to r1/r2 + keyName = "././r1/r2/"; + createAndCheck(keyName); + + // converted to ..d1/d2/d3 + keyName = "..d1/d2/d3/"; + createAndCheck(keyName); + + // Create a file, where a file already exists in the path. + // Now try with a file exists in path. Should fail. + keyName = "/a/b/c/file1/file3"; + checkNotAFile(keyName); + + // Empty keyName. 
+ keyName = ""; + checkNotAValidPath(keyName); + + // Key name ends with / + keyName = "/a/./"; + checkNotAValidPath(keyName); + + keyName = "/////"; + checkNotAValidPath(keyName); + + keyName = "../../b/c"; + checkNotAValidPath(keyName); + + keyName = "../../b/c/"; + checkNotAValidPath(keyName); + + keyName = "../../b:/c/"; + checkNotAValidPath(keyName); + + keyName = ":/c/"; + checkNotAValidPath(keyName); + + keyName = ""; + checkNotAValidPath(keyName); + + keyName = "../a/b"; + checkNotAValidPath(keyName); + + keyName = "/../a/b"; + checkNotAValidPath(keyName); + + } + + private void checkNotAValidPath(String keyName) { + OMRequest omRequest = createKeyRequest(false, 0, keyName); + OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest); + + try { + omKeyCreateRequest.preExecute(ozoneManager); + fail("checkNotAValidPath failed for path" + keyName); + } catch (IOException ex) { + Assert.assertTrue(ex instanceof OMException); + OMException omException = (OMException) ex; + Assert.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, + omException.getResult()); + } + + + } + private void checkNotAFile(String keyName) throws Exception { + OMRequest omRequest = createKeyRequest(false, 0, keyName); + + OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest); + + omRequest = omKeyCreateRequest.preExecute(ozoneManager); + + omKeyCreateRequest = new OMKeyCreateRequest(omRequest); + + OMClientResponse omClientResponse = + omKeyCreateRequest.validateAndUpdateCache(ozoneManager, + 101L, ozoneManagerDoubleBufferHelper); + + Assert.assertEquals(NOT_A_FILE, + omClientResponse.getOMResponse().getStatus()); + } + + + private void createAndCheck(String keyName) throws Exception { + OMRequest omRequest = createKeyRequest(false, 0, keyName); + + OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest); + + omRequest = omKeyCreateRequest.preExecute(ozoneManager); + + omKeyCreateRequest = new OMKeyCreateRequest(omRequest); + + OMClientResponse omClientResponse = + omKeyCreateRequest.validateAndUpdateCache(ozoneManager, + 101L, ozoneManagerDoubleBufferHelper); + + Assert.assertEquals(OK, omClientResponse.getOMResponse().getStatus()); + + checkCreatedPaths(omKeyCreateRequest, omRequest, keyName); + } + + private void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest, + OMRequest omRequest, String keyName) throws Exception { + keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName); + // Check intermediate directories created or not. 
+ Path keyPath = Paths.get(keyName); + checkIntermediatePaths(keyPath); + + // Check open key entry + String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, + keyName, omRequest.getCreateKeyRequest().getClientID()); + OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); + Assert.assertNotNull(omKeyInfo); + } + + + + private void checkIntermediatePaths(Path keyPath) throws Exception { + // Check intermediate paths are created + keyPath = keyPath.getParent(); + while(keyPath != null) { + Assert.assertNotNull(omMetadataManager.getKeyTable().get( + omMetadataManager.getOzoneDirKey(volumeName, bucketName, + keyPath.toString()))); + keyPath = keyPath.getParent(); + } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index 1d785609b566..5633c726c54e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -36,7 +36,7 @@ public class TestS3InitiateMultipartUploadRequest extends TestS3MultipartRequest { @Test - public void testPreExecute() { + public void testPreExecute() throws Exception { doPreExecuteInitiateMPU(UUID.randomUUID().toString(), UUID.randomUUID().toString(), UUID.randomUUID().toString()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index 0271a7a400b1..f0f040f39479 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -107,7 +107,7 @@ public void stop() { * @return OMRequest - returned from preExecute. 
*/ protected OMRequest doPreExecuteInitiateMPU( - String volumeName, String bucketName, String keyName) { + String volumeName, String bucketName, String keyName) throws Exception { OMRequest omRequest = TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName, keyName); @@ -141,7 +141,8 @@ protected OMRequest doPreExecuteInitiateMPU( */ protected OMRequest doPreExecuteCommitMPU( String volumeName, String bucketName, String keyName, - long clientID, String multipartUploadID, int partNumber) { + long clientID, String multipartUploadID, int partNumber) + throws Exception { // Just set dummy size long dataSize = 100L; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index 5b220bf4c874..d623b17dcfe1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -38,7 +38,7 @@ public class TestS3MultipartUploadCommitPartRequest extends TestS3MultipartRequest { @Test - public void testPreExecute() { + public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); From 0305a776664579ff51cbde079fcba227609be932 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Mon, 20 Jul 2020 15:27:10 +0800 Subject: [PATCH 040/165] HDDS-3806. Support recognize aws v2 Authorization header. 
(#1098) --- ...cessor.java => AWSSignatureProcessor.java} | 35 ++++++++++----- .../hadoop/ozone/s3/OzoneClientProducer.java | 8 ++-- .../s3/header/AuthorizationHeaderV2.java | 2 +- .../s3/header/AuthorizationHeaderV4.java | 4 +- ...or.java => TestAWSSignatureProcessor.java} | 44 +++++++++++++++++-- .../ozone/s3/endpoint/TestBucketPut.java | 2 +- .../s3/header/TestAuthorizationHeaderV4.java | 2 +- 7 files changed, 75 insertions(+), 22 deletions(-) rename hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/{AWSV4SignatureProcessor.java => AWSSignatureProcessor.java} (92%) rename hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/{TestAWSV4SignatureProcessor.java => TestAWSSignatureProcessor.java} (69%) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSSignatureProcessor.java similarity index 92% rename from hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java rename to hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSSignatureProcessor.java index 1ff1a72575b7..0cb82fb77b9e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSSignatureProcessor.java @@ -43,6 +43,7 @@ import java.util.regex.Pattern; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV2; import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV4; import org.apache.hadoop.ozone.s3.header.Credential; @@ -54,15 +55,15 @@ import org.slf4j.LoggerFactory; /** - * Parser to process AWS v4 auth request. Creates string to sign and auth + * Parser to process AWS V2 & V4 auth request. Creates string to sign and auth * header. For more details refer to AWS documentation https://docs.aws * .amazon.com/general/latest/gr/sigv4-create-canonical-request.html. **/ @RequestScoped -public class AWSV4SignatureProcessor implements SignatureProcessor { +public class AWSSignatureProcessor implements SignatureProcessor { private final static Logger LOG = - LoggerFactory.getLogger(AWSV4SignatureProcessor.class); + LoggerFactory.getLogger(AWSSignatureProcessor.class); @Context private ContainerRequestContext context; @@ -72,13 +73,12 @@ public class AWSV4SignatureProcessor implements SignatureProcessor { private String uri; private String method; private AuthorizationHeaderV4 v4Header; + private AuthorizationHeaderV2 v2Header; private String stringToSign; @PostConstruct public void init() throws Exception { - LOG.info("Initializing request header parser"); - //header map is MUTABLE. It's better to save it here. (with lower case // keys!!!) 
this.headers = new LowerCaseKeyStringMap(new HashMap<>()); @@ -107,10 +107,18 @@ public void init() this.uri = context.getUriInfo().getRequestUri().getPath(); this.method = context.getMethod(); - if (v4Header == null) { - v4Header = new AuthorizationHeaderV4(headers.get(AUTHORIZATION_HEADER)); + String authHeader = headers.get(AUTHORIZATION_HEADER); + String[] split = authHeader.split(" "); + if (split[0].equals(AuthorizationHeaderV2.IDENTIFIER)) { + if (v2Header == null) { + v2Header = new AuthorizationHeaderV2(authHeader); + } + } else { + if (v4Header == null) { + v4Header = new AuthorizationHeaderV4(authHeader); + } + parse(); } - parse(); } @@ -320,11 +328,13 @@ public static String hash(String payload) throws NoSuchAlgorithmException { } public String getAwsAccessId() { - return v4Header.getAccessKeyID(); + return (v4Header != null ? v4Header.getAccessKeyID() : + v2Header != null ? v2Header.getAccessKeyID() : ""); } public String getSignature() { - return v4Header.getSignature(); + return (v4Header != null ? v4Header.getSignature() : + v2Header != null ? v2Header.getSignature() : ""); } public String getStringToSign() throws Exception { @@ -342,6 +352,11 @@ public void setV4Header( this.v4Header = v4Header; } + @VisibleForTesting + public void setV2Header(AuthorizationHeaderV2 v2Header) { + this.v2Header = v2Header; + } + /** * A simple map which forces lower case key usage. */ diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java index 3cd7b7c5dc02..a3042c13a3cf 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java @@ -51,7 +51,7 @@ public class OzoneClientProducer { private OzoneClient client; @Inject - private SignatureProcessor v4RequestParser; + private SignatureProcessor signatureParser; @Inject private OzoneConfiguration ozoneConfiguration; @@ -76,7 +76,7 @@ public void destory() throws IOException { private OzoneClient getClient(OzoneConfiguration config) throws IOException { try { - String awsAccessId = v4RequestParser.getAwsAccessId(); + String awsAccessId = signatureParser.getAwsAccessId(); UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(awsAccessId); if (OzoneSecurityUtil.isSecurityEnabled(config)) { @@ -85,8 +85,8 @@ private OzoneClient getClient(OzoneConfiguration config) throws IOException { OzoneTokenIdentifier identifier = new OzoneTokenIdentifier(); identifier.setTokenType(S3AUTHINFO); - identifier.setStrToSign(v4RequestParser.getStringToSign()); - identifier.setSignature(v4RequestParser.getSignature()); + identifier.setStrToSign(signatureParser.getStringToSign()); + identifier.setSignature(signatureParser.getSignature()); identifier.setAwsAccessId(awsAccessId); identifier.setOwner(new Text(awsAccessId)); if (LOG.isTraceEnabled()) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java index dfafc3a5acad..fe096cee9ff5 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java @@ -29,7 +29,7 @@ */ public class AuthorizationHeaderV2 { - private final static 
String IDENTIFIER = "AWS"; + public final static String IDENTIFIER = "AWS"; private String authHeader; private String identifier; private String accessKeyID; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java index 0d9f092b8e36..1e48689a86c7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java @@ -35,8 +35,8 @@ import static org.apache.commons.lang3.StringUtils.isEmpty; import static org.apache.commons.lang3.StringUtils.isNotEmpty; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; -import static org.apache.hadoop.ozone.s3.AWSV4SignatureProcessor.AWS4_SIGNING_ALGORITHM; -import static org.apache.hadoop.ozone.s3.AWSV4SignatureProcessor.DATE_FORMATTER; +import static org.apache.hadoop.ozone.s3.AWSSignatureProcessor.AWS4_SIGNING_ALGORITHM; +import static org.apache.hadoop.ozone.s3.AWSSignatureProcessor.DATE_FORMATTER; /** * S3 Authorization header. diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSV4SignatureProcessor.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSSignatureProcessor.java similarity index 69% rename from hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSV4SignatureProcessor.java rename to hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSSignatureProcessor.java index 11b3b91724df..239e2857957b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSV4SignatureProcessor.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSSignatureProcessor.java @@ -24,6 +24,7 @@ import java.net.URI; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV2; import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV4; import org.junit.Assert; @@ -33,10 +34,10 @@ /** * Test the Auth parser. 
*/ -public class TestAWSV4SignatureProcessor { +public class TestAWSSignatureProcessor { @Test - public void testInitialization() throws Exception { + public void testV4Initialization() throws Exception { MultivaluedMap headers = new MultivaluedHashMap<>(); headers.putSingle("Content-Length", "123"); @@ -76,7 +77,7 @@ public void validateDateRange() throws OS3Exception { Mockito.when(mock.getMethod()).thenReturn("GET"); Mockito.when(mock.getUriInfo()).thenReturn(uriInfo); - AWSV4SignatureProcessor parser = new AWSV4SignatureProcessor() { + AWSSignatureProcessor parser = new AWSSignatureProcessor() { @Override void validateSignedHeader(String header, String headerValue) throws OS3Exception { @@ -100,4 +101,41 @@ void validateSignedHeader(String header, String headerValue) "f20d4de80af2271545385e8d4c7df608cae70a791c69b97aab1527ed93a0d665", parser.getStringToSign()); } + + @Test + public void testV2Initialization() throws Exception { + + MultivaluedMap headers = new MultivaluedHashMap<>(); + String authHeader = "AWS root:ixWQAgWvJDuqLUqgDG9o4b2HF7c="; + headers.putSingle("Authorization", authHeader); + + AuthorizationHeaderV2 parserAuthHeader = + new AuthorizationHeaderV2(authHeader); + + MultivaluedMap queryParameters = new MultivaluedHashMap<>(); + + UriInfo uriInfo = Mockito.mock(UriInfo.class); + Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryParameters); + Mockito.when(uriInfo.getRequestUri()) + .thenReturn(new URI("http://localhost/buckets")); + + ContainerRequestContext mock = Mockito.mock(ContainerRequestContext.class); + Mockito.when(mock.getHeaders()).thenReturn(headers); + Mockito.when(mock.getMethod()).thenReturn("GET"); + Mockito.when(mock.getUriInfo()).thenReturn(uriInfo); + + AWSSignatureProcessor parser = new AWSSignatureProcessor() { + @Override + void validateSignedHeader(String header, String headerValue) + throws OS3Exception { + super.validateSignedHeader(header, headerValue); + } + }; + parser.setV2Header(parserAuthHeader); + parser.setContext(mock); + parser.init(); + + Assert.assertEquals("root", parser.getAwsAccessId()); + Assert.assertEquals("ixWQAgWvJDuqLUqgDG9o4b2HF7c=", parser.getSignature()); + } } \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java index 014cb3e5ae8a..b4a21e3ae583 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java @@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static org.apache.hadoop.ozone.s3.AWSV4SignatureProcessor.DATE_FORMATTER; +import static org.apache.hadoop.ozone.s3.AWSSignatureProcessor.DATE_FORMATTER; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java index e5d5562cd1a5..5ca1c4522414 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java +++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java @@ -26,7 +26,7 @@ import java.time.LocalDate; import static java.time.temporal.ChronoUnit.DAYS; -import static org.apache.hadoop.ozone.s3.AWSV4SignatureProcessor.DATE_FORMATTER; +import static org.apache.hadoop.ozone.s3.AWSSignatureProcessor.DATE_FORMATTER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; From dd16443dd8f560620b1afdf77c81186712ee50be Mon Sep 17 00:00:00 2001 From: HuangTao Date: Mon, 20 Jul 2020 15:32:58 +0800 Subject: [PATCH 041/165] HDDS-3984. Support filter and search the columns in recon UI (#1218) --- .../src/types/datanode.types.tsx | 4 +- .../src/utils/columnSearch.less | 36 +++++++ .../src/utils/columnSearch.tsx | 94 +++++++++++++++++++ .../src/views/datanodes/datanodes.tsx | 29 +++++- .../src/views/pipelines/pipelines.tsx | 30 +++++- 5 files changed, 186 insertions(+), 7 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.less create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.tsx diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx index ba8336b678c3..e9cb16820814 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx @@ -16,7 +16,9 @@ * limitations under the License. */ -export type DatanodeStatus = 'HEALTHY' | 'STALE' | 'DEAD' | 'DECOMMISSIONING' | 'DECOMMISSIONED'; +export const DatanodeStatusList = ['HEALTHY', 'STALE', 'DEAD', 'DECOMMISSIONING', 'DECOMMISSIONED'] as const; +type DatanodeStatusTuple = typeof DatanodeStatusList; +export type DatanodeStatus = DatanodeStatusTuple[number]; // 'HEALTHY' | 'STALE' | 'DEAD' | 'DECOMMISSIONING' | 'DECOMMISSIONED'; export interface IStorageReport { capacity: number; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.less new file mode 100644 index 000000000000..4c7013a1304d --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.less @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +.column-search-container { + padding: 8px; + + .input-block { + width: 188px; + margin-bottom: 8px; + display: block; + } + + .search-button { + width: 90px; + margin-right: 8px; + } + + .reset-button { + width: 90px; + } +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.tsx new file mode 100644 index 000000000000..319bfd29e013 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.tsx @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import {Input, Button, Icon} from 'antd'; +import './columnSearch.less'; + +class ColumnSearch extends React.PureComponent { + searchInput: Input | null = null; + + getColumnSearchProps = (dataIndex: string) => ({ + filterDropdown: ({ + setSelectedKeys, + selectedKeys, + confirm, + clearFilters + }: { + setSelectedKeys: (keys: string[]) => void; + selectedKeys: string[]; + confirm: () => void; + clearFilters: () => void; + }) => ( +

+ { + this.searchInput = node; + }} + className='input-block' + placeholder={`Search ${dataIndex}`} + value={selectedKeys[0]} + onChange={e => + setSelectedKeys(e.target.value ? [e.target.value] : [])} + onPressEnter={() => this.handleSearch(confirm)} + /> + + +
+ ), + filterIcon: (filtered: boolean) => ( + + ), + onFilter: (value: string, record: any) => + record[dataIndex].toString().toLowerCase().includes(value.toLowerCase()), + onFilterDropdownVisibleChange: (visible: boolean) => { + if (visible) { + setTimeout(() => { + if (this.searchInput) { + this.searchInput.select(); + } + }); + } + } + }); + + handleSearch = (confirm: () => void) => { + confirm(); + }; + + handleReset = (clearFilters: () => void) => { + clearFilters(); + }; +} + +export {ColumnSearch}; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index fada1f43c35a..ceb7b51ac05c 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -23,13 +23,14 @@ import {PaginationConfig} from 'antd/lib/pagination'; import moment from 'moment'; import {ReplicationIcon} from 'utils/themeIcons'; import StorageBar from 'components/storageBar/storageBar'; -import {DatanodeStatus, IStorageReport} from 'types/datanode.types'; +import {DatanodeStatus, DatanodeStatusList, IStorageReport} from 'types/datanode.types'; import './datanodes.less'; import {AutoReloadHelper} from 'utils/autoReloadHelper'; import AutoReloadPanel from 'components/autoReloadPanel/autoReloadPanel'; import {MultiSelect, IOption} from 'components/multiSelect/multiSelect'; import {ActionMeta, ValueType} from 'react-select'; import {showDataFetchError} from 'utils/common'; +import {ColumnSearch} from 'utils/columnSearch'; interface IDatanodeResponse { hostname: string; @@ -98,6 +99,9 @@ const COLUMNS = [ dataIndex: 'state', key: 'state', isVisible: true, + filterMultiple: true, + filters: DatanodeStatusList.map(status => ({text: status, value: status})), + onFilter: (value: DatanodeStatus, record: IDatanode) => record.state === value, render: (text: DatanodeStatus) => renderDatanodeStatus(text), sorter: (a: IDatanode, b: IDatanode) => a.state.localeCompare(b.state) }, @@ -106,6 +110,7 @@ const COLUMNS = [ dataIndex: 'uuid', key: 'uuid', isVisible: true, + isSearchable: true, sorter: (a: IDatanode, b: IDatanode) => a.uuid.localeCompare(b.uuid), defaultSortOrder: 'ascend' as const }, @@ -114,6 +119,7 @@ const COLUMNS = [ dataIndex: 'hostname', key: 'hostname', isVisible: true, + isSearchable: true, sorter: (a: IDatanode, b: IDatanode) => a.hostname.localeCompare(b.hostname), defaultSortOrder: 'ascend' as const }, @@ -173,6 +179,7 @@ const COLUMNS = [ dataIndex: 'leaderCount', key: 'leaderCount', isVisible: true, + isSearchable: true, sorter: (a: IDatanode, b: IDatanode) => a.leaderCount - b.leaderCount }, { @@ -180,6 +187,7 @@ const COLUMNS = [ dataIndex: 'containers', key: 'containers', isVisible: true, + isSearchable: true, sorter: (a: IDatanode, b: IDatanode) => a.containers - b.containers }, { @@ -187,6 +195,7 @@ const COLUMNS = [ dataIndex: 'version', key: 'version', isVisible: false, + isSearchable: true, sorter: (a: IDatanode, b: IDatanode) => a.version.localeCompare(b.version), defaultSortOrder: 'ascend' as const }, @@ -330,9 +339,21 @@ export class Datanodes extends React.Component, IDatanode
- selectedColumns.some(e => e.value === column.key) - )} + columns={COLUMNS.reduce((filtered, column) => { + if (selectedColumns.some(e => e.value === column.key)) { + if (column.isSearchable) { + const newColumn = { + ...column, + ...new ColumnSearch(column).getColumnSearchProps(column.dataIndex) + }; + filtered.push(newColumn); + } else { + filtered.push(column); + } + } + + return filtered; + }, [])} loading={loading} pagination={paginationConfig} rowKey='hostname' diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx index b898818cd234..342a8bd35f3f 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx @@ -28,9 +28,12 @@ import {AutoReloadHelper} from 'utils/autoReloadHelper'; import AutoReloadPanel from 'components/autoReloadPanel/autoReloadPanel'; import {showDataFetchError} from 'utils/common'; import {IAxiosResponse} from 'types/axios.types'; +import {ColumnSearch} from 'utils/columnSearch'; const {TabPane} = Tabs; -export type PipelineStatus = 'active' | 'inactive'; +const PipelineStatusList = ['OPEN', 'CLOSING', 'QUASI_CLOSED', 'CLOSED', 'UNHEALTHY', 'INVALID', 'DELETED'] as const; +type PipelineStatusTuple = typeof PipelineStatusList; +export type PipelineStatus = PipelineStatusTuple[number]; // 'OPEN' | 'CLOSING' | 'QUASI_CLOSED' | 'CLOSED' | 'UNHEALTHY' | 'INVALID' | 'DELETED'; interface IPipelineResponse { pipelineId: string; @@ -62,6 +65,7 @@ const COLUMNS = [ title: 'Pipeline ID', dataIndex: 'pipelineId', key: 'pipelineId', + isSearchable: true, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.pipelineId.localeCompare(b.pipelineId) }, { @@ -89,24 +93,30 @@ const COLUMNS = [ title: 'Status', dataIndex: 'status', key: 'status', + filterMultiple: true, + filters: PipelineStatusList.map(status => ({text: status, value: status})), + onFilter: (value: PipelineStatus, record: IPipelineResponse) => record.status === value, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.status.localeCompare(b.status) }, { title: 'Containers', dataIndex: 'containers', key: 'containers', + isSearchable: true, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.containers - b.containers }, { title: 'Datanodes', dataIndex: 'datanodes', key: 'datanodes', + isSearchable: true, render: (datanodes: string[]) =>
{datanodes.map(datanode =>
{datanode}
)}
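    // Note (sketch of the wiring): the isSearchable flag is a custom marker,
    // not an antd column prop; the COLUMNS.reduce() call further down merges
    // ColumnSearch#getColumnSearchProps (filterDropdown, filterIcon, onFilter)
    // into each column carrying the flag before the columns reach the table.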
}, { title: 'Leader', dataIndex: 'leaderNode', key: 'leaderNode', + isSearchable: true, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.leaderNode.localeCompare(b.leaderNode) }, { @@ -128,6 +138,7 @@ const COLUMNS = [ title: 'No. of Elections', dataIndex: 'leaderElections', key: 'leaderElections', + isSearchable: true, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.leaderElections - b.leaderElections } ]; @@ -205,7 +216,22 @@ export class Pipelines extends React.Component, IPipeline
-
+
((filtered, column) => { + if (column.isSearchable) { + const newColumn = { + ...column, + ...new ColumnSearch(column).getColumnSearchProps(column.dataIndex) + }; + filtered.push(newColumn); + } else { + filtered.push(column); + } + + return filtered; + }, [])} + loading={activeLoading} pagination={paginationConfig} rowKey='pipelineId'/> From f4615f5843c3682e2a45c8ee09ca22f8ff63c13d Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 20 Jul 2020 22:02:55 +0200 Subject: [PATCH 042/165] HDDS-3987. Encrypted bucket creation failed with INVALID_REQUEST Encryption cannot be set for bucket links (#1221) --- .../dist/src/main/compose/ozonesecure/test.sh | 4 ++ hadoop-ozone/dist/src/main/compose/testlib.sh | 2 +- .../src/main/smoketest/ozone-lib/shell.robot | 5 +++ .../security/bucket-encryption.robot | 40 +++++++++++++++++++ .../request/bucket/OMBucketCreateRequest.java | 4 +- 5 files changed, 52 insertions(+), 3 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh index ce50fa02fc0e..84de2a95a4ff 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh @@ -23,8 +23,12 @@ source "$COMPOSE_DIR/../testlib.sh" export SECURITY_ENABLED=true +: ${OZONE_BUCKET_KEY_NAME:=key1} + start_docker_env +execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME} + execute_robot_test scm kinit.robot execute_robot_test scm basic diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 56c35c186243..5a0563308364 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -114,7 +114,7 @@ execute_robot_test(){ OUTPUT_PATH="$RESULT_DIR_INSIDE/${OUTPUT_FILE}" # shellcheck disable=SC2068 docker-compose exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE" \ - && docker-compose exec -T "$CONTAINER" robot -v OM_SERVICE_ID:"${OM_SERVICE_ID}" -v SECURITY_ENABLED:"${SECURITY_ENABLED}" -v OM_HA_PARAM:"${OM_HA_PARAM}" ${ARGUMENTS[@]} --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST" + && docker-compose exec -T "$CONTAINER" robot -v OM_SERVICE_ID:"${OM_SERVICE_ID}" -v SECURITY_ENABLED:"${SECURITY_ENABLED}" -v OM_HA_PARAM:"${OM_HA_PARAM}" -v KEY_NAME:"${OZONE_BUCKET_KEY_NAME}" ${ARGUMENTS[@]} --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST" local -i rc=$? 
FULL_CONTAINER_NAME=$(docker-compose ps | grep "_${CONTAINER}_" | head -n 1 | awk '{print $1}') diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot index 2e56ae40eeb5..9afc6df2eabb 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot @@ -46,3 +46,8 @@ Verify ACL [arguments] ${object_type} ${object} ${type} ${name} ${acls} ${actual_acls} = Execute ozone sh ${object_type} getacl ${object} | jq -r '.[] | select(.type == "${type}") | select(.name == "${name}") | .aclList[]' | xargs Should Be Equal ${acls} ${actual_acls} + +Create Random Volume + ${random} = Generate Random String 5 [LOWER] + Execute ozone sh volume create o3://${OM_SERVICE_ID}/vol-${random} + [return] vol-${random} diff --git a/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot b/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot new file mode 100644 index 000000000000..e1f96b190f4c --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
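+#
+# Note: this suite assumes the encryption key named by ${KEY_NAME} (default
+# "key1") already exists in KMS; compose/ozonesecure/test.sh creates it with
+# "hadoop key create" in the kms container, and testlib.sh passes the name in
+# through the KEY_NAME robot variable.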
+ +*** Settings *** +Documentation Test for bucket encryption +Library BuiltIn +Library String +Resource ../commonlib.robot +Resource ../lib/os.robot +Resource ../ozone-lib/shell.robot +Test Setup Setup Test +Test Timeout 5 minutes + +*** Variables *** +${KEY_NAME} key1 +${VOLUME} + +*** Keywords *** +Setup Test + ${volume} = Create Random Volume + Set Suite Variable ${VOLUME} ${volume} + + +*** Test Cases *** +Create Encrypted Bucket + ${output} = Execute ozone sh bucket create -k ${KEY_NAME} o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket + Should Not Contain ${output} INVALID_REQUEST + Bucket Exists o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 71d5458c84e4..7c60f6180817 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -115,8 +115,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { newBucketInfo.setBeinfo(getBeinfo(kmsProvider, bucketInfo)); } - boolean hasSourceVolume = bucketInfo.getSourceVolume() != null; - boolean hasSourceBucket = bucketInfo.getSourceBucket() != null; + boolean hasSourceVolume = bucketInfo.hasSourceVolume(); + boolean hasSourceBucket = bucketInfo.hasSourceBucket(); if (hasSourceBucket != hasSourceVolume) { throw new OMException("Both source volume and source bucket are " + From d08ee41e6e7c1ddfe2a346238624f774be6210d2 Mon Sep 17 00:00:00 2001 From: Siyao Meng <50227127+smengcl@users.noreply.github.com> Date: Mon, 20 Jul 2020 20:11:23 -0700 Subject: [PATCH 043/165] HDDS-3982. Disable moveToTrash in o3fs and ofs temporarily (#1215) --- hadoop-hdds/docs/content/design/ofs.md | 4 ++ hadoop-hdds/docs/content/design/trash.md | 7 +++- hadoop-hdds/docs/content/interface/OzoneFS.md | 3 ++ .../hadoop/fs/ozone/TestOzoneFileSystem.java | 38 +++++++++++++++++++ .../fs/ozone/TestRootedOzoneFileSystem.java | 37 ++++++++++++++++++ .../hadoop/ozone/shell/TestOzoneShellHA.java | 2 + .../hadoop/fs/ozone/BasicOzoneFileSystem.java | 29 ++++++++++++++ .../fs/ozone/BasicRootedOzoneFileSystem.java | 29 ++++++++++++++ 8 files changed, 148 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/docs/content/design/ofs.md b/hadoop-hdds/docs/content/design/ofs.md index 9a2352e6edf8..7a39665f5280 100644 --- a/hadoop-hdds/docs/content/design/ofs.md +++ b/hadoop-hdds/docs/content/design/ofs.md @@ -155,6 +155,10 @@ This feature wouldn't degrade server performance as the loop is on the client. Think it as a client is issuing multiple requests to the server to get all the information. +# Special note + +Trash is disabled even if `fs.trash.interval` is set on purpose. 
(HDDS-3982) + # Link Design doc is uploaded to the JIRA HDDS-2665: diff --git a/hadoop-hdds/docs/content/design/trash.md b/hadoop-hdds/docs/content/design/trash.md index 78e077a31a14..b936aaecfb45 100644 --- a/hadoop-hdds/docs/content/design/trash.md +++ b/hadoop-hdds/docs/content/design/trash.md @@ -22,4 +22,9 @@ author: Matthew Sharp The design doc is uploaded to the JIRA: -https://issues.apache.org/jira/secure/attachment/12985273/Ozone_Trash_Feature.docx \ No newline at end of file +https://issues.apache.org/jira/secure/attachment/12985273/Ozone_Trash_Feature.docx + +## Special note + +Trash is disabled for both o3fs and ofs even if `fs.trash.interval` is set +on purpose. (HDDS-3982) diff --git a/hadoop-hdds/docs/content/interface/OzoneFS.md b/hadoop-hdds/docs/content/interface/OzoneFS.md index 98bf2f9480e0..7aebe4f9a47b 100644 --- a/hadoop-hdds/docs/content/interface/OzoneFS.md +++ b/hadoop-hdds/docs/content/interface/OzoneFS.md @@ -165,3 +165,6 @@ hdfs dfs -put /etc/hosts /volume1/bucket1/test For more usage, see: https://issues.apache.org/jira/secure/attachment/12987636/Design%20ofs%20v1.pdf +## Special note + +Trash is disabled even if `fs.trash.interval` is set on purpose. (HDDS-3982) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index 700506a5484c..ad1705a8b9c6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -47,6 +48,7 @@ import org.apache.commons.io.IOUtils; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -86,6 +88,7 @@ public class TestOzoneFileSystem { private String volumeName; private String bucketName; private int rootItemCount; + private Trash trash; @Test(timeout = 300_000) public void testCreateFileShouldCheckExistenceOfDirWithSameName() @@ -167,6 +170,8 @@ public void testFileSystem() throws Exception { testOzoneFsServiceLoader(); o3fs = (OzoneFileSystem) fs; + testRenameToTrashDisabled(); + testGetTrashRoots(); testGetTrashRoot(); testGetDirectoryModificationTime(); @@ -197,6 +202,7 @@ public void tearDown() { private void setupOzoneFileSystem() throws IOException, TimeoutException, InterruptedException { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(FS_TRASH_INTERVAL_KEY, 1); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .build(); @@ -215,6 +221,7 @@ private void setupOzoneFileSystem() // Set the number of keys to be processed during batch operate. 
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); fs = FileSystem.get(conf); + trash = new Trash(conf); } private void testOzoneFsServiceLoader() throws IOException { @@ -617,4 +624,35 @@ public void testGetTrashRoots() throws IOException { // Clean up o3fs.delete(trashRoot, true); } + + /** + * Check that no files are actually moved to trash since it is disabled by + * fs.rename(src, dst, options). + */ + public void testRenameToTrashDisabled() throws IOException { + // Create a file + String testKeyName = "testKey1"; + Path path = new Path(OZONE_URI_DELIMITER, testKeyName); + try (FSDataOutputStream stream = fs.create(path)) { + stream.write(1); + } + + // Call moveToTrash. We can't call protected fs.rename() directly + trash.moveToTrash(path); + + // Construct paths + String username = UserGroupInformation.getCurrentUser().getShortUserName(); + Path trashRoot = new Path(OZONE_URI_DELIMITER, TRASH_PREFIX); + Path userTrash = new Path(trashRoot, username); + Path userTrashCurrent = new Path(userTrash, "Current"); + Path trashPath = new Path(userTrashCurrent, testKeyName); + + // Trash Current directory should still have been created. + Assert.assertTrue(o3fs.exists(userTrashCurrent)); + // Check under trash, the key should be deleted instead + Assert.assertFalse(o3fs.exists(trashPath)); + + // Cleanup + o3fs.delete(trashRoot, true); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java index 8c71c61f389d..3aec3e822bc5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -64,6 +65,7 @@ import java.util.TreeSet; import java.util.stream.Collectors; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; @@ -87,6 +89,7 @@ public class TestRootedOzoneFileSystem { private RootedOzoneFileSystem ofs; private ObjectStore objectStore; private static BasicRootedOzoneClientAdapterImpl adapter; + private Trash trash; private String volumeName; private Path volumePath; @@ -98,6 +101,7 @@ public class TestRootedOzoneFileSystem { @Before public void init() throws Exception { conf = new OzoneConfiguration(); + conf.setInt(FS_TRASH_INTERVAL_KEY, 1); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .build(); @@ -122,6 +126,7 @@ public void init() throws Exception { // hence this workaround. 
conf.set("fs.ofs.impl", "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem"); fs = FileSystem.get(conf); + trash = new Trash(conf); ofs = (RootedOzoneFileSystem) fs; adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter(); } @@ -999,4 +1004,36 @@ public void testGetTrashRoots() throws IOException { Assert.assertTrue(volume1.setOwner(prevOwner)); } + /** + * Check that no files are actually moved to trash since it is disabled by + * fs.rename(src, dst, options). + */ + @Test + public void testRenameToTrashDisabled() throws IOException { + // Create a file + String testKeyName = "testKey1"; + Path path = new Path(bucketPath, testKeyName); + try (FSDataOutputStream stream = fs.create(path)) { + stream.write(1); + } + + // Call moveToTrash. We can't call protected fs.rename() directly + trash.moveToTrash(path); + + // Construct paths + String username = UserGroupInformation.getCurrentUser().getShortUserName(); + Path trashRoot = new Path(bucketPath, TRASH_PREFIX); + Path userTrash = new Path(trashRoot, username); + Path userTrashCurrent = new Path(userTrash, "Current"); + Path trashPath = new Path(userTrashCurrent, testKeyName); + + // Trash Current directory should still have been created. + Assert.assertTrue(ofs.exists(userTrashCurrent)); + // Check under trash, the key should be deleted instead + Assert.assertFalse(ofs.exists(trashPath)); + + // Cleanup + ofs.delete(trashRoot, true); + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index e340d40b6f55..17baa06ce005 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -38,6 +38,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; @@ -468,6 +469,7 @@ private OzoneConfiguration getClientConfForOFS( } @Test + @Ignore("HDDS-3982. Disable moveToTrash in o3fs and ofs temporarily") public void testDeleteToTrashOrSkipTrash() throws Exception { final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index 25dea3de9aa3..778277f846d4 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.permission.FsPermission; @@ -397,6 +398,34 @@ public boolean rename(Path src, Path dst) throws IOException { return result; } + /** + * Intercept rename to trash calls from TrashPolicyDefault, + * convert them to delete calls instead. + */ + @Deprecated + protected void rename(final Path src, final Path dst, + final Rename... 
options) throws IOException { + boolean hasMoveToTrash = false; + if (options != null) { + for (Rename option : options) { + if (option == Rename.TO_TRASH) { + hasMoveToTrash = true; + break; + } + } + } + if (!hasMoveToTrash) { + // if doesn't have TO_TRASH option, just pass the call to super + super.rename(src, dst, options); + } else { + // intercept when TO_TRASH is found + LOG.info("Move to trash is disabled for o3fs, deleting instead: {}. " + + "Files or directories will NOT be retained in trash. " + + "Ignore the following TrashPolicyDefault message, if any.", src); + delete(src, true); + } + } + private class DeleteIterator extends OzoneListingIterator { private boolean recursive; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index 7c284281c80b..3bea2bba0454 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.permission.FsPermission; @@ -372,6 +373,34 @@ public boolean rename(Path src, Path dst) throws IOException { return result; } + /** + * Intercept rename to trash calls from TrashPolicyDefault, + * convert them to delete calls instead. + */ + @Deprecated + protected void rename(final Path src, final Path dst, + final Options.Rename... options) throws IOException { + boolean hasMoveToTrash = false; + if (options != null) { + for (Options.Rename option : options) { + if (option == Options.Rename.TO_TRASH) { + hasMoveToTrash = true; + break; + } + } + } + if (!hasMoveToTrash) { + // if doesn't have TO_TRASH option, just pass the call to super + super.rename(src, dst, options); + } else { + // intercept when TO_TRASH is found + LOG.info("Move to trash is disabled for ofs, deleting instead: {}. " + + "Files or directories will NOT be retained in trash. " + + "Ignore the following TrashPolicyDefault message, if any.", src); + delete(src, true); + } + } + private class DeleteIterator extends OzoneListingIterator { final private boolean recursive; private final OzoneBucket bucket; From 548caabe9c03a946dea76e026b93f1d039ecabdb Mon Sep 17 00:00:00 2001 From: Lokesh Jain Date: Tue, 21 Jul 2020 14:47:46 +0530 Subject: [PATCH 044/165] Update ratis to 1.0.0 (#1222) --- .../server/ratis/ContainerStateMachine.java | 12 ++++++------ pom.xml | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index b1c8370a48d6..840d87ca6d8d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -103,7 +103,7 @@ * processed in 2 phases. 
The 2 phases are divided in * {@link #startTransaction(RaftClientRequest)}, in the first phase the user * data is written directly into the state machine via - * {@link #writeStateMachineData} and in the second phase the + * {@link #write} and in the second phase the * transaction is committed via {@link #applyTransaction(TransactionContext)} * * For the requests with no stateMachine data, the transaction is directly @@ -115,7 +115,7 @@ * the write chunk operation will fail otherwise as the container still hasn't * been created. Hence the create container operation has been split in the * {@link #startTransaction(RaftClientRequest)}, this will help in synchronizing - * the calls in {@link #writeStateMachineData} + * the calls in {@link #write} * * 2) Write chunk commit operation is executed after write chunk state machine * operation. This will ensure that commit operation is sync'd with the state @@ -517,7 +517,7 @@ private ExecutorService getChunkExecutor(WriteChunkRequestProto req) { * and also with applyTransaction. */ @Override - public CompletableFuture writeStateMachineData(LogEntryProto entry) { + public CompletableFuture write(LogEntryProto entry) { try { metrics.incNumWriteStateMachineOps(); long writeStateMachineStartTime = Time.monotonicNowNanos(); @@ -618,7 +618,7 @@ private ByteString readStateMachineData( * @return Combined future of all writeChunks till the log index given. */ @Override - public CompletableFuture flushStateMachineData(long index) { + public CompletableFuture flush(long index) { List> futureList = writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) .map(Map.Entry::getValue).collect(Collectors.toList()); @@ -632,7 +632,7 @@ public CompletableFuture flushStateMachineData(long index) { * evicted. */ @Override - public CompletableFuture readStateMachineData( + public CompletableFuture read( LogEntryProto entry) { StateMachineLogEntryProto smLogEntryProto = entry.getStateMachineLogEntry(); metrics.incNumReadStateMachineOps(); @@ -833,7 +833,7 @@ private static CompletableFuture completeExceptionally(Exception e) { } @Override - public CompletableFuture truncateStateMachineData(long index) { + public CompletableFuture truncate(long index) { stateMachineDataCache.removeIf(k -> k >= index); return CompletableFuture.completedFuture(null); } diff --git a/pom.xml b/pom.xml index 40f2f58634a6..bcdaf6972325 100644 --- a/pom.xml +++ b/pom.xml @@ -79,7 +79,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${ozone.version} - 0.6.0-6ab75ae-SNAPSHOT + 1.0.0 0.4.0 From 3c29ad7c1a0346cae4d32c39230f0c3480f19930 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 21 Jul 2020 14:07:36 +0200 Subject: [PATCH 045/165] HDDS-3813. Upgrade Ratis third-party, too (#1229) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index bcdaf6972325..6a9f864f5986 100644 --- a/pom.xml +++ b/pom.xml @@ -82,7 +82,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.0.0 - 0.4.0 + 0.5.0 apache.snapshots.https Apache Development Snapshot Repository From 7ac6724ecbf843f406c1f211f9407f046ca3ba0e Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 21 Jul 2020 20:22:37 +0530 Subject: [PATCH 046/165] HDDS-3986. 
Frequent failure in TestCommitWatcher#testReleaseBuffersOnException (#1220) --- .../ozone/client/rpc/TestCommitWatcher.java | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java index c3eca6af0031..007fbdc930e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java @@ -46,6 +46,7 @@ import org.apache.ratis.protocol.AlreadyClosedException; import org.apache.ratis.protocol.NotReplicatedException; import org.apache.ratis.protocol.RaftRetryFailureException; +import org.apache.ratis.protocol.TimeoutIOException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -121,8 +122,8 @@ public void init() throws Exception { RatisClientConfig ratisClientConfig = conf.getObject(RatisClientConfig.class); - ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); - ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(30)); + ratisClientConfig.setWriteRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(10)); + ratisClientConfig.setWatchRequestTimeoutInMs(TimeUnit.SECONDS.toMillis(10)); conf.setFromObject(ratisClientConfig); conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE"); @@ -312,14 +313,17 @@ public void testReleaseBuffersOnException() throws Exception { watcher.watchForCommit(replies.get(1).getLogIndex() + 100); Assert.fail("Expected exception not thrown"); } catch(IOException ioe) { - // with retry count set to lower limit and a lower watch request + // with retry count set to noRetry and a lower watch request // timeout, watch request will eventually - // fail with RaftRetryFailure exception from ratis client or the client + // fail with TimeoutIOException from ratis client or the client // can itself get AlreadyClosedException from the Ratis Server + // and the write may fail with RaftRetryFailureException Throwable t = HddsClientUtils.checkForException(ioe); - Assert.assertTrue(t instanceof RaftRetryFailureException || - t instanceof AlreadyClosedException || - t instanceof NotReplicatedException); + Assert.assertTrue("Unexpected exception: " + t.getClass(), + t instanceof RaftRetryFailureException || + t instanceof TimeoutIOException || + t instanceof AlreadyClosedException || + t instanceof NotReplicatedException); } if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1) .getLogIndex()) { From 5ca41be59176c72970e7a5a2933e38a51bfef8f6 Mon Sep 17 00:00:00 2001 From: HuangTao Date: Wed, 22 Jul 2020 06:46:25 +0800 Subject: [PATCH 047/165] HDDS-3989. 
Display revision and build date of DN in recon UI (#1226) --- .../hadoop/hdds/protocol/DatanodeDetails.java | 88 ++++++++++++++++++- .../hadoop/ozone/HddsDatanodeService.java | 3 + .../src/main/proto/hdds.proto | 2 + .../src/main/proto/proto.lock | 12 ++- .../hadoop/ozone/recon/api/NodeEndpoint.java | 2 + .../recon/api/types/DatanodeMetadata.java | 28 ++++++ .../webapps/recon/ozone-recon-web/api/db.json | 48 +++++++--- .../src/views/datanodes/datanodes.tsx | 26 +++++- 8 files changed, 193 insertions(+), 16 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 96f19a6b87a8..a3db139b96ce 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -51,6 +51,8 @@ public class DatanodeDetails extends NodeImpl implements private String certSerialId; private String version; private long setupTime; + private String revision; + private String buildDate; /** * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used @@ -63,11 +65,13 @@ public class DatanodeDetails extends NodeImpl implements * @param certSerialId serial id from SCM issued certificate. * @param version DataNode's version * @param setupTime the setup time of DataNode + * @param revision DataNodes's revision + * @param buildDate DataNodes's build timestamp */ @SuppressWarnings("parameternumber") private DatanodeDetails(UUID uuid, String ipAddress, String hostName, String networkLocation, List ports, String certSerialId, - String version, long setupTime) { + String version, long setupTime, String revision, String buildDate) { super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); this.uuid = uuid; this.ipAddress = ipAddress; @@ -76,6 +80,8 @@ private DatanodeDetails(UUID uuid, String ipAddress, String hostName, this.certSerialId = certSerialId; this.version = version; this.setupTime = setupTime; + this.revision = revision; + this.buildDate = buildDate; } public DatanodeDetails(DatanodeDetails datanodeDetails) { @@ -89,6 +95,8 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.setParent(datanodeDetails.getParent()); this.version = datanodeDetails.version; this.setupTime = datanodeDetails.setupTime; + this.revision = datanodeDetails.revision; + this.buildDate = datanodeDetails.buildDate; } /** @@ -223,6 +231,12 @@ public static DatanodeDetails getFromProtoBuf( if (datanodeDetailsProto.hasSetupTime()) { builder.setSetupTime(datanodeDetailsProto.getSetupTime()); } + if (datanodeDetailsProto.hasRevision()) { + builder.setRevision(datanodeDetailsProto.getRevision()); + } + if (datanodeDetailsProto.hasBuildDate()) { + builder.setBuildDate(datanodeDetailsProto.getBuildDate()); + } return builder.build(); } @@ -271,6 +285,13 @@ public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { builder.setSetupTime(getSetupTime()); + if (!Strings.isNullOrEmpty(getRevision())) { + builder.setRevision(getRevision()); + } + if (!Strings.isNullOrEmpty(getBuildDate())) { + builder.setBuildDate(getBuildDate()); + } + return builder.build(); } @@ -325,6 +346,8 @@ public static final class Builder { private String certSerialId; private String version; private long setupTime; + private String revision; + private String buildDate; /** * Default private constructor. 
To create Builder instance use @@ -425,6 +448,30 @@ public Builder setVersion(String ver) { return this; } + /** + * Sets the DataNode revision. + * + * @param rev the revision of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setRevision(String rev) { + this.revision = rev; + return this; + } + + /** + * Sets the DataNode build date. + * + * @param date the build date of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setBuildDate(String date) { + this.buildDate = date; + return this; + } + /** * Sets the DataNode setup time. * @@ -448,7 +495,8 @@ public DatanodeDetails build() { networkLocation = NetConstants.DEFAULT_RACK; } DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName, - networkLocation, ports, certSerialId, version, setupTime); + networkLocation, ports, certSerialId, + version, setupTime, revision, buildDate); if (networkName != null) { dn.setNetworkName(networkName); } @@ -590,4 +638,40 @@ public long getSetupTime() { public void setSetupTime(long setupTime) { this.setupTime = setupTime; } + + /** + * Returns the DataNode revision. + * + * @return DataNode revision + */ + public String getRevision() { + return revision; + } + + /** + * Set DataNode revision. + * + * @param rev DataNode revision + */ + public void setRevision(String rev) { + this.revision = rev; + } + + /** + * Returns the DataNode build date. + * + * @return DataNode build date + */ + public String getBuildDate() { + return buildDate; + } + + /** + * Set DataNode build date. + * + * @param date DataNode build date + */ + public void setBuildDate(String date) { + this.buildDate = date; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index a4ff67ed86fc..ac6fba45066d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -208,6 +208,9 @@ public void start() { datanodeDetails.setVersion( HddsVersionInfo.HDDS_VERSION_INFO.getVersion()); datanodeDetails.setSetupTime(Time.now()); + datanodeDetails.setRevision( + HddsVersionInfo.HDDS_VERSION_INFO.getRevision()); + datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate()); TracingUtil.initTracing( "HddsDatanodeService." + datanodeDetails.getUuidString() .substring(0, 8), conf); diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 243e8ecaced7..3eeb3a321927 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -45,6 +45,8 @@ message DatanodeDetailsProto { optional string networkLocation = 7; // Network topology location optional string version = 8; // Datanode version optional int64 setupTime = 9; + optional string revision = 10; + optional string buildDate = 11; // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode. 
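    // Both revision and buildDate are optional, so a DatanodeDetailsProto sent
    // by an older datanode that never sets them still parses;
    // DatanodeDetails#getFromProtoBuf copies them only when
    // hasRevision()/hasBuildDate() returns true.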
} diff --git a/hadoop-hdds/interface-client/src/main/proto/proto.lock b/hadoop-hdds/interface-client/src/main/proto/proto.lock index b27896c655e3..bc8828787d1c 100644 --- a/hadoop-hdds/interface-client/src/main/proto/proto.lock +++ b/hadoop-hdds/interface-client/src/main/proto/proto.lock @@ -1540,6 +1540,16 @@ "name": "setupTime", "type": "int64" }, + { + "id": 10, + "name": "revision", + "type": "string" + }, + { + "id": 11, + "name": "buildDate", + "type": "string" + }, { "id": 100, "name": "uuid128", @@ -1935,4 +1945,4 @@ } } ] -} +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 42832debfee1..bd022c4f1da2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -123,6 +123,8 @@ public Response getDatanodes() { .withUUid(datanode.getUuidString()) .withVersion(datanode.getVersion()) .withSetupTime(datanode.getSetupTime()) + .withRevision(datanode.getRevision()) + .withBuildDate(datanode.getBuildDate()) .build()); }); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 542654e96e2e..f75ea3233f37 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -61,6 +61,12 @@ public final class DatanodeMetadata { @XmlElement(name = "setupTime") private long setupTime; + @XmlElement(name = "revision") + private String revision; + + @XmlElement(name = "buildDate") + private String buildDate; + private DatanodeMetadata(Builder builder) { this.hostname = builder.hostname; this.uuid = builder.uuid; @@ -72,6 +78,8 @@ private DatanodeMetadata(Builder builder) { this.leaderCount = builder.leaderCount; this.version = builder.version; this.setupTime = builder.setupTime; + this.revision = builder.revision; + this.buildDate = builder.buildDate; } public String getHostname() { @@ -114,6 +122,14 @@ public long getSetupTime() { return setupTime; } + public String getRevision() { + return revision; + } + + public String getBuildDate() { + return buildDate; + } + /** * Returns new builder class that builds a DatanodeMetadata. * @@ -138,6 +154,8 @@ public static final class Builder { private int leaderCount; private String version; private long setupTime; + private String revision; + private String buildDate; public Builder() { this.containers = 0; @@ -195,6 +213,16 @@ public Builder withSetupTime(long setupTime) { return this; } + public Builder withRevision(String revision) { + this.revision = revision; + return this; + } + + public Builder withBuildDate(String buildDate) { + this.buildDate = buildDate; + return this; + } + /** * Constructs DatanodeMetadata. 
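   * Typical call chain (sketch, mirroring the NodeEndpoint change in this
   * patch; "dn" stands for the reported DatanodeDetails):
   *   builder.withUUid(dn.getUuidString()).withVersion(dn.getVersion())
   *       .withSetupTime(dn.getSetupTime()).withRevision(dn.getRevision())
   *       .withBuildDate(dn.getBuildDate()).build()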
* diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index d8d6eac55a9f..8d61333b087b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -43,7 +43,9 @@ "containers": 80, "leaderCount": 2, "version": "0.6.0-SNAPSHOT", - "setupTime": 1574728775759 + "setupTime": 1574728775759, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost2.storage.enterprise.com", @@ -72,7 +74,9 @@ "containers": 8192, "leaderCount": 1, "version": "0.6.0-SNAPSHOT", - "setupTime": 1574724805059 + "setupTime": 1574724805059, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost3.storage.enterprise.com", @@ -107,7 +111,9 @@ "containers": 43, "leaderCount": 2, "version": "0.6.0-SNAPSHOT", - "setupTime": 1343544679543 + "setupTime": 1343544679543, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost4.storage.enterprise.com", @@ -123,7 +129,9 @@ "containers": 0, "leaderCount": 0, "version": "0.6.0-SNAPSHOT", - "setupTime": 1074724802059 + "setupTime": 1074724802059, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost5.storage.enterprise.com", @@ -152,7 +160,9 @@ "containers": 643, "leaderCount": 2, "version": "0.6.0-SNAPSHOT", - "setupTime": 1574724816029 + "setupTime": 1574724816029, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost6.storage.enterprise.com", @@ -181,7 +191,9 @@ "containers": 5, "leaderCount": 1, "version": "0.6.0-SNAPSHOT", - "setupTime": 1574724802059 + "setupTime": 1574724802059, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost7.storage.enterprise.com", @@ -216,7 +228,9 @@ "containers": 64, "leaderCount": 2, "version": "0.6.0-SNAPSHOT", - "setupTime": 1574724676009 + "setupTime": 1574724676009, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost8.storage.enterprise.com", @@ -245,7 +259,9 @@ "containers": 21, "leaderCount": 1, "version": "0.6.0-SNAPSHOT", - "setupTime": 1574724276050 + "setupTime": 1574724276050, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost9.storage.enterprise.com", @@ -274,7 +290,9 @@ "containers": 897, "leaderCount": 1, "version": "0.6.0-SNAPSHOT", - "setupTime": 1574724573011 + "setupTime": 1574724573011, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost10.storage.enterprise.com", @@ -309,7 +327,9 @@ "containers": 6754, "leaderCount": 2, "version": "0.6.0-SNAPSHOT", - "setupTime": 1574723756059 + "setupTime": 1574723756059, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost11.storage.enterprise.com", @@ -338,7 +358,9 @@ "containers": 78, "leaderCount": 2, "version": "0.6.0-SNAPSHOT", - "setupTime": 1474724705783 + "setupTime": 1474724705783, + "revision": "ace991111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T10:45Z" }, { "hostname": 
"localhost12.storage.enterprise.com", @@ -367,7 +389,9 @@ "containers": 543, "leaderCount": 1, "version": "0.6.0-SNAPSHOT", - "setupTime": 1574724706232 + "setupTime": 1574724706232, + "revision": "ace991111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T10:45Z" } ] }, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index ceb7b51ac05c..856ab65ecabd 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -43,6 +43,8 @@ interface IDatanodeResponse { uuid: string; version: string; setupTime: number; + revision: string; + buildDate: string; } interface IDatanodesResponse { @@ -63,6 +65,8 @@ interface IDatanode { uuid: string; version: string; setupTime: number; + revision: string; + buildDate: string; } interface IPipeline { @@ -208,6 +212,24 @@ const COLUMNS = [ render: (uptime: number) => { return uptime > 0 ? moment(uptime).format('ll LTS') : 'NA'; } + }, + { + title: 'Revision', + dataIndex: 'revision', + key: 'revision', + isVisible: false, + isSearchable: true, + sorter: (a: IDatanode, b: IDatanode) => a.revision.localeCompare(b.revision), + defaultSortOrder: 'ascend' as const + }, + { + title: 'BuildDate', + dataIndex: 'buildDate', + key: 'buildDate', + isVisible: false, + isSearchable: true, + sorter: (a: IDatanode, b: IDatanode) => a.buildDate.localeCompare(b.buildDate), + defaultSortOrder: 'ascend' as const } ]; @@ -265,7 +287,9 @@ export class Datanodes extends React.Component, IDatanode containers: datanode.containers, leaderCount: datanode.leaderCount, version: datanode.version, - setupTime: datanode.setupTime + setupTime: datanode.setupTime, + revision: datanode.revision, + buildDate: datanode.buildDate }; }); const selectedColumns: IOption[] = COLUMNS.filter(column => column.isVisible).map(column => ({ From 3240c320797fc897c1ad262c3dc44b3429bb2e39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Wed, 22 Jul 2020 06:11:57 +0200 Subject: [PATCH 048/165] HDDS-3992. 
Remove project skeleton of in-place upgrade feature (#1225) --- hadoop-ozone/dist/pom.xml | 7 +-- hadoop-ozone/dist/src/shell/ozone/ozone | 5 -- hadoop-ozone/pom.xml | 6 -- hadoop-ozone/upgrade/pom.xml | 57 ------------------- .../apache/hadoop/ozone/upgrade/Balance.java | 38 ------------- .../apache/hadoop/ozone/upgrade/Execute.java | 37 ------------ .../hadoop/ozone/upgrade/InPlaceUpgrade.java | 45 --------------- .../org/apache/hadoop/ozone/upgrade/Plan.java | 38 ------------- .../hadoop/ozone/upgrade/package-info.java | 23 -------- 9 files changed, 1 insertion(+), 255 deletions(-) delete mode 100644 hadoop-ozone/upgrade/pom.xml delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index a766c0a18023..343bc9b0ccf9 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -50,8 +50,7 @@ *.classpath hadoop-hdds-server-scm,hadoop-ozone-common,hadoop-ozone-csi,hadoop-ozone-datanode,hadoop-ozone-insight, - hadoop-ozone-ozone-manager,hadoop-ozone-recon,hadoop-ozone-s3gateway,hadoop-ozone-tools, - hadoop-ozone-upgrade + hadoop-ozone-ozone-manager,hadoop-ozone-recon,hadoop-ozone-s3gateway,hadoop-ozone-tools @@ -219,10 +218,6 @@ org.apache.hadoop hadoop-hdds-docs - - org.apache.hadoop - hadoop-ozone-upgrade - org.apache.hadoop hadoop-ozone-insight diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 42e8dcaf28e1..e957f7f39b43 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -54,7 +54,6 @@ function hadoop_usage hadoop_add_subcommand "insight" client "tool to get runtime operation information" hadoop_add_subcommand "version" client "print the version" hadoop_add_subcommand "dtutil" client "operations related to delegation tokens" - hadoop_add_subcommand "upgrade" client "HDFS to Ozone in-place upgrade tool" hadoop_add_subcommand "admin" client "Ozone admin tool" hadoop_add_subcommand "debug" client "Ozone debug tool" @@ -214,10 +213,6 @@ function ozonecmd_case HADOOP_CLASSNAME=org.apache.hadoop.security.token.DtUtilShell OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" ;; - upgrade) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.upgrade.InPlaceUpgrade - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-upgrade" - ;; admin) HADOOP_CLASSNAME=org.apache.hadoop.ozone.admin.OzoneAdmin OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 1b28ea45790e..8d7661871003 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -41,7 +41,6 @@ datanode s3gateway dist - upgrade csi fault-injection-test insight @@ -211,11 +210,6 @@ hadoop-ozone-recon ${ozone.version} - - org.apache.hadoop - hadoop-ozone-upgrade - ${ozone.version} - org.apache.hadoop hadoop-hdds-container-service diff --git a/hadoop-ozone/upgrade/pom.xml b/hadoop-ozone/upgrade/pom.xml deleted file mode 100644 index ed80d9cb0a88..000000000000 --- a/hadoop-ozone/upgrade/pom.xml +++ /dev/null @@ -1,57 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.6.0-SNAPSHOT - - 
hadoop-ozone-upgrade - 0.6.0-SNAPSHOT - Apache Hadoop Ozone In-Place Upgrade - Apache Hadoop Ozone In-Place Upgrade - jar - - - - org.apache.hadoop - hadoop-hdds-test-utils - test - - - org.apache.hadoop - hadoop-hdds-common - - - com.github.spotbugs - spotbugs - provided - - - junit - junit - test - - - org.mockito - mockito-core - test - - - diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java deleted file mode 100644 index 149273862a75..000000000000 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.upgrade; - -import java.util.concurrent.Callable; - -import picocli.CommandLine.Command; - -/** - * Command to move blocks between HDFS datanodes. - */ -@Command(name = "balance", - description = "Move the HDFS blocks for a better distribution " - + "usage.") -public class Balance implements Callable { - - @Override - public Void call() throws Exception { - System.err.println("[In-Place upgrade : balance] is not yet supported."); - return null; - } - -} \ No newline at end of file diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java deleted file mode 100644 index 0837200c1fac..000000000000 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.upgrade; - -import java.util.concurrent.Callable; - -import picocli.CommandLine.Command; - -/** - * Execute Ozone specific HDFS ballanced.. - */ -@Command(name = "execute", - description = "Start/restart upgrade from HDFS to Ozone cluster.") -public class Execute implements Callable { - - @Override - public Void call() throws Exception { - System.err.println("In-Place upgrade : execute] is not yet supported."); - return null; - } - -} \ No newline at end of file diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java deleted file mode 100644 index b307f44d02e9..000000000000 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.upgrade; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; - -import picocli.CommandLine.Command; - -/** - * Command line interface for the In-Place upgrade utility. - *

- * In-Place upgrade can convert HDFS cluster data to Ozone data without - * (or minimal) data moving. - */ -@Command(name = "ozone upgrade", - description = "Convert raw HDFS data to Ozone data without data movement.", - subcommands = { - Plan.class, - Balance.class, - Execute.class, - }, - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class InPlaceUpgrade extends GenericCli { - - public static void main(String[] args) { - new InPlaceUpgrade().run(args); - } -} diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java deleted file mode 100644 index efd6092a0b16..000000000000 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.upgrade; - -import java.util.concurrent.Callable; - -import picocli.CommandLine.Command; - -/** - * Command to calculate statistics and estimate the upgrade. - */ -@Command(name = "plan", - description = "Plan existing HDFS block distribution and give." - + "estimation.") -public class Plan implements Callable { - - @Override - public Void call() throws Exception { - System.err.println("[In-Place upgrade : plan] is not yet supported."); - return null; - } - -} diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java deleted file mode 100644 index b14768329cb9..000000000000 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.upgrade; - -/** - * In-Place upgrade utility to upgrade HDDS to Ozone cluster.. - */ \ No newline at end of file From 652d94853f7fdebb1008b3194ffc7849f6858334 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Wed, 22 Jul 2020 14:11:08 +0800 Subject: [PATCH 049/165] =?UTF-8?q?HDDS-3892.=20Datanode=20initialization?= =?UTF-8?q?=20is=20too=20slow=20when=20there=20are=20thousan=E2=80=A6=20(#?= =?UTF-8?q?1147)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../apache/hadoop/ozone/OzoneConfigKeys.java | 3 + .../src/main/resources/ozone-default.xml | 10 +++ .../common/utils/ContainerCache.java | 71 ++++++++++++++----- .../container/ozoneimpl/ContainerReader.java | 2 + .../container/ozoneimpl/OzoneContainer.java | 3 + .../container/common/TestContainerCache.java | 52 ++++++++++++++ .../ozoneimpl/TestContainerReader.java | 69 +++++++++++++++++- 7 files changed, 189 insertions(+), 21 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index dc8b231fbbdc..d89fef989a93 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -95,6 +95,9 @@ public final class OzoneConfigKeys { public static final String OZONE_CONTAINER_CACHE_SIZE = "ozone.container.cache.size"; public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024; + public static final String OZONE_CONTAINER_CACHE_LOCK_STRIPES = + "ozone.container.cache.lock.stripes"; + public static final int OZONE_CONTAINER_CACHE_LOCK_STRIPES_DEFAULT = 1024; public static final String OZONE_SCM_BLOCK_SIZE = "ozone.scm.block.size"; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index b474ac38c2f1..a07807b1998e 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -37,6 +37,16 @@ size of that cache. + + ozone.container.cache.lock.stripes + 1024 + PERFORMANCE, CONTAINER, STORAGE + Container DB open is an exclusive operation. We use a stripe + lock to guarantee that different threads can open different container DBs + concurrently, while for one container DB, only one thread can open it at + the same time. This setting controls the lock stripes. 
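As an illustrative aside, the striping that ozone.container.cache.lock.stripes controls can be sketched roughly as below. This is a minimal sketch of the locking idea only, not the actual ContainerCache code; the class and method names are invented for the example.

```java
import java.util.concurrent.locks.Lock;

import com.google.common.util.concurrent.Striped;

/** Sketch only: per-path striped locking for container DB opens. */
public final class StripedDbOpenSketch {

  // 1024 stripes mirrors the default above. The same DB path always maps to
  // the same stripe, so two opens of one DB serialize, while opens of
  // different DBs usually land on different stripes and proceed in parallel.
  private final Striped<Lock> dbLocks = Striped.lazyWeakLock(1024);

  public void openContainerDb(String containerDBPath) {
    Lock lock = dbLocks.get(containerDBPath);
    lock.lock();
    try {
      // Re-check a shared cache here and open the RocksDB handle for
      // containerDBPath only if no other thread opened it in the meantime.
    } finally {
      lock.unlock();
    }
  }
}
```

Compared with taking one global lock for the whole open, this keeps a slow open of one container DB from blocking opens of unrelated containers, which is the slow-initialization problem this patch targets.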
+ + dfs.container.ipc 9859 diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index d2d29018b32a..f4d8f43f7065 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -23,6 +23,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import com.google.common.util.concurrent.Striped; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.MetadataStore; import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; @@ -43,12 +44,14 @@ public final class ContainerCache extends LRUMap { private final Lock lock = new ReentrantLock(); private static ContainerCache cache; private static final float LOAD_FACTOR = 0.75f; + private final Striped rocksDBLock; /** * Constructs a cache that holds DBHandle references. */ - private ContainerCache(int maxSize, float loadFactor, boolean + private ContainerCache(int maxSize, int stripes, float loadFactor, boolean scanUntilRemovable) { super(maxSize, loadFactor, scanUntilRemovable); + rocksDBLock = Striped.lazyWeakLock(stripes); } /** @@ -63,7 +66,10 @@ public synchronized static ContainerCache getInstance( if (cache == null) { int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, OzoneConfigKeys.OZONE_CONTAINER_CACHE_DEFAULT); - cache = new ContainerCache(cacheSize, LOAD_FACTOR, true); + int stripes = conf.getInt( + OzoneConfigKeys.OZONE_CONTAINER_CACHE_LOCK_STRIPES, + OzoneConfigKeys.OZONE_CONTAINER_CACHE_LOCK_STRIPES_DEFAULT); + cache = new ContainerCache(cacheSize, stripes, LOAD_FACTOR, true); } return cache; } @@ -117,30 +123,57 @@ public ReferenceCountedDB getDB(long containerID, String containerDBType, throws IOException { Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); - lock.lock(); + ReferenceCountedDB db; + Lock containerLock = rocksDBLock.get(containerDBPath); + containerLock.lock(); try { - ReferenceCountedDB db = (ReferenceCountedDB) this.get(containerDBPath); + lock.lock(); + try { + db = (ReferenceCountedDB) this.get(containerDBPath); + if (db != null) { + db.incrementReference(); + return db; + } + } finally { + lock.unlock(); + } - if (db == null) { + try { MetadataStore metadataStore = MetadataStoreBuilder.newBuilder() - .setDbFile(new File(containerDBPath)) - .setCreateIfMissing(false) - .setConf(conf) - .setDBType(containerDBType) - .build(); + .setDbFile(new File(containerDBPath)) + .setCreateIfMissing(false) + .setConf(conf) + .setDBType(containerDBType) + .build(); db = new ReferenceCountedDB(metadataStore, containerDBPath); - this.put(containerDBPath, db); + } catch (Exception e) { + LOG.error("Error opening DB. 
Container:{} ContainerPath:{}", + containerID, containerDBPath, e); + throw e; + } + + lock.lock(); + try { + ReferenceCountedDB currentDB = + (ReferenceCountedDB) this.get(containerDBPath); + if (currentDB != null) { + // increment the reference before returning the object + currentDB.incrementReference(); + // clean the db created in previous step + db.cleanup(); + return currentDB; + } else { + this.put(containerDBPath, db); + // increment the reference before returning the object + db.incrementReference(); + return db; + } + } finally { + lock.unlock(); } - // increment the reference before returning the object - db.incrementReference(); - return db; - } catch (Exception e) { - LOG.error("Error opening DB. Container:{} ContainerPath:{}", - containerID, containerDBPath, e); - throw e; } finally { - lock.unlock(); + containerLock.unlock(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 1b9b3d690724..fa63cf1b862e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -120,6 +120,7 @@ public boolean accept(File pathname) { return; } + LOG.info("Start to verify containers on volume {}", hddsVolumeRootDir); for (File scmLoc : scmDir) { File currentDir = new File(scmLoc, Storage.STORAGE_DIR_CURRENT); File[] containerTopDirs = currentDir.listFiles(); @@ -144,6 +145,7 @@ public boolean accept(File pathname) { } } } + LOG.info("Finish verifying containers on volume {}", hddsVolumeRootDir); } private void verifyContainerFile(long containerID, File containerFile) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 62fd5a4e85e0..abe0382d1fdd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -163,6 +163,7 @@ private void buildContainerSet() { Iterator volumeSetIterator = volumeSet.getVolumesList() .iterator(); ArrayList volumeThreads = new ArrayList(); + long startTime = System.currentTimeMillis(); //TODO: diskchecker should be run before this, to see how disks are. 
// And also handle disk failure tolerance need to be added @@ -183,6 +184,8 @@ private void buildContainerSet() { Thread.currentThread().interrupt(); } + LOG.info("Build ContainerSet costs {}s", + (System.currentTimeMillis() - startTime) / 1000); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java index 947a087cb82e..2e389903769f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java @@ -31,6 +31,13 @@ import org.junit.rules.ExpectedException; import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; /** @@ -63,6 +70,8 @@ public void testContainerCacheEviction() throws Exception { conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); ContainerCache cache = ContainerCache.getInstance(conf); + cache.clear(); + Assert.assertEquals(0, cache.size()); File containerDir1 = new File(root, "cont1"); File containerDir2 = new File(root, "cont2"); File containerDir3 = new File(root, "cont3"); @@ -123,4 +132,47 @@ public void testContainerCacheEviction() throws Exception { thrown.expect(IllegalArgumentException.class); db5.close(); } + + @Test + public void testConcurrentDBGet() throws Exception { + File root = new File(testRoot); + root.mkdirs(); + root.deleteOnExit(); + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); + ContainerCache cache = ContainerCache.getInstance(conf); + cache.clear(); + Assert.assertEquals(0, cache.size()); + File containerDir = new File(root, "cont1"); + createContainerDB(conf, containerDir); + ExecutorService executorService = Executors.newFixedThreadPool(2); + Runnable task = () -> { + try { + ReferenceCountedDB db1 = cache.getDB(1, "RocksDB", + containerDir.getPath(), conf); + Assert.assertNotNull(db1); + } catch (IOException e) { + Assert.fail("Should get the DB instance"); + } + }; + List futureList = new ArrayList<>(); + futureList.add(executorService.submit(task)); + futureList.add(executorService.submit(task)); + for (Future future: futureList) { + try { + future.get(); + } catch (InterruptedException| ExecutionException e) { + Assert.fail("Should get the DB instance"); + } + } + + ReferenceCountedDB db = cache.getDB(1, "RocksDB", + containerDir.getPath(), conf); + db.close(); + db.close(); + db.close(); + Assert.assertEquals(1, cache.size()); + db.cleanup(); + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index e1c5f33ff4af..15c023641321 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -22,15 +22,16 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.StringUtils; import 
org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; @@ -68,7 +69,7 @@ public class TestContainerReader { private MutableVolumeSet volumeSet; private HddsVolume hddsVolume; private ContainerSet containerSet; - private ConfigurationSource conf; + private OzoneConfiguration conf; private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; @@ -219,4 +220,68 @@ public void testContainerReader() throws Exception { keyValueContainerData.getNumPendingDeletionBlocks()); } } + + @Test + public void testMultipleContainerReader() throws Exception { + final int volumeNum = 10; + StringBuffer datanodeDirs = new StringBuffer(); + File[] volumeDirs = new File[volumeNum]; + for (int i = 0; i < volumeNum; i++) { + volumeDirs[i] = tempDir.newFolder(); + datanodeDirs = datanodeDirs.append(volumeDirs[i]).append(","); + } + conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, + datanodeDirs.toString()); + MutableVolumeSet volumeSets = + new MutableVolumeSet(datanodeId.toString(), conf); + ContainerCache cache = ContainerCache.getInstance(conf); + cache.clear(); + + RoundRobinVolumeChoosingPolicy policy = + new RoundRobinVolumeChoosingPolicy(); + + final int containerCount = 100; + blockCount = containerCount; + for (int i = 0; i < containerCount; i++) { + KeyValueContainerData keyValueContainerData = + new KeyValueContainerData(i, ChunkLayOutVersion.FILE_PER_BLOCK, + (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), + datanodeId.toString()); + + KeyValueContainer keyValueContainer = + new KeyValueContainer(keyValueContainerData, + conf); + keyValueContainer.create(volumeSets, policy, scmId); + + List blkNames; + if (i % 2 == 0) { + blkNames = addBlocks(keyValueContainer, true); + markBlocksForDelete(keyValueContainer, true, blkNames, i); + } else { + blkNames = addBlocks(keyValueContainer, false); + markBlocksForDelete(keyValueContainer, false, blkNames, i); + } + } + + List hddsVolumes = volumeSets.getVolumesList(); + ContainerReader[] containerReaders = new ContainerReader[volumeNum]; + Thread[] threads = new Thread[volumeNum]; + for (int i = 0; i < volumeNum; i++) { + containerReaders[i] = new ContainerReader(volumeSets, + hddsVolumes.get(i), containerSet, conf); + threads[i] = new Thread(containerReaders[i]); + } + long startTime = System.currentTimeMillis(); + for (int i = 0; i < volumeNum; i++) { + threads[i].start(); + } + for (int i = 0; i < volumeNum; i++) { + threads[i].join(); + } + System.out.println("Open " + volumeNum + " Volume with " + containerCount + + " costs " + (System.currentTimeMillis() - startTime) / 1000 + "s"); + Assert.assertEquals(containerCount, + containerSet.getContainerMap().entrySet().size()); + 
Assert.assertEquals(containerCount, cache.size()); + } } From 3f380b8b3c88d4e5af726a483b1683cd766effb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=20M=C3=A1rton?= Date: Wed, 22 Jul 2020 11:27:45 +0200 Subject: [PATCH 050/165] HDDS-3989. Addendum: revert proto.lock file (#1226) --- .../interface-client/src/main/proto/proto.lock | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/hadoop-hdds/interface-client/src/main/proto/proto.lock b/hadoop-hdds/interface-client/src/main/proto/proto.lock index bc8828787d1c..b27896c655e3 100644 --- a/hadoop-hdds/interface-client/src/main/proto/proto.lock +++ b/hadoop-hdds/interface-client/src/main/proto/proto.lock @@ -1540,16 +1540,6 @@ "name": "setupTime", "type": "int64" }, - { - "id": 10, - "name": "revision", - "type": "string" - }, - { - "id": 11, - "name": "buildDate", - "type": "string" - }, { "id": 100, "name": "uuid128", @@ -1945,4 +1935,4 @@ } } ] -} \ No newline at end of file +} From ca3817e43960b9623143f25248a19acc86725074 Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Wed, 22 Jul 2020 19:47:29 +0800 Subject: [PATCH 051/165] HDDS-3980. Correct the toString of RangeHeader (#1213) --- .../java/org/apache/hadoop/ozone/s3/util/RangeHeader.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java index 5f5c827433d2..a57a0f688e63 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java @@ -82,8 +82,8 @@ public boolean isInValidRange() { public String toString() { - return "startOffset - [" + startOffset + "]" + "endOffset - [" + - endOffset + "]" + " readFull - [ " + readFull + "]" + " invalidRange " + - "- [ " + inValidRange + "]"; + return "startOffset - [" + startOffset + "]" + ", endOffset - [" + + endOffset + "]" + ", readFull - [" + readFull + "]" + + ", invalidRange - [" + inValidRange + "]"; } } From 04558746dbeafaef9a393073d3c0d990a9b7b557 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Wed, 22 Jul 2020 14:27:33 +0200 Subject: [PATCH 052/165] HDDS-3991. 
Ignore protobuf lock files (#1224) --- .../interface-admin/src/main/{proto => resources}/proto.lock | 0 .../interface-client/src/main/{proto => resources}/proto.lock | 0 .../interface-server/src/main/{proto => resources}/proto.lock | 0 hadoop-hdds/pom.xml | 2 +- hadoop-ozone/csi/src/main/{proto => resources}/proto.lock | 0 .../interface-client/src/main/{proto => resources}/proto.lock | 0 hadoop-ozone/pom.xml | 2 +- pom.xml | 2 +- 8 files changed, 3 insertions(+), 3 deletions(-) rename hadoop-hdds/interface-admin/src/main/{proto => resources}/proto.lock (100%) rename hadoop-hdds/interface-client/src/main/{proto => resources}/proto.lock (100%) rename hadoop-hdds/interface-server/src/main/{proto => resources}/proto.lock (100%) rename hadoop-ozone/csi/src/main/{proto => resources}/proto.lock (100%) rename hadoop-ozone/interface-client/src/main/{proto => resources}/proto.lock (100%) diff --git a/hadoop-hdds/interface-admin/src/main/proto/proto.lock b/hadoop-hdds/interface-admin/src/main/resources/proto.lock similarity index 100% rename from hadoop-hdds/interface-admin/src/main/proto/proto.lock rename to hadoop-hdds/interface-admin/src/main/resources/proto.lock diff --git a/hadoop-hdds/interface-client/src/main/proto/proto.lock b/hadoop-hdds/interface-client/src/main/resources/proto.lock similarity index 100% rename from hadoop-hdds/interface-client/src/main/proto/proto.lock rename to hadoop-hdds/interface-client/src/main/resources/proto.lock diff --git a/hadoop-hdds/interface-server/src/main/proto/proto.lock b/hadoop-hdds/interface-server/src/main/resources/proto.lock similarity index 100% rename from hadoop-hdds/interface-server/src/main/proto/proto.lock rename to hadoop-hdds/interface-server/src/main/resources/proto.lock diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 435167244d7b..5162eee562f6 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -269,7 +269,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> src/test/resources/incorrect.checksum.container src/test/resources/incorrect.container src/test/resources/test.db.ini - src/main/proto/proto.lock + src/main/resources/proto.lock diff --git a/hadoop-ozone/csi/src/main/proto/proto.lock b/hadoop-ozone/csi/src/main/resources/proto.lock similarity index 100% rename from hadoop-ozone/csi/src/main/proto/proto.lock rename to hadoop-ozone/csi/src/main/resources/proto.lock diff --git a/hadoop-ozone/interface-client/src/main/proto/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock similarity index 100% rename from hadoop-ozone/interface-client/src/main/proto/proto.lock rename to hadoop-ozone/interface-client/src/main/resources/proto.lock diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 8d7661871003..8e7fbfc95a9f 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -297,7 +297,7 @@ **/pnpm-lock.yaml **/ozone-recon-web/build/** src/main/license/** - src/main/proto/proto.lock + src/main/resources/proto.lock diff --git a/pom.xml b/pom.xml index 6a9f864f5986..aa62f5d0dd97 100644 --- a/pom.xml +++ b/pom.xml @@ -1615,7 +1615,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs proto-backwards-compatibility ${proto-backwards-compatibility.version} - ${basedir}/src/main/proto/ + ${basedir}/target/classes From 01efab25bb4e26ab639aa4e5c3bb1d031bf217a2 Mon Sep 17 00:00:00 2001 From: runzhiwang <51938049+runzhiwang@users.noreply.github.com> Date: Wed, 22 Jul 2020 20:40:04 +0800 Subject: [PATCH 053/165] HDDS-3933. 
Fix memory leak because of too many Datanode State Machine Thread (#1185) --- .../statemachine/DatanodeStateMachine.java | 25 +++++- .../common/statemachine/StateContext.java | 34 ++++++- .../states/datanode/RunningDatanodeState.java | 14 ++- .../common/statemachine/TestStateContext.java | 30 +++++++ .../datanode/TestRunningDatanodeState.java | 90 +++++++++++++++++++ 5 files changed, 184 insertions(+), 9 deletions(-) create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/datanode/TestRunningDatanodeState.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 779b60a1d816..27e814b30c3d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -19,11 +19,13 @@ import java.io.Closeable; import java.io.IOException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; @@ -50,7 +52,6 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopExecutors; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -103,9 +104,10 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, this.hddsDatanodeStopService = hddsDatanodeStopService; this.conf = conf; this.datanodeDetails = datanodeDetails; - executorService = HadoopExecutors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Datanode State Machine Thread - %d").build()); + executorService = Executors.newFixedThreadPool( + getEndPointTaskThreadPoolSize(), + new ThreadFactoryBuilder() + .setNameFormat("Datanode State Machine Task Thread - %d").build()); connectionManager = new SCMConnectionManager(conf); context = new StateContext(this.conf, DatanodeStates.getInitState(), this); // OzoneContainer instance is used in a non-thread safe way by the context @@ -155,6 +157,21 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, .build(); } + private int getEndPointTaskThreadPoolSize() { + // TODO(runzhiwang): current only support one recon, if support multiple + // recon in future reconServerCount should be the real number of recon + int reconServerCount = 1; + int totalServerCount = reconServerCount; + + try { + totalServerCount += HddsUtils.getSCMAddresses(conf).size(); + } catch (Exception e) { + LOG.error("Fail to get scm addresses", e); + } + + return totalServerCount; + } + /** * * Return DatanodeDetails if set, return null otherwise. 
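The executor change above, replacing the unbounded cached pool with a fixed pool sized by the number of SCM and Recon endpoints, can be sketched as follows. This is a rough illustration rather than the Ozone source; newEndpointPool and its parameters are invented for the example, and only the thread-name format is taken from the patch.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

/** Sketch only: one task thread per endpoint instead of a cached pool. */
public final class EndpointPoolSketch {

  public static ExecutorService newEndpointPool(int scmCount, int reconCount) {
    // One worker per SCM/Recon endpoint; extra submissions wait in the queue
    // instead of spawning ever more threads, which is what leaked before.
    int poolSize = scmCount + reconCount;
    return Executors.newFixedThreadPool(poolSize,
        new ThreadFactoryBuilder()
            .setNameFormat("Datanode State Machine Task Thread - %d")
            .build());
  }
}
```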
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index f3a599d1f21e..51262c3eb5f9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; @@ -35,6 +36,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.function.Consumer; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction; @@ -51,6 +53,8 @@ import com.google.protobuf.GeneratedMessage; import static java.lang.Math.min; import org.apache.commons.collections.CollectionUtils; + +import static org.apache.hadoop.hdds.utils.HddsServerUtil.getLogWarnInterval; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmHeartbeatInterval; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,6 +78,7 @@ public class StateContext { private DatanodeStateMachine.DatanodeStates state; private boolean shutdownOnError = false; private boolean shutdownGracefully = false; + private final AtomicLong threadPoolNotAvailableCount; /** * Starting with a 2 sec heartbeat frequency which will be updated to the @@ -103,6 +108,7 @@ public StateContext(ConfigurationSource conf, pipelineActions = new HashMap<>(); lock = new ReentrantLock(); stateExecutionCount = new AtomicLong(0); + threadPoolNotAvailableCount = new AtomicLong(0); } /** @@ -393,6 +399,20 @@ public DatanodeState getTask() { } } + @VisibleForTesting + public boolean isThreadPoolAvailable(ExecutorService executor) { + if (!(executor instanceof ThreadPoolExecutor)) { + return true; + } + + ThreadPoolExecutor ex = (ThreadPoolExecutor) executor; + if (ex.getQueue().size() == 0) { + return true; + } + + return false; + } + /** * Executes the required state function. 
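The queue check that this patch adds before scheduling a new state task can be restated as a small standalone sketch, shown below. The class name is invented; the logic mirrors the isThreadPoolAvailable method added above: submit only when the executor's queue is empty, otherwise skip this round (the caller logs a throttled warning).

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;

/** Sketch only: back-pressure check before submitting another state task. */
final class PoolAvailabilitySketch {

  static boolean isThreadPoolAvailable(ExecutorService service) {
    if (!(service instanceof ThreadPoolExecutor)) {
      // Other executor types cannot be inspected; assume they can take work.
      return true;
    }
    // A non-empty queue means earlier tasks have not been picked up yet, so
    // submitting more would only let work (and memory) pile up.
    return ((ThreadPoolExecutor) service).getQueue().isEmpty();
  }
}
```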
* @@ -415,7 +435,19 @@ public void execute(ExecutorService service, long time, TimeUnit unit) if (this.isEntering()) { task.onEnter(); } - task.execute(service); + + if (isThreadPoolAvailable(service)) { + task.execute(service); + threadPoolNotAvailableCount.set(0); + } else { + if (threadPoolNotAvailableCount.get() + % getLogWarnInterval(conf) == 0) { + LOG.warn("No available thread in pool for past {} seconds.", + unit.toSeconds(time) * (threadPoolNotAvailableCount.get() + 1)); + } + threadPoolNotAvailableCount.incrementAndGet(); + } + DatanodeStateMachine.DatanodeStates newState = task.await(time, unit); if (this.state != newState) { if (LOG.isDebugEnabled()) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java index 8a9bcaff3afb..b0cfb4ce001a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.container.common.states.datanode; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; @@ -42,7 +43,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; /** * Class that implements handshake with SCM. 
@@ -152,6 +152,11 @@ public void execute(ExecutorService executor) { } } + @VisibleForTesting + public void setExecutorCompletionService(ExecutorCompletionService e) { + this.ecs = e; + } + private Callable getEndPointTask( EndpointStateMachine endpoint) { if (endpointTasks.containsKey(endpoint)) { @@ -200,10 +205,11 @@ private Callable getEndPointTask( @Override public DatanodeStateMachine.DatanodeStates await(long duration, TimeUnit timeUnit) - throws InterruptedException, ExecutionException, TimeoutException { + throws InterruptedException { int count = connectionManager.getValues().size(); int returned = 0; - long timeLeft = timeUnit.toMillis(duration); + long durationMS = timeUnit.toMillis(duration); + long timeLeft = durationMS; long startTime = Time.monotonicNow(); List> results = new LinkedList<>(); @@ -214,7 +220,7 @@ private Callable getEndPointTask( results.add(result); returned++; } - timeLeft = timeLeft - (Time.monotonicNow() - startTime); + timeLeft = durationMS - (Time.monotonicNow() - startTime); } return computeNextContainerState(results); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java index 545d6702c479..c3fd310d0840 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java @@ -28,6 +28,8 @@ import java.net.InetSocketAddress; import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executors; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -39,6 +41,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates; import org.apache.hadoop.ozone.container.common.states.DatanodeState; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.Assert; import org.junit.Test; import com.google.protobuf.GeneratedMessage; @@ -182,4 +186,30 @@ public DatanodeStates await(long time, TimeUnit timeUnit) assertEquals(DatanodeStates.SHUTDOWN, subject.getState()); } + @Test + public void testIsThreadPoolAvailable() throws Exception { + StateContext stateContext = new StateContext(null, null, null); + + int threadPoolSize = 2; + ExecutorService executorService = Executors.newFixedThreadPool( + threadPoolSize); + + CompletableFuture futureOne = new CompletableFuture<>(); + CompletableFuture futureTwo = new CompletableFuture<>(); + + // task num greater than pool size + for (int i = 0; i < threadPoolSize; i++) { + executorService.submit(() -> futureOne.get()); + } + executorService.submit(() -> futureTwo.get()); + + Assert.assertFalse(stateContext.isThreadPoolAvailable(executorService)); + + futureOne.complete("futureOne"); + LambdaTestUtils.await(1000, 100, () -> + stateContext.isThreadPoolAvailable(executorService)); + + futureTwo.complete("futureTwo"); + executorService.shutdown(); + } } \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/datanode/TestRunningDatanodeState.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/datanode/TestRunningDatanodeState.java new file mode 100644 index 000000000000..9fb4307793b0 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/datanode/TestRunningDatanodeState.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.states.datanode; + +import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.util.Time; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine.EndPointStates.SHUTDOWN; +import static org.mockito.Mockito.when; + +/** + * Test class for RunningDatanodeState. + */ +public class TestRunningDatanodeState { + @Test + public void testAwait() throws InterruptedException { + SCMConnectionManager connectionManager = + Mockito.mock(SCMConnectionManager.class); + List stateMachines = new ArrayList<>(); + when(connectionManager.getValues()).thenReturn(stateMachines); + + RunningDatanodeState state = + new RunningDatanodeState(null, connectionManager, null); + + int threadPoolSize = 2; + ExecutorService executorService = Executors.newFixedThreadPool( + threadPoolSize); + + ExecutorCompletionService ecs = + new ExecutorCompletionService<>(executorService); + state.setExecutorCompletionService(ecs); + + for (int i = 0; i < threadPoolSize; i++) { + stateMachines.add(new EndpointStateMachine(null, null, null)); + } + + CompletableFuture futureOne = + new CompletableFuture<>(); + for (int i = 0; i < threadPoolSize; i++) { + ecs.submit(() -> futureOne.get()); + } + + long startTime = Time.monotonicNow(); + state.await(500, TimeUnit.MILLISECONDS); + long endTime = Time.monotonicNow(); + Assert.assertTrue((endTime - startTime) >= 500); + + futureOne.complete(SHUTDOWN); + + CompletableFuture futureTwo = + new CompletableFuture<>(); + for (int i = 0; i < threadPoolSize; i++) { + ecs.submit(() -> futureTwo.get()); + } + futureTwo.complete(SHUTDOWN); + + startTime = Time.monotonicNow(); + state.await(500, TimeUnit.MILLISECONDS); + endTime = Time.monotonicNow(); + Assert.assertTrue((endTime - startTime) < 500); + + executorService.shutdown(); + } +} From e7497abd42ffb7a906e899e2afc53c75f1b4a4ee Mon Sep 17 00:00:00 2001 From: Xiang Zhang Date: Wed, 22 Jul 2020 20:48:39 +0800 Subject: [PATCH 054/165] HDDS-2770. security/SecurityAcls.md (#1190) --- .../docs/content/security/SecurityAcls.md | 8 +-- .../docs/content/security/SecurityAcls.zh.md | 66 +++++++++++++++++++ 2 files changed, 70 insertions(+), 4 deletions(-) create mode 100644 hadoop-hdds/docs/content/security/SecurityAcls.zh.md diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md index 31bbb0a95cc2..b081a60665d9 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.md @@ -22,9 +22,9 @@ icon: transfer limitations under the License. --> -Ozone supports a set of native ACLs. These ACLs can be used independently or -along with Ranger. 
If Apache Ranger is enabled, then ACL will be checked -first with Ranger and then Ozone's internal ACLs will be evaluated. +Ozone supports a set of native ACLs. These ACLs can be used independently +of ozone ACL plugin such as Ranger. If Apache Ranger plugin for Ozone is +enabled, then ACL will be checked with Ranger. Ozone ACLs are a super set of Posix and S3 ACLs. @@ -52,7 +52,7 @@ we have no way of knowing who the user is or we don't care.

diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md new file mode 100644 index 000000000000..9a3275ba69ea --- /dev/null +++ b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md @@ -0,0 +1,66 @@ +--- +title: "Ozone 访问控制列表" +date: "2019-April-03" +weight: 6 +summary: Ozone 原生的授权模块提供了不需要集成 Ranger 的访问控制列表(ACL)支持。 +icon: transfer +--- + + +Ozone 既支持原生的 ACL,也支持类似 Ranger 这样的 ACL 插件,如果启用了 Ranger 插件,则以 Ranger 中的 ACL 为准。 + +Ozone 的 ACL 是 Posix ACL 和 S3 ACL 的超集。 + +ACL 的通用格式为 _对象_:_角色_:_权限_. + +_对象_ 可选的值包括: + +1. **卷** - 一个 Ozone 卷,比如 _/volume_ +2. **桶** - 一个 Ozone 桶,比如 _/volume/bucket_ +3. **键** - 一个对象键,比如 _/volume/bucket/key_ +4. **前缀** - 某个键的路径前缀,比如 _/volume/bucket/prefix1/prefix2_ + +_角色_ 可选的值包括: + +1. **用户** - 一个 Kerberos 用户,和 Posix 用户一样,用户可以是已创建的也可以是未创建的。 +2. **组** - 一个 Kerberos 组,和 Posix 组一样,组可以是已创建的也可以是未创建的。 +3. **所有人** - 所有通过 Kerberos 认证的用户,这对应 Posix 标准中的其它用户。 +4. **匿名** - 完全忽略用户字段,这是对 Posix 语义的扩展,使用 S3 协议时会用到,用于表达无法获取用户的身份或者不在乎用户的身份。 + + + +_权限_ 可选的值包括:: + +1. **创建** – 此 ACL 为用户赋予在卷中创建桶,或者在桶中创建键的权限。请注意:在 Ozone 中,只有管理员可以创建卷。 +2. **列举** – 此 ACL 允许用户列举桶和键,因为列举的是子对象,所以这种 ACL 要绑定在卷和桶上。请注意:只有卷的属主和管理员可以对卷执行列举操作。 +3. **删除** – 允许用户删除卷、桶或键。 +4. **读取** – 允许用户读取卷和桶的元数据,以及读取键的数据流和元数据。 +5. **写入** - 允许用户修改卷和桶的元数据,以及重写一个已存在的键。 +6. **读 ACL** – 允许用户读取某个对象的 ACL。 +7. **写 ACL** – 允许用户修改某个对象的 ACL。 + +

+<h3>Ozone 原生 ACL API</h3>

+ +ACL 可以通过 Ozone 提供的一系列 API 进行操作,支持的 API 包括: + +1. **SetAcl** – 此 API 的参数为用户主体、Ozone 对象名称、Ozone 对象的类型和 ACL 列表。 +2. **GetAcl** – 此 API 的参数为 Ozone 对象名称和 Ozone 对象类型,返回值为 ACL 列表。 +3. **AddAcl** - 此 API 的参数为 Ozone 对象名称、Ozone 对象类型和待添加的 ACL,新的 ACL 会被添加到该 Ozone 对象的 ACL 条目中。 +4. **RemoveAcl** - 此 API 的参数为 Ozone 对象名称、Ozone 对象类型和待删除的 ACL。 From f9e15745deaa7927c99c95a9312c4e78e2a2663c Mon Sep 17 00:00:00 2001 From: Isa Hekmatizadeh Date: Wed, 22 Jul 2020 15:33:36 +0000 Subject: [PATCH 055/165] HDDS-3718: Improve OmKeyLocationInfoGroup internal data structure (#1023) --- .../client/io/BlockOutputStreamEntryPool.java | 6 +- .../hadoop/ozone/om/helpers/OmKeyInfo.java | 20 ++-- .../om/helpers/OmKeyLocationInfoGroup.java | 91 ++++++++++++------- .../helpers/TestOmKeyLocationInfoGroup.java | 59 ++++++++++++ .../hadoop/ozone/om/KeyManagerImpl.java | 4 +- .../om/response/key/OMKeyDeleteResponse.java | 2 +- 6 files changed, 127 insertions(+), 55 deletions(-) create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyLocationInfoGroup.java diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java index 3cab66465009..712d1199a335 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java @@ -172,10 +172,8 @@ public void addPreallocateBlocks(OmKeyLocationInfoGroup version, // server may return any number of blocks, (0 to any) // only the blocks allocated in this open session (block createVersion // equals to open session version) - for (OmKeyLocationInfo subKeyInfo : version.getLocationList()) { - if (subKeyInfo.getCreateVersion() == openVersion) { - addKeyLocationInfo(subKeyInfo); - } + for (OmKeyLocationInfo subKeyInfo : version.getLocationList(openVersion)) { + addKeyLocationInfo(subKeyInfo); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 783089ec1c7a..6304e398c7e7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -148,21 +148,17 @@ public void updateModifcationTime() { public void updateLocationInfoList(List locationInfoList) { long latestVersion = getLatestVersionLocations().getVersion(); OmKeyLocationInfoGroup keyLocationInfoGroup = getLatestVersionLocations(); - List currentList = - keyLocationInfoGroup.getLocationList(); - List latestVersionList = - keyLocationInfoGroup.getBlocksLatestVersionOnly(); // Updates the latest locationList in the latest version only with // given locationInfoList here. // TODO : The original allocated list and the updated list here may vary // as the containers on the Datanode on which the blocks were pre allocated // might get closed. The diff of blocks between these two lists here // need to be garbage collected in case the ozone client dies. 
- currentList.removeAll(latestVersionList); + keyLocationInfoGroup.removeBlocks(latestVersion); // set each of the locationInfo object to the latest version - locationInfoList.stream().forEach(omKeyLocationInfo -> omKeyLocationInfo + locationInfoList.forEach(omKeyLocationInfo -> omKeyLocationInfo .setCreateVersion(latestVersion)); - currentList.addAll(locationInfoList); + keyLocationInfoGroup.addAll(latestVersion, locationInfoList); } /** @@ -505,12 +501,10 @@ public OmKeyInfo copyObject() { .setObjectID(objectID).setUpdateID(updateID); - keyLocationVersions.forEach(keyLocationVersion -> { - List keyLocationInfos = new ArrayList<>(); - keyLocationInfos.addAll(keyLocationVersion.getLocationList()); - builder.addOmKeyLocationInfoGroup(new OmKeyLocationInfoGroup( - keyLocationVersion.getVersion(), keyLocationInfos)); - }); + keyLocationVersions.forEach(keyLocationVersion -> + builder.addOmKeyLocationInfoGroup( + new OmKeyLocationInfoGroup(keyLocationVersion.getVersion(), + keyLocationVersion.getLocationList()))); acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), acl.getName(), (BitSet) acl.getAclBitSet().clone(), diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java index bd97757cfd04..edebb67b64be 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java @@ -16,12 +16,12 @@ */ package org.apache.hadoop.ozone.om.helpers; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList; -import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; /** @@ -30,12 +30,23 @@ */ public class OmKeyLocationInfoGroup { private final long version; - private final List locationList; + private final Map> locationVersionMap; public OmKeyLocationInfoGroup(long version, List locations) { this.version = version; - this.locationList = locations; + this.locationVersionMap = locations.stream() + .collect(Collectors.groupingBy(OmKeyLocationInfo::getCreateVersion)); + //prevent NPE + this.locationVersionMap.putIfAbsent(version, new ArrayList<>()); + } + + public OmKeyLocationInfoGroup(long version, + Map> locations) { + this.version = version; + this.locationVersionMap = locations; + //prevent NPE + this.locationVersionMap.putIfAbsent(version, new ArrayList<>()); } /** @@ -44,10 +55,7 @@ public OmKeyLocationInfoGroup(long version, * @return the list of blocks that are created in the latest version. 
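The internal change in OmKeyLocationInfoGroup, keying block locations by the version that created them instead of keeping one flat list, can be illustrated with the sketch below. The types are simplified stand-ins (BlockLocation plays the role of OmKeyLocationInfo); the grouping step itself follows the Collectors.groupingBy call used in the new constructor.

```java
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/** Sketch only: group block locations by the version that created them. */
final class LocationGroupingSketch {

  /** Simplified stand-in for OmKeyLocationInfo. */
  static final class BlockLocation {
    private final long createVersion;

    BlockLocation(long createVersion) {
      this.createVersion = createVersion;
    }

    long getCreateVersion() {
      return createVersion;
    }
  }

  static Map<Long, List<BlockLocation>> groupByCreateVersion(
      List<BlockLocation> locations) {
    // With this layout, the blocks of one version are a single map lookup
    // instead of a filter over every location of every version.
    return locations.stream()
        .collect(Collectors.groupingBy(BlockLocation::getCreateVersion));
  }
}
```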
*/ public List getBlocksLatestVersionOnly() { - List list = new ArrayList<>(); - locationList.stream().filter(x -> x.getCreateVersion() == version) - .forEach(list::add); - return list; + return new ArrayList<>(locationVersionMap.get(version)); } public long getVersion() { @@ -55,28 +63,37 @@ public long getVersion() { } public List getLocationList() { - return locationList; + return locationVersionMap.values().stream().flatMap(List::stream) + .collect(Collectors.toList()); + } + + public long getLocationListCount() { + return locationVersionMap.values().stream().mapToLong(List::size).sum(); + } + + public List getLocationList(Long versionToFetch) { + return new ArrayList<>(locationVersionMap.get(versionToFetch)); } public KeyLocationList getProtobuf() { return KeyLocationList.newBuilder() .setVersion(version) .addAllKeyLocations( - locationList.stream().map(OmKeyLocationInfo::getProtobuf) + locationVersionMap.values().stream() + .flatMap(List::stream) + .map(OmKeyLocationInfo::getProtobuf) .collect(Collectors.toList())) .build(); } public static OmKeyLocationInfoGroup getFromProtobuf( KeyLocationList keyLocationList) { - List locations = new ArrayList<>(); - for (KeyLocation keyLocation : keyLocationList - .getKeyLocationsList()) { - locations.add(OmKeyLocationInfo.getFromProtobuf(keyLocation)); - } - return new OmKeyLocationInfoGroup( - keyLocationList.getVersion(), locations); + keyLocationList.getVersion(), + keyLocationList.getKeyLocationsList().stream() + .map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.groupingBy(OmKeyLocationInfo::getCreateVersion)) + ); } /** @@ -84,38 +101,42 @@ public static OmKeyLocationInfoGroup getFromProtobuf( * one. * * @param newLocationList a list of new location to be added. - * @return + * @return newly generated OmKeyLocationInfoGroup */ OmKeyLocationInfoGroup generateNextVersion( - List newLocationList) throws IOException { - // TODO : revisit if we can do this method more efficiently - // one potential inefficiency here is that later version always include - // older ones. e.g. 
v1 has B1, then v2, v3...will all have B1 and only add - // more - List newList = new ArrayList<>(); - newList.addAll(locationList); - for (OmKeyLocationInfo newInfo : newLocationList) { - // all these new blocks will have addVersion of current version + 1 - newInfo.setCreateVersion(version + 1); - newList.add(newInfo); - } - return new OmKeyLocationInfoGroup(version + 1, newList); + List newLocationList) { + Map> newMap = + new HashMap<>(locationVersionMap); + newMap.put(version + 1, new ArrayList<>(newLocationList)); + return new OmKeyLocationInfoGroup(version + 1, newMap); } - void appendNewBlocks(List newLocationList) - throws IOException { + void appendNewBlocks(List newLocationList) { + List locationList = locationVersionMap.get(version); for (OmKeyLocationInfo info : newLocationList) { info.setCreateVersion(version); locationList.add(info); } } + void removeBlocks(long versionToRemove){ + locationVersionMap.remove(versionToRemove); + } + + void addAll(long versionToAdd, List locationInfoList) { + locationVersionMap.putIfAbsent(versionToAdd, new ArrayList<>()); + List list = locationVersionMap.get(versionToAdd); + list.addAll(locationInfoList); + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("version:").append(version).append(" "); - for (OmKeyLocationInfo kli : locationList) { - sb.append(kli.getLocalID()).append(" || "); + for (List kliList : locationVersionMap.values()) { + for(OmKeyLocationInfo kli: kliList) { + sb.append(kli.getLocalID()).append(" || "); + } } return sb.toString(); } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyLocationInfoGroup.java new file mode 100644 index 000000000000..0843e0b3e882 --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyLocationInfoGroup.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +/** + * Test OmKeyLocationInfoGroup. + */ +public class TestOmKeyLocationInfoGroup { + + @Test + public void testCreatingAndGetLatestVersionLocations() { + OmKeyLocationInfoGroup testInstance = createTestInstance(); + List latestList = + testInstance.getBlocksLatestVersionOnly(); + Assert.assertEquals(1, latestList.size()); + Assert.assertEquals(2, latestList.get(0).getCreateVersion()); + } + + @Test + public void testGettingPreviousVersions() { + OmKeyLocationInfoGroup testInstance = createTestInstance(); + List list = testInstance.getLocationList(1L); + Assert.assertEquals(2, list.size()); + } + + private OmKeyLocationInfoGroup createTestInstance() { + OmKeyLocationInfo info1 = new OmKeyLocationInfo.Builder().build(); + info1.setCreateVersion(1); + OmKeyLocationInfo info2 = new OmKeyLocationInfo.Builder().build(); + info2.setCreateVersion(1); + OmKeyLocationInfo info3 = new OmKeyLocationInfo.Builder().build(); + info3.setCreateVersion(2); + List locationInfoList = new ArrayList<>(); + locationInfoList.add(info1); + locationInfoList.add(info2); + locationInfoList.add(info3); + return new OmKeyLocationInfoGroup(2, locationInfoList); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 1fca32f688c0..644558d5c4a9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -854,7 +854,7 @@ public void deleteKey(OmKeyArgs args) throws IOException { private boolean isKeyEmpty(OmKeyInfo keyInfo) { for (OmKeyLocationInfoGroup keyLocationList : keyInfo .getKeyLocationVersions()) { - if (keyLocationList.getLocationList().size() != 0) { + if (keyLocationList.getLocationListCount() != 0) { return false; } } @@ -965,7 +965,7 @@ private OmMultipartInfo createMultipartInfo(OmKeyArgs keyArgs, .setReplicationFactor(keyArgs.getFactor()) .setPartKeyInfoList(partKeyInfoMap) .build(); - List locations = new ArrayList<>(); + Map> locations = new HashMap<>(); OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index 41853da907e7..f0ba99116c1f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -105,7 +105,7 @@ private boolean isKeyEmpty(@Nullable OmKeyInfo keyInfo) { } for (OmKeyLocationInfoGroup keyLocationList : keyInfo .getKeyLocationVersions()) { - if (keyLocationList.getLocationList().size() != 0) { + if (keyLocationList.getLocationListCount() != 0) { return false; } } From 
1bc6ba22777f800374c0be979330636208621013 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Wed, 22 Jul 2020 17:48:30 +0200 Subject: [PATCH 056/165] Remove optional jersey-json dependency (#1238) --- hadoop-hdds/hadoop-dependency-server/pom.xml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index e20b554b4c6f..1ddcfd33c83c 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -67,6 +67,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.codehaus.jackson jackson-xc + + com.sun.jersey + jersey-json + From 2aed901f35c6fd6e6fd2cc643e1c1c9b6565f41c Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 22 Jul 2020 09:34:59 -0700 Subject: [PATCH 057/165] HDDS-3993. Create volume required for S3G during OM startup. (#1227) --- .../src/main/smoketest/s3/commonawslib.robot | 2 - .../fs/ozone/TestRootedOzoneFileSystem.java | 6 +- .../rpc/TestOzoneRpcClientAbstract.java | 19 +++- .../client/rpc/TestSecureOzoneRpcClient.java | 3 - .../apache/hadoop/ozone/om/TestOmMetrics.java | 10 +- .../ozone/om/TestOzoneManagerListVolumes.java | 18 ++- .../apache/hadoop/ozone/om/OzoneManager.java | 105 ++++++++++++++++++ 7 files changed, 144 insertions(+), 19 deletions(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index c263988281b4..74dba38657d6 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -93,8 +93,6 @@ Create bucket with name Setup s3 tests Run Keyword Install aws cli Run Keyword if '${OZONE_S3_SET_CREDENTIALS}' == 'true' Setup v4 headers - ${result} = Execute And Ignore Error ozone sh volume create o3://${OM_SERVICE_ID}/s3v - Should not contain ${result} Failed ${BUCKET} = Run Keyword if '${BUCKET}' == 'generated' Create bucket ... ELSE Set Variable ${BUCKET} Set Suite Variable ${BUCKET} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java index 3aec3e822bc5..75b38438a960 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java @@ -573,7 +573,8 @@ public void testListStatusRootAndVolumeNonRecursive() throws Exception { // listStatus("/") Path root = new Path(OZONE_URI_DELIMITER); FileStatus[] fileStatusRoot = ofs.listStatus(root); - Assert.assertEquals(2, fileStatusRoot.length); + // Default volume "s3v" is created by OM during start up. + Assert.assertEquals(2 + 1, fileStatusRoot.length); } /** @@ -687,7 +688,8 @@ public void testListStatusRootAndVolumeContinuation() throws IOException { FileStatus[] fileStatusesOver = customListStatus(new Path("/"), false, "", 8); // There are only 5 volumes - Assert.assertEquals(5, fileStatusesOver.length); + // Default volume "s3v" is created during startup. 
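Since the OM now seeds this volume during startup (see the addS3GVolumeToDB change in OzoneManager.java later in this patch), clients can use it without creating it first; a minimal client-side sketch, assuming a running cluster and its OzoneConfiguration conf:

    OzoneClient client = OzoneClientFactory.getRpcClient(conf);
    ObjectStore store = client.getObjectStore();
    // The S3 gateway volume (default name "s3v") already exists, so no
    // store.createVolume(...) call is needed before resolving it.
    OzoneVolume s3Volume =
        store.getVolume(HddsClientUtils.getS3VolumeName(conf));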
+ Assert.assertEquals(5 + 1, fileStatusesOver.length); // numEntries = 5 FileStatus[] fileStatusesExact = customListStatus(new Path("/"), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 3b90815ec917..ac9faa684600 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -87,6 +87,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; @@ -162,8 +163,6 @@ static void startCluster(OzoneConfiguration conf) throws Exception { cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); store = ozClient.getObjectStore(); - String volumeName = HddsClientUtils.getS3VolumeName(conf); - store.createVolume(volumeName); storageContainerLocationClient = cluster.getStorageContainerLocationClient(); ozoneManager = cluster.getOzoneManager(); @@ -236,6 +235,22 @@ public void testOMClientProxyProvider() { ozoneManager.getOmRpcServerAddr())); } + @Test + public void testDefaultS3GVolumeExists() throws Exception { + String s3VolumeName = HddsClientUtils.getS3VolumeName(cluster.getConf()); + OzoneVolume ozoneVolume = store.getVolume(s3VolumeName); + Assert.assertEquals(ozoneVolume.getName(), s3VolumeName); + OMMetadataManager omMetadataManager = + cluster.getOzoneManager().getMetadataManager(); + long transactionID = Long.MAX_VALUE -1 >> 8; + long objectID = transactionID << 8; + OmVolumeArgs omVolumeArgs = + cluster.getOzoneManager().getMetadataManager().getVolumeTable().get( + omMetadataManager.getVolumeKey(s3VolumeName)); + Assert.assertEquals(objectID, omVolumeArgs.getObjectID()); + Assert.assertEquals(transactionID, omVolumeArgs.getUpdateID()); + } + @Test public void testVolumeSetOwner() throws IOException { String volumeName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index 60a1a1e8fdb4..72ce91af6758 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.security.token.BlockTokenVerifier; @@ -126,8 +125,6 @@ public static void init() throws Exception { 
cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); store = ozClient.getObjectStore(); - String volumeName = HddsClientUtils.getS3VolumeName(conf); - store.createVolume(volumeName); storageContainerLocationClient = cluster.getStorageContainerLocationClient(); ozoneManager = cluster.getOzoneManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index b80e35793748..a53a7588ef40 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -111,7 +111,7 @@ public void testVolumeOps() throws IOException { assertCounter("NumVolumeCheckAccesses", 1L, omMetrics); assertCounter("NumVolumeDeletes", 1L, omMetrics); assertCounter("NumVolumeLists", 1L, omMetrics); - assertCounter("NumVolumes", 0L, omMetrics); + assertCounter("NumVolumes", 1L, omMetrics); ozoneManager.createVolume(null); ozoneManager.createVolume(null); @@ -119,7 +119,9 @@ public void testVolumeOps() throws IOException { ozoneManager.deleteVolume(null); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumVolumes", 2L, omMetrics); + + // Accounting 's3v' volume which is created by default. + assertCounter("NumVolumes", 3L, omMetrics); // inject exception to test for Failure Metrics @@ -152,10 +154,10 @@ public void testVolumeOps() throws IOException { // As last call for volumesOps does not increment numVolumes as those are // failed. - assertCounter("NumVolumes", 2L, omMetrics); + assertCounter("NumVolumes", 3L, omMetrics); cluster.restartOzoneManager(); - assertCounter("NumVolumes", 2L, omMetrics); + assertCounter("NumVolumes", 3L, omMetrics); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java index a8b1eeff53ba..d7aaf37fd637 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java @@ -203,14 +203,16 @@ public void testListVolumeWithOtherUsersListAllAllowed() throws Exception { UserGroupInformation.setLoginUser(user1); checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"), true); + + // Add "s3v" created default by OM. checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), true); + "volume4", "volume5", "s3v"), true); UserGroupInformation.setLoginUser(user2); checkUser(cluster, user1, Arrays.asList("volume1", "volume4", "volume5"), true); checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), true); + "volume4", "volume5", "s3v"), true); stopCluster(cluster); } @@ -229,8 +231,9 @@ public void testListVolumeWithOtherUsersListAllDisallowed() throws Exception { UserGroupInformation.setLoginUser(user1); checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"), false); + // Add "s3v" created default by OM. checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), false); + "volume4", "volume5", "s3v"), false); // While admin should be able to list volumes just fine. 
UserGroupInformation.setLoginUser(adminUser); @@ -250,8 +253,10 @@ public void testAclEnabledListAllAllowed() throws Exception { true); checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"), true); + + // Add "s3v" created default by OM. checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), true); + "volume4", "volume5", "s3v"), true); stopCluster(cluster); } @@ -268,8 +273,9 @@ public void testAclEnabledListAllDisallowed() throws Exception { checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"), false); UserGroupInformation.setLoginUser(adminUser); - checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), true); + // Add "s3v" created default by OM. + checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", + "volume3", "volume4", "volume5", "s3v"), true); stopCluster(cluster); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index f4b5cdd758dd..74a5b9f3d18d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -47,6 +47,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; +import com.google.common.base.Optional; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProvider; @@ -61,6 +62,7 @@ import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; @@ -79,9 +81,12 @@ import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.hdds.utils.RetriableTask; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper; import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.Client; @@ -133,6 +138,7 @@ import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.snapshot.OzoneManagerSnapshotProvider; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; @@ -140,6 +146,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo; import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB; import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager; @@ -185,6 +192,7 @@ import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName; import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; @@ -219,6 +227,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneManagerService.newReflectiveBlockingService; +import org.apache.hadoop.util.Time; import org.apache.ratis.proto.RaftProtos.RaftPeerRole; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.ExitUtils; @@ -426,6 +435,10 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, } instantiateServices(); + + // Create special volume s3v which is required for S3G. + addS3GVolumeToDB(); + this.omRatisSnapshotInfo = new OMRatisSnapshotInfo(); initializeRatisServer(); if (isRatisEnabled) { @@ -1146,6 +1159,7 @@ public void start() throws IOException { startJVMPauseMonitor(); setStartTime(); omState = State.RUNNING; + } /** @@ -3503,4 +3517,95 @@ public boolean getEnableFileSystemPaths() { return configuration.getBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); } + + /** + * Create volume which is required for S3Gateway operations. + * @throws IOException + */ + private void addS3GVolumeToDB() throws IOException { + String s3VolumeName = HddsClientUtils.getS3VolumeName(configuration); + String dbVolumeKey = metadataManager.getVolumeKey(s3VolumeName); + + if (!s3VolumeName.equals(OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT)) { + LOG.warn("Make sure that all S3Gateway use same volume name." + + " Otherwise user need to manually create/configure Volume " + + "configured by S3Gateway"); + } + if (!metadataManager.getVolumeTable().isExist(dbVolumeKey)) { + long transactionID = (Long.MAX_VALUE - 1) >> 8; + long objectID = OMFileRequest.getObjIDFromTxId(transactionID); + String userName = + UserGroupInformation.getCurrentUser().getShortUserName(); + + // Add volume and user info to DB and cache. + + OmVolumeArgs omVolumeArgs = createS3VolumeInfo(s3VolumeName, + transactionID, objectID); + + String dbUserKey = metadataManager.getUserKey(userName); + UserVolumeInfo userVolumeInfo = UserVolumeInfo.newBuilder() + .setObjectID(objectID) + .setUpdateID(transactionID) + .addVolumeNames(s3VolumeName).build(); + + + // Commit to DB. 
+ BatchOperation batchOperation = + metadataManager.getStore().initBatchOperation(); + + metadataManager.getVolumeTable().putWithBatch(batchOperation, dbVolumeKey, + omVolumeArgs); + metadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, + userVolumeInfo); + + metadataManager.getStore().commitBatchOperation(batchOperation); + + // Add to cache. + metadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(dbVolumeKey), + new CacheValue<>(Optional.of(omVolumeArgs), transactionID)); + metadataManager.getUserTable().addCacheEntry( + new CacheKey<>(dbUserKey), + new CacheValue<>(Optional.of(userVolumeInfo), transactionID)); + LOG.info("Created Volume {} With Owner {} required for S3Gateway " + + "operations.", s3VolumeName, userName); + } + } + + private OmVolumeArgs createS3VolumeInfo(String s3Volume, long transactionID, + long objectID) throws IOException { + String userName = UserGroupInformation.getCurrentUser().getShortUserName(); + long time = Time.now(); + + OmVolumeArgs.Builder omVolumeArgs = new OmVolumeArgs.Builder() + .setVolume(s3Volume) + .setUpdateID(transactionID) + .setObjectID(objectID) + .setCreationTime(time) + .setModificationTime(time) + .setOwnerName(userName) + .setAdminName(userName) + .setQuotaInBytes(OzoneConsts.MAX_QUOTA_IN_BYTES); + + // Provide ACLType of ALL which is default acl rights for user and group. + List listOfAcls = new ArrayList<>(); + //User ACL + listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, + userName, ACLType.ALL, ACCESS)); + //Group ACLs of the User + List userGroups = Arrays.asList(UserGroupInformation + .createRemoteUser(userName).getGroupNames()); + + userGroups.stream().forEach((group) -> listOfAcls.add( + new OzoneAcl(ACLIdentityType.GROUP, group, ACLType.ALL, ACCESS))); + + // Add ACLs + for (OzoneAcl ozoneAcl : listOfAcls) { + omVolumeArgs.addOzoneAcls(OzoneAcl.toProtobuf(ozoneAcl)); + } + + return omVolumeArgs.build(); + + } + } From 67d18668ce84f7cde27906f11a575cb6ff61d804 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 22 Jul 2020 13:25:20 -0700 Subject: [PATCH 058/165] HDDS-3969. Add validName check for FileSystem requests (#1211) --- .../hadoop/ozone/om/helpers/OzoneFSUtils.java | 30 ++++++++++++++ .../ozone/om/helpers/TestOzoneFsUtils.java | 39 +++++++++++++++++++ .../hadoop/fs/ozone/TestOzoneFileSystem.java | 26 +++++++++++++ .../hadoop/fs/ozone/BasicOzoneFileSystem.java | 12 +++++- 4 files changed, 105 insertions(+), 2 deletions(-) create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index 07f3194c14b2..d1491ed6c506 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.helpers; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; import java.nio.file.Paths; @@ -86,4 +87,33 @@ public static String addTrailingSlashIfNeeded(String key) { public static boolean isFile(String keyName) { return !keyName.endsWith(OZONE_URI_DELIMITER); } + + /** + * Whether the pathname is valid. Currently prohibits relative paths, + * names which contain a ":" or "//", or other non-canonical paths. 
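Concretely, the behaviour pinned down by the new TestOzoneFsUtils added below is:

    OzoneFSUtils.isValidName("/a/b");   // true:  absolute and canonical
    OzoneFSUtils.isValidName("a/b");    // false: not absolute
    OzoneFSUtils.isValidName("/a//b");  // false: empty component in the middle
    OzoneFSUtils.isValidName("/a:/b");  // false: ':' inside a component
    OzoneFSUtils.isValidName("/./.");   // false: "." components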
+ */ + public static boolean isValidName(String src) { + // Path must be absolute. + if (!src.startsWith(Path.SEPARATOR)) { + return false; + } + + // Check for ".." "." ":" "/" + String[] components = StringUtils.split(src, '/'); + for (int i = 0; i < components.length; i++) { + String element = components[i]; + if (element.equals(".") || + (element.contains(":")) || + (element.contains("/") || element.equals(".."))) { + return false; + } + // The string may start or end with a /, but not have + // "//" in the middle. + if (element.isEmpty() && i != components.length - 1 && + i != 0) { + return false; + } + } + return true; + } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java new file mode 100644 index 000000000000..7471d539484c --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +import org.junit.Assert; +import org.junit.Test; + +/** + * Test OzoneFsUtils. + */ +public class TestOzoneFsUtils { + + @Test + public void testPaths() { + Assert.assertTrue(OzoneFSUtils.isValidName("/a/b")); + Assert.assertFalse(OzoneFSUtils.isValidName("../../../a/b")); + Assert.assertFalse(OzoneFSUtils.isValidName("/./.")); + Assert.assertFalse(OzoneFSUtils.isValidName("/:/")); + Assert.assertFalse(OzoneFSUtils.isValidName("a/b")); + Assert.assertFalse(OzoneFSUtils.isValidName("/a:/b")); + Assert.assertFalse(OzoneFSUtils.isValidName("/a//b")); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index ad1705a8b9c6..d4a2a46fd04e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.contract.ContractTestUtils; @@ -163,6 +164,31 @@ public void testMakeDirsWithAnExistingDirectoryPath() throws Exception { assertTrue("Shouldn't send error if dir exists", status); } + @Test + public void testCreateWithInvalidPaths() throws Exception { + setupOzoneFileSystem(); + Path parent = new Path("../../../../../d1/d2/"); + Path file1 = new Path(parent, "key1"); + checkInvalidPath(file1); + + file1 = new Path("/:/:"); + checkInvalidPath(file1); + } + + private void checkInvalidPath(Path path) throws Exception { + FSDataOutputStream outputStream = null; + try { + outputStream = fs.create(path, false); + fail("testCreateWithInvalidPaths failed for path" + path); + } catch (Exception ex) { + Assert.assertTrue(ex instanceof InvalidPathException); + } finally { + if (outputStream != null) { + outputStream.close(); + } + } + } + @Test(timeout = 300_000) public void testFileSystem() throws Exception { setupOzoneFileSystem(); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index 778277f846d4..aa103157c015 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; @@ -39,6 +40,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; 
import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; @@ -748,9 +750,15 @@ public String pathToKey(Path path) { path = new Path(workingDir, path); } // removing leading '/' char - String key = path.toUri().getPath().substring(1); + String key = path.toUri().getPath(); + + if (OzoneFSUtils.isValidName(key)) { + key = path.toUri().getPath(); + } else { + throw new InvalidPathException("Invalid path Name" + key); + } LOG.trace("path for key:{} is:{}", key, path); - return key; + return key.substring(1); } /** From 0a92a80764fe22a111cadd027fed5edf23b14557 Mon Sep 17 00:00:00 2001 From: john28152815 <37138250+john28152815@users.noreply.github.com> Date: Thu, 23 Jul 2020 05:26:36 +0800 Subject: [PATCH 059/165] HDDS-4003. Delete the redundant word of the description (#1240) --- .../apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java index cbeb92a68f20..af0ea1e33701 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java @@ -37,7 +37,7 @@ public class CreateVolumeHandler extends VolumeHandler { @Option(names = {"--user", "-u"}, - description = "Owner of of the volume") + description = "Owner of the volume") private String ownerName; @Option(names = {"--quota", "-q"}, From d7f965379280a9be93b703b3a5410db3d83fb4ce Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 23 Jul 2020 02:11:27 +0200 Subject: [PATCH 060/165] HDDS-3827. 
Intermittent failure in TestKeyManagerUnit#listMultipartUploads (#1239) --- .../hadoop/ozone/om/TestKeyManagerUnit.java | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 4e62eb8fa193..7f31b2407e2a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om; +import java.io.File; import java.io.IOException; import java.time.Instant; import java.util.ArrayList; @@ -30,6 +31,7 @@ import java.util.UUID; import com.google.common.base.Optional; +import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -65,6 +67,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -80,12 +83,14 @@ public class TestKeyManagerUnit { private KeyManagerImpl keyManager; private Instant startDate; + private File testDir; @Before public void setup() throws IOException { configuration = new OzoneConfiguration(); + testDir = GenericTestUtils.getRandomizedTestDir(); configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, - GenericTestUtils.getRandomizedTestDir().toString()); + testDir.toString()); metadataManager = new OmMetadataManagerImpl(configuration); keyManager = new KeyManagerImpl( Mockito.mock(ScmBlockLocationProtocol.class), @@ -98,6 +103,12 @@ public void setup() throws IOException { startDate = Instant.now(); } + @After + public void cleanup() throws Exception { + metadataManager.stop(); + FileUtils.deleteDirectory(testDir); + } + @Test public void listMultipartUploadPartsWithZeroUpload() throws IOException { //GIVEN @@ -113,8 +124,6 @@ public void listMultipartUploadPartsWithZeroUpload() throws IOException { Assert.assertEquals(0, omMultipartUploadListParts.getPartInfoList().size()); - - this.startDate = Instant.now(); } @Test @@ -144,9 +153,11 @@ public void listMultipartUploads() throws IOException { Assert.assertEquals("dir/key2", uploads.get(1).getKeyName()); Assert.assertNotNull(uploads.get(1)); - Assert.assertNotNull(uploads.get(1).getCreationTime()); - Assert.assertTrue("Creation date is too old", - uploads.get(1).getCreationTime().compareTo(startDate) > 0); + Instant creationTime = uploads.get(1).getCreationTime(); + Assert.assertNotNull(creationTime); + Assert.assertFalse("Creation date is too old: " + + creationTime + " < " + startDate, + creationTime.isBefore(startDate)); } @Test From 0eaa7f43b91eefbdb5c4192f8588b85983956dc7 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 23 Jul 2020 02:12:39 +0200 Subject: [PATCH 061/165] HDDS-3998. 
Shorten Ozone FS Hadoop compatibility module names (#1237) --- hadoop-ozone/ozonefs-hadoop2/pom.xml | 2 +- hadoop-ozone/ozonefs-hadoop3/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index 95774998b542..2de88531b8e0 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -22,7 +22,7 @@ 0.6.0-SNAPSHOT hadoop-ozone-filesystem-hadoop2 - Apache Hadoop Ozone FileSystem Hadoop 2.x compatibility + Apache Hadoop Ozone FS Hadoop 2.x compatibility jar 0.6.0-SNAPSHOT diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml index cbcc29eaa4f8..8bce6fd82017 100644 --- a/hadoop-ozone/ozonefs-hadoop3/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml @@ -22,7 +22,7 @@ 0.6.0-SNAPSHOT hadoop-ozone-filesystem-hadoop3 - Apache Hadoop Ozone FileSystem Hadoop 3.x compatibility + Apache Hadoop Ozone FS Hadoop 3.x compatibility jar 0.6.0-SNAPSHOT From 06acd340cd279ae105f19958a2e5f3d14f6749cd Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Thu, 23 Jul 2020 09:51:32 -0700 Subject: [PATCH 062/165] HDDS-4006. Disallow MPU on encrypted buckets. (#1241) --- .../ozone/om/exceptions/OMException.java | 2 + .../src/main/proto/OmClientProtocol.proto | 2 + .../S3InitiateMultipartUploadRequest.java | 13 +++++ .../TestS3InitiateMultipartUploadRequest.java | 57 +++++++++++++++++++ 4 files changed, 74 insertions(+) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index bab8d94fc410..54b5458af08b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -226,5 +226,7 @@ public enum ResultCodes { PARTIAL_DELETE, DETECTED_LOOP_IN_BUCKET_LINKS, + + NOT_SUPPORTED_OPERATION } } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 68598179adf0..1b2075e17d9b 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -306,6 +306,8 @@ enum Status { PARTIAL_DELETE = 62; DETECTED_LOOP_IN_BUCKET_LINKS = 63; + + NOT_SUPPORTED_OPERATION = 64; } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index f7951a296807..08063b6b4d9b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -23,6 +23,7 @@ import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; @@ -51,6 +52,7 @@ import java.util.Map; import java.util.UUID; +import 
static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; /** @@ -132,6 +134,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + // If KMS is configured and TDE is enabled on bucket, throw MPU not + // supported. + if (ozoneManager.getKmsProvider() != null) { + if (omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName)) + .getEncryptionKeyInfo() != null) { + throw new OMException("MultipartUpload is not yet supported on " + + "encrypted buckets", NOT_SUPPORTED_OPERATION); + } + } + // We are adding uploadId to key, because if multiple users try to // perform multipart upload on the same key, each will try to upload, who // ever finally commit the key, we see that key in ozone. Suppose if we diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index 5633c726c54e..33fd1cd215b2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -21,6 +21,14 @@ import java.util.UUID; +import com.google.common.base.Optional; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; @@ -28,6 +36,12 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.mockito.Mockito; + +import static org.apache.hadoop.crypto.CipherSuite.AES_CTR_NOPADDING; +import static org.apache.hadoop.crypto.CryptoProtocolVersion.ENCRYPTION_ZONES; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_SUPPORTED_OPERATION; +import static org.mockito.Mockito.when; /** * Tests S3 Initiate Multipart Upload request. 
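From a client's point of view the new guard surfaces roughly as follows; a rough sketch only, assuming the usual three-argument OzoneBucket#initiateMultipartUpload entry point and a bucket created with a bucket encryption key while OM has a KMS provider configured:

    try {
      encryptedBucket.initiateMultipartUpload("key1",
          ReplicationType.RATIS, ReplicationFactor.THREE);
    } catch (OMException e) {
      // Rejected with NOT_SUPPORTED_OPERATION:
      // "MultipartUpload is not yet supported on encrypted buckets"
    }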
@@ -150,4 +164,47 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { .get(multipartKey)); } + + @Test + public void testMPUNotSupported() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + when(ozoneManager.getKmsProvider()) + .thenReturn(Mockito.mock(KeyProviderCryptoExtension.class)); + + TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + + // Set encryption info and create bucket + OmBucketInfo omBucketInfo = + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName).setCreationTime(Time.now()) + .setBucketEncryptionKey(new BucketEncryptionKeyInfo.Builder() + .setKeyName("dummy").setSuite(AES_CTR_NOPADDING) + .setVersion(ENCRYPTION_ZONES).build()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + + omMetadataManager.getBucketTable().put(bucketKey, omBucketInfo); + + omMetadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), + new CacheValue<>(Optional.of(omBucketInfo), 100L)); + + OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, bucketName, + keyName); + + OMClientRequest omClientRequest = + new S3InitiateMultipartUploadRequest(modifiedRequest); + + OMClientResponse omClientResponse = + omClientRequest.validateAndUpdateCache(ozoneManager, 1L, + ozoneManagerDoubleBufferHelper); + + Assert.assertNotNull(omClientResponse.getOMResponse()); + Assert.assertEquals(NOT_SUPPORTED_OPERATION, + omClientResponse.getOMResponse().getStatus()); + + } } From 70207b99e8ae2acdcc070d7ac5f8f1d72b0c1582 Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Thu, 23 Jul 2020 13:51:16 -0700 Subject: [PATCH 063/165] HDDS-4008. Recon should fallback to ozone.om.service.ids when the internal service id is not defined. 
(#1243) --- .../java/org/apache/hadoop/ozone/OmUtils.java | 47 ++++++++++++++++++ .../org/apache/hadoop/ozone/TestOmUtils.java | 49 +++++++++++++++++++ .../ozone/recon/ReconControllerModule.java | 10 ++-- .../src/views/overview/overview.tsx | 2 +- 4 files changed, 102 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 6eb8b18b1ee4..bb9aec4748f2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -34,6 +34,7 @@ import java.util.OptionalInt; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.conf.OMClientConfig; @@ -54,6 +55,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; @@ -527,4 +529,49 @@ public static void validateKeyName(String keyName) OMException.ResultCodes.INVALID_KEY_NAME); } } + + /** + * Return configured OzoneManager service id based on the following logic. + * Look at 'ozone.om.internal.service.id' first. If configured, return that. + * If the above is not configured, look at 'ozone.om.service.ids'. + * If count(ozone.om.service.ids) == 1, return that id. + * If count(ozone.om.service.ids) > 1 throw exception + * If 'ozone.om.service.ids' is not configured, return null. (Non HA) + * @param conf configuration + * @return OM service ID. + * @throws IOException on error. 
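A small illustration of the resolution order, using made-up service ids; this mirrors the testGetOzoneManagerServiceId case added below:

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(OZONE_OM_SERVICE_IDS_KEY, "om2,om1");
    conf.set(OZONE_OM_INTERNAL_SERVICE_ID, "om1");
    OmUtils.getOzoneManagerServiceId(conf);  // "om1": the internal id wins
    conf.unset(OZONE_OM_INTERNAL_SERVICE_ID);
    // With two service ids and no internal id, the call now throws IOException.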
+ */ + public static String getOzoneManagerServiceId(OzoneConfiguration conf) + throws IOException { + String localOMServiceId = conf.get(OZONE_OM_INTERNAL_SERVICE_ID); + Collection omServiceIds = conf.getTrimmedStringCollection( + OZONE_OM_SERVICE_IDS_KEY); + if (localOMServiceId == null) { + LOG.info("{} is not defined, falling back to {} to find serviceID for " + + "OzoneManager if it is HA enabled cluster", + OZONE_OM_INTERNAL_SERVICE_ID, OZONE_OM_SERVICE_IDS_KEY); + if (omServiceIds.size() > 1) { + throw new IOException(String.format( + "More than 1 OzoneManager ServiceID (%s) " + + "configured : %s, but %s is not " + + "configured.", OZONE_OM_SERVICE_IDS_KEY, + omServiceIds.toString(), OZONE_OM_INTERNAL_SERVICE_ID)); + } + } else if (!omServiceIds.contains(localOMServiceId)) { + throw new IOException(String.format( + "Cannot find the internal service id %s in %s", + localOMServiceId, omServiceIds.toString())); + } else { + omServiceIds = Collections.singletonList(localOMServiceId); + } + + if (omServiceIds.isEmpty()) { + LOG.info("No OzoneManager ServiceID configured."); + return null; + } else { + String serviceId = omServiceIds.iterator().next(); + LOG.info("Using OzoneManager ServiceID '{}'.", serviceId); + return serviceId; + } + } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java index ab9b4e1c080e..8c527cbd318a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java @@ -26,10 +26,16 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import static org.apache.hadoop.ozone.OmUtils.getOzoneManagerServiceId; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; + +import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -99,5 +105,48 @@ public void testGetOmHAAddressesById() { assertTrue(rpcAddrs.stream().anyMatch( a -> a.getAddress().getHostAddress().equals("1.1.1.3"))); } + + @Test + public void testGetOzoneManagerServiceId() throws IOException { + + // If the above is not configured, look at 'ozone.om.service.ids'. + // If no config is set, return null. (Non HA) + OzoneConfiguration configuration = new OzoneConfiguration(); + assertNull(getOzoneManagerServiceId(configuration)); + + // Verify 'ozone.om.internal.service.id' takes precedence + configuration.set(OZONE_OM_INTERNAL_SERVICE_ID, "om1"); + configuration.set(OZONE_OM_SERVICE_IDS_KEY, "om2,om1"); + String id = getOzoneManagerServiceId(configuration); + assertEquals("om1", id); + + configuration.set(OZONE_OM_SERVICE_IDS_KEY, "om2,om3"); + try { + getOzoneManagerServiceId(configuration); + Assert.fail(); + } catch (IOException ioEx) { + assertTrue(ioEx.getMessage() + .contains("Cannot find the internal service id om1 in [om2, om3]")); + } + + // When internal service ID is not defined. + // Verify if count(ozone.om.service.ids) == 1, return that id. 
+ configuration = new OzoneConfiguration(); + configuration.set(OZONE_OM_SERVICE_IDS_KEY, "om2"); + id = getOzoneManagerServiceId(configuration); + assertEquals("om2", id); + + // Verify if more than count(ozone.om.service.ids) > 1 and internal + // service id is not defined, throw exception + configuration.set(OZONE_OM_SERVICE_IDS_KEY, "om2,om1"); + try { + getOzoneManagerServiceId(configuration); + Assert.fail(); + } catch (IOException ioEx) { + assertTrue(ioEx.getMessage() + .contains("More than 1 OzoneManager ServiceID (ozone.om.service" + + ".ids) configured")); + } + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java index ac14757af158..11996305d794 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java @@ -56,7 +56,8 @@ import com.google.inject.Singleton; import com.google.inject.multibindings.Multibinder; import static org.apache.hadoop.hdds.scm.cli.ContainerOperationClient.newContainerRpcClient; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID; +import static org.apache.hadoop.ozone.OmUtils.getOzoneManagerServiceId; + import org.apache.ratis.protocol.ClientId; import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig; import org.hadoop.ozone.recon.schema.tables.daos.ClusterGrowthDailyDao; @@ -152,11 +153,10 @@ OzoneManagerProtocol getOzoneManagerProtocol( try { ClientId clientId = ClientId.randomId(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + String serviceId = getOzoneManagerServiceId(ozoneConfiguration); OmTransport transport = - OmTransportFactory.create(ozoneConfiguration, ugi, - ozoneConfiguration.get(OZONE_OM_INTERNAL_SERVICE_ID)); - ozoneManagerClient = new - OzoneManagerProtocolClientSideTranslatorPB( + OmTransportFactory.create(ozoneConfiguration, ugi, serviceId); + ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB( transport, clientId.toString()); } catch (IOException ioEx) { LOG.error("Error in provisioning OzoneManagerProtocol ", ioEx); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx index bd92f5f2a122..4497d7171af2 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx @@ -180,7 +180,7 @@ export class Overview extends React.Component, IOverviewS
- + From c8344df682adf91a8158c37e311647abaf5280f1 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Fri, 24 Jul 2020 10:17:44 +0800 Subject: [PATCH 064/165] =?UTF-8?q?HDDS-3658.=20Stop=20to=20persist=20cont?= =?UTF-8?q?ainer=20related=20pipeline=20info=20of=20each=20ke=E2=80=A6=20(?= =?UTF-8?q?#1012)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../org/apache/hadoop/hdds/scm/TestUtils.java | 13 ++ .../hadoop/ozone/om/helpers/OmKeyInfo.java | 17 ++- .../ozone/om/helpers/OmKeyLocationInfo.java | 18 ++- .../om/helpers/OmKeyLocationInfoGroup.java | 22 ++-- .../ozone/om/helpers/RepeatedOmKeyInfo.java | 9 +- .../hadoop/ozone/om/TestKeyManagerImpl.java | 14 +- .../recon/TestReconWithOzoneManager.java | 14 +- .../apache/hadoop/ozone/om/KeyManager.java | 6 + .../hadoop/ozone/om/KeyManagerImpl.java | 119 +++++++++++------ .../ozone/om/OmMetadataManagerImpl.java | 9 +- .../hadoop/ozone/om/codec/OMDBDefinition.java | 6 +- .../hadoop/ozone/om/codec/OmKeyInfoCodec.java | 13 +- .../om/codec/RepeatedOmKeyInfoCodec.java | 13 +- .../om/request/file/OMFileCreateRequest.java | 4 + .../om/request/key/OMKeyCreateRequest.java | 4 + .../OzoneManagerRequestHandler.java | 2 +- .../hadoop/ozone/om/TestKeyManagerUnit.java | 20 --- .../ozone/om/codec/TestOmKeyInfoCodec.java | 116 +++++++++++++++++ .../om/codec/TestRepeatedOmKeyInfoCodec.java | 121 ++++++++++++++++++ .../om/request/key/TestOMKeyRequest.java | 6 + .../hadoop/ozone/fsck/ContainerMapper.java | 2 +- 21 files changed, 452 insertions(+), 96 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index 64752dab5543..03ed0f7123eb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -547,4 +547,17 @@ public static ContainerReplica getReplicas( .build(); } + public static Pipeline getRandomPipeline() { + List nodes = new ArrayList<>(); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + return Pipeline.newBuilder() + .setFactor(HddsProtos.ReplicationFactor.THREE) + .setId(PipelineID.randomId()) + .setNodes(nodes) + .setState(Pipeline.PipelineState.OPEN) + .setType(HddsProtos.ReplicationType.RATIS) + .build(); + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 6304e398c7e7..d0e8bee52345 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -377,13 +377,26 @@ public OmKeyInfo build() { } } + /** + * For network transmit. + * @return + */ public KeyInfo getProtobuf() { + return getProtobuf(false); + } + + /** + * + * @param ignorePipeline true for persist to DB, false for network transmit. + * @return + */ + public KeyInfo getProtobuf(boolean ignorePipeline) { long latestVersion = keyLocationVersions.size() == 0 ? 
-1 : keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion(); List keyLocations = new ArrayList<>(); for (OmKeyLocationInfoGroup locationInfoGroup : keyLocationVersions) { - keyLocations.add(locationInfoGroup.getProtobuf()); + keyLocations.add(locationInfoGroup.getProtobuf(ignorePipeline)); } KeyInfo.Builder kb = KeyInfo.newBuilder() @@ -393,8 +406,8 @@ public KeyInfo getProtobuf() { .setDataSize(dataSize) .setFactor(factor) .setType(type) - .addAllKeyLocationList(keyLocations) .setLatestVersion(latestVersion) + .addAllKeyLocationList(keyLocations) .setCreationTime(creationTime) .setModificationTime(modificationTime) .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java index b9a292069135..70c71d6d7f32 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java @@ -156,7 +156,15 @@ public OmKeyLocationInfo build() { } } + public KeyLocation getCompactProtobuf() { + return getProtobuf(true); + } + public KeyLocation getProtobuf() { + return getProtobuf(false); + } + + private KeyLocation getProtobuf(boolean ignorePipeline) { KeyLocation.Builder builder = KeyLocation.newBuilder() .setBlockID(blockID.getProtobuf()) .setLength(length) @@ -165,10 +173,12 @@ public KeyLocation getProtobuf() { if (this.token != null) { builder.setToken(OzonePBHelper.protoFromToken(token)); } - try { - builder.setPipeline(pipeline.getProtobufMessage()); - } catch (UnknownPipelineStateException e) { - //TODO: fix me: we should not return KeyLocation without pipeline. + if (!ignorePipeline) { + try { + builder.setPipeline(pipeline.getProtobufMessage()); + } catch (UnknownPipelineStateException e) { + //TODO: fix me: we should not return KeyLocation without pipeline. + } } return builder.build(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java index edebb67b64be..abaf055f9f2d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om.helpers; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList; import java.util.ArrayList; @@ -75,15 +76,18 @@ public List getLocationList(Long versionToFetch) { return new ArrayList<>(locationVersionMap.get(versionToFetch)); } - public KeyLocationList getProtobuf() { - return KeyLocationList.newBuilder() - .setVersion(version) - .addAllKeyLocations( - locationVersionMap.values().stream() - .flatMap(List::stream) - .map(OmKeyLocationInfo::getProtobuf) - .collect(Collectors.toList())) - .build(); + public KeyLocationList getProtobuf(boolean ignorePipeline) { + KeyLocationList.Builder builder = KeyLocationList.newBuilder() + .setVersion(version); + List keyLocationList = + new ArrayList<>(); + for (List locationList : locationVersionMap.values()) { + for (OmKeyLocationInfo keyInfo : locationList) { + keyLocationList.add(ignorePipeline ? 
+ keyInfo.getCompactProtobuf() : keyInfo.getProtobuf()); + } + } + return builder.addAllKeyLocations(keyLocationList).build(); } public static OmKeyLocationInfoGroup getFromProtobuf( diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index c28c2c8abc47..6d7bf2f83ff5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -60,10 +60,15 @@ public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build(); } - public RepeatedKeyInfo getProto() { + /** + * + * @param compact, true for persistence, false for network transmit + * @return + */ + public RepeatedKeyInfo getProto(boolean compact) { List list = new ArrayList<>(); for(OmKeyInfo k : omKeyInfoList) { - list.add(k.getProtobuf()); + list.add(k.getProtobuf(compact)); } RepeatedKeyInfo.Builder builder = RepeatedKeyInfo.newBuilder() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index bbfa0d2c8b0f..49d3417e9ed4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -23,6 +23,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Arrays; import java.util.BitSet; import java.util.Collections; import java.util.HashMap; @@ -137,6 +138,7 @@ public class TestKeyManagerImpl { private static NodeManager nodeManager; private static StorageContainerManager scm; private static ScmBlockLocationProtocol mockScmBlockLocationProtocol; + private static StorageContainerLocationProtocol mockScmContainerClient; private static OzoneConfiguration conf; private static OMMetadataManager metadataManager; private static File dir; @@ -178,9 +180,11 @@ public static void setUp() throws Exception { StorageUnit.BYTES); conf.setLong(OZONE_KEY_PREALLOCATION_BLOCKS_MAX, 10); + mockScmContainerClient = + Mockito.mock(StorageContainerLocationProtocol.class); keyManager = - new KeyManagerImpl(scm.getBlockProtocolServer(), metadataManager, conf, - "om1", null); + new KeyManagerImpl(scm.getBlockProtocolServer(), + mockScmContainerClient, metadataManager, conf, "om1", null); prefixManager = new PrefixManagerImpl(metadataManager, false); Mockito.when(mockScmBlockLocationProtocol @@ -764,6 +768,12 @@ public void testLookupKeyWithLocation() throws IOException { keyArgs.setLocationInfoList(locationInfoList); keyManager.commitKey(keyArgs, keySession.getId()); + ContainerInfo containerInfo = new ContainerInfo.Builder().setContainerID(1L) + .setPipelineID(pipeline.getId()).build(); + List containerWithPipelines = Arrays.asList( + new ContainerWithPipeline(containerInfo, pipeline)); + when(mockScmContainerClient.getContainerWithPipelineBatch( + Arrays.asList(1L))).thenReturn(containerWithPipelines); OmKeyInfo key = keyManager.lookupKey(keyArgs, null); Assert.assertEquals(key.getKeyName(), keyName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index 93630548e08a..af9200103d99 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -40,8 +40,8 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -335,7 +335,7 @@ private LinkedTreeMap getContainerResponseMap(String containerResponse, */ private void addKeys(int start, int end) throws Exception { for(int i = start; i < end; i++) { - Pipeline pipeline = getRandomPipeline(); + Pipeline pipeline = TestUtils.getRandomPipeline(); List omKeyLocationInfoList = new ArrayList<>(); BlockID blockID = new BlockID(i, 1); OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID, @@ -358,16 +358,6 @@ private long getTableKeyCount(TableIterator locationInfoGroups = value == null ? - null : value.getKeyLocationVersions(); + Preconditions.checkNotNull(value, "OMKeyInfo cannot be null"); + refreshPipeline(Arrays.asList(value)); + } - // TODO: fix Some tests that may not initialize container client - // The production should always have containerClient initialized. - if (scmClient.getContainerClient() == null || - CollectionUtils.isEmpty(locationInfoGroups)) { + /** + * Refresh pipeline info in OM by asking SCM. + * @param keyList a list of OmKeyInfo + */ + @VisibleForTesting + protected void refreshPipeline(List keyList) throws IOException { + if (keyList == null || keyList.isEmpty()) { return; } Set containerIDs = new HashSet<>(); - for (OmKeyLocationInfoGroup key : locationInfoGroups) { - for (OmKeyLocationInfo k : key.getLocationList()) { - containerIDs.add(k.getContainerID()); + for (OmKeyInfo keyInfo : keyList) { + List locationInfoGroups = + keyInfo.getKeyLocationVersions(); + + for (OmKeyLocationInfoGroup key : locationInfoGroups) { + for (OmKeyLocationInfo k : key.getLocationList()) { + containerIDs.add(k.getContainerID()); + } } } + Map containerWithPipelineMap = + refreshPipeline(containerIDs); + + for (OmKeyInfo keyInfo : keyList) { + List locationInfoGroups = + keyInfo.getKeyLocationVersions(); + for (OmKeyLocationInfoGroup key : locationInfoGroups) { + for (OmKeyLocationInfo k : key.getLocationList()) { + ContainerWithPipeline cp = + containerWithPipelineMap.get(k.getContainerID()); + if (cp != null && !cp.getPipeline().equals(k.getPipeline())) { + k.setPipeline(cp.getPipeline()); + } + } + } + } + } + + /** + * Refresh pipeline info in OM by asking SCM. + * @param containerIDs a set of containerIDs + */ + @VisibleForTesting + protected Map refreshPipeline( + Set containerIDs) throws IOException { + // TODO: fix Some tests that may not initialize container client + // The production should always have containerClient initialized. 
+ if (scmClient.getContainerClient() == null || + containerIDs == null || containerIDs.isEmpty()) { + return Collections.EMPTY_MAP; + } + Map containerWithPipelineMap = new HashMap<>(); try { @@ -719,22 +769,12 @@ protected void refreshPipeline(OmKeyInfo value) throws IOException { containerWithPipelineMap.put( cp.getContainerInfo().getContainerID(), cp); } + return containerWithPipelineMap; } catch (IOException ioEx) { - LOG.debug("Get containerPipeline failed for volume:{} bucket:{} " + - "key:{}", value.getVolumeName(), value.getBucketName(), - value.getKeyName(), ioEx); + LOG.debug("Get containerPipeline failed for {}", + containerIDs.toString(), ioEx); throw new OMException(ioEx.getMessage(), SCM_GET_PIPELINE_EXCEPTION); } - - for (OmKeyLocationInfoGroup key : locationInfoGroups) { - for (OmKeyLocationInfo k : key.getLocationList()) { - ContainerWithPipeline cp = - containerWithPipelineMap.get(k.getContainerID()); - if (!cp.getPipeline().equals(k.getPipeline())) { - k.setPipeline(cp.getPipeline()); - } - } - } } @Override @@ -872,8 +912,10 @@ public List listKeys(String volumeName, String bucketName, // underlying table using an iterator. That automatically creates a // snapshot of the data, so we don't need these locks at a higher level // when we iterate. - return metadataManager.listKeys(volumeName, bucketName, + List keyList = metadataManager.listKeys(volumeName, bucketName, startKey, keyPrefix, maxKeys); + refreshPipeline(keyList); + return keyList; } @Override @@ -1743,9 +1785,7 @@ private OzoneFileStatus getOzoneFileStatus(String volumeName, // if the key is a file then do refresh pipeline info in OM by asking SCM if (fileKeyInfo != null) { - if (refreshPipeline) { - refreshPipeline(fileKeyInfo); - } + refreshPipeline(fileKeyInfo); if (sortDatanodes) { sortDatanodeInPipeline(fileKeyInfo, clientAddress); } @@ -1915,6 +1955,15 @@ public OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) ResultCodes.NOT_A_FILE); } + /** + * Refresh the key block location information by get latest info from SCM. + * @param key + */ + public void refresh(OmKeyInfo key) throws IOException { + Preconditions.checkNotNull(key, "Key info can not be null"); + refreshPipeline(Arrays.asList(key)); + } + /** * Helper function for listStatus to find key in TableCache. */ @@ -2082,13 +2131,11 @@ public List listStatus(OmKeyArgs args, boolean recursive, for (Map.Entry entry : cacheKeyMap.entrySet()) { // No need to check if a key is deleted or not here, this is handled // when adding entries to cacheKeyMap from DB. 
- if (args.getRefreshPipeline()) { - refreshPipeline(entry.getValue().getKeyInfo()); - } - if (args.getSortDatanodes()) { - sortDatanodeInPipeline(entry.getValue().getKeyInfo(), clientAddress); + OzoneFileStatus fileStatus = entry.getValue(); + if (fileStatus.isFile()) { + refreshPipeline(fileStatus.getKeyInfo()); } - fileStatusList.add(entry.getValue()); + fileStatusList.add(fileStatus); countEntries++; if (countEntries >= numEntries) { break; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 6c8b50595ca1..e64b023a7e3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -157,6 +157,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager { private Table prefixTable; private Table transactionInfoTable; private boolean isRatisEnabled; + private boolean ignorePipelineinKey; private Map tableMap = new HashMap<>(); @@ -172,6 +173,9 @@ public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException { isRatisEnabled = conf.getBoolean( OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + // For test purpose only + ignorePipelineinKey = conf.getBoolean( + "ozone.om.ignore.pipeline", Boolean.TRUE); start(conf); } @@ -317,8 +321,9 @@ protected static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) { .addTable(PREFIX_TABLE) .addTable(TRANSACTION_INFO_TABLE) .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec()) - .addCodec(OmKeyInfo.class, new OmKeyInfoCodec()) - .addCodec(RepeatedOmKeyInfo.class, new RepeatedOmKeyInfoCodec()) + .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true)) + .addCodec(RepeatedOmKeyInfo.class, + new RepeatedOmKeyInfoCodec(true)) .addCodec(OmBucketInfo.class, new OmBucketInfoCodec()) .addCodec(OmVolumeArgs.class, new OmVolumeArgsCodec()) .addCodec(UserVolumeInfo.class, new UserVolumeInfoCodec()) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java index 24f33418b0b7..f6d04a971d63 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java @@ -48,7 +48,7 @@ public class OMDBDefinition implements DBDefinition { String.class, new StringCodec(), RepeatedOmKeyInfo.class, - new RepeatedOmKeyInfoCodec()); + new RepeatedOmKeyInfoCodec(true)); public static final DBColumnFamilyDefinition @@ -85,7 +85,7 @@ public class OMDBDefinition implements DBDefinition { String.class, new StringCodec(), OmKeyInfo.class, - new OmKeyInfoCodec()); + new OmKeyInfoCodec(true)); public static final DBColumnFamilyDefinition KEY_TABLE = @@ -94,7 +94,7 @@ public class OMDBDefinition implements DBDefinition { String.class, new StringCodec(), OmKeyInfo.class, - new OmKeyInfoCodec()); + new OmKeyInfoCodec(true)); public static final DBColumnFamilyDefinition BUCKET_TABLE = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java index c6b86bb4a5e4..a7e1eabee7ba 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java @@ -18,23 +18,34 @@ package org.apache.hadoop.ozone.om.codec; import java.io.IOException; + import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.hdds.utils.db.Codec; import com.google.common.base.Preconditions; import com.google.protobuf.InvalidProtocolBufferException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Codec to encode OmKeyInfo as byte array. */ public class OmKeyInfoCodec implements Codec { + private static final Logger LOG = + LoggerFactory.getLogger(OmKeyInfoCodec.class); + + private final boolean ignorePipeline; + public OmKeyInfoCodec(boolean ignorePipeline) { + this.ignorePipeline = ignorePipeline; + LOG.info("OmKeyInfoCodec ignorePipeline = " + ignorePipeline); + } @Override public byte[] toPersistedFormat(OmKeyInfo object) throws IOException { Preconditions .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getProtobuf().toByteArray(); + return object.getProtobuf(ignorePipeline).toByteArray(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java index 1907b790b582..9156fdf03213 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java @@ -22,6 +22,8 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .RepeatedKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -29,12 +31,21 @@ * Codec to encode RepeatedOmKeyInfo as byte array. 
*/ public class RepeatedOmKeyInfoCodec implements Codec { + private static final Logger LOG = + LoggerFactory.getLogger(RepeatedOmKeyInfoCodec.class); + + private final boolean ignorePipeline; + public RepeatedOmKeyInfoCodec(boolean ignorePipeline) { + this.ignorePipeline = ignorePipeline; + LOG.info("RepeatedOmKeyInfoCodec ignorePipeline = " + ignorePipeline); + } + @Override public byte[] toPersistedFormat(RepeatedOmKeyInfo object) throws IOException { Preconditions.checkNotNull(object, "Null object can't be converted to byte array."); - return object.getProto().toByteArray(); + return object.getProto(ignorePipeline).toByteArray(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 7327626427e6..3226f7817797 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -225,6 +225,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable() .getIfExist(ozoneKey); + if (dbKeyInfo != null) { + ozoneManager.getKeyManager().refresh(dbKeyInfo); + } + OMFileRequest.OMPathInfo pathInfo = OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName, bucketName, keyName, Paths.get(keyName)); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 8927c1931c0d..1b712fb2921b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -233,6 +233,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().getIfExist(dbKeyName); + if (dbKeyInfo != null) { + ozoneManager.getKeyManager().refresh(dbKeyInfo); + } + OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index af4d15260688..cd3287eddb53 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -417,7 +417,7 @@ private ListTrashResponse listTrash(ListTrashRequest request) request.getMaxKeys()); for (RepeatedOmKeyInfo key: deletedKeys) { - resp.addDeletedKeys(key.getProto()); + resp.addDeletedKeys(key.getProto(false)); } return resp.build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 7f31b2407e2a..d06e43d8a3dd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -417,26 +417,6 @@ public void testLookupFileWithDnFailure() throws IOException { .setBucketName("bucketOne") .setKeyName("keyOne"); - keyArgs.setRefreshPipeline(false); - final OmKeyInfo oldKeyInfo = manager - .lookupFile(keyArgs.build(), "test"); - - final OmKeyLocationInfo oldBlockLocation = oldKeyInfo - .getLatestVersionLocations().getBlocksLatestVersionOnly().get(0); - - Assert.assertEquals(1L, oldBlockLocation.getContainerID()); - Assert.assertEquals(1L, oldBlockLocation - .getBlockID().getLocalID()); - Assert.assertEquals(pipelineOne.getId(), - oldBlockLocation.getPipeline().getId()); - Assert.assertTrue(oldBlockLocation.getPipeline() - .getNodes().contains(dnOne)); - Assert.assertTrue(oldBlockLocation.getPipeline() - .getNodes().contains(dnTwo)); - Assert.assertTrue(oldBlockLocation.getPipeline() - .getNodes().contains(dnThree)); - - keyArgs.setRefreshPipeline(true); final OmKeyInfo newKeyInfo = manager .lookupFile(keyArgs.build(), "test"); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java new file mode 100644 index 000000000000..39c35f675e6f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.codec; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.util.Time; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +/** + * This class tests OmKeyInfoCodec. + */ +public class TestOmKeyInfoCodec { + private final String volume = "hadoop"; + private final String bucket = "ozone"; + private final String keyName = "user/root/terasort/10G-input-6/part-m-00037"; + + + private OmKeyInfo getKeyInfo(int chunkNum) { + List omKeyLocationInfoList = new ArrayList<>(); + Pipeline pipeline = TestUtils.getRandomPipeline(); + for (int i = 0; i < chunkNum; i++) { + BlockID blockID = new BlockID(i, i); + OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() + .setBlockID(blockID) + .setPipeline(pipeline) + .build(); + omKeyLocationInfoList.add(keyLocationInfo); + } + OmKeyLocationInfoGroup omKeyLocationInfoGroup = new + OmKeyLocationInfoGroup(0, omKeyLocationInfoList); + return new OmKeyInfo.Builder() + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setReplicationType(HddsProtos.ReplicationType.RATIS) + .setReplicationFactor(HddsProtos.ReplicationFactor.THREE) + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(keyName) + .setObjectID(Time.now()) + .setUpdateID(Time.now()) + .setDataSize(100) + .setOmKeyLocationInfos( + Collections.singletonList(omKeyLocationInfoGroup)) + .build(); + } + + @Test + public void test() { + testOmKeyInfoCodecWithoutPipeline(1); + testOmKeyInfoCodecWithoutPipeline(2); + testOmKeyInfoCodecCompatibility(1); + testOmKeyInfoCodecCompatibility(2); + } + + public void testOmKeyInfoCodecWithoutPipeline(int chunkNum) { + OmKeyInfoCodec codec = new OmKeyInfoCodec(true); + OmKeyInfo originKey = getKeyInfo(chunkNum); + try { + byte[] rawData = codec.toPersistedFormat(originKey); + OmKeyInfo key = codec.fromPersistedFormat(rawData); + System.out.println("Chunk number = " + chunkNum + + ", Serialized key size without pipeline = " + rawData.length); + assertNull(key.getLatestVersionLocations().getLocationList().get(0) + .getPipeline()); + } catch (IOException e) { + fail("Should success"); + } + } + + public void testOmKeyInfoCodecCompatibility(int chunkNum) { + OmKeyInfoCodec codecWithoutPipeline = new OmKeyInfoCodec(true); + OmKeyInfoCodec codecWithPipeline = new OmKeyInfoCodec(false); + OmKeyInfo originKey = getKeyInfo(chunkNum); + try { + byte[] rawData = codecWithPipeline.toPersistedFormat(originKey); + OmKeyInfo key = codecWithoutPipeline.fromPersistedFormat(rawData); + System.out.println("Chunk number = " + chunkNum + + ", Serialized key size with pipeline = " + rawData.length); + 
assertNotNull(key.getLatestVersionLocations().getLocationList().get(0) + .getPipeline()); + } catch (IOException e) { + fail("Should success"); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java new file mode 100644 index 000000000000..0eb87b8cfa2c --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.codec; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.util.Time; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +/** + * This class tests RepeatedOmKeyInfoCodec. + */ +public class TestRepeatedOmKeyInfoCodec { + private final String volume = "hadoop"; + private final String bucket = "ozone"; + private final String keyName = "user/root/terasort/10G-input-6/part-m-00037"; + + + private OmKeyInfo getKeyInfo(int chunkNum) { + List omKeyLocationInfoList = new ArrayList<>(); + Pipeline pipeline = TestUtils.getRandomPipeline(); + for (int i = 0; i < chunkNum; i++) { + BlockID blockID = new BlockID(i, i); + OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() + .setBlockID(blockID) + .setPipeline(pipeline) + .build(); + omKeyLocationInfoList.add(keyLocationInfo); + } + OmKeyLocationInfoGroup omKeyLocationInfoGroup = new + OmKeyLocationInfoGroup(0, omKeyLocationInfoList); + return new OmKeyInfo.Builder() + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setReplicationType(HddsProtos.ReplicationType.RATIS) + .setReplicationFactor(HddsProtos.ReplicationFactor.THREE) + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(keyName) + .setObjectID(Time.now()) + .setUpdateID(Time.now()) + .setDataSize(100) + .setOmKeyLocationInfos( + Collections.singletonList(omKeyLocationInfoGroup)) + .build(); + } + + @Test + public void test() { + testWithoutPipeline(1); + testWithoutPipeline(2); + testCompatibility(1); + testCompatibility(2); + } + + public void testWithoutPipeline(int chunkNum) { + RepeatedOmKeyInfoCodec codec = new RepeatedOmKeyInfoCodec(true); + OmKeyInfo originKey = getKeyInfo(chunkNum); + RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(originKey); + try { + byte[] rawData = codec.toPersistedFormat(repeatedOmKeyInfo); + RepeatedOmKeyInfo key = codec.fromPersistedFormat(rawData); + System.out.println("Chunk number = " + chunkNum + + ", Serialized key size without pipeline = " + rawData.length); + assertNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations() + .getLocationList().get(0).getPipeline()); + } catch (IOException e) { + fail("Should success"); + } + } + + public void testCompatibility(int chunkNum) { + RepeatedOmKeyInfoCodec codecWithoutPipeline = + new RepeatedOmKeyInfoCodec(true); + RepeatedOmKeyInfoCodec codecWithPipeline = + new RepeatedOmKeyInfoCodec(false); + OmKeyInfo originKey = getKeyInfo(chunkNum); + RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(originKey); + try { + byte[] rawData = 
codecWithPipeline.toPersistedFormat(repeatedOmKeyInfo); + RepeatedOmKeyInfo key = codecWithoutPipeline.fromPersistedFormat(rawData); + System.out.println("Chunk number = " + chunkNum + + ", Serialized key size with pipeline = " + rawData.length); + assertNotNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations() + .getLocationList().get(0).getPipeline()); + } catch (IOException e) { + fail("Should success"); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index dd6caf46857f..cb35e2b9358d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -24,6 +24,8 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.junit.After; @@ -66,6 +68,7 @@ public class TestOMKeyRequest { public TemporaryFolder folder = new TemporaryFolder(); protected OzoneManager ozoneManager; + protected KeyManager keyManager; protected OMMetrics omMetrics; protected OMMetadataManager omMetadataManager; protected AuditLogger auditLogger; @@ -113,6 +116,8 @@ public void setup() throws Exception { ozoneBlockTokenSecretManager = Mockito.mock(OzoneBlockTokenSecretManager.class); scmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class); + keyManager = new KeyManagerImpl(ozoneManager, scmClient, ozoneConfiguration, + ""); when(ozoneManager.getScmClient()).thenReturn(scmClient); when(ozoneManager.getBlockTokenSecretManager()) .thenReturn(ozoneBlockTokenSecretManager); @@ -121,6 +126,7 @@ public void setup() throws Exception { when(ozoneManager.isGrpcBlockTokenEnabled()).thenReturn(false); when(ozoneManager.getOMNodeId()).thenReturn(UUID.randomUUID().toString()); when(scmClient.getBlockClient()).thenReturn(scmBlockLocationProtocol); + when(ozoneManager.getKeyManager()).thenReturn(keyManager); Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java index 81ff0ea831ab..fe73e35db3ab 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java @@ -93,7 +93,7 @@ public static void main(String[] args) throws IOException { Table.KeyValue keyValue = keyValueTableIterator.next(); OmKeyInfo omKeyInfo = keyValue.getValue(); - byte[] value = omKeyInfo.getProtobuf().toByteArray(); + byte[] value = omKeyInfo.getProtobuf(true).toByteArray(); OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf( OzoneManagerProtocolProtos.KeyInfo.parseFrom(value)); for (OmKeyLocationInfoGroup keyLocationInfoGroup : keyInfo From 599bf493c35f24e9a2ad64fe0fbaadd6524b17dd Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 24 Jul 2020 07:58:40 +0200 Subject: [PATCH 065/165] HDDS-4018. 
Datanode log spammed by NPE (#1250) --- .../common/statemachine/StateContext.java | 15 +++-- .../common/statemachine/TestStateContext.java | 55 +++++++++++++++++++ 2 files changed, 62 insertions(+), 8 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index 51262c3eb5f9..4cd769f4d245 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -436,18 +436,17 @@ public void execute(ExecutorService service, long time, TimeUnit unit) task.onEnter(); } - if (isThreadPoolAvailable(service)) { - task.execute(service); - threadPoolNotAvailableCount.set(0); - } else { - if (threadPoolNotAvailableCount.get() - % getLogWarnInterval(conf) == 0) { + if (!isThreadPoolAvailable(service)) { + long count = threadPoolNotAvailableCount.getAndIncrement(); + if (count % getLogWarnInterval(conf) == 0) { LOG.warn("No available thread in pool for past {} seconds.", - unit.toSeconds(time) * (threadPoolNotAvailableCount.get() + 1)); + unit.toSeconds(time) * (count + 1)); } - threadPoolNotAvailableCount.incrementAndGet(); + return; } + threadPoolNotAvailableCount.set(0); + task.execute(service); DatanodeStateMachine.DatanodeStates newState = task.await(time, unit); if (this.state != newState) { if (LOG.isDebugEnabled()) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java index c3fd310d0840..d3032c3211f5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java @@ -34,6 +34,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -212,4 +213,58 @@ public void testIsThreadPoolAvailable() throws Exception { futureTwo.complete("futureTwo"); executorService.shutdown(); } + + @Test + public void doesNotAwaitWithoutExecute() throws Exception { + final AtomicInteger executed = new AtomicInteger(); + final AtomicInteger awaited = new AtomicInteger(); + + ExecutorService executorService = Executors.newFixedThreadPool(1); + CompletableFuture future = new CompletableFuture<>(); + executorService.submit(() -> future.get()); + executorService.submit(() -> future.get()); + + StateContext subject = new StateContext(new OzoneConfiguration(), + DatanodeStates.INIT, mock(DatanodeStateMachine.class)) { + @Override + public DatanodeState getTask() { + // this task counts the number of execute() and await() calls + return new DatanodeState() { + @Override + public void onEnter() { + // no-op + } + + @Override + public void onExit() { + // no-op + } + + @Override + public void execute(ExecutorService executor) { + executed.incrementAndGet(); + } + + @Override + public DatanodeStates await(long 
time, TimeUnit timeUnit) { + awaited.incrementAndGet(); + return DatanodeStates.INIT; + } + }; + } + }; + + subject.execute(executorService, 2, TimeUnit.SECONDS); + + assertEquals(0, awaited.get()); + assertEquals(0, executed.get()); + + future.complete("any"); + LambdaTestUtils.await(1000, 100, () -> + subject.isThreadPoolAvailable(executorService)); + + subject.execute(executorService, 2, TimeUnit.SECONDS); + assertEquals(1, awaited.get()); + assertEquals(1, executed.get()); + } } \ No newline at end of file From 12690399b9919e39974288180ac6d4016fcf1388 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Fri, 24 Jul 2020 08:57:05 -0700 Subject: [PATCH 066/165] HDDS-3999. OM Shutdown when Commit part tries to commit the part, after abort upload. (#1244) --- .../rpc/TestOzoneRpcClientAbstract.java | 105 +++++++++++++++++- .../S3MultipartUploadCommitPartResponse.java | 3 +- 2 files changed, 104 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index ac9faa684600..32b6bca6a5dd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -25,6 +25,7 @@ import java.util.BitSet; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -108,6 +109,7 @@ import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; @@ -1752,7 +1754,7 @@ public void testNoSuchUploadError() throws Exception { String uploadID = "random"; OzoneTestUtils - .expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> + .expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> bucket .createMultipartKey(keyName, sampleData.length(), 1, uploadID)); } @@ -1911,10 +1913,107 @@ public void testAbortUploadFail() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - OzoneTestUtils.expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, + OzoneTestUtils.expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> bucket.abortMultipartUpload(keyName, "random")); } + @Test + public void testAbortUploadFailWithInProgressPartUpload() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, + STAND_ALONE, ONE); + + Assert.assertNotNull(omMultipartInfo.getUploadID()); + + // Do not close output stream. 
+ byte[] data = "data".getBytes(UTF_8); + OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, + data.length, 1, omMultipartInfo.getUploadID()); + ozoneOutputStream.write(data, 0, data.length); + + // Abort before completing part upload. + bucket.abortMultipartUpload(keyName, omMultipartInfo.getUploadID()); + + try { + ozoneOutputStream.close(); + fail("testAbortUploadFailWithInProgressPartUpload failed"); + } catch (IOException ex) { + assertTrue(ex instanceof OMException); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, + ((OMException) ex).getResult()); + } + } + + @Test + public void testCommitPartAfterCompleteUpload() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, + STAND_ALONE, ONE); + + Assert.assertNotNull(omMultipartInfo.getUploadID()); + + String uploadID = omMultipartInfo.getUploadID(); + + // upload part 1. + byte[] data = generateData(5 * 1024 * 1024, + (byte) RandomUtils.nextLong()); + OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, + data.length, 1, uploadID); + ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.close(); + + OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = + ozoneOutputStream.getCommitUploadPartInfo(); + + // Do not close output stream for part 2. + ozoneOutputStream = bucket.createMultipartKey(keyName, + data.length, 2, omMultipartInfo.getUploadID()); + ozoneOutputStream.write(data, 0, data.length); + + Map partsMap = new LinkedHashMap<>(); + partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = + bucket.completeMultipartUpload(keyName, + uploadID, partsMap); + + Assert.assertNotNull(omMultipartCommitUploadPartInfo); + + byte[] fileContent = new byte[data.length]; + OzoneInputStream inputStream = bucket.readKey(keyName); + inputStream.read(fileContent); + StringBuilder sb = new StringBuilder(data.length); + + // Combine all parts data, and check is it matching with get key data. 
+ String part1 = new String(data); + sb.append(part1); + Assert.assertEquals(sb.toString(), new String(fileContent)); + + try { + ozoneOutputStream.close(); + fail("testCommitPartAfterCompleteUpload failed"); + } catch (IOException ex) { + assertTrue(ex instanceof OMException); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, + ((OMException) ex).getResult()); + } + } + @Test public void testAbortUploadSuccessWithOutAnyParts() throws Exception { @@ -2142,7 +2241,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() @Test public void testListPartsWithInvalidUploadID() throws Exception { OzoneTestUtils - .expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> { + .expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java index 28acdb5655d4..f68af4a23dfc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java @@ -96,7 +96,8 @@ public void checkAndUpdateDB(OMMetadataManager omMetadataManager, repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(openPartKeyInfoToBeDeleted, - repeatedOmKeyInfo, omMultipartKeyInfo.getUpdateID(), isRatisEnabled); + repeatedOmKeyInfo, openPartKeyInfoToBeDeleted.getUpdateID(), + isRatisEnabled); omMetadataManager.getDeletedTable().putWithBatch(batchOperation, openKey, repeatedOmKeyInfo); From a943fb02cf835e288dc74735296ffc681f851f6e Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Fri, 24 Jul 2020 16:37:04 -0700 Subject: [PATCH 067/165] =?UTF-8?q?HDDS-3996.=20Missing=20TLS=20client=20c?= =?UTF-8?q?onfigurations=20to=20allow=20ozone.grpc.tls.=E2=80=A6=20(#1234)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../apache/hadoop/hdds/ratis/RatisHelper.java | 4 +-- .../hadoop/ozone/HddsDatanodeService.java | 10 +++++-- .../statemachine/DatanodeStateMachine.java | 4 ++- .../CreatePipelineCommandHandler.java | 3 +- .../server/ratis/XceiverServerRatis.java | 4 ++- .../container/ozoneimpl/OzoneContainer.java | 9 ++++++ .../replication/GrpcReplicationClient.java | 29 ++++++++++++++---- .../SimpleContainerDownloader.java | 30 +++++++++++++------ 8 files changed, 71 insertions(+), 22 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 505b6c9e8092..8bd22a1e39c9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -170,9 +170,9 @@ private static RpcType getRpcType(ConfigurationSource conf) { } public static RaftClient newRaftClient(RaftPeer leader, - ConfigurationSource conf) { + ConfigurationSource conf, GrpcTlsConfig tlsConfig) { return newRaftClient(getRpcType(conf), leader, - RatisHelper.createRetryPolicy(conf), conf); + RatisHelper.createRetryPolicy(conf), tlsConfig, conf); } public static RaftClient 
newRaftClient(RpcType rpcType, RaftPeer leader, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index ac6fba45066d..aee0f030d01b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -352,9 +352,15 @@ private void getSCMSignedCert(OzoneConfiguration config) { dnCertClient.storeCertificate(pemEncodedCert, true); dnCertClient.storeCertificate(response.getX509CACertificate(), true, true); - datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert). - getSerialNumber().toString()); + String dnCertSerialId = getX509Certificate(pemEncodedCert). + getSerialNumber().toString(); + datanodeDetails.setCertSerialId(dnCertSerialId); persistDatanodeDetails(datanodeDetails); + // Rebuild dnCertClient with the new CSR result so that the default + // certSerialId and the x509Certificate can be updated. + dnCertClient = new DNCertificateClient( + new SecurityConfig(config), dnCertSerialId); + } else { throw new RuntimeException("Unable to retrieve datanode certificate " + "chain"); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 27e814b30c3d..1f61f15f1b3a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -126,7 +126,9 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, ContainerReplicator replicator = new DownloadAndImportReplicator(container.getContainerSet(), container.getController(), - new SimpleContainerDownloader(conf), new TarContainerPacker()); + new SimpleContainerDownloader(conf, + dnCertClient != null ? 
dnCertClient.getCACertificate() : null), + new TarContainerPacker()); supervisor = new ReplicationSupervisor(container.getContainerSet(), replicator, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java index c60c1129f563..78059fee78f3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java @@ -98,7 +98,8 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, d -> !d.getUuid().equals(dn.getUuid())) .forEach(d -> { final RaftPeer peer = RatisHelper.toRaftPeer(d); - try (RaftClient client = RatisHelper.newRaftClient(peer, conf)) { + try (RaftClient client = RatisHelper.newRaftClient(peer, conf, + ozoneContainer.getTlsClientConfig())) { client.groupAdd(group, peer.getId()); } catch (AlreadyExistsException ae) { // do not log diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index c751c5b5b9d8..c1d8df66e318 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -422,12 +422,14 @@ public static XceiverServerRatis newXceiverServerRatis( // In summary: // authenticate from server to client is via TLS. // authenticate from client to server is via block token (or container token). + // DN Ratis server act as both SSL client and server and we must pass TLS + // configuration for both. 
static GrpcTlsConfig createTlsServerConfigForDN(SecurityConfig conf, CertificateClient caClient) { if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) { return new GrpcTlsConfig( caClient.getPrivateKey(), caClient.getCertificate(), - null, false); + caClient.getCACertificate(), false); } return null; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index abe0382d1fdd..26da4873dcdb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.security.token.BlockTokenVerifier; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; @@ -59,6 +60,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; +import org.apache.ratis.grpc.GrpcTlsConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -82,6 +84,7 @@ public class OzoneContainer { private ContainerMetadataScanner metadataScanner; private List dataScanners; private final BlockDeletingService blockDeletingService; + private final GrpcTlsConfig tlsClientConfig; /** * Construct OzoneContainer object. @@ -149,6 +152,12 @@ public OzoneContainer(DatanodeDetails datanodeDetails, ConfigurationSource blockDeletingService = new BlockDeletingService(this, svcInterval, serviceTimeout, TimeUnit.MILLISECONDS, config); + tlsClientConfig = RatisHelper.createTlsClientConfig( + secConf, certClient != null ? 
certClient.getCACertificate() : null); + } + + public GrpcTlsConfig getTlsClientConfig() { + return tlsClientConfig; } private GrpcReplicationService createReplicationService() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java index 660ba4ee639b..abeaf03c1667 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java @@ -24,6 +24,7 @@ import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.cert.X509Certificate; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; @@ -37,10 +38,13 @@ .IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceStub; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.ratis.thirdparty.io.grpc.ManagedChannel; +import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts; import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder; import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; +import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,13 +62,26 @@ public class GrpcReplicationClient { private final Path workingDirectory; - public GrpcReplicationClient(String host, - int port, Path workingDir) { + public GrpcReplicationClient(String host, int port, Path workingDir, + SecurityConfig secConfig, X509Certificate caCert) throws IOException { + NettyChannelBuilder channelBuilder = + NettyChannelBuilder.forAddress(host, port) + .usePlaintext() + .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE); - channel = NettyChannelBuilder.forAddress(host, port) - .usePlaintext() - .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) - .build(); + if (secConfig.isGrpcTlsEnabled()) { + channelBuilder.useTransportSecurity(); + + SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient(); + if (caCert != null) { + sslContextBuilder.trustManager(caCert); + } + if (secConfig.useTestCert()) { + channelBuilder.overrideAuthority("localhost"); + } + channelBuilder.sslContext(sslContextBuilder.build()); + } + channel = channelBuilder.build(); client = IntraDatanodeProtocolServiceGrpc.newStub(channel); workingDirectory = workingDir; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java index d7666ea1127b..9d7b5516a5c3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java @@ -18,8 +18,10 @@ package org.apache.hadoop.ozone.container.replication; +import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import java.security.cert.X509Certificate; import java.util.List; import java.util.concurrent.CompletableFuture; import 
java.util.function.Function; @@ -27,6 +29,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.slf4j.Logger; @@ -45,9 +48,11 @@ public class SimpleContainerDownloader implements ContainerDownloader { LoggerFactory.getLogger(SimpleContainerDownloader.class); private final Path workingDirectory; + private final SecurityConfig securityConfig; + private final X509Certificate caCert; - public SimpleContainerDownloader(ConfigurationSource conf) { - + public SimpleContainerDownloader(ConfigurationSource conf, + X509Certificate caCert) { String workDirString = conf.get(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR); @@ -57,6 +62,8 @@ public SimpleContainerDownloader(ConfigurationSource conf) { } else { workingDirectory = Paths.get(workDirString); } + securityConfig = new SecurityConfig(conf); + this.caCert = caCert; } @Override @@ -66,22 +73,27 @@ public CompletableFuture getContainerDataFromReplicas(long containerId, CompletableFuture result = null; for (DatanodeDetails datanode : sourceDatanodes) { try { - if (result == null) { GrpcReplicationClient grpcReplicationClient = new GrpcReplicationClient(datanode.getIpAddress(), datanode.getPort(Name.STANDALONE).getValue(), - workingDirectory); + workingDirectory, securityConfig, caCert); result = grpcReplicationClient.download(containerId); } else { result = result.thenApply(CompletableFuture::completedFuture) .exceptionally(t -> { LOG.error("Error on replicating container: " + containerId, t); - GrpcReplicationClient grpcReplicationClient = - new GrpcReplicationClient(datanode.getIpAddress(), - datanode.getPort(Name.STANDALONE).getValue(), - workingDirectory); - return grpcReplicationClient.download(containerId); + try { + GrpcReplicationClient grpcReplicationClient = + new GrpcReplicationClient(datanode.getIpAddress(), + datanode.getPort(Name.STANDALONE).getValue(), + workingDirectory, securityConfig, caCert); + return grpcReplicationClient.download(containerId); + } catch (IOException e) { + LOG.error("Error on replicating container: " + containerId, + t); + return null; + } }).thenCompose(Function.identity()); } } catch (Exception ex) { From 2e7cb44a7afad82c3757dd77250b1f03c86e9a7d Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Fri, 24 Jul 2020 17:07:05 -0700 Subject: [PATCH 068/165] =?UTF-8?q?HDDS-3997.=20Ozone=20certificate=20need?= =?UTF-8?q?s=20additional=20flags=20and=20SAN=20extension=E2=80=A6=20(#123?= =?UTF-8?q?5)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../certificate/authority/BaseApprover.java | 9 ++ .../authority/DefaultApprover.java | 11 ++ .../authority/DefaultCAServer.java | 42 +++++-- .../client/DNCertificateClient.java | 5 +- .../utils/SelfSignedCertificate.java | 105 ++++++++++++++---- .../certificates/TestRootCertificate.java | 42 ++++++- .../apache/hadoop/ozone/om/OzoneManager.java | 3 +- 7 files changed, 180 insertions(+), 37 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java index 12ececd8d4b3..26cb49183b39 100644 --- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java @@ -59,6 +59,15 @@ public BaseApprover(PKIProfile pkiProfile, SecurityConfig config) { this.securityConfig = Objects.requireNonNull(config); } + /** + * Returns the PKI policy profile. + * + * @return PKIProfile + */ + public PKIProfile getProfile() { + return profile; + } + /** * Returns the Security config. * diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java index c7f37c18063d..0098fa55c294 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java @@ -24,9 +24,12 @@ import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile; import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; import org.apache.hadoop.util.Time; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; import org.bouncycastle.asn1.x500.X500Name; import org.bouncycastle.asn1.x500.style.BCStyle; import org.bouncycastle.asn1.x509.AlgorithmIdentifier; +import org.bouncycastle.asn1.x509.Extension; +import org.bouncycastle.asn1.x509.Extensions; import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; import org.bouncycastle.cert.X509CertificateHolder; import org.bouncycastle.cert.X509v3CertificateBuilder; @@ -136,6 +139,14 @@ public X509CertificateHolder sign( validTill, x500Name, keyInfo); + Extensions exts = SecurityUtil.getPkcs9Extensions(certificationRequest); + for (ASN1ObjectIdentifier extId : getProfile().getSupportedExtensions()) { + Extension ext = exts.getExtension(extId); + if (ext != null) { + certificateGenerator.addExtension(ext); + } + } + ContentSigner sigGen = new BcRSAContentSignerBuilder(sigAlgId, digAlgId) .build(asymmetricKP); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java index c583c1954a82..237826069bb8 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java @@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.commons.validator.routines.DomainValidator; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile; @@ -29,6 +30,7 @@ import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.bouncycastle.cert.X509CertificateHolder; import org.bouncycastle.operator.OperatorCreationException; import 
org.bouncycastle.pkcs.PKCS10CertificationRequest; @@ -54,6 +56,7 @@ import java.util.function.Consumer; import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.*; +import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateException.ErrorCode.CSR_ERROR; /** * The default CertificateServer used by SCM. This has no dependencies on any @@ -459,18 +462,33 @@ private void generateRootCertificate(SecurityConfig securityConfig, LocalDateTime temp = LocalDateTime.of(beginDate, LocalTime.MIDNIGHT); LocalDate endDate = temp.plus(securityConfig.getMaxCertificateDuration()).toLocalDate(); - X509CertificateHolder selfSignedCertificate = - SelfSignedCertificate - .newBuilder() - .setSubject(this.subject) - .setScmID(this.scmID) - .setClusterID(this.clusterID) - .setBeginDate(beginDate) - .setEndDate(endDate) - .makeCA() - .setConfiguration(securityConfig.getConfiguration()) - .setKey(key) - .build(); + SelfSignedCertificate.Builder builder = SelfSignedCertificate.newBuilder() + .setSubject(this.subject) + .setScmID(this.scmID) + .setClusterID(this.clusterID) + .setBeginDate(beginDate) + .setEndDate(endDate) + .makeCA() + .setConfiguration(securityConfig.getConfiguration()) + .setKey(key); + + try { + DomainValidator validator = DomainValidator.getInstance(); + // Add all valid ips. + OzoneSecurityUtil.getValidInetsForCurrentHost().forEach( + ip -> { + builder.addIpAddress(ip.getHostAddress()); + if(validator.isValid(ip.getCanonicalHostName())) { + builder.addDnsName(ip.getCanonicalHostName()); + } + }); + } catch (IOException e) { + throw new org.apache.hadoop.hdds.security.x509 + .exceptions.CertificateException( + "Error while adding ip to CA self signed certificate", e, + CSR_ERROR); + } + X509CertificateHolder selfSignedCertificate = builder.build(); CertificateCodec certCodec = new CertificateCodec(config, componentName); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java index 76986586d344..e95a4a7a22b2 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java @@ -48,6 +48,7 @@ public DNCertificateClient(SecurityConfig securityConfig) { /** * Returns a CSR builder that can be used to creates a Certificate signing * request. + * The default flag is added to allow basic SSL handshake. 
* * @return CertificateSignRequest.Builder */ @@ -55,8 +56,8 @@ public DNCertificateClient(SecurityConfig securityConfig) { public CertificateSignRequest.Builder getCSRBuilder() throws CertificateException { return super.getCSRBuilder() - .setDigitalEncryption(false) - .setDigitalSignature(false); + .setDigitalEncryption(true) + .setDigitalSignature(true); } public Logger getLogger() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java index 7ecc16109edc..a7edfde34fa7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java @@ -26,7 +26,9 @@ import java.time.LocalDate; import java.time.LocalTime; import java.time.ZoneOffset; +import java.util.ArrayList; import java.util.Date; +import java.util.List; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; @@ -37,10 +39,18 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.logging.log4j.util.Strings; +import org.bouncycastle.asn1.ASN1EncodableVector; +import org.bouncycastle.asn1.ASN1Object; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; import org.bouncycastle.asn1.DEROctetString; +import org.bouncycastle.asn1.DERSequence; +import org.bouncycastle.asn1.DERTaggedObject; +import org.bouncycastle.asn1.DERUTF8String; import org.bouncycastle.asn1.x500.X500Name; import org.bouncycastle.asn1.x509.BasicConstraints; import org.bouncycastle.asn1.x509.Extension; +import org.bouncycastle.asn1.x509.GeneralName; +import org.bouncycastle.asn1.x509.GeneralNames; import org.bouncycastle.asn1.x509.KeyUsage; import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; import org.bouncycastle.cert.CertIOException; @@ -64,28 +74,23 @@ public final class SelfSignedCertificate { private LocalDate endDate; private KeyPair key; private SecurityConfig config; + private List altNames; /** * Private Ctor invoked only via Builder Interface. 
* - * @param subject - Subject - * @param scmID - SCM ID - * @param clusterID - Cluster ID - * @param beginDate - NotBefore - * @param endDate - Not After - * @param configuration - SCM Config - * @param keyPair - KeyPair + * @param builder - builder */ - private SelfSignedCertificate(String subject, String scmID, String clusterID, - LocalDate beginDate, LocalDate endDate, SecurityConfig configuration, - KeyPair keyPair) { - this.subject = subject; - this.clusterID = clusterID; - this.scmID = scmID; - this.beginDate = beginDate; - this.endDate = endDate; - config = configuration; - this.key = keyPair; + + private SelfSignedCertificate(Builder builder) { + this.subject = builder.subject; + this.clusterID = builder.clusterID; + this.scmID = builder.scmID; + this.beginDate = builder.beginDate; + this.endDate = builder.endDate; + this.config = builder.config; + this.key = builder.key; + this.altNames = builder.altNames; } @VisibleForTesting @@ -142,6 +147,11 @@ private X509CertificateHolder generateCertificate(boolean isCA) KeyUsage keyUsage = new KeyUsage(keyUsageFlag); builder.addExtension(Extension.keyUsage, false, new DEROctetString(keyUsage)); + if (altNames != null && altNames.size() >= 1) { + builder.addExtension(new Extension(Extension.subjectAlternativeName, + false, new GeneralNames(altNames.toArray( + new GeneralName[altNames.size()])).getEncoded())); + } } return builder.build(contentSigner); } @@ -158,6 +168,7 @@ public static class Builder { private KeyPair key; private SecurityConfig config; private boolean isCA; + private List altNames; public Builder setConfiguration(ConfigurationSource configuration) { this.config = new SecurityConfig(configuration); @@ -199,6 +210,62 @@ public Builder makeCA() { return this; } + // Support SAN extension with DNS and RFC822 Name + // other name type will be added as needed. + public Builder addDnsName(String dnsName) { + Preconditions.checkNotNull(dnsName, "dnsName cannot be null"); + this.addAltName(GeneralName.dNSName, dnsName); + return this; + } + + // IP address is subject to change which is optional for now. + public Builder addIpAddress(String ip) { + Preconditions.checkNotNull(ip, "Ip address cannot be null"); + this.addAltName(GeneralName.iPAddress, ip); + return this; + } + + public Builder addServiceName( + String serviceName) { + Preconditions.checkNotNull( + serviceName, "Service Name cannot be null"); + + this.addAltName(GeneralName.otherName, serviceName); + return this; + } + + private Builder addAltName(int tag, String name) { + if (altNames == null) { + altNames = new ArrayList<>(); + } + if (tag == GeneralName.otherName) { + ASN1Object ono = addOtherNameAsn1Object(name); + + altNames.add(new GeneralName(tag, ono)); + } else { + altNames.add(new GeneralName(tag, name)); + } + return this; + } + + /** + * addOtherNameAsn1Object requires special handling since + * Bouncy Castle does not support othername as string. 
+ * @param name + * @return + */ + private ASN1Object addOtherNameAsn1Object(String name) { + // Below oid is copied from this URL: + // https://docs.microsoft.com/en-us/windows/win32/adschema/a-middlename + final String otherNameOID = "2.16.840.1.113730.3.1.34"; + ASN1EncodableVector otherName = new ASN1EncodableVector(); + otherName.add(new ASN1ObjectIdentifier(otherNameOID)); + otherName.add(new DERTaggedObject( + true, GeneralName.otherName, new DERUTF8String(name))); + return new DERTaggedObject( + false, 0, new DERSequence(otherName)); + } + public X509CertificateHolder build() throws SCMSecurityException, IOException { Preconditions.checkNotNull(key, "Key cannot be null"); @@ -225,9 +292,7 @@ public X509CertificateHolder build() } SelfSignedCertificate rootCertificate = - new SelfSignedCertificate(this.subject, - this.scmID, this.clusterID, this.beginDate, this.endDate, - this.config, key); + new SelfSignedCertificate(this); try { return rootCertificate.generateCertificate(isCA); } catch (OperatorCreationException | CertIOException e) { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java index 02d007864426..1e3a8f4610aa 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java @@ -19,11 +19,14 @@ package org.apache.hadoop.hdds.security.x509.certificates; +import org.apache.commons.validator.routines.DomainValidator; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.SecurityConfig; +import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; +import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.bouncycastle.asn1.x509.Extension; import org.bouncycastle.cert.X509CertificateHolder; import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; @@ -33,6 +36,7 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; +import java.io.File; import java.io.IOException; import java.math.BigInteger; import java.security.InvalidKeyException; @@ -48,6 +52,9 @@ import java.util.UUID; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateException.ErrorCode.CSR_ERROR; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; /** * Test Class for Root Certificate generation. @@ -131,7 +138,7 @@ public void testAllFieldsAreExpected() @Test public void testCACert() throws SCMSecurityException, NoSuchProviderException, - NoSuchAlgorithmException, IOException { + NoSuchAlgorithmException, IOException, CertificateException { LocalDate notBefore = LocalDate.now(); LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS); String clusterID = UUID.randomUUID().toString(); @@ -152,6 +159,23 @@ public void testCACert() .setConfiguration(conf) .makeCA(); + try { + DomainValidator validator = DomainValidator.getInstance(); + // Add all valid ips. 
+ OzoneSecurityUtil.getValidInetsForCurrentHost().forEach( + ip -> { + builder.addIpAddress(ip.getHostAddress()); + if(validator.isValid(ip.getCanonicalHostName())) { + builder.addDnsName(ip.getCanonicalHostName()); + } + }); + } catch (IOException e) { + throw new org.apache.hadoop.hdds.security.x509 + .exceptions.CertificateException( + "Error while adding ip to CA self signed certificate", e, + CSR_ERROR); + } + X509CertificateHolder certificateHolder = builder.build(); // This time we asked for a CertificateServer Certificate, make sure that // extension is @@ -165,6 +189,22 @@ public void testCACert() // Since this code assigns ONE for the root certificate, we check if the // serial number is the expected number. Assert.assertEquals(certificateHolder.getSerialNumber(), BigInteger.ONE); + + CertificateCodec codec = new CertificateCodec(securityConfig, "scm"); + String pemString = codec.getPEMEncodedString(certificateHolder); + + File basePath = temporaryFolder.newFolder(); + if (!basePath.exists()) { + Assert.assertTrue(basePath.mkdirs()); + } + codec.writeCertificate(basePath.toPath(), "pemcertificate.crt", + pemString, false); + + X509CertificateHolder loadedCert = + codec.readCertificate(basePath.toPath(), "pemcertificate.crt"); + assertNotNull(loadedCert); + assertEquals(certificateHolder.getSerialNumber(), + loadedCert.getSerialNumber()); } @Test diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 74a5b9f3d18d..6b90c763a6e4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -1363,8 +1363,7 @@ private static void getSCMSignedCert(CertificateClient client, .setConfiguration(config) .setScmID(omStore.getScmId()) .setClusterID(omStore.getClusterID()) - .setSubject(subject) - .addIpAddress(ip); + .setSubject(subject); OMHANodeDetails haOMHANodeDetails = OMHANodeDetails.loadOMHAConfig(config); String serviceName = From d5edb8fab04cc1c887d34ae48fde85f198403f02 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Fri, 24 Jul 2020 22:04:57 -0700 Subject: [PATCH 069/165] HDDS-4007. Generate encryption info for the bucket outside bucket lock. (#1242) --- .../ozone/om/request/key/OMKeyRequest.java | 40 ++++++++++--------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index e3f0a69cb767..d863073cd524 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -481,38 +481,40 @@ protected void generateRequiredEncryptionInfo(KeyArgs keyArgs, // If KMS is not enabled, follow the normal approach of execution of not // reading DB in pre-execute. 
+ + OmBucketInfo bucketInfo = null; if (ozoneManager.getKmsProvider() != null) { try { acquireLock = omMetadataManager.getLock().acquireReadLock( BUCKET_LOCK, volumeName, bucketName); - - OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( + bucketInfo = omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)); - - // Don't throw exception of bucket not found when bucketinfo is not - // null. If bucketinfo is null, later when request - // is submitted and if bucket does not really exist it will fail in - // applyTransaction step. Why we are doing this is if OM thinks it is - // the leader, but it is not, we don't want to fail request in this - // case. As anyway when it submits request to ratis it will fail with - // not leader exception, and client will retry on correct leader and - // request will be executed. - if (bucketInfo != null) { - Optional< FileEncryptionInfo > encryptionInfo = - getFileEncryptionInfo(ozoneManager, bucketInfo); - if (encryptionInfo.isPresent()) { - newKeyArgs.setFileEncryptionInfo( - OMPBHelper.convert(encryptionInfo.get())); - } - } } finally { if (acquireLock) { omMetadataManager.getLock().releaseReadLock( BUCKET_LOCK, volumeName, bucketName); } } + + // Don't throw exception of bucket not found when bucketinfo is + // null. If bucketinfo is null, later when request + // is submitted and if bucket does not really exist it will fail in + // applyTransaction step. Why we are doing this is if OM thinks it is + // the leader, but it is not, we don't want to fail request in this + // case. As anyway when it submits request to ratis it will fail with + // not leader exception, and client will retry on correct leader and + // request will be executed. + + if (bucketInfo != null) { + Optional encryptionInfo = + getFileEncryptionInfo(ozoneManager, bucketInfo); + if (encryptionInfo.isPresent()) { + newKeyArgs.setFileEncryptionInfo( + OMPBHelper.convert(encryptionInfo.get())); + } + } } } From 5f33c6141209afeca63718b2dd38692644b4e1c1 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sat, 25 Jul 2020 07:19:24 +0200 Subject: [PATCH 070/165] HDDS-4025. 
Add test for creating encrypted key (#1254) --- .../src/main/smoketest/security/bucket-encryption.robot | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot b/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot index e1f96b190f4c..a78f94e5fa97 100644 --- a/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot +++ b/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot @@ -20,7 +20,7 @@ Library String Resource ../commonlib.robot Resource ../lib/os.robot Resource ../ozone-lib/shell.robot -Test Setup Setup Test +Suite Setup Setup Test Test Timeout 5 minutes *** Variables *** @@ -38,3 +38,8 @@ Create Encrypted Bucket ${output} = Execute ozone sh bucket create -k ${KEY_NAME} o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket Should Not Contain ${output} INVALID_REQUEST Bucket Exists o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket + +Create Key in Encrypted Bucket + ${key} = Set Variable o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket/passwd + ${output} = Execute ozone sh key put ${key} /etc/passwd + Key Should Match Local File ${key} /etc/passwd From 81ee5c8a9b49674a8ea40f03e16d63099dcdc391 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Mon, 27 Jul 2020 16:46:41 +0800 Subject: [PATCH 071/165] HDDS-3973. Update main feature design status. (#1207) --- hadoop-hdds/docs/content/design/multiraft.md | 2 +- .../docs/content/design/ozone-enhancement-proposals.md | 2 +- hadoop-hdds/docs/content/design/recon2.md | 2 +- hadoop-hdds/docs/content/design/scmha.md | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-hdds/docs/content/design/multiraft.md b/hadoop-hdds/docs/content/design/multiraft.md index bccaff36f4ef..f9f978a98189 100644 --- a/hadoop-hdds/docs/content/design/multiraft.md +++ b/hadoop-hdds/docs/content/design/multiraft.md @@ -4,7 +4,7 @@ summary: Datanodes can be part of multiple independent RAFT groups / pipelines date: 2019-05-21 jira: HDDS-1564 status: implemented -author: +author: Li Cheng, Sammi Chen --- + + \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html new file mode 100644 index 000000000000..4f52e413b2fd --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html @@ -0,0 +1,26 @@ + + +

[om-overview.html template body: a "Status" heading and a table row rendering "Rpc port" as {{$ctrl.overview.jmx.RpcPort}}]
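The template body above binds `$ctrl.overview.jmx.RpcPort`, which only resolves because the `omOverview` component registered in `ozoneManager.js` just below declares `require: {overview: "^overview"}` against a parent `overview` component that loads the JMX beans. As context, here is a minimal sketch of what such a parent can look like; the module name, template path and JMX query string are assumptions for illustration, not code from this patch series:

```typescript
// Illustrative sketch only -- not part of the patch. A parent 'overview'
// component of roughly this shape fetches the web server's /jmx servlet and
// keeps the first bean on its controller, so a child declaring
// `require: {overview: "^overview"}` can render {{$ctrl.overview.jmx.RpcPort}}.
// Module name, template path and the JMX query string are assumptions.
declare const angular: any;

angular.module('ozone').component('overview', {
  templateUrl: 'static/templates/overview.html',
  transclude: true,
  controller: function (this: any, $http: any) {
    const ctrl = this;
    $http.get('jmx?qry=Hadoop:service=*,name=*,component=ServerRuntime')
      .then((result: any) => {
        // e.g. result.data.beans[0].RpcPort for the OM RPC endpoint
        ctrl.jmx = result.data.beans[0];
      });
  }
});
```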
diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js index fda6d8fc0b5c..6c59a5be5856 100644 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js +++ b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js @@ -108,5 +108,10 @@ }) } }); - + angular.module('ozoneManager').component('omOverview', { + templateUrl: 'om-overview.html', + require: { + overview: "^overview" + }, + }); })(); From 47d60dd60b2863eb78c7b1bd5ac75544d85939d4 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 27 Jul 2020 13:46:22 +0200 Subject: [PATCH 076/165] HDDS-4000. Split acceptance tests to reduce CI feedback time (#1236) --- .github/workflows/post-commit.yml | 17 +++--- hadoop-ozone/dev-support/checks/acceptance.sh | 2 + .../dist/src/main/compose/ozone/test.sh | 2 + .../dist/src/main/compose/ozonesecure/test.sh | 2 + .../dist/src/main/compose/test-all.sh | 31 +++++++++-- pom.xml | 55 +------------------ 6 files changed, 42 insertions(+), 67 deletions(-) diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index 958842301589..e472aeab2eca 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -123,6 +123,13 @@ jobs: acceptance: name: acceptance runs-on: ubuntu-18.04 + strategy: + matrix: + suite: + - secure + - unsecure + - misc + fail-fast: false steps: - uses: actions/cache@v2 with: @@ -154,12 +161,13 @@ jobs: cd /mnt/ozone && hadoop-ozone/dev-support/checks/acceptance.sh env: KEEP_IMAGE: false + OZONE_ACCEPTANCE_SUITE: ${{ matrix.suite }} OZONE_WITH_COVERAGE: true OZONE_VOLUME_OWNER: 1000 - uses: actions/upload-artifact@master if: always() with: - name: acceptance + name: acceptance-${{ matrix.suite }} path: /mnt/ozone/target/acceptance continue-on-error: true - run: | @@ -170,16 +178,11 @@ jobs: integration: name: integration runs-on: ubuntu-18.04 - needs: - - build strategy: matrix: profile: - client - - filesystem - - filesystem-contract - - freon - - hdds-om + - filesystem-hdds - ozone fail-fast: false steps: diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh index d95c034939b9..99d8d5254504 100755 --- a/hadoop-ozone/dev-support/checks/acceptance.sh +++ b/hadoop-ozone/dev-support/checks/acceptance.sh @@ -28,6 +28,8 @@ fi mkdir -p "$REPORT_DIR" +export OZONE_ACCEPTANCE_SUITE + cd "$DIST_DIR/compose" || exit 1 ./test-all.sh RES=$? diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index c40339ec6b0e..b5b778f22cff 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +#suite:unsecure + COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export COMPOSE_DIR diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh index 84de2a95a4ff..076b83a3d52d 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+#suite:secure + COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export COMPOSE_DIR diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index e7f6f7123f09..da3b80e79eac 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -31,21 +31,40 @@ if [ "$OZONE_WITH_COVERAGE" ]; then export HADOOP_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*" fi +if [[ -n "${OZONE_ACCEPTANCE_SUITE}" ]]; then + tests=$(find "$SCRIPT_DIR" -name test.sh | xargs grep -l "^#suite:${OZONE_ACCEPTANCE_SUITE}$" | sort) + + # 'misc' is default suite, add untagged tests, too + if [[ "misc" == "${OZONE_ACCEPTANCE_SUITE}" ]]; then + untagged="$(find "$SCRIPT_DIR" -name test.sh | xargs grep -L "^#suite:")" + if [[ -n "${untagged}" ]]; then + tests=$(echo ${tests} ${untagged} | xargs -n1 | sort) + fi + fi + + if [[ -z "${tests}" ]]; then + echo "No tests found for suite ${OZONE_ACCEPTANCE_SUITE}" + exit 1 + fi +else + tests=$(find "$SCRIPT_DIR" -name test.sh | grep "${OZONE_TEST_SELECTOR:-""}" | sort) +fi + RESULT=0 -IFS=$'\n' # shellcheck disable=SC2044 -for test in $(find "$SCRIPT_DIR" -name test.sh | grep "${OZONE_TEST_SELECTOR:-""}" |sort); do - echo "Executing test in $(dirname "$test")" +for t in ${tests}; do + d="$(dirname "${t}")" + echo "Executing test in ${d}" #required to read the .env file from the right location - cd "$(dirname "$test")" || continue + cd "${d}" || continue ./test.sh ret=$? if [[ $ret -ne 0 ]]; then RESULT=1 - echo "ERROR: Test execution of $(dirname "$test") is FAILED!!!!" + echo "ERROR: Test execution of ${d} is FAILED!!!!" fi - RESULT_DIR="$(dirname "$test")/result" + RESULT_DIR="${d}/result" cp "$RESULT_DIR"/robot-*.xml "$RESULT_DIR"/docker-*.log "$RESULT_DIR"/*.out* "$ALL_RESULT_DIR"/ done diff --git a/pom.xml b/pom.xml index aa62f5d0dd97..969e5c64e3d5 100644 --- a/pom.xml +++ b/pom.xml @@ -2146,22 +2146,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - - freon - - - - org.apache.maven.plugins - maven-surefire-plugin - - - org.apache.hadoop.ozone.freon.** - - - - - - client @@ -2179,23 +2163,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - filesystem-contract - - - - org.apache.maven.plugins - maven-surefire-plugin - - - org.apache.hadoop.fs.ozone.contract.** - - - - - - - - filesystem + filesystem-hdds @@ -2204,26 +2172,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.fs.ozone.** - - - org.apache.hadoop.fs.ozone.contract.** - - - - - - - - hdds-om - - - - org.apache.maven.plugins - maven-surefire-plugin - - org.apache.hadoop.hdds.** - org.apache.hadoop.ozone.om.** @@ -2243,8 +2192,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.ozone.client.** - org.apache.hadoop.ozone.freon.** - org.apache.hadoop.ozone.om.** From 95689012ab0c6fc3a64073cbb51c103a4e7863c2 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 27 Jul 2020 17:28:02 +0200 Subject: [PATCH 077/165] HDDS-4017. 
Acceptance check may run against wrong commit (#1249) --- .github/workflows/post-commit.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index e472aeab2eca..3e617bc38fce 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -146,9 +146,13 @@ jobs: - name: checkout to /mnt/ozone run: | sudo chmod 777 /mnt - git clone https://github.com/${GITHUB_REPOSITORY}.git /mnt/ozone + git clone 'https://github.com/${{ github.repository }}.git' /mnt/ozone cd /mnt/ozone - git fetch origin "${GITHUB_REF}" + if [[ '${{ github.event_name }}' == 'pull_request' ]]; then + git fetch --verbose origin '${{ github.ref }}' + else + git fetch --verbose origin '${{ github.sha }}' + fi git checkout FETCH_HEAD git reset --hard - name: run a full build From 00281d167097e8d49181bb2f610ee3fb2e4cc6e7 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 27 Jul 2020 08:38:07 -0700 Subject: [PATCH 078/165] HDDS-4026. Dir rename failed when sets 'ozone.om.enable.filesystem.paths' to true (#1256) --- .../TestOzoneFSWithObjectStoreCreate.java | 99 +++++++++++++++++++ .../fs/ozone/TestOzoneFileInterfaces.java | 11 ++- .../hadoop/fs/ozone/TestOzoneFileSystem.java | 94 +++++++++++++++++- .../om/request/key/OMKeyRenameRequest.java | 12 +-- 4 files changed, 204 insertions(+), 12 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index b872a3d8694a..c4e543554a6a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -35,10 +35,15 @@ import org.junit.Test; import org.junit.rules.Timeout; +import java.io.FileNotFoundException; +import java.io.IOException; import java.net.URI; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; +import static org.junit.Assert.fail; /** * Class tests create with object store and getFileStatus. @@ -120,6 +125,100 @@ public void test() throws Exception { } + + @Test + public void testObjectStoreCreateWithO3fs() throws Exception { + OzoneVolume ozoneVolume = + cluster.getRpcClient().getObjectStore().getVolume(volumeName); + + OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName); + + + // Use ObjectStore API to create keys. This similates how s3 create keys. 
+ String parentDir = "/dir1/dir2/dir3/dir4/"; + + + List keys = new ArrayList<>(); + keys.add("/dir1"); + keys.add("/dir1/dir2"); + keys.add("/dir1/dir2/dir3"); + keys.add("/dir1/dir2/dir3/dir4/"); + for (int i=1; i <= 3; i++) { + int length = 10; + String fileName = parentDir.concat("/file" + i + "/"); + keys.add(fileName); + OzoneOutputStream ozoneOutputStream = + ozoneBucket.createKey(fileName, length); + byte[] b = new byte[10]; + Arrays.fill(b, (byte)96); + ozoneOutputStream.write(b); + ozoneOutputStream.close(); + } + + // check + for (int i=1; i <= 3; i++) { + String fileName = parentDir.concat("/file" + i + "/"); + Path p = new Path(fileName); + Assert.assertTrue(o3fs.getFileStatus(p).isFile()); + checkAncestors(p); + } + + // Delete keys with object store api delete + for (int i = 1; i <= 3; i++) { + String fileName = parentDir.concat("/file" + i + "/"); + ozoneBucket.deleteKey(fileName); + } + + + // Delete parent dir via o3fs. + boolean result = o3fs.delete(new Path("/dir1"), true); + Assert.assertTrue(result); + + // No Key should exist. + for(String key : keys) { + checkPath(new Path(key)); + } + + + for (int i=1; i <= 3; i++) { + int length = 10; + String fileName = parentDir.concat("/file" + i + "/"); + OzoneOutputStream ozoneOutputStream = + ozoneBucket.createKey(fileName, length); + byte[] b = new byte[10]; + Arrays.fill(b, (byte)96); + ozoneOutputStream.write(b); + ozoneOutputStream.close(); + } + + o3fs.mkdirs(new Path("/dest")); + o3fs.rename(new Path("/dir1"), new Path("/dest")); + + // No source Key should exist. + for(String key : keys) { + checkPath(new Path(key)); + } + + // check dest path. + for (int i=1; i <= 3; i++) { + String fileName = "/dest/".concat(parentDir.concat("/file" + i + "/")); + Path p = new Path(fileName); + Assert.assertTrue(o3fs.getFileStatus(p).isFile()); + checkAncestors(p); + } + + } + + private void checkPath(Path path) { + try { + o3fs.getFileStatus(path); + fail("testObjectStoreCreateWithO3fs failed for Path" + path); + } catch (IOException ex) { + Assert.assertTrue(ex instanceof FileNotFoundException); + Assert.assertTrue(ex.getMessage().contains("No such file or directory")); + } + } + private void checkAncestors(Path p) throws Exception { p = p.getParent(); while(p.getParent() != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index 15a8fc65c5fc..06d1bd366e35 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -44,6 +44,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; @@ -97,7 +98,8 @@ public class TestOzoneFileInterfaces { */ @Parameters public static Collection data() { - return Arrays.asList(new Object[][] {{false, true}, {true, false}}); + return Arrays.asList(new Object[][] {{false, true, true}, + {true, false, false}}); } private boolean setDefaultFs; @@ -118,10 +120,13 @@ public static Collection data() { private OMMetrics omMetrics; + private boolean enableFileSystemPaths; + public 
TestOzoneFileInterfaces(boolean setDefaultFs, - boolean useAbsolutePath) { + boolean useAbsolutePath, boolean enabledFileSystemPaths) { this.setDefaultFs = setDefaultFs; this.useAbsolutePath = useAbsolutePath; + this.enableFileSystemPaths = enabledFileSystemPaths; GlobalStorageStatistics.INSTANCE.reset(); } @@ -131,6 +136,8 @@ public void init() throws Exception { bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + enableFileSystemPaths); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index d4a2a46fd04e..cdfe0cfdade3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -18,8 +18,10 @@ package org.apache.hadoop.fs.ozone; +import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Set; @@ -42,6 +44,7 @@ import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.security.UserGroupInformation; @@ -66,14 +69,25 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Ozone file system tests that are not covered by contract tests. */ +@RunWith(Parameterized.class) public class TestOzoneFileSystem { + @Parameterized.Parameters + public static Collection data() { + return Arrays.asList(new Object[]{true}, new Object[]{false}); + } + + public TestOzoneFileSystem(boolean setDefaultFs) { + this.enabledFileSystemPaths = setDefaultFs; + } /** * Set a timeout for each test. 
*/ @@ -83,6 +97,8 @@ public class TestOzoneFileSystem { private static final Logger LOG = LoggerFactory.getLogger(TestOzoneFileSystem.class); + private boolean enabledFileSystemPaths; + private MiniOzoneCluster cluster; private FileSystem fs; private OzoneFileSystem o3fs; @@ -215,6 +231,8 @@ public void testFileSystem() throws Exception { testRenameDir(); testSeekOnFileLength(); testDeleteRoot(); + + testRecursiveDelete(); } @After @@ -229,6 +247,8 @@ private void setupOzoneFileSystem() throws IOException, TimeoutException, InterruptedException { OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(FS_TRASH_INTERVAL_KEY, 1); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + enabledFileSystemPaths); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .build(); @@ -296,7 +316,7 @@ private void testDeleteCreatesFakeParentDir() throws Exception { } // Delete the child key - fs.delete(child, false); + fs.delete(child, true); // Deleting the only child should create the parent dir key if it does // not exist @@ -305,6 +325,78 @@ private void testDeleteCreatesFakeParentDir() throws Exception { assertEquals(parentKey, parentKeyInfo.getName()); } + + private void testRecursiveDelete() throws Exception { + Path grandparent = new Path("/gdir1"); + + for (int i = 1; i <= 10; i++) { + Path parent = new Path(grandparent, "pdir" +i); + Path child = new Path(parent, "child"); + ContractTestUtils.touch(fs, child); + } + + // Delete the grandparent, which should delete all keys. + fs.delete(grandparent, true); + + checkPath(grandparent); + + for (int i = 1; i <= 10; i++) { + Path parent = new Path(grandparent, "dir" +i); + Path child = new Path(parent, "child"); + checkPath(parent); + checkPath(child); + } + + + Path level0 = new Path("/level0"); + + for (int i = 1; i <= 3; i++) { + Path level1 = new Path(level0, "level" +i); + Path level2 = new Path(level1, "level" +i); + Path level1File = new Path(level1, "file1"); + Path level2File = new Path(level2, "file1"); + ContractTestUtils.touch(fs, level1File); + ContractTestUtils.touch(fs, level2File); + } + + // Delete at sub directory level. + for (int i = 1; i <= 3; i++) { + Path level1 = new Path(level0, "level" +i); + Path level2 = new Path(level1, "level" +i); + fs.delete(level2, true); + fs.delete(level1, true); + } + + + // Delete level0 finally. + fs.delete(grandparent, true); + + // Check if it exists or not. 
+ checkPath(grandparent); + + for (int i = 1; i <= 3; i++) { + Path level1 = new Path(level0, "level" +i); + Path level2 = new Path(level1, "level" +i); + Path level1File = new Path(level1, "file1"); + Path level2File = new Path(level2, "file1"); + checkPath(level1); + checkPath(level2); + checkPath(level1File); + checkPath(level2File); + } + + } + + private void checkPath(Path path) { + try { + fs.getFileStatus(path); + fail("testRecursiveDelete failed"); + } catch (IOException ex) { + Assert.assertTrue(ex instanceof FileNotFoundException); + Assert.assertTrue(ex.getMessage().contains("No such file or directory")); + } + } + private void testFileDelete() throws Exception { Path grandparent = new Path("/testBatchDelete"); Path parent = new Path(grandparent, "parent"); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index e6e9839062b1..4e7c05c0978e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -87,19 +87,13 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { KeyArgs renameKeyArgs = renameKeyRequest.getKeyArgs(); - // Set modification time and normalize key if needed. + // Set modification time. KeyArgs.Builder newKeyArgs = renameKeyArgs.toBuilder() - .setModificationTime(Time.now()) - .setKeyName(validateAndNormalizeKey( - ozoneManager.getEnableFileSystemPaths(), - renameKeyArgs.getKeyName())); + .setModificationTime(Time.now()); return getOmRequest().toBuilder() .setRenameKeyRequest(renameKeyRequest.toBuilder() - .setKeyArgs(newKeyArgs) - .setToKeyName(validateAndNormalizeKey( - ozoneManager.getEnableFileSystemPaths(), - renameKeyRequest.getToKeyName()))) + .setKeyArgs(newKeyArgs)) .setUserInfo(getUserInfo()).build(); } From 4f341c19a44a28d0166fdb20ab106b04bfa0f943 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 27 Jul 2020 20:14:11 +0200 Subject: [PATCH 079/165] HDDS-4032. Run author check without docker (#1262) --- .github/workflows/post-commit.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index 3e617bc38fce..344602f87b10 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -57,9 +57,7 @@ jobs: runs-on: ubuntu-18.04 steps: - uses: actions/checkout@master - - uses: ./.github/buildenv - with: - args: ./hadoop-ozone/dev-support/checks/author.sh + - run: hadoop-ozone/dev-support/checks/author.sh - name: Summary of failures run: cat target/${{ github.job }}/summary.txt if: always() From 843df5cbd212c60350cea1980456316933742ef9 Mon Sep 17 00:00:00 2001 From: HuangTao Date: Tue, 28 Jul 2020 04:02:23 +0800 Subject: [PATCH 080/165] HDDS-4030. 
Remember the selected columns and make the X-axis scrollable in recon datanodes UI (#1259) --- .../src/views/datanodes/datanodes.less | 1 + .../src/views/datanodes/datanodes.tsx | 44 +++++++++++-------- 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less index 10ec907a7334..644437dc8776 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less @@ -35,5 +35,6 @@ margin-right: 5px; display: inline-block; min-width: 200px; + z-index: 99; } } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index 856ab65ecabd..91b6a45aa99d 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -107,16 +107,8 @@ const COLUMNS = [ filters: DatanodeStatusList.map(status => ({text: status, value: status})), onFilter: (value: DatanodeStatus, record: IDatanode) => record.state === value, render: (text: DatanodeStatus) => renderDatanodeStatus(text), - sorter: (a: IDatanode, b: IDatanode) => a.state.localeCompare(b.state) - }, - { - title: 'Uuid', - dataIndex: 'uuid', - key: 'uuid', - isVisible: true, - isSearchable: true, - sorter: (a: IDatanode, b: IDatanode) => a.uuid.localeCompare(b.uuid), - defaultSortOrder: 'ascend' as const + sorter: (a: IDatanode, b: IDatanode) => a.state.localeCompare(b.state), + fixed: 'left' }, { title: 'Hostname', @@ -125,6 +117,16 @@ const COLUMNS = [ isVisible: true, isSearchable: true, sorter: (a: IDatanode, b: IDatanode) => a.hostname.localeCompare(b.hostname), + defaultSortOrder: 'ascend' as const, + fixed: 'left' + }, + { + title: 'Uuid', + dataIndex: 'uuid', + key: 'uuid', + isVisible: true, + isSearchable: true, + sorter: (a: IDatanode, b: IDatanode) => a.uuid.localeCompare(b.uuid), defaultSortOrder: 'ascend' as const }, { @@ -266,10 +268,19 @@ export class Datanodes extends React.Component, IDatanode }); }; + _getSelectedColumns = (selected: IOption[]) => { + const selectedColumns = selected.length > 0 ? 
selected : COLUMNS.filter(column => column.isVisible).map(column => ({ + label: column.key, + value: column.key + })); + return selectedColumns; + }; + _loadData = () => { - this.setState({ - loading: true - }); + this.setState(prevState => ({ + loading: true, + selectedColumns: this._getSelectedColumns(prevState.selectedColumns) + })); axios.get('/api/v1/datanodes').then(response => { const datanodesResponse: IDatanodesResponse = response.data; const totalCount = datanodesResponse.totalCount; @@ -292,18 +303,12 @@ export class Datanodes extends React.Component, IDatanode buildDate: datanode.buildDate }; }); - const selectedColumns: IOption[] = COLUMNS.filter(column => column.isVisible).map(column => ({ - label: column.key, - value: column.key - })); this.setState({ loading: false, dataSource, totalCount, lastUpdated: Number(moment()) - }, () => { - this._handleColumnChange(selectedColumns, {action: 'select-option'}); }); }).catch(error => { this.setState({ @@ -381,6 +386,7 @@ export class Datanodes extends React.Component, IDatanode loading={loading} pagination={paginationConfig} rowKey='hostname' + scroll={{x: true, y: false, scrollToFirstRowOnChange: true}} />
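The column fix above boils down to one rule: re-derive the selection from the default visible columns only when the user has not chosen anything, so an auto-refresh no longer resets a narrowed view. A standalone sketch of that fallback rule, assuming a trimmed-down `IOption` and column list (the real component derives these from `COLUMNS`):

```typescript
interface IOption {
  label: string;
  value: string;
}

// Columns flagged isVisible form the default selection (subset for brevity).
const defaultVisibleColumns: IOption[] = [
  {label: 'hostname', value: 'hostname'},
  {label: 'state', value: 'state'}
];

// Keep whatever the user already selected; fall back to the defaults only
// when the selection is empty (e.g. on the very first load).
function getSelectedColumns(selected: IOption[]): IOption[] {
  return selected.length > 0 ? selected : defaultVisibleColumns;
}

// First load: no selection yet, so defaults are used.
console.log(getSelectedColumns([]).map(o => o.value));  // ['hostname', 'state']

// Auto-refresh after the user narrowed the view: the selection survives.
console.log(getSelectedColumns([{label: 'uuid', value: 'uuid'}]).map(o => o.value));  // ['uuid']
```

Together with pinning the State and Hostname columns via `fixed: 'left'` and enabling `scroll={{x: true}}` on the table, the wide column set scrolls horizontally while the identifying columns stay in place.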
From 959e5b2faebfa670f0d7b713060b86f105da27d5 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 27 Jul 2020 22:07:07 -0700 Subject: [PATCH 081/165] HDDS-4011. Update S3 related documentation. (#1245) --- hadoop-hdds/docs/content/interface/CSI.md | 7 ------- hadoop-hdds/docs/content/interface/S3.md | 8 +------- hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md | 1 - hadoop-hdds/docs/content/start/StartFromDockerHub.md | 6 +----- 4 files changed, 2 insertions(+), 20 deletions(-) diff --git a/hadoop-hdds/docs/content/interface/CSI.md b/hadoop-hdds/docs/content/interface/CSI.md index b70572f77f5d..c7046d09f898 100644 --- a/hadoop-hdds/docs/content/interface/CSI.md +++ b/hadoop-hdds/docs/content/interface/CSI.md @@ -35,13 +35,6 @@ through goofys. If you don't have an Ozone cluster on kubernetes, you can reference [Kubernetes]({{< ref "start/Kubernetes.md" >}}) to create one. Use the resources from `kubernetes/examples/ozone` where you can find all the required Kubernetes resources to run cluster together with the dedicated Ozone CSI daemon (check `kubernetes/examples/ozone/csi`) -You should check if you already have a name of `/s3v` volume, if not create it by execute follow command: - -```bash -kubectl exec -it scm-0 bash -[hadoop@scm-0 ~]$ ozone sh vol create s3v -``` - Now, create the CSI related resources by execute the follow command. ```bash diff --git a/hadoop-hdds/docs/content/interface/S3.md b/hadoop-hdds/docs/content/interface/S3.md index 94e455728f95..1be0137942ef 100644 --- a/hadoop-hdds/docs/content/interface/S3.md +++ b/hadoop-hdds/docs/content/interface/S3.md @@ -24,7 +24,7 @@ summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact, Ozone provides S3 compatible REST interface to use the object store data with any S3 compatible tools. -S3 buckets are stored under the `/s3v`(Default is s3v, which can be setted through ozone.s3g.volume.name) volume, which needs to be created by an administrator first. +S3 buckets are stored under the `/s3v` volume. The default name `s3v` can be changed by setting the `ozone.s3g.volume.name` config property in `ozone-site.xml`. ## Getting started @@ -38,12 +38,6 @@ Go to the `compose/ozone` directory, and start the server: docker-compose up -d --scale datanode=3 ``` -Create the `/s3v` volume: - -```bash -docker-compose exec scm ozone sh volume create /s3v -``` - You can access the S3 gateway at `http://localhost:9878` ## URL Schema diff --git a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md b/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md index 6cf8b1e8d6ba..0f0d094c8fbb 100644 --- a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md +++ b/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md @@ -112,7 +112,6 @@ Download any text file and put it to the `/tmp/alice.txt` first. ```bash kubectl port-forward s3g-0 9878:9878 -ozone sh volume create /s3v aws s3api --endpoint http://localhost:9878 create-bucket --bucket=test aws s3api --endpoint http://localhost:9878 put-object --bucket test --key alice.txt --body /tmp/alice.txt ``` diff --git a/hadoop-hdds/docs/content/start/StartFromDockerHub.md b/hadoop-hdds/docs/content/start/StartFromDockerHub.md index c4f36aff8926..6d26dfac849a 100644 --- a/hadoop-hdds/docs/content/start/StartFromDockerHub.md +++ b/hadoop-hdds/docs/content/start/StartFromDockerHub.md @@ -72,11 +72,7 @@ connecting to the SCM's UI at [http://localhost:9876](http://localhost:9876). The S3 gateway endpoint will be exposed at port 9878. 
You can use Ozone's S3 support as if you are working against the real S3. S3 buckets are stored under -the `/s3v` volume, which needs to be created by an administrator first: - -``` -docker-compose exec scm ozone sh volume create /s3v -``` +the `/s3v` volume. Here is how you create buckets from command line: From 1acafedbed051ef48b63c38c0402c5ff3bbbe514 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 29 Jul 2020 12:35:42 +0200 Subject: [PATCH 082/165] HDDS-4038. Eliminate GitHub check warnings (#1268) --- .github/workflows/comments.yaml | 2 +- .github/workflows/post-commit.yml | 30 +++++++++++++++--------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/comments.yaml b/.github/workflows/comments.yaml index bfab244e88ac..2341662ca63a 100644 --- a/.github/workflows/comments.yaml +++ b/.github/workflows/comments.yaml @@ -25,7 +25,7 @@ jobs: name: check-comment runs-on: ubuntu-latest steps: - - uses: actions/checkout@master + - uses: actions/checkout@v2 - run: ./.github/process-comment.sh env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index 344602f87b10..d0b54339c17a 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -23,7 +23,7 @@ jobs: name: compile runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master + - uses: actions/checkout@v2 - uses: actions/cache@v2 with: path: | @@ -39,14 +39,14 @@ jobs: name: rat runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master + - uses: actions/checkout@v2 - uses: ./.github/buildenv with: args: ./hadoop-ozone/dev-support/checks/rat.sh - name: Summary of failures run: cat target/${{ github.job }}/summary.txt if: always() - - uses: actions/upload-artifact@master + - uses: actions/upload-artifact@v2 if: always() with: name: rat @@ -56,12 +56,12 @@ jobs: name: author runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master + - uses: actions/checkout@v2 - run: hadoop-ozone/dev-support/checks/author.sh - name: Summary of failures run: cat target/${{ github.job }}/summary.txt if: always() - - uses: actions/upload-artifact@master + - uses: actions/upload-artifact@v2 if: always() with: name: author @@ -71,14 +71,14 @@ jobs: name: unit runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master + - uses: actions/checkout@v2 - uses: ./.github/buildenv with: args: ./hadoop-ozone/dev-support/checks/unit.sh - name: Summary of failures run: cat target/${{ github.job }}/summary.txt if: always() - - uses: actions/upload-artifact@master + - uses: actions/upload-artifact@v2 if: always() with: name: unit @@ -88,14 +88,14 @@ jobs: name: checkstyle runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master + - uses: actions/checkout@v2 - uses: ./.github/buildenv with: args: ./hadoop-ozone/dev-support/checks/checkstyle.sh - name: Summary of failures run: cat target/${{ github.job }}/summary.txt if: always() - - uses: actions/upload-artifact@master + - uses: actions/upload-artifact@v2 if: always() with: name: checkstyle @@ -105,14 +105,14 @@ jobs: name: findbugs runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master + - uses: actions/checkout@v2 - uses: ./.github/buildenv with: args: ./hadoop-ozone/dev-support/checks/findbugs.sh - name: Summary of failures run: cat target/${{ github.job }}/summary.txt if: always() - - uses: actions/upload-artifact@master + - uses: actions/upload-artifact@v2 if: always() with: name: findbugs @@ 
-166,7 +166,7 @@ jobs: OZONE_ACCEPTANCE_SUITE: ${{ matrix.suite }} OZONE_WITH_COVERAGE: true OZONE_VOLUME_OWNER: 1000 - - uses: actions/upload-artifact@master + - uses: actions/upload-artifact@v2 if: always() with: name: acceptance-${{ matrix.suite }} @@ -189,7 +189,7 @@ jobs: fail-fast: false steps: - run: sudo mkdir mnt && sudo mount --bind /mnt `pwd`/mnt && sudo chmod 777 mnt - - uses: actions/checkout@master + - uses: actions/checkout@v2 with: path: mnt/ozone - uses: ./mnt/ozone/.github/buildenv @@ -198,7 +198,7 @@ jobs: - name: Summary of failures run: cat mnt/ozone/target/${{ github.job }}/summary.txt if: always() - - uses: actions/upload-artifact@master + - uses: actions/upload-artifact@v2 if: always() with: name: it-${{ matrix.profile }} @@ -233,7 +233,7 @@ jobs: file: ./target/coverage/all.xml name: codecov-umbrella fail_ci_if_error: true - - uses: actions/upload-artifact@master + - uses: actions/upload-artifact@v2 with: name: coverage path: target/coverage From 2986b280eb563eaf9b94c24f0c08a742d1e148df Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 29 Jul 2020 12:47:58 +0200 Subject: [PATCH 083/165] HDDS-4031. Run shell tests in CI (#1261) --- .github/workflows/post-commit.yml | 20 +++++++++++ hadoop-ozone/dev-support/checks/bats.sh | 35 +++++++++++++++++++ hadoop-ozone/dist/src/test/shell/gc_opts.bats | 6 ++-- 3 files changed, 57 insertions(+), 4 deletions(-) create mode 100755 hadoop-ozone/dev-support/checks/bats.sh diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index d0b54339c17a..992715fee339 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -35,6 +35,26 @@ jobs: - uses: ./.github/buildenv with: args: ./hadoop-ozone/dev-support/checks/build.sh + bats: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: install bats + run: | + cd /tmp + curl -LSs https://github.com/bats-core/bats-core/archive/v1.2.1.tar.gz | tar xzf - + cd bats-core-1.2.1 + sudo ./install.sh /usr/local + - name: run tests + run: ./hadoop-ozone/dev-support/checks/${{ github.job }}.sh + - name: Summary of failures + run: cat target/${{ github.job }}/summary.txt + if: always() + - uses: actions/upload-artifact@master + if: always() + with: + name: ${{ github.job }} + path: target/${{ github.job }} rat: name: rat runs-on: ubuntu-18.04 diff --git a/hadoop-ozone/dev-support/checks/bats.sh b/hadoop-ozone/dev-support/checks/bats.sh new file mode 100755 index 000000000000..2e1bbadce9aa --- /dev/null +++ b/hadoop-ozone/dev-support/checks/bats.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "${DIR}/../../.." 
|| exit 1 + +REPORT_DIR=${OUTPUT_DIR:-"${DIR}/../../../target/bats"} +mkdir -p "${REPORT_DIR}" +REPORT_FILE="${REPORT_DIR}/summary.txt" + +rm -f "${REPORT_DIR}/output.log" + +find * -path '*/src/test/shell/*' -name '*.bats' -print0 \ + | xargs -0 -n1 bats --formatter tap \ + | tee -a "${REPORT_DIR}/output.log" + +grep '^\(not ok\|#\)' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" + +grep -c '^not ok' "${REPORT_FILE}" > "${REPORT_DIR}/failures" + +if [[ -s "${REPORT_FILE}" ]]; then + exit 1 +fi diff --git a/hadoop-ozone/dist/src/test/shell/gc_opts.bats b/hadoop-ozone/dist/src/test/shell/gc_opts.bats index 1400a4058f78..feb29af0e35c 100644 --- a/hadoop-ozone/dist/src/test/shell/gc_opts.bats +++ b/hadoop-ozone/dist/src/test/shell/gc_opts.bats @@ -14,14 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. - - # # Can be executed with bats (https://github.com/bats-core/bats-core) -# bats gc_opts.bats (FROM THE CURRENT DIRECTORY) +# bats gc_opts.bats # -source ../../shell/hdds/hadoop-functions.sh +load ../../shell/hdds/hadoop-functions.sh @test "Setting Hadoop GC parameters: add GC params for server" { export HADOOP_SUBCMD_SUPPORTDAEMONIZATION=true export HADOOP_OPTS="Test" From 7823cb8977857355fac495b67885a678b017cc40 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 29 Jul 2020 04:32:24 -0700 Subject: [PATCH 084/165] HDDS-4041. Ozone /conf endpoint triggers kerberos replay error when SPNEGO is enabled. (#1267) --- .../hadoop/hdds/server/http/HttpServer2.java | 21 +++++++++++++++++++ .../dist/src/main/smoketest/spnego/web.robot | 14 +++++++++++++ 2 files changed, 35 insertions(+) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java index 3a2c49bcfbbd..9282c841a345 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java @@ -893,6 +893,27 @@ public void addInternalServlet(String name, String pathSpec, } webAppContext.addServlet(holder, pathSpec); + // Remove any previous filter attached to the removed servlet path to avoid + // Kerberos replay error. + FilterMapping[] filterMappings = webAppContext.getServletHandler(). 
+ getFilterMappings(); + for (int i = 0; i < filterMappings.length; i++) { + if (filterMappings[i].getPathSpecs() == null) { + LOG.debug("Skip checking {} filterMappings {} without a path spec.", + filterMappings[i].getFilterName(), filterMappings[i]); + continue; + } + int oldPathSpecsLen = filterMappings[i].getPathSpecs().length; + String[] newPathSpecs = + ArrayUtil.removeFromArray(filterMappings[i].getPathSpecs(), pathSpec); + if (newPathSpecs.length == 0) { + webAppContext.getServletHandler().setFilterMappings( + ArrayUtil.removeFromArray(filterMappings, filterMappings[i])); + } else if (newPathSpecs.length != oldPathSpecsLen) { + filterMappings[i].setPathSpecs(newPathSpecs); + } + } + if (requireAuth && UserGroupInformation.isSecurityEnabled()) { LOG.info("Adding Kerberos (SPNEGO) filter to {}", name); ServletHandler handler = webAppContext.getServletHandler(); diff --git a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot index 9c4156fcd6e4..065e390e5b84 100644 --- a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot +++ b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot @@ -30,6 +30,11 @@ ${OM_SERVICE_LIST_URL} http://om:9874/serviceList ${SCM_URL} http://scm:9876 ${RECON_URL} http://recon:9888 +${SCM_CONF_URL} http://scm:9876/conf +${SCM_JMX_URL} http://scm:9876/jmx +${SCM_STACKS_URL} http://scm:9876/stacks + + *** Keywords *** Verify SPNEGO enabled URL [arguments] ${url} @@ -60,6 +65,15 @@ Test OM Service List Test SCM portal Verify SPNEGO enabled URL ${SCM_URL} +Test SCM conf + Verify SPNEGO enabled URL ${SCM_CONF_URL} + +Test SCM jmx + Verify SPNEGO enabled URL ${SCM_JMX_URL} + +Test SCM stacks + Verify SPNEGO enabled URL ${SCM_STACKS_URL} + Test Recon portal Verify SPNEGO enabled URL ${RECON_URL} From e59d6ffbd99e5961ffdf1e52a87f50b596ae52b1 Mon Sep 17 00:00:00 2001 From: Lisa <30621230+aeioulisa@users.noreply.github.com> Date: Wed, 29 Jul 2020 20:58:23 +0800 Subject: [PATCH 085/165] HDDS-3511. Fix javadoc comment in OmMetadataManager (#1247) --- .../java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index e64b023a7e3c..36d219bd9af6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -116,7 +116,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager { * |----------------------------------------------------------------------| * | s3SecretTable | s3g_access_key_id -> s3Secret | * |----------------------------------------------------------------------| - * | dTokenTable | s3g_access_key_id -> s3Secret | + * | dTokenTable | OzoneTokenID -> renew_time | * |----------------------------------------------------------------------| * | prefixInfoTable | prefix -> PrefixInfo | * |----------------------------------------------------------------------| From 6ec384e2b05194aee0a41d2b9b167bf7be0b60c4 Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Wed, 29 Jul 2020 21:53:05 +0800 Subject: [PATCH 086/165] HDDS-4019. 
Show the storageDir while need init om or scm (#1248) --- .../hadoop/hdds/scm/server/StorageContainerManager.java | 3 ++- .../java/org/apache/hadoop/ozone/om/OzoneManager.java | 8 +++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 621b126a1e8c..b36ca87847a7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -241,7 +241,8 @@ public StorageContainerManager(OzoneConfiguration conf, scmStorageConfig = new SCMStorageConfig(conf); if (scmStorageConfig.getState() != StorageState.INITIALIZED) { LOG.error("Please make sure you have run \'ozone scm --init\' " + - "command to generate all the required metadata."); + "command to generate all the required metadata to " + + scmStorageConfig.getStorageDir() + "."); throw new SCMException("SCM not initialized due to storage config " + "failure.", ResultCodes.SCM_NOT_INITIALIZED); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 6b90c763a6e4..e428289d0c45 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -367,9 +367,11 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, OZONE_OM_USER_MAX_VOLUME + " value should be greater than zero"); if (omStorage.getState() != StorageState.INITIALIZED) { - throw new OMException("OM not initialized, current OM storage state: " + - omStorage.getState().name() + ". Please ensure 'ozone om --init' " - + "command is executed once before starting the OM service.", + throw new OMException("OM not initialized, current OM storage state: " + + omStorage.getState().name() + ". Please ensure 'ozone om --init' " + + "command is executed to generate all the required metadata to " + + omStorage.getStorageDir() + + " once before starting the OM service.", ResultCodes.OM_NOT_INITIALIZED); } omMetaDir = OMStorage.getOmDbDir(configuration); From cf4e73fefc3c4c740a68718a53feb24a975ab3bb Mon Sep 17 00:00:00 2001 From: bshashikant Date: Wed, 29 Jul 2020 21:20:34 +0530 Subject: [PATCH 087/165] HDDS-3809. Make number of open containers on a datanode a function of no of volumes reported by it. 
(#1081) --- .../src/main/resources/ozone-default.xml | 2 +- .../scm/container/SCMContainerManager.java | 7 +- .../hadoop/hdds/scm/node/DatanodeInfo.java | 22 +++++++ .../hadoop/hdds/scm/node/NodeManager.java | 2 + .../hadoop/hdds/scm/node/SCMNodeManager.java | 21 ++++++ .../hdds/scm/pipeline/PipelineManager.java | 2 + .../hdds/scm/pipeline/SCMPipelineManager.java | 10 +++ .../org/apache/hadoop/hdds/scm/TestUtils.java | 33 ++++++---- .../hdds/scm/block/TestBlockManager.java | 66 +++++++++++++++++++ .../hdds/scm/container/MockNodeManager.java | 11 ++++ .../hdds/scm/node/TestSCMNodeManager.java | 51 +++++++++++++- .../testutils/ReplicationNodeManagerMock.java | 5 ++ 12 files changed, 214 insertions(+), 18 deletions(-) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index a07807b1998e..b9774aab9b1c 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -818,7 +818,7 @@ ozone.scm.pipeline.owner.container.count 3 OZONE, SCM, PIPELINE - Number of containers per owner in a pipeline. + Number of containers per owner per disk in a pipeline. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index 34177f088640..e09486e2b60b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -66,7 +66,7 @@ public class SCMContainerManager implements ContainerManager { private final ContainerStateManager containerStateManager; - private final int numContainerPerOwnerInPipeline; + private final int numContainerPerVolume; private final SCMContainerManagerMetrics scmContainerManagerMetrics; @@ -98,7 +98,7 @@ public SCMContainerManager( this.lock = new ReentrantLock(); this.pipelineManager = pipelineManager; this.containerStateManager = new ContainerStateManager(conf); - this.numContainerPerOwnerInPipeline = conf + this.numContainerPerVolume = conf .getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); @@ -432,7 +432,8 @@ public ContainerInfo getMatchingContainer(final long sizeRequired, synchronized (pipeline) { containerIDs = getContainersForOwner(pipeline, owner); - if (containerIDs.size() < numContainerPerOwnerInPipeline) { + if (containerIDs.size() < numContainerPerVolume * pipelineManager. + getNumHealthyVolumes(pipeline)) { containerInfo = containerStateManager.allocateContainer( pipelineManager, owner, pipeline); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index d06ea2a3b3f3..b39440f41f99 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -108,6 +108,28 @@ public List getStorageReports() { } } + /** + * Returns count of healthy volumes reported from datanode. 
+ * @return count of healthy volumes + */ + public int getHealthyVolumeCount() { + try { + lock.readLock().lock(); + return storageReports.size() - getFailedVolumeCount(); + } finally { + lock.readLock().unlock(); + } + } + + /** + * Returns count of failed volumes reported from datanode. + * @return count of failed volumes + */ + private int getFailedVolumeCount() { + return (int) storageReports.stream(). + filter(e -> e.hasFailed() ? e.getFailed() : false).count(); + } + /** * Returns the last updated time of datanode info. * @return the last updated time of datanode info. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 37562fe9f293..df21b84eafda 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -213,4 +213,6 @@ void processNodeReport(DatanodeDetails datanodeDetails, * @return cluster map */ NetworkTopology getClusterNetworkTopologyMap(); + + int getNumHealthyVolumes(List dnList); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 005881c01175..1a0cec3b2176 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.Collections; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledFuture; import java.util.stream.Collectors; @@ -509,6 +510,26 @@ public Map getNodeInfo() { return nodeInfo; } + /** + * Returns the max of no healthy volumes reported out of the set + * of datanodes constituting the pipeline. + */ + @Override + public int getNumHealthyVolumes(List dnList) { + List volumeCountList = new ArrayList<>(dnList.size()); + for (DatanodeDetails dn : dnList) { + try { + volumeCountList.add(nodeStateManager.getNode(dn). + getHealthyVolumeCount()); + } catch (NodeNotFoundException e) { + LOG.warn("Cannot generate NodeStat, datanode {} not found.", + dn.getUuid()); + } + } + Preconditions.checkArgument(!volumeCountList.isEmpty()); + return Collections.max(volumeCountList); + } + /** * Get set of pipelines a datanode is part of. * diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java index 48068d82fe56..857f76e88e5f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java @@ -89,6 +89,8 @@ void scrubPipeline(ReplicationType type, ReplicationFactor factor) void incNumBlocksAllocatedMetric(PipelineID id); + int getNumHealthyVolumes(Pipeline pipeline); + /** * Activates a dormant pipeline. 
* diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index 6fce895185f7..e7540590ae9a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -661,6 +661,16 @@ public void close() throws IOException { pipelineFactory.shutdown(); } + /** + * returns max count of healthy volumes from the set of + * datanodes constituting the pipeline. + * @param pipeline + * @return healthy volume count + */ + public int getNumHealthyVolumes(Pipeline pipeline) { + return nodeManager.getNumHealthyVolumes(pipeline.getNodes()); + } + protected ReadWriteLock getLock() { return lock; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index 03ed0f7123eb..f4f17598ed0d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -222,20 +222,26 @@ public static StorageReportProto getRandomStorageReport(UUID nodeId, StorageTypeProto.DISK); } - /** - * Creates storage report with the given information. - * - * @param nodeId datanode id - * @param path storage dir - * @param capacity storage size - * @param used space used - * @param remaining space remaining - * @param type type of storage - * - * @return StorageReportProto - */ public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity, long used, long remaining, StorageTypeProto type) { + long capacity, long used, long remaining, StorageTypeProto type) { + return createStorageReport(nodeId, path, capacity, used, remaining, + type, false); + } + /** + * Creates storage report with the given information. + * + * @param nodeId datanode id + * @param path storage dir + * @param capacity storage size + * @param used space used + * @param remaining space remaining + * @param type type of storage + * + * @return StorageReportProto + */ + public static StorageReportProto createStorageReport(UUID nodeId, String path, + long capacity, long used, long remaining, StorageTypeProto type, + boolean failed) { Preconditions.checkNotNull(nodeId); Preconditions.checkNotNull(path); StorageReportProto.Builder srb = StorageReportProto.newBuilder(); @@ -243,6 +249,7 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path, .setStorageLocation(path) .setCapacity(capacity) .setScmUsed(used) + .setFailed(failed) .setRemaining(remaining); StorageTypeProto storageTypeProto = type == null ? 
StorageTypeProto.DISK : type; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index e0ba53c7e94c..a72031c42496 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -298,6 +298,72 @@ public void testBlockDistribution() throws Exception { } } + + @Test + public void testBlockDistributionWithMultipleDisks() throws Exception { + int threadCount = numContainerPerOwnerInPipeline * + numContainerPerOwnerInPipeline; + nodeManager.setNumHealthyVolumes(numContainerPerOwnerInPipeline); + List executors = new ArrayList<>(threadCount); + for (int i = 0; i < threadCount; i++) { + executors.add(Executors.newSingleThreadExecutor()); + } + pipelineManager.createPipeline(type, factor); + TestUtils.openAllRatisPipelines(pipelineManager); + Map> allocatedBlockMap = + new ConcurrentHashMap<>(); + List> futureList = + new ArrayList<>(threadCount); + for (int i = 0; i < threadCount; i++) { + final CompletableFuture future = + new CompletableFuture<>(); + CompletableFuture.supplyAsync(() -> { + try { + List blockList; + AllocatedBlock block = blockManager + .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, + OzoneConsts.OZONE, + new ExcludeList()); + long containerId = block.getBlockID().getContainerID(); + if (!allocatedBlockMap.containsKey(containerId)) { + blockList = new ArrayList<>(); + } else { + blockList = allocatedBlockMap.get(containerId); + } + blockList.add(block); + allocatedBlockMap.put(containerId, blockList); + future.complete(block); + } catch (IOException e) { + future.completeExceptionally(e); + } + return future; + }, executors.get(i)); + futureList.add(future); + } + try { + CompletableFuture + .allOf(futureList.toArray( + new CompletableFuture[futureList.size()])).get(); + Assert.assertTrue( + pipelineManager.getPipelines(type).size() == 1); + Pipeline pipeline = pipelineManager.getPipelines(type).get(0); + // total no of containers to be created will be number of healthy + // volumes * number of numContainerPerOwnerInPipeline which is equal to + // the thread count + Assert.assertTrue(threadCount == pipelineManager. + getNumberOfContainers(pipeline.getId())); + Assert.assertTrue( + allocatedBlockMap.size() == threadCount); + Assert.assertTrue(allocatedBlockMap. 
+ values().size() == threadCount); + allocatedBlockMap.values().stream().forEach(v -> { + Assert.assertTrue(v.size() == 1); + }); + } catch (Exception e) { + Assert.fail("testAllocateBlockInParallel failed"); + } + } + @Test public void testAllocateOversizedBlock() throws Exception { long size = 6 * GB; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 54f6ee43334c..5b635a7bee94 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -92,6 +92,7 @@ public class MockNodeManager implements NodeManager { private final Node2ContainerMap node2ContainerMap; private NetworkTopology clusterMap; private ConcurrentMap> dnsToUuidMap; + private int numHealthyDisksPerDatanode; public MockNodeManager(NetworkTopologyImpl clusterMap, List nodes, @@ -121,6 +122,7 @@ public MockNodeManager(NetworkTopologyImpl clusterMap, } safemode = false; this.commandMap = new HashMap<>(); + numHealthyDisksPerDatanode = 1; } public MockNodeManager(boolean initializeFakeNodes, int nodeCount) { @@ -569,6 +571,15 @@ public void setNetworkTopology(NetworkTopology topology) { this.clusterMap = topology; } + @Override + public int getNumHealthyVolumes(List dnList) { + return numHealthyDisksPerDatanode; + } + + public void setNumHealthyVolumes(int value) { + numHealthyDisksPerDatanode = value; + } + /** * A class to declare some values for the nodes so that our tests * won't fail. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index df5cb2de2550..7a58d46ab68e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -848,11 +848,12 @@ public void testScmStatsFromNodeReport() final long capacity = 2000; final long used = 100; final long remaining = capacity - used; - + List dnList = new ArrayList<>(nodeCount); try (SCMNodeManager nodeManager = createNodeManager(conf)) { EventQueue eventQueue = (EventQueue) scm.getEventQueue(); for (int x = 0; x < nodeCount; x++) { DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dnList.add(dn); UUID dnId = dn.getUuid(); long free = capacity - used; String storagePath = testDir.getAbsolutePath() + "/" + dnId; @@ -871,9 +872,57 @@ public void testScmStatsFromNodeReport() .getScmUsed().get()); assertEquals(remaining * nodeCount, (long) nodeManager.getStats() .getRemaining().get()); + assertEquals(1, nodeManager.getNumHealthyVolumes(dnList)); + dnList.clear(); + } + } + + /** + * Test multiple nodes sending initial heartbeat with their node report + * with multiple volumes. 
+ * + * @throws IOException + * @throws InterruptedException + * @throws TimeoutException + */ + @Test + public void tesVolumeInfoFromNodeReport() + throws IOException, InterruptedException, AuthenticationException { + OzoneConfiguration conf = getConf(); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, + MILLISECONDS); + final int volumeCount = 10; + final long capacity = 2000; + final long used = 100; + List dnList = new ArrayList<>(1); + try (SCMNodeManager nodeManager = createNodeManager(conf)) { + EventQueue eventQueue = (EventQueue) scm.getEventQueue(); + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dnList.add(dn); + UUID dnId = dn.getUuid(); + long free = capacity - used; + List reports = new ArrayList<>(volumeCount); + boolean failed = true; + for (int x = 0; x < volumeCount; x++) { + String storagePath = testDir.getAbsolutePath() + "/" + dnId; + reports.add(TestUtils + .createStorageReport(dnId, storagePath, capacity, + used, free, null, failed)); + failed = !failed; + } + nodeManager.register(dn, TestUtils.createNodeReport(reports), null); + nodeManager.processHeartbeat(dn); + //TODO: wait for EventQueue to be processed + eventQueue.processAll(8000L); + + assertEquals(1, nodeManager.getNodeCount(HEALTHY)); + assertEquals(volumeCount / 2, + nodeManager.getNumHealthyVolumes(dnList)); + dnList.clear(); } } + /** * Test single node stat update based on nodereport from different heartbeat * status (healthy, stale and dead). diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 9ca3f18c0c75..a9b879f86ec7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -343,4 +343,9 @@ public List getNodesByAddress(String address) { public NetworkTopology getClusterNetworkTopologyMap() { return null; } + + @Override + public int getNumHealthyVolumes(List dnList) { + return 0; + } } From 5f1fb86abf9c2f0ae83350bf36f7f7586cdce987 Mon Sep 17 00:00:00 2001 From: runzhiwang <51938049+runzhiwang@users.noreply.github.com> Date: Thu, 30 Jul 2020 00:42:49 +0800 Subject: [PATCH 088/165] HDDS-4024. 
Avoid while loop too soon when exception happen (#1253) --- .../statemachine/DatanodeStateMachine.java | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 1f61f15f1b3a..425074d6888a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -226,19 +226,26 @@ private void start() throws IOException { nextHB.set(Time.monotonicNow() + heartbeatFrequency); context.execute(executorService, heartbeatFrequency, TimeUnit.MILLISECONDS); - now = Time.monotonicNow(); - if (now < nextHB.get()) { - if(!Thread.interrupted()) { - Thread.sleep(nextHB.get() - now); - } - } } catch (InterruptedException e) { // Some one has sent interrupt signal, this could be because // 1. Trigger heartbeat immediately // 2. Shutdown has be initiated. + LOG.warn("Interrupt the execution.", e); + Thread.currentThread().interrupt(); } catch (Exception e) { LOG.error("Unable to finish the execution.", e); } + + now = Time.monotonicNow(); + if (now < nextHB.get()) { + if(!Thread.interrupted()) { + try { + Thread.sleep(nextHB.get() - now); + } catch (InterruptedException e) { + LOG.warn("Interrupt the execution.", e); + } + } + } } // If we have got some exception in stateMachine we set the state to From 85003faa3bf7160bc64d08b3acbb8aa53656bfb4 Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 29 Jul 2020 10:18:56 -0700 Subject: [PATCH 089/165] HDDS-4027. Suppress ERROR message when SCM attempt to create additional pipelines. (#1265) --- .../hdds/scm/pipeline/SCMPipelineManager.java | 14 ++++++++++++-- .../hdds/scm/pipeline/TestSCMPipelineManager.java | 7 +++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index e7540590ae9a..f072ebba1b08 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; import org.apache.hadoop.hdds.server.events.EventPublisher; @@ -56,6 +57,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE; + /** * Implements api needed for management of pipelines. All the write operations * for pipelines must come via PipelineManager. It synchronises all write @@ -272,8 +275,15 @@ public Pipeline createPipeline(ReplicationType type, recordMetricsForPipeline(pipeline); return pipeline; } catch (IOException ex) { - LOG.error("Failed to create pipeline of type {} and factor {}. 
" + - "Exception: {}", type, factor, ex.getMessage()); + if (ex instanceof SCMException && + ((SCMException) ex).getResult() == FAILED_TO_FIND_SUITABLE_NODE) { + // Avoid spam SCM log with errors when SCM has enough open pipelines + LOG.debug("Can't create more pipelines of type {} and factor {}. " + + "Reason: {}", type, factor, ex.getMessage()); + } else { + LOG.error("Failed to create pipeline of type {} and factor {}. " + + "Exception: {}", type, factor, ex.getMessage()); + } metrics.incNumPipelineCreationFailed(); throw ex; } finally { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java index 62289b95a9a3..25957d8d28d1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java @@ -76,6 +76,7 @@ import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.slf4j.event.Level.INFO; /** * Test cases to verify PipelineManager. @@ -304,6 +305,8 @@ public void testPipelineCreationFailedMetric() throws Exception { "NumPipelineCreationFailed", metrics); Assert.assertEquals(0, numPipelineCreateFailed); + LogCapturer logs = LogCapturer.captureLogs(SCMPipelineManager.getLog()); + GenericTestUtils.setLogLevel(SCMPipelineManager.getLog(), INFO); //This should fail... try { pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, @@ -313,6 +316,10 @@ public void testPipelineCreationFailedMetric() throws Exception { // pipeline creation failed this time. Assert.assertEquals(SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE, ioe.getResult()); + Assert.assertFalse(logs.getOutput().contains( + "Failed to create pipeline of type")); + } finally { + logs.stopCapturing(); } metrics = getMetrics( From 24abf2a0b461e1122f404ad65b36fe5e38cc87af Mon Sep 17 00:00:00 2001 From: prashantpogde Date: Wed, 29 Jul 2020 21:25:53 -0700 Subject: [PATCH 090/165] HDDS-3423. Enabling TestContainerReplicationEndToEnd and addressing failures (#1260) --- .../rpc/TestContainerReplicationEndToEnd.java | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index 2c7f81896e79..d9f75788ec8b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -45,8 +45,8 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; +import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; @@ -65,7 +65,6 @@ * Tests delete key operation with a slow follower in the datanode * pipeline. 
*/ -@Ignore public class TestContainerReplicationEndToEnd { private static MiniOzoneCluster cluster; @@ -94,10 +93,10 @@ public static void init() throws Exception { conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, containerReportInterval, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, containerReportInterval, - TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, + 5 * containerReportInterval, TimeUnit.MILLISECONDS); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, - 2 * containerReportInterval, TimeUnit.MILLISECONDS); + 10 * containerReportInterval, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, TimeUnit.SECONDS); DatanodeRatisServerConfig ratisServerConfig = @@ -167,9 +166,13 @@ public void testContainerReplication() throws Exception { .getPipeline(pipelineID); key.close(); - if (cluster.getStorageContainerManager().getContainerManager() - .getContainer(new ContainerID(containerID)).getState() != - HddsProtos.LifeCycleState.CLOSING) { + HddsProtos.LifeCycleState containerState = + cluster.getStorageContainerManager().getContainerManager() + .getContainer(new ContainerID(containerID)).getState(); + LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class).info( + "Current Container State is {}", containerState); + if ((containerState != HddsProtos.LifeCycleState.CLOSING) && + (containerState != HddsProtos.LifeCycleState.CLOSED)) { cluster.getStorageContainerManager().getContainerManager() .updateContainerState(new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE); From 26f166f87cc993ae63fc0af5a1314ab6eba3e1c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Thu, 30 Jul 2020 06:31:03 +0200 Subject: [PATCH 091/165] HDDS-4033. Make the acceptance test reports hierarchical (#1263) --- .../dist/src/main/compose/ozone-mr/test.sh | 43 +++++++++++++++++++ .../src/main/compose/ozone-topology/test.sh | 4 +- .../dist/src/main/compose/ozone/test.sh | 4 +- .../dist/src/main/compose/ozonesecure/test.sh | 4 +- .../dist/src/main/compose/test-all.sh | 30 +++++-------- hadoop-ozone/dist/src/main/compose/testlib.sh | 24 ++++++++++- .../dist/src/test/shell/compose_testlib.bats | 37 ++++++++++++++++ .../dist/src/test/shell/test1/test.sh | 15 +++++++ .../dist/src/test/shell/test2/test.sh | 17 ++++++++ .../src/test/shell/test3/subtest1/test.sh | 17 ++++++++ .../dist/src/test/shell/test4/test.sh | 17 ++++++++ 11 files changed, 185 insertions(+), 27 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh create mode 100644 hadoop-ozone/dist/src/test/shell/compose_testlib.bats create mode 100644 hadoop-ozone/dist/src/test/shell/test1/test.sh create mode 100644 hadoop-ozone/dist/src/test/shell/test2/test.sh create mode 100644 hadoop-ozone/dist/src/test/shell/test3/subtest1/test.sh create mode 100644 hadoop-ozone/dist/src/test/shell/test4/test.sh diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh new file mode 100644 index 000000000000..6146dab871e7 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) +ALL_RESULT_DIR="$SCRIPT_DIR/result" +source "$SCRIPT_DIR/../testlib.sh" + +tests=$(find_tests) + +RESULT=0 +# shellcheck disable=SC2044 +for t in ${tests}; do + d="$(dirname "${t}")" + echo "Executing test in ${d}" + + #required to read the .env file from the right location + cd "${d}" || continue + ./test.sh + ret=$? + if [[ $ret -ne 0 ]]; then + RESULT=1 + echo "ERROR: Test execution of ${d} is FAILED!!!!" + fi + cd "$SCRIPT_DIR" + RESULT_DIR="${d}/result" + TEST_DIR_NAME=$(basename ${d}) + rebot -N $TEST_DIR_NAME -o "$ALL_RESULT_DIR"/$TEST_DIR_NAME.xml "$RESULT_DIR"/"*.xml" + cp "$RESULT_DIR"/docker-*.log "$ALL_RESULT_DIR"/ + cp "$RESULT_DIR"/*.out* "$ALL_RESULT_DIR"/ || true +done + diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh index d4efa4f4af00..392112ba5313 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh @@ -39,7 +39,7 @@ execute_robot_test scm topology/loaddata.robot stop_containers datanode_1 datanode_2 datanode_3 -execute_robot_test scm topology/readdata.robot +execute_robot_test scm -N readdata-first-half topology/readdata.robot start_containers datanode_1 datanode_2 datanode_3 @@ -49,7 +49,7 @@ wait_for_port datanode_3 9858 60 stop_containers datanode_4 datanode_5 datanode_6 -execute_robot_test scm topology/readdata.robot +execute_robot_test scm -N readdata-second-half topology/readdata.robot stop_docker_env diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index b5b778f22cff..2f57831f88bf 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -37,14 +37,14 @@ execute_robot_test scm gdpr for scheme in ofs o3fs; do for bucket in link bucket; do - execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} ozonefs/ozonefs.robot + execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} -N ozonefs-${scheme}-${bucket} ozonefs/ozonefs.robot done done execute_robot_test scm security/ozone-secure-token.robot for bucket in link generated; do - execute_robot_test scm -v BUCKET:${bucket} s3 + execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket} s3 done execute_robot_test scm recon diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh index 076b83a3d52d..eeccb849eedc 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh @@ -39,12 +39,12 @@ execute_robot_test scm security for scheme in ofs o3fs; do for bucket in link bucket; do - execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} ozonefs/ozonefs.robot + execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} -N 
ozonefs-${scheme}-${bucket} ozonefs/ozonefs.robot done done for bucket in link generated; do - execute_robot_test s3g -v BUCKET:${bucket} s3 + execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} s3 done #expects 4 pipelines, should be run before diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index da3b80e79eac..15e268867bf1 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -25,30 +25,15 @@ PROJECT_DIR="$SCRIPT_DIR/.." mkdir -p "$ALL_RESULT_DIR" rm "$ALL_RESULT_DIR/*" || true +source "$SCRIPT_DIR"/testlib.sh + if [ "$OZONE_WITH_COVERAGE" ]; then java -cp "$PROJECT_DIR"/share/coverage/$(ls "$PROJECT_DIR"/share/coverage | grep test-util):"$PROJECT_DIR"/share/coverage/jacoco-core.jar org.apache.hadoop.test.JacocoServer & DOCKER_BRIDGE_IP=$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}') export HADOOP_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*" fi -if [[ -n "${OZONE_ACCEPTANCE_SUITE}" ]]; then - tests=$(find "$SCRIPT_DIR" -name test.sh | xargs grep -l "^#suite:${OZONE_ACCEPTANCE_SUITE}$" | sort) - - # 'misc' is default suite, add untagged tests, too - if [[ "misc" == "${OZONE_ACCEPTANCE_SUITE}" ]]; then - untagged="$(find "$SCRIPT_DIR" -name test.sh | xargs grep -L "^#suite:")" - if [[ -n "${untagged}" ]]; then - tests=$(echo ${tests} ${untagged} | xargs -n1 | sort) - fi - fi - - if [[ -z "${tests}" ]]; then - echo "No tests found for suite ${OZONE_ACCEPTANCE_SUITE}" - exit 1 - fi -else - tests=$(find "$SCRIPT_DIR" -name test.sh | grep "${OZONE_TEST_SELECTOR:-""}" | sort) -fi +tests=$(find_tests) RESULT=0 # shellcheck disable=SC2044 @@ -64,11 +49,16 @@ for t in ${tests}; do RESULT=1 echo "ERROR: Test execution of ${d} is FAILED!!!!" fi + cd "$SCRIPT_DIR" RESULT_DIR="${d}/result" - cp "$RESULT_DIR"/robot-*.xml "$RESULT_DIR"/docker-*.log "$RESULT_DIR"/*.out* "$ALL_RESULT_DIR"/ + TEST_DIR_NAME=$(basename ${d}) + rebot -N $TEST_DIR_NAME -o "$ALL_RESULT_DIR"/$TEST_DIR_NAME.xml "$RESULT_DIR"/"*.xml" + cp "$RESULT_DIR"/docker-*.log "$ALL_RESULT_DIR"/ + cp "$RESULT_DIR"/*.out* "$ALL_RESULT_DIR"/ || true done -rebot -N "smoketests" -d "$SCRIPT_DIR/result" "$SCRIPT_DIR/result/robot-*.xml" +rebot -N acceptance -d "$ALL_RESULT_DIR" "$ALL_RESULT_DIR"/*.xml + if [ "$OZONE_WITH_COVERAGE" ]; then pkill -f JacocoServer cp /tmp/jacoco-combined.exec "$SCRIPT_DIR"/result diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 5a0563308364..b000c913c1f7 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -37,6 +37,28 @@ create_results_dir() { chmod ogu+w "$RESULT_DIR" } +## @description find all the test.sh scripts in the immediate child dirs +find_tests(){ + if [[ -n "${OZONE_ACCEPTANCE_SUITE}" ]]; then + tests=$(find . -mindepth 2 -maxdepth 2 -name test.sh | xargs grep -l "^#suite:${OZONE_ACCEPTANCE_SUITE}$" | sort) + + # 'misc' is default suite, add untagged tests, too + if [[ "misc" == "${OZONE_ACCEPTANCE_SUITE}" ]]; then + untagged="$(find . 
-mindepth 2 -maxdepth 2 -name test.sh | xargs grep -L "^#suite:")" + if [[ -n "${untagged}" ]]; then + tests=$(echo ${tests} ${untagged} | xargs -n1 | sort) + fi + fi + + if [[ -z "${tests}" ]]; then + echo "No tests found for suite ${OZONE_ACCEPTANCE_SUITE}" + exit 1 + fi + else + tests=$(find . -mindepth 2 -maxdepth 2 -name test.sh | grep "${OZONE_TEST_SELECTOR:-""}" | sort) + fi + echo $tests +} ## @description wait until safemode exit (or 180 seconds) wait_for_safemode_exit(){ @@ -114,7 +136,7 @@ execute_robot_test(){ OUTPUT_PATH="$RESULT_DIR_INSIDE/${OUTPUT_FILE}" # shellcheck disable=SC2068 docker-compose exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE" \ - && docker-compose exec -T "$CONTAINER" robot -v OM_SERVICE_ID:"${OM_SERVICE_ID}" -v SECURITY_ENABLED:"${SECURITY_ENABLED}" -v OM_HA_PARAM:"${OM_HA_PARAM}" -v KEY_NAME:"${OZONE_BUCKET_KEY_NAME}" ${ARGUMENTS[@]} --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST" + && docker-compose exec -T "$CONTAINER" robot -v OM_SERVICE_ID:"${OM_SERVICE_ID}" -v SECURITY_ENABLED:"${SECURITY_ENABLED}" -v OM_HA_PARAM:"${OM_HA_PARAM}" -v KEY_NAME:"${OZONE_BUCKET_KEY_NAME}" ${ARGUMENTS[@]} --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST" local -i rc=$? FULL_CONTAINER_NAME=$(docker-compose ps | grep "_${CONTAINER}_" | head -n 1 | awk '{print $1}') diff --git a/hadoop-ozone/dist/src/test/shell/compose_testlib.bats b/hadoop-ozone/dist/src/test/shell/compose_testlib.bats new file mode 100644 index 000000000000..058da64f30d7 --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/compose_testlib.bats @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +load ../../main/compose/testlib.sh +@test "Find test recursive, only on one level" { + cd $BATS_TEST_DIRNAME + run find_tests + [[ "$output" == "./test1/test.sh ./test2/test.sh ./test4/test.sh" ]] +} + +@test "Find test by suite" { + OZONE_ACCEPTANCE_SUITE=one + cd $BATS_TEST_DIRNAME + run find_tests + [[ "$output" == "./test4/test.sh" ]] +} + +@test "Find test default suite" { + OZONE_ACCEPTANCE_SUITE=misc + cd $BATS_TEST_DIRNAME + run find_tests + [[ "$output" == "./test1/test.sh ./test2/test.sh" ]] +} diff --git a/hadoop-ozone/dist/src/test/shell/test1/test.sh b/hadoop-ozone/dist/src/test/shell/test1/test.sh new file mode 100644 index 000000000000..b13ca90d239f --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/test1/test.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/test/shell/test2/test.sh b/hadoop-ozone/dist/src/test/shell/test2/test.sh new file mode 100644 index 000000000000..8dbf5b29721c --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/test2/test.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:misc \ No newline at end of file diff --git a/hadoop-ozone/dist/src/test/shell/test3/subtest1/test.sh b/hadoop-ozone/dist/src/test/shell/test3/subtest1/test.sh new file mode 100644 index 000000000000..8dbf5b29721c --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/test3/subtest1/test.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:misc \ No newline at end of file diff --git a/hadoop-ozone/dist/src/test/shell/test4/test.sh b/hadoop-ozone/dist/src/test/shell/test4/test.sh new file mode 100644 index 000000000000..accc445711e5 --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/test4/test.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:one \ No newline at end of file From b309af700df5db0412b0c2496c80aaa4edd980c0 Mon Sep 17 00:00:00 2001 From: prashantpogde Date: Thu, 30 Jul 2020 01:05:25 -0700 Subject: [PATCH 092/165] HDDS-3970. Enabling TestStorageContainerManager with all failures addressed (#1257) --- .../ozone/TestStorageContainerManager.java | 33 ++++++++++++++----- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index 1320b5b9cd89..2b492a2c9b7c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -26,7 +26,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; import static org.junit.Assert.fail; -import org.junit.Ignore; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; @@ -109,7 +108,6 @@ /** * Test class that exercises the StorageContainerManager. */ -@Ignore public class TestStorageContainerManager { private static XceiverClientManager xceiverClientManager; private static final Logger LOG = LoggerFactory.getLogger( @@ -119,7 +117,7 @@ public class TestStorageContainerManager { * Set the timeout for every test. */ @Rule - public Timeout testTimeout = new Timeout(300000); + public Timeout testTimeout = new Timeout(900000); @Rule public ExpectedException thrown = ExpectedException.none(); @@ -525,7 +523,7 @@ public void testScmInfo() throws Exception { /** * Test datanode heartbeat well processed with a 4-layer network topology. */ - @Test(timeout = 60000) + @Test(timeout = 180000) public void testScmProcessDatanodeHeartbeat() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); String scmId = UUID.randomUUID().toString(); @@ -593,7 +591,10 @@ public void testCloseContainerCommandOnRestart() throws Exception { new TestStorageContainerManagerHelper(cluster, conf); helper.createKeys(10, 4096); - Thread.sleep(5000); + GenericTestUtils.waitFor(() -> { + return cluster.getStorageContainerManager().getContainerManager(). 
+ getContainers() != null; + }, 1000, 10000); StorageContainerManager scm = cluster.getStorageContainerManager(); List containers = cluster.getStorageContainerManager() @@ -604,8 +605,18 @@ public void testCloseContainerCommandOnRestart() throws Exception { // Stop processing HB scm.getDatanodeProtocolServer().stop(); - scm.getContainerManager().updateContainerState(selectedContainer - .containerID(), HddsProtos.LifeCycleEvent.FINALIZE); + LOG.info( + "Current Container State is {}", selectedContainer.getState()); + try { + scm.getContainerManager().updateContainerState(selectedContainer + .containerID(), HddsProtos.LifeCycleEvent.FINALIZE); + } catch (SCMException ex) { + if (selectedContainer.getState() != HddsProtos.LifeCycleState.CLOSING) { + ex.printStackTrace(); + throw(ex); + } + } + cluster.restartStorageContainerManager(false); scm = cluster.getStorageContainerManager(); EventPublisher publisher = mock(EventPublisher.class); @@ -616,7 +627,6 @@ public void testCloseContainerCommandOnRestart() throws Exception { modifiersField.setAccessible(true); modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL); f.set(replicationManager, publisher); - Thread.sleep(10000); UUID dnUuid = cluster.getHddsDatanodes().iterator().next() .getDatanodeDetails().getUuid(); @@ -628,6 +638,13 @@ public void testCloseContainerCommandOnRestart() throws Exception { CommandForDatanode commandForDatanode = new CommandForDatanode( dnUuid, closeContainerCommand); + GenericTestUtils.waitFor(() -> { + return replicationManager.isRunning(); + }, 1000, 25000); + + // Give ReplicationManager some time to process the containers. + Thread.sleep(5000); + verify(publisher).fireEvent(eq(SCMEvents.DATANODE_COMMAND), argThat(new CloseContainerCommandMatcher(dnUuid, commandForDatanode))); } finally { From 1bcb89bc71db8f2832892e708d079759d132d99d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Thu, 30 Jul 2020 13:51:44 +0200 Subject: [PATCH 093/165] HDDS-4045. Add more ignore rules to the RAT ignore list (#1273) --- hadoop-hdds/pom.xml | 8 ++++++++ hadoop-ozone/pom.xml | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 5162eee562f6..e30d89ac07b4 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -252,7 +252,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> **/hs_err*.log + **/.attach_* + **/**.rej + **/.factorypath + public + **/*.iml **/target/** + **/output.xml + **/log.html + **/report.html .gitattributes .idea/** src/main/resources/webapps/static/angular-1.7.9.min.js diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 8e7fbfc95a9f..73bce82e846c 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -259,6 +259,14 @@ **/hs_err*.log **/target/** .gitattributes + **/.attach_* + **/**.rej + **/.factorypath + public + **/*.iml + **/output.xml + **/log.html + **/report.html .idea/** **/.ssh/id_rsa* dev-support/*tests From 74fdb8483ab32f24f8799b7e8ee8028fac5bf77e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Thu, 30 Jul 2020 14:23:04 +0200 Subject: [PATCH 094/165] HDDS-3990. 
Test Kubernetes examples with acceptance tests (#1223) --- .github/workflows/post-commit.yml | 66 +++++++++ hadoop-ozone/dev-support/checks/kubernetes.sh | 36 +++++ .../dev-support/bin/dist-layout-stitching | 1 + hadoop-ozone/dist/src/main/k8s/.gitignore | 15 ++ .../main/k8s/definitions/ozone/config.yaml | 1 + .../ozone/definitions/onenode.yaml | 2 +- .../flekszible.yaml | 0 .../webserver-deployment.yaml | 0 .../webserver-service.yaml | 0 .../webserver-volume.yaml | 0 .../getting-started/config-configmap.yaml | 1 + .../getting-started/datanode-statefulset.yaml | 10 -- .../main/k8s/examples/getting-started/test.sh | 37 +++++ .../examples/minikube/config-configmap.yaml | 1 + .../src/main/k8s/examples/minikube/test.sh | 37 +++++ .../main/k8s/examples/ozone-dev/Flekszible | 4 +- .../examples/ozone-dev/config-configmap.yaml | 1 + .../ozone-dev/datanode-statefulset.yaml | 10 -- .../src/main/k8s/examples/ozone-dev/test.sh | 37 +++++ .../src/main/k8s/examples/ozone/Flekszible | 2 +- .../k8s/examples/ozone/config-configmap.yaml | 1 + .../dist/src/main/k8s/examples/ozone/test.sh | 37 +++++ .../dist/src/main/k8s/examples/test-all.sh | 44 ++++++ .../dist/src/main/k8s/examples/testlib.sh | 137 ++++++++++++++++++ .../dist/src/test/shell/k8s_testlib.bats | 55 +++++++ 25 files changed, 511 insertions(+), 24 deletions(-) create mode 100755 hadoop-ozone/dev-support/checks/kubernetes.sh create mode 100644 hadoop-ozone/dist/src/main/k8s/.gitignore rename hadoop-ozone/dist/src/main/k8s/definitions/{pv-test => test-webserver}/flekszible.yaml (100%) rename hadoop-ozone/dist/src/main/k8s/definitions/{pv-test => test-webserver}/webserver-deployment.yaml (100%) rename hadoop-ozone/dist/src/main/k8s/definitions/{pv-test => test-webserver}/webserver-service.yaml (100%) rename hadoop-ozone/dist/src/main/k8s/definitions/{pv-test => test-webserver}/webserver-volume.yaml (100%) create mode 100755 hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh create mode 100755 hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh create mode 100755 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh create mode 100755 hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh create mode 100755 hadoop-ozone/dist/src/main/k8s/examples/test-all.sh create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/testlib.sh create mode 100644 hadoop-ozone/dist/src/test/shell/k8s_testlib.bats diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index 992715fee339..e8728840e509 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -258,3 +258,69 @@ jobs: name: coverage path: target/coverage continue-on-error: true + kubernetes: + name: kubernetes + runs-on: ubuntu-18.04 + steps: + - name: Cache for maven dependencies + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: maven-repo-${{ hashFiles('**/pom.xml') }} + - name: Cache for npm dependencies + uses: actions/cache@v2 + with: + path: | + ~/.pnpm-store + **/node_modules + key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm- + - name: Checkout to /mnt/ozone + run: | + sudo chmod 777 /mnt + git clone 'https://github.com/${{ github.repository }}.git' /mnt/ozone + cd /mnt/ozone + if [[ '${{ github.event_name }}' == 'pull_request' ]]; then + git fetch --verbose origin '${{ github.ref }}' + else + git fetch --verbose origin '${{ github.sha }}' + fi + git checkout FETCH_HEAD + git reset --hard + - name: Install robotframework + run: sudo pip install 
robotframework + - name: Install k3s + run: curl -sfL https://get.k3s.io | sh - + - name: Copy Kubernetes config file + run: | + sudo mkdir ~/.kube + sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config + sudo chown $(id -u) ~/.kube/config + - name: Install flekszible + run: | + cd /tmp + wget https://github.com/elek/flekszible/releases/download/v1.8.1/flekszible_1.8.1_Linux_x86_64.tar.gz -O - | tar -zx + chmod +x flekszible + sudo mv flekszible /usr/bin/flekszible + - name: Run a full build + run: | + cd /mnt/ozone + hadoop-ozone/dev-support/checks/build.sh -Pcoverage + - name: Execute tests + run: | + cd /mnt/ozone/hadoop-ozone/dist/target/ozone-* && sudo mkdir .aws && sudo chmod 777 .aws && sudo chown 1000 .aws + cd /mnt/ozone && hadoop-ozone/dev-support/checks/kubernetes.sh + - name: Archive build results + uses: actions/upload-artifact@master + if: always() + with: + name: kubernetes + path: /mnt/ozone/target/kubernetes + continue-on-error: true + - name: Delete temporary build artifacts before caching + run: | + #Never cache local artifacts + rm -rf ~/.m2/repository/org/apache/hadoop/hdds + rm -rf ~/.m2/repository/org/apache/hadoop/ozone + if: always() \ No newline at end of file diff --git a/hadoop-ozone/dev-support/checks/kubernetes.sh b/hadoop-ozone/dev-support/checks/kubernetes.sh new file mode 100755 index 000000000000..a23aa839dad3 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/kubernetes.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/kubernetes"} + +OZONE_VERSION=$(grep "" "pom.xml" | sed 's/<[^>]*>//g'| sed 's/^[ \t]*//') +DIST_DIR="$DIR/../../dist/target/ozone-$OZONE_VERSION" + +if [ ! -d "$DIST_DIR" ]; then + echo "Distribution dir is missing. Doing a full build" + "$DIR/build.sh" -Pcoverage +fi + +mkdir -p "$REPORT_DIR" + +cd "$DIST_DIR/kubernetes/examples" || exit 1 +./test-all.sh +RES=$? +cp result/* "$REPORT_DIR/" +cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" +exit $RES diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index 80455a62953a..370d4deae408 100755 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -131,3 +131,4 @@ run cp -p -r "${ROOT}/hadoop-ozone/dist/target/Dockerfile" . 
#workaround for https://issues.apache.org/jira/browse/MRESOURCES-236 find ./compose -name "*.sh" -exec chmod 755 {} \; +find ./kubernetes -name "*.sh" -exec chmod 755 {} \; diff --git a/hadoop-ozone/dist/src/main/k8s/.gitignore b/hadoop-ozone/dist/src/main/k8s/.gitignore new file mode 100644 index 000000000000..bb9ee6087c6f --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/.gitignore @@ -0,0 +1,15 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +result diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml index 6e638915a247..124f72ff5e58 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: "om-0.om" OZONE-SITE.XML_ozone.scm.client.address: "scm-0.scm" OZONE-SITE.XML_ozone.scm.names: "scm-0.scm" + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: "INFO, stdout" LOG4J.PROPERTIES_log4j.appender.stdout: "org.apache.log4j.ConsoleAppender" LOG4J.PROPERTIES_log4j.appender.stdout.layout: "org.apache.log4j.PatternLayout" diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/onenode.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/onenode.yaml index 882477936adf..19a3e1dd7409 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/onenode.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/onenode.yaml @@ -19,7 +19,7 @@ description: remove scheduling rules to make it possible to run multiple datanod - type: Remove trigger: metadata: - name: ozone-datanode + name: datanode path: - spec - template diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/flekszible.yaml similarity index 100% rename from hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml rename to hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/flekszible.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-deployment.yaml similarity index 100% rename from hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml rename to hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-deployment.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-service.yaml similarity index 100% rename from hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml rename to 
hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-service.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-volume.yaml similarity index 100% rename from hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml rename to hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-volume.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml index 00fb72b14e50..f02fb56f089c 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml index c393eada79d6..db91864bdaf3 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml @@ -37,16 +37,6 @@ spec: prometheus.io/port: "9882" prometheus.io/path: /prom spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: kubernetes.io/hostname securityContext: fsGroup: 1000 containers: diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh new file mode 100755 index 000000000000..dabe394226bb --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +export K8S_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +cd "$K8S_DIR" + +# shellcheck source=/dev/null +source "../testlib.sh" + +rm -rf result + +regenerate_resources + +start_k8s_env + +execute_robot_test scm-0 smoketest/basic/basic.robot + +combine_reports + +stop_k8s_env + +revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml index 00fb72b14e50..f02fb56f089c 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh new file mode 100755 index 000000000000..dabe394226bb --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +export K8S_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +cd "$K8S_DIR" + +# shellcheck source=/dev/null +source "../testlib.sh" + +rm -rf result + +regenerate_resources + +start_k8s_env + +execute_robot_test scm-0 smoketest/basic/basic.robot + +combine_reports + +stop_k8s_env + +revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible index 350ea73c50b7..3d9bfcd6ce2d 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible @@ -37,11 +37,11 @@ import: - type: Image image: "@docker.image@" - type: ozone/tracing - - path: pv-test + - path: test-webserver destination: pv-test - path: ozone-csi destination: csi - - path: pv-test + - path: test-webserver destination: pv-test transformations: - type: Namespace diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml index 82f303fafe4e..58076303fbe3 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml index 475ce690b643..b22212ff79ea 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml @@ -37,16 +37,6 @@ spec: prometheus.io/port: "9882" prometheus.io/path: /prom spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: kubernetes.io/hostname securityContext: fsGroup: 1000 containers: diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh new file mode 100755 index 000000000000..dabe394226bb --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +export K8S_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +cd "$K8S_DIR" + +# shellcheck source=/dev/null +source "../testlib.sh" + +rm -rf result + +regenerate_resources + +start_k8s_env + +execute_robot_test scm-0 smoketest/basic/basic.robot + +combine_reports + +stop_k8s_env + +revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible index 2fb527c0a45f..ec6d74533baa 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible @@ -31,7 +31,7 @@ import: destination: pv-test - path: ozone-csi destination: csi - - path: pv-test + - path: test-webserver destination: pv-test transformations: - type: Namespace diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml index c7e8f486e89f..820c1977b4eb 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh new file mode 100755 index 000000000000..dabe394226bb --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export K8S_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +cd "$K8S_DIR" + +# shellcheck source=/dev/null +source "../testlib.sh" + +rm -rf result + +regenerate_resources + +start_k8s_env + +execute_robot_test scm-0 smoketest/basic/basic.robot + +combine_reports + +stop_k8s_env + +revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh new file mode 100755 index 000000000000..1d763ffdddeb --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# Test executor to test all the compose/*/test.sh test scripts. +# +SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) + +set -ex + +ALL_RESULT_DIR="$SCRIPT_DIR/result" +rm "$ALL_RESULT_DIR/*" || true +mkdir -p "$ALL_RESULT_DIR" + +RESULT=0 +IFS=$'\n' +# shellcheck disable=SC2044 +for test in $(find "$SCRIPT_DIR" -name test.sh | grep "${OZONE_TEST_SELECTOR:-""}" |sort); do + echo "" + echo "#### Executing tests of $(dirname "$test") #####" + echo "" + TEST_DIR="$(dirname $test)" + cd "$TEST_DIR" || continue + ./test.sh + cp "$TEST_DIR"/result/output.xml "$ALL_RESULT_DIR"/"$(basename "$TEST_DIR")".xml +done + +rebot -N "smoketests" -d "$ALL_RESULT_DIR/" "$ALL_RESULT_DIR/*.xml" + diff --git a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh new file mode 100644 index 000000000000..d33194d09609 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh @@ -0,0 +1,137 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +retry() { + n=0 + until [ $n -ge 30 ] + do + "$@" && break + n=$[$n+1] + echo "$n '$@' is failed..." + sleep ${RETRY_SLEEP:-3} + done + if [ $n -eq 30 ]; then + return 255 + fi +} + +grep_log() { + CONTAINER="$1" + PATTERN="$2" + kubectl logs "$1" | grep "$PATTERN" +} + +wait_for_startup(){ + print_phase "Waiting until the k8s cluster is running" + retry all_pods_are_running + retry grep_log scm-0 "SCM exiting safe mode." + retry grep_log om-0 "HTTP server of ozoneManager listening" + print_phase "Cluster is up and running" +} + +all_pods_are_running() { + RUNNING_COUNT=$(kubectl get pod --field-selector status.phase=Running | wc -l) + ALL_COUNT=$(kubectl get pod | wc -l) + RUNNING_COUNT=$((RUNNING_COUNT - 1)) + ALL_COUNT=$((ALL_COUNT - 1)) + if [ "$RUNNING_COUNT" -lt "3" ]; then + echo "$RUNNING_COUNT pods are running. Waiting for more." 
+ return 1 + elif [ "$RUNNING_COUNT" -ne "$ALL_COUNT" ]; then + echo "$RUNNING_COUNT pods are running out from the $ALL_COUNT" + return 2 + else + STARTED=true + return 0 + fi +} + +start_k8s_env() { + print_phase "Deleting existing k8s resources" + #reset environment + kubectl delete statefulset --all + kubectl delete daemonset --all + kubectl delete deployment --all + kubectl delete service --all + kubectl delete configmap --all + kubectl delete pod --all + kubectl delete pvc --all + kubectl delete pv --all + + print_phase "Applying k8s resources from $1" + kubectl apply -f . + wait_for_startup +} + +stop_k8s_env() { + if [ ! "$KEEP_RUNNING" ]; then + kubectl delete -f . + fi +} + +regenerate_resources() { + print_phase "Modifying Kubernetes resources file for test" + echo " (mounting current Ozone directory to the containers, scheduling containers to one node, ...)" + echo "" + echo "WARNING: this test can be executed only with local Kubernetes cluster" + echo " (source dir should be available from K8s nodes)" + echo "" + + PARENT_OF_PARENT=$(realpath ../..) + + if [ $(basename $PARENT_OF_PARENT) == "k8s" ]; then + #running from src dir + OZONE_ROOT=$(realpath ../../../../../target/ozone-0.6.0-SNAPSHOT) + else + #running from dist + OZONE_ROOT=$(realpath ../../..) + fi + + flekszible generate -t mount:hostPath="$OZONE_ROOT",path=/opt/hadoop -t image:image=apache/ozone-runner:20200420-1 -t ozone/onenode +} + +revert_resources() { + print_phase "Regenerating original Kubernetes resource files" + flekszible generate +} + +execute_robot_test() { + print_phase "Executing robot tests $@" + mkdir -p result + + CONTAINER="$1" + shift 1 #Remove first argument which was the container name + + # shellcheck disable=SC2206 + ARGUMENTS=($@) + + kubectl exec -it "${CONTAINER}" -- bash -c 'rm -rf /tmp/report' + kubectl exec -it "${CONTAINER}" -- bash -c 'mkdir -p /tmp/report' + kubectl exec -it "${CONTAINER}" -- robot --nostatusrc -d /tmp/report ${ARGUMENTS[@]} || true + kubectl cp "${CONTAINER}":/tmp/report/output.xml "result/$CONTAINER-$RANDOM.xml" || true +} + +combine_reports() { + rm result/output.xml || true + rebot -d result --nostatusrc -o output.xml -N $(basename "$(pwd)") result/*.xml +} + +print_phase() { + echo "" + echo "**** $1 ****" + echo "" +} diff --git a/hadoop-ozone/dist/src/test/shell/k8s_testlib.bats b/hadoop-ozone/dist/src/test/shell/k8s_testlib.bats new file mode 100644 index 000000000000..4558a1e114c4 --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/k8s_testlib.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +export COUNTER=1 + +pass_after_three_failures() { + if [ $COUNTER -eq 3 ]; then + return 0 + fi + COUNTER=$(( COUNTER + 1)) + return 255 +} + +pass_first() { + echo "pass" +} + +pass_never() { + return 255 +} + +load ../../main/k8s/examples/testlib.sh + +@test "Test retry with passing function" { + retry pass_first +} + +@test "Test retry with 3 failures" { + export RETRY_SLEEP=0 + retry pass_after_three_failures +} + +@test "Test retry always failure" { + export RETRY_SLEEP=0 + run retry pass_never + [ "$status" -eq 255 ] +} + + + + + From 98fb97d367ce1ef1a8ec8a8489cada6dea7fec70 Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Fri, 31 Jul 2020 07:37:31 +0800 Subject: [PATCH 095/165] HDDS-4047. OzoneManager met NPE exception while getServiceList (#1277) --- .../apache/hadoop/ozone/om/OzoneManager.java | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index e428289d0c45..d0da1c0cad0a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -1147,8 +1147,7 @@ public void start() throws IOException { metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period); keyManager.start(configuration); - omRpcServer.start(); - isOmRpcServerRunning = true; + try { httpServer = new OzoneManagerHttpServer(configuration, this); httpServer.start(); @@ -1156,6 +1155,9 @@ public void start() throws IOException { // Allow OM to start as Http Server failure is not fatal. LOG.error("OM HttpServer failed to start.", ex); } + omRpcServer.start(); + isOmRpcServerRunning = true; + registerMXBean(); startJVMPauseMonitor(); @@ -1201,8 +1203,6 @@ public void restart() throws IOException { } omRpcServer = getRpcServer(configuration); - omRpcServer.start(); - isOmRpcServerRunning = true; try { httpServer = new OzoneManagerHttpServer(configuration, this); @@ -1211,6 +1211,10 @@ public void restart() throws IOException { // Allow OM to start as Http Server failure is not fatal. LOG.error("OM HttpServer failed to start.", ex); } + + omRpcServer.start(); + isOmRpcServerRunning = true; + registerMXBean(); startJVMPauseMonitor(); @@ -2503,13 +2507,15 @@ public List getServiceList() throws IOException { .setType(ServicePort.Type.RPC) .setValue(omRpcAddress.getPort()) .build()); - if (httpServer.getHttpAddress() != null) { + if (httpServer != null + && httpServer.getHttpAddress() != null) { omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() .setType(ServicePort.Type.HTTP) .setValue(httpServer.getHttpAddress().getPort()) .build()); } - if (httpServer.getHttpsAddress() != null) { + if (httpServer != null + && httpServer.getHttpsAddress() != null) { omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() .setType(ServicePort.Type.HTTPS) .setValue(httpServer.getHttpsAddress().getPort()) From 9e294346f50e97c6d8ea3556ae7136c494c06080 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 31 Jul 2020 21:00:12 +0200 Subject: [PATCH 096/165] HDDS-4052. 
Remove master/slave terminology from Ozone (#1281) --- hadoop-hdds/docs/content/start/OnPrem.md | 2 +- hadoop-hdds/docs/content/start/OnPrem.zh.md | 2 +- hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh | 2 +- hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/docs/content/start/OnPrem.md b/hadoop-hdds/docs/content/start/OnPrem.md index 243743acdb74..988f73676ccc 100644 --- a/hadoop-hdds/docs/content/start/OnPrem.md +++ b/hadoop-hdds/docs/content/start/OnPrem.md @@ -165,7 +165,7 @@ ozone om --init start-ozone.sh {{< /highlight >}} -This assumes that you have set up the slaves file correctly and ssh +This assumes that you have set up the `workers` file correctly and ssh configuration that allows ssh-ing to all data nodes. This is the same as the HDFS configuration, so please refer to HDFS documentation on how to set this up. diff --git a/hadoop-hdds/docs/content/start/OnPrem.zh.md b/hadoop-hdds/docs/content/start/OnPrem.zh.md index 2e367b8d9e39..948025e10bd5 100644 --- a/hadoop-hdds/docs/content/start/OnPrem.zh.md +++ b/hadoop-hdds/docs/content/start/OnPrem.zh.md @@ -151,4 +151,4 @@ ozone om --init start-ozone.sh {{< /highlight >}} -这么做的前提是,slaves 文件已经正确编写,并且配置好了到各个 Datanode 的 ssh,这和 HDFS 的配置方式相同,具体方法请查看 HDFS 文档。 +这么做的前提是,`workers` 文件已经正确编写,并且配置好了到各个 Datanode 的 ssh,这和 HDFS 的配置方式相同,具体方法请查看 HDFS 文档。 diff --git a/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh b/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh index 55304916ad1f..1d8096b4baae 100755 --- a/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh +++ b/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh @@ -16,7 +16,7 @@ # limitations under the License. -# Run a Hadoop command on all slave hosts. +# Run a Hadoop command on all worker hosts. function hadoop_usage { diff --git a/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh b/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh index b46045b2d8c0..769af336cac8 100755 --- a/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh +++ b/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh @@ -999,7 +999,7 @@ function hadoop_connect_to_hosts # shellcheck disable=SC2124 local params="$@" local worker_file - local tmpslvnames + local tmp_worker_names # # ssh (or whatever) to a host @@ -1030,10 +1030,10 @@ function hadoop_connect_to_hosts else # no spaces allowed in the pdsh arg host list # shellcheck disable=SC2086 - tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,) + tmp_worker_names=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,) PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \ -f "${HADOOP_SSH_PARALLEL}" \ - -w "${tmpslvnames}" $"${@// /\\ }" 2>&1 + -w "${tmp_worker_names}" $"${@// /\\ }" 2>&1 fi else if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then From cdbc626d3a1914bf6a2861148d0e8938cacc0eca Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Fri, 31 Jul 2020 22:38:34 -0700 Subject: [PATCH 097/165] HDDS-4021. Organize Recon DBs into a 'DBDefinition'. 
(#1255) --- .../recon/codec/DatanodeDetailsCodec.java | 49 +++++++++++++ .../ReconNodeDBKeyCodec.java} | 26 ++++--- .../ozone/recon/codec/package-info.java | 22 ++++++ .../ozone/recon/scm/ReconNodeManager.java | 55 +++----------- .../ozone/recon/scm/ReconSCMDBDefinition.java | 61 ++++++++++++++++ .../ReconStorageContainerManagerFacade.java | 9 +-- .../impl/ContainerDBServiceProviderImpl.java | 29 ++++---- .../spi/impl/ReconContainerDBProvider.java | 28 ++------ .../recon/spi/impl/ReconDBDefinition.java | 71 +++++++++++++++++++ .../AbstractReconContainerManagerTest.java | 6 +- .../ozone/recon/scm/TestReconNodeManager.java | 20 +++++- .../recon/scm/TestReconPipelineManager.java | 8 +-- hadoop-ozone/tools/pom.xml | 4 ++ .../ozone/debug/DBDefinitionFactory.java | 22 +++++- .../apache/hadoop/ozone/debug/DBScanner.java | 25 +++++-- .../ozone/debug/TestDBDefinitionFactory.java | 59 +++++++++++++++ 16 files changed, 383 insertions(+), 111 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java rename hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/{scm/ReconDBDefinition.java => codec/ReconNodeDBKeyCodec.java} (59%) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java new file mode 100644 index 000000000000..c11ebbf63a63 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.recon.codec; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto.PARSER; + +import java.io.IOException; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.utils.db.Codec; + +/** + * Codec for DatanodeDetails. 
+ */ +public class DatanodeDetailsCodec implements Codec { + + @Override + public byte[] toPersistedFormat(DatanodeDetails object) throws IOException { + return object.getProtoBufMessage().toByteArray(); + } + + @Override + public DatanodeDetails fromPersistedFormat(byte[] rawData) + throws IOException { + return DatanodeDetails.getFromProtoBuf(PARSER.parseFrom(rawData)); + } + + @Override + public DatanodeDetails copyObject(DatanodeDetails object) { + return object; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/ReconNodeDBKeyCodec.java similarity index 59% rename from hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDBDefinition.java rename to hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/ReconNodeDBKeyCodec.java index bcfe060e7baf..8c569203a8df 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDBDefinition.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/ReconNodeDBKeyCodec.java @@ -16,23 +16,31 @@ * limitations under the License. * */ -package org.apache.hadoop.ozone.recon.scm; -import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; -import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; +package org.apache.hadoop.ozone.recon.codec; + +import java.io.IOException; +import java.util.UUID; + +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.db.Codec; /** - * SCM db file for ozone. + * Codec for UUID. */ -public class ReconDBDefinition extends SCMDBDefinition { +public class ReconNodeDBKeyCodec implements Codec { + @Override + public byte[] toPersistedFormat(UUID object) throws IOException { + return StringUtils.string2Bytes(object.toString()); + } @Override - public String getName() { - return "recon-scm.db"; + public UUID fromPersistedFormat(byte[] rawData) throws IOException { + return UUID.fromString(StringUtils.bytes2String(rawData)); } @Override - public String getLocationConfigKey() { - return ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; + public UUID copyObject(UUID object) { + return null; } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java new file mode 100644 index 000000000000..0812d39a3091 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package defines the codecs for Recon DB tables. + */ +package org.apache.hadoop.ozone.recon.codec; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java index 60e8a0635eba..d7a6104cf8b0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.recon.scm; -import java.io.File; import java.io.IOException; import java.util.HashMap; import java.util.List; @@ -26,29 +25,21 @@ import java.util.Set; import java.util.UUID; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; -import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.util.Time; import com.google.common.collect.ImmutableSet; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_SCM_NODE_DB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,7 +51,7 @@ public class ReconNodeManager extends SCMNodeManager { public static final Logger LOG = LoggerFactory .getLogger(ReconNodeManager.class); - private final MetadataStore nodeStore; + private Table nodeDB; private final static Set ALLOWED_COMMANDS = ImmutableSet.of(reregisterCommand); @@ -73,27 +64,20 @@ public class ReconNodeManager extends SCMNodeManager { public ReconNodeManager(OzoneConfiguration conf, SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher, - NetworkTopology networkTopology) throws IOException { + NetworkTopology networkTopology, + Table nodeDB) { super(conf, scmStorageConfig, eventPublisher, networkTopology); - final File nodeDBPath = getNodeDBPath(conf); - final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - this.nodeStore = MetadataStoreBuilder.newBuilder() - 
.setConf(conf) - .setDbFile(nodeDBPath) - .setCacheSize(cacheSize * OzoneConsts.MB) - .build(); + this.nodeDB = nodeDB; loadExistingNodes(); } private void loadExistingNodes() { try { - List> range = nodeStore - .getSequentialRangeKVs(null, Integer.MAX_VALUE, null); int nodeCount = 0; - for (Map.Entry entry : range) { - DatanodeDetails datanodeDetails = DatanodeDetails.getFromProtoBuf( - HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(entry.getValue())); + TableIterator> + iterator = nodeDB.iterator(); + while (iterator.hasNext()) { + DatanodeDetails datanodeDetails = iterator.next().getValue(); register(datanodeDetails, null, null); nodeCount++; } @@ -108,27 +92,10 @@ private void loadExistingNodes() { * @param datanodeDetails Datanode details. */ public void addNodeToDB(DatanodeDetails datanodeDetails) throws IOException { - byte[] nodeIdBytes = - StringUtils.string2Bytes(datanodeDetails.getUuidString()); - byte[] nodeDetailsBytes = - datanodeDetails.getProtoBufMessage().toByteArray(); - nodeStore.put(nodeIdBytes, nodeDetailsBytes); + nodeDB.put(datanodeDetails.getUuid(), datanodeDetails); LOG.info("Adding new node {} to Node DB.", datanodeDetails.getUuid()); } - protected File getNodeDBPath(ConfigurationSource conf) { - File metaDir = ReconUtils.getReconScmDbDir(conf); - return new File(metaDir, RECON_SCM_NODE_DB); - } - - @Override - public void close() throws IOException { - if (nodeStore != null) { - nodeStore.close(); - } - super.close(); - } - /** * Returns the last heartbeat time of the given node. * diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java new file mode 100644 index 000000000000..e56a66b831da --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.recon.scm; + +import java.util.UUID; + +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; +import org.apache.hadoop.ozone.recon.codec.DatanodeDetailsCodec; +import org.apache.hadoop.ozone.recon.codec.ReconNodeDBKeyCodec; + +/** + * Recon SCM db file for ozone. 
+ */ +public class ReconSCMDBDefinition extends SCMDBDefinition { + + public static final String RECON_SCM_DB_NAME = "recon-scm.db"; + + public static final DBColumnFamilyDefinition + NODES = + new DBColumnFamilyDefinition( + "nodes", + UUID.class, + new ReconNodeDBKeyCodec(), + DatanodeDetails.class, + new DatanodeDetailsCodec()); + + @Override + public String getName() { + return RECON_SCM_DB_NAME; + } + + @Override + public String getLocationConfigKey() { + return ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; + } + + @Override + public DBColumnFamilyDefinition[] getColumnFamilies() { + return ArrayUtils.add(super.getColumnFamilies(), NODES); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index 34a930a9a48e..3a0342ebc696 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -100,10 +100,11 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, this.scmStorageConfig = new ReconStorageConfig(conf); this.clusterMap = new NetworkTopologyImpl(conf); dbStore = DBStoreBuilder - .createDBStore(ozoneConfiguration, new ReconDBDefinition()); + .createDBStore(ozoneConfiguration, new ReconSCMDBDefinition()); this.nodeManager = - new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap); + new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap, + ReconSCMDBDefinition.NODES.getTable(dbStore)); placementMetrics = SCMContainerPlacementMetrics.create(); this.containerPlacementPolicy = ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager, @@ -114,10 +115,10 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, new ReconPipelineManager(conf, nodeManager, - ReconDBDefinition.PIPELINES.getTable(dbStore), + ReconSCMDBDefinition.PIPELINES.getTable(dbStore), eventQueue); this.containerManager = new ReconContainerManager(conf, - ReconDBDefinition.CONTAINERS.getTable(dbStore), + ReconSCMDBDefinition.CONTAINERS.getTable(dbStore), dbStore, pipelineManager, scmServiceProvider, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java index ec87352ea64d..aeefeef50c08 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java @@ -19,9 +19,9 @@ package org.apache.hadoop.ozone.recon.spi.impl; import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_COUNT_KEY; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_COUNT_TABLE; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_TABLE; import static org.apache.hadoop.ozone.recon.spi.impl.ReconContainerDBProvider.getNewDBStore; +import static org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition.CONTAINER_KEY; +import static org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition.CONTAINER_KEY_COUNT; import static org.jooq.impl.DSL.currentTimestamp; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; @@ -38,10 +38,8 @@ 
import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; -import org.apache.hadoop.ozone.recon.persistence.ContainerSchemaManager; import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.Table; @@ -67,9 +65,6 @@ public class ContainerDBServiceProviderImpl private Table containerKeyCountTable; private GlobalStatsDao globalStatsDao; - @Inject - private ContainerSchemaManager containerSchemaManager; - @Inject private OzoneConfiguration configuration; @@ -79,9 +74,6 @@ public class ContainerDBServiceProviderImpl @Inject private Configuration sqlConfiguration; - @Inject - private ReconUtils reconUtils; - @Inject public ContainerDBServiceProviderImpl(DBStore dbStore, Configuration sqlConfiguration) { @@ -113,13 +105,19 @@ public void initNewContainerDB(Map throws IOException { File oldDBLocation = containerDbStore.getDbLocation(); - containerDbStore = getNewDBStore(configuration, reconUtils); + try { + containerDbStore.close(); + } catch (Exception e) { + LOG.warn("Unable to close old Recon container key DB at {}.", + containerDbStore.getDbLocation().getAbsolutePath()); + } + containerDbStore = getNewDBStore(configuration); LOG.info("Creating new Recon Container DB at {}", containerDbStore.getDbLocation().getAbsolutePath()); initializeTables(); if (oldDBLocation.exists()) { - LOG.info("Cleaning up old Recon Container DB at {}.", + LOG.info("Cleaning up old Recon Container key DB at {}.", oldDBLocation.getAbsolutePath()); FileUtils.deleteDirectory(oldDBLocation); } @@ -140,10 +138,9 @@ public void initNewContainerDB(Map */ private void initializeTables() { try { - this.containerKeyTable = containerDbStore.getTable(CONTAINER_KEY_TABLE, - ContainerKeyPrefix.class, Integer.class); - this.containerKeyCountTable = containerDbStore - .getTable(CONTAINER_KEY_COUNT_TABLE, Long.class, Long.class); + this.containerKeyTable = CONTAINER_KEY.getTable(containerDbStore); + this.containerKeyCountTable = + CONTAINER_KEY_COUNT.getTable(containerDbStore); } catch (IOException e) { LOG.error("Unable to create Container Key tables.", e); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java index d622eb357be6..ec36597a0816 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java @@ -18,20 +18,15 @@ package org.apache.hadoop.ozone.recon.spi.impl; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_COUNT_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_CONTAINER_KEY_DB; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_TABLE; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import java.io.File; -import java.nio.file.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import 
org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.hadoop.hdds.utils.db.IntegerCodec; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,10 +64,10 @@ public DBStore get() { if (lastKnownContainerKeyDb != null) { LOG.info("Last known container-key DB : {}", lastKnownContainerKeyDb.getAbsolutePath()); - dbStore = initializeDBStore(configuration, reconUtils, + dbStore = initializeDBStore(configuration, lastKnownContainerKeyDb.getName()); } else { - dbStore = getNewDBStore(configuration, reconUtils); + dbStore = getNewDBStore(configuration); } if (dbStore == null) { throw new ProvisionException("Unable to provide instance of DBStore " + @@ -82,28 +77,19 @@ public DBStore get() { } private static DBStore initializeDBStore(OzoneConfiguration configuration, - ReconUtils reconUtils, String dbName) { + String dbName) { DBStore dbStore = null; try { - Path metaDir = reconUtils.getReconDbDir( - configuration, OZONE_RECON_DB_DIR).toPath(); - dbStore = DBStoreBuilder.newBuilder(configuration) - .setPath(metaDir) - .setName(dbName) - .addTable(CONTAINER_KEY_TABLE) - .addTable(CONTAINER_KEY_COUNT_TABLE) - .addCodec(ContainerKeyPrefix.class, new ContainerKeyPrefixCodec()) - .addCodec(Integer.class, new IntegerCodec()) - .build(); + dbStore = DBStoreBuilder.createDBStore(configuration, + new ReconDBDefinition(dbName)); } catch (Exception ex) { LOG.error("Unable to initialize Recon container metadata store.", ex); } return dbStore; } - static DBStore getNewDBStore(OzoneConfiguration configuration, - ReconUtils reconUtils) { + static DBStore getNewDBStore(OzoneConfiguration configuration) { String dbName = RECON_CONTAINER_KEY_DB + "_" + System.currentTimeMillis(); - return initializeDBStore(configuration, reconUtils, dbName); + return initializeDBStore(configuration, dbName); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java new file mode 100644 index 000000000000..4f5a4c79e267 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.recon.spi.impl; + +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.IntegerCodec; +import org.apache.hadoop.hdds.utils.db.LongCodec; +import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; +import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; + +/** + * RocksDB definition for the DB internal to Recon. 
+ */ +public class ReconDBDefinition implements DBDefinition { + + private String dbName; + + public ReconDBDefinition(String dbName) { + this.dbName = dbName; + } + + public static final DBColumnFamilyDefinition + CONTAINER_KEY = + new DBColumnFamilyDefinition<>( + "containerKeyTable", + ContainerKeyPrefix.class, + new ContainerKeyPrefixCodec(), + Integer.class, + new IntegerCodec()); + + public static final DBColumnFamilyDefinition + CONTAINER_KEY_COUNT = + new DBColumnFamilyDefinition<>( + "containerKeyCountTable", + Long.class, + new LongCodec(), + Long.class, + new LongCodec()); + + @Override + public String getName() { + return dbName; + } + + @Override + public String getLocationConfigKey() { + return ReconServerConfigKeys.OZONE_RECON_DB_DIR; + } + + @Override + public DBColumnFamilyDefinition[] getColumnFamilies() { + return new DBColumnFamilyDefinition[] {CONTAINER_KEY, CONTAINER_KEY_COUNT}; + } +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java index 04010e512f3d..3114c023c107 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java @@ -69,17 +69,17 @@ public void setUp() throws Exception { conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().getAbsolutePath()); conf.set(OZONE_SCM_NAMES, "localhost"); - store = DBStoreBuilder.createDBStore(conf, new ReconDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); scmStorageConfig = new ReconStorageConfig(conf); NetworkTopology clusterMap = new NetworkTopologyImpl(conf); EventQueue eventQueue = new EventQueue(); NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap); pipelineManager = new ReconPipelineManager(conf, nodeManager, - ReconDBDefinition.PIPELINES.getTable(store), eventQueue); + ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue); containerManager = new ReconContainerManager( conf, - ReconDBDefinition.CONTAINERS.getTable(store), + ReconSCMDBDefinition.CONTAINERS.getTable(store), store, pipelineManager, getScmServiceProvider(), diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java index 19290b16f135..c934caef22e5 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java @@ -26,12 +26,17 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -46,6 +51,7 @@ public class TestReconNodeManager { public TemporaryFolder temporaryFolder = new 
TemporaryFolder(); private OzoneConfiguration conf; + private DBStore store; @Before public void setUp() throws Exception { @@ -53,6 +59,12 @@ public void setUp() throws Exception { conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().getAbsolutePath()); conf.set(OZONE_SCM_NAMES, "localhost"); + store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); + } + + @After + public void tearDown() throws Exception { + store.close(); } @Test @@ -60,8 +72,10 @@ public void testReconNodeDB() throws IOException { ReconStorageConfig scmStorageConfig = new ReconStorageConfig(conf); EventQueue eventQueue = new EventQueue(); NetworkTopology clusterMap = new NetworkTopologyImpl(conf); + Table nodeTable = + ReconSCMDBDefinition.NODES.getTable(store); ReconNodeManager reconNodeManager = new ReconNodeManager(conf, - scmStorageConfig, eventQueue, clusterMap); + scmStorageConfig, eventQueue, clusterMap, nodeTable); ReconNewNodeHandler reconNewNodeHandler = new ReconNewNodeHandler(reconNodeManager); assertTrue(reconNodeManager.getAllNodes().isEmpty()); @@ -80,8 +94,8 @@ public void testReconNodeDB() throws IOException { // Close the DB, and recreate the instance of Recon Node Manager. eventQueue.close(); reconNodeManager.close(); - reconNodeManager = new ReconNodeManager(conf, - scmStorageConfig, eventQueue, clusterMap); + reconNodeManager = new ReconNodeManager(conf, scmStorageConfig, eventQueue, + clusterMap, nodeTable); // Verify that the node information was persisted and loaded back. assertEquals(1, reconNodeManager.getAllNodes().size()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java index c891f3321725..b190810db460 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java @@ -73,7 +73,7 @@ public void setup() throws IOException { temporaryFolder.newFolder().getAbsolutePath()); conf.set(OZONE_SCM_NAMES, "localhost"); scmStorageConfig = new ReconStorageConfig(conf); - store = DBStoreBuilder.createDBStore(conf, new ReconDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); } @After @@ -114,7 +114,7 @@ public void testInitialize() throws IOException { try (ReconPipelineManager reconPipelineManager = new ReconPipelineManager(conf, nodeManager, - ReconDBDefinition.PIPELINES.getTable(store), eventQueue)) { + ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue)) { reconPipelineManager.addPipeline(validPipeline); reconPipelineManager.addPipeline(invalidPipeline); @@ -150,7 +150,7 @@ public void testAddPipeline() throws IOException { ReconPipelineManager reconPipelineManager = new ReconPipelineManager(conf, nodeManager, - ReconDBDefinition.PIPELINES.getTable(store), eventQueue); + ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue); assertFalse(reconPipelineManager.containsPipeline(pipeline.getId())); reconPipelineManager.addPipeline(pipeline); assertTrue(reconPipelineManager.containsPipeline(pipeline.getId())); @@ -162,7 +162,7 @@ public void testStubbedReconPipelineFactory() throws IOException { NodeManager nodeManagerMock = mock(NodeManager.class); ReconPipelineManager reconPipelineManager = new ReconPipelineManager( - conf, nodeManagerMock, ReconDBDefinition.PIPELINES.getTable(store), + conf, nodeManagerMock, 
ReconSCMDBDefinition.PIPELINES.getTable(store), new EventQueue()); PipelineFactory pipelineFactory = reconPipelineManager.getPipelineFactory(); assertTrue(pipelineFactory instanceof ReconPipelineFactory); diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 7fb083325ecc..36f861c49809 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -63,6 +63,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-server-framework + + org.apache.hadoop + hadoop-ozone-recon + org.apache.hadoop hadoop-hdfs diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java index 3f28a64e84bc..d9d0d704d85e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java @@ -18,10 +18,16 @@ package org.apache.hadoop.ozone.debug; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_CONTAINER_KEY_DB; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB; + import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.om.codec.OMDBDefinition; +import org.apache.hadoop.ozone.recon.scm.ReconSCMDBDefinition; +import org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition; +import java.util.Arrays; import java.util.HashMap; /** @@ -36,14 +42,26 @@ private DBDefinitionFactory() { static { dbMap = new HashMap<>(); - dbMap.put(new SCMDBDefinition().getName(), new SCMDBDefinition()); - dbMap.put(new OMDBDefinition().getName(), new OMDBDefinition()); + Arrays.asList( + new SCMDBDefinition(), + new OMDBDefinition(), + new ReconSCMDBDefinition() + ).forEach(dbDefinition -> dbMap.put(dbDefinition.getName(), dbDefinition)); } public static DBDefinition getDefinition(String dbName){ if (dbMap.containsKey(dbName)){ return dbMap.get(dbName); } + return getReconDBDefinition(dbName); + } + + private static DBDefinition getReconDBDefinition(String dbName){ + if (dbName.startsWith(RECON_CONTAINER_KEY_DB)) { + return new ReconDBDefinition(dbName); + } else if (dbName.startsWith(RECON_OM_SNAPSHOT_DB)) { + return new OMDBDefinition(); + } return null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java index 47fc8bc9cfb2..5da64e04ad94 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java @@ -44,6 +44,12 @@ public class DBScanner implements Callable { description = "Table name") private String tableName; + @CommandLine.Option(names = {"--with-keys"}, + description = "List Key -> Value instead of just Value.", + defaultValue = "false", + showDefaultValue = CommandLine.Help.Visibility.ALWAYS) + private boolean withKey; + @CommandLine.ParentCommand private RDBParser parent; @@ -51,21 +57,29 @@ public class DBScanner implements Callable { private static void displayTable(RocksDB rocksDB, DBColumnFamilyDefinition dbColumnFamilyDefinition, - List list) throws IOException { + List list, boolean withKey) throws IOException { ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( dbColumnFamilyDefinition.getTableName() .getBytes(StandardCharsets.UTF_8), 
list); - if (columnFamilyHandle==null){ + if (columnFamilyHandle == null) { throw new IllegalArgumentException("columnFamilyHandle is null"); } RocksIterator iterator = rocksDB.newIterator(columnFamilyHandle); iterator.seekToFirst(); while (iterator.isValid()){ + StringBuilder result = new StringBuilder(); + if (withKey) { + Object key = dbColumnFamilyDefinition.getKeyCodec() + .fromPersistedFormat(iterator.key()); + Gson gson = new GsonBuilder().setPrettyPrinting().create(); + result.append(gson.toJson(key)); + result.append(" -> "); + } Object o = dbColumnFamilyDefinition.getValueCodec() .fromPersistedFormat(iterator.value()); Gson gson = new GsonBuilder().setPrettyPrinting().create(); - String result = gson.toJson(o); - System.out.println(result); + result.append(gson.toJson(o)); + System.out.println(result.toString()); iterator.next(); } } @@ -132,7 +146,8 @@ private void printAppropriateTable( } else { DBColumnFamilyDefinition columnFamilyDefinition = this.columnFamilyMap.get(tableName); - displayTable(rocksDB, columnFamilyDefinition, columnFamilyHandleList); + displayTable(rocksDB, columnFamilyDefinition, columnFamilyHandleList, + withKey); } } else { System.out.println("Incorrect db Path"); diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java new file mode 100644 index 000000000000..f63d14978740 --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.debug; + +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_CONTAINER_KEY_DB; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB; +import static org.junit.Assert.assertTrue; + +import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.ozone.om.codec.OMDBDefinition; +import org.apache.hadoop.ozone.recon.scm.ReconSCMDBDefinition; +import org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition; +import org.junit.Test; + +/** + * Simple factory unit test. + */ +public class TestDBDefinitionFactory { + + @Test + public void testGetDefinition() { + DBDefinition definition = + DBDefinitionFactory.getDefinition(new OMDBDefinition().getName()); + assertTrue(definition instanceof OMDBDefinition); + + definition = DBDefinitionFactory.getDefinition( + new SCMDBDefinition().getName()); + assertTrue(definition instanceof SCMDBDefinition); + + definition = DBDefinitionFactory.getDefinition( + new ReconSCMDBDefinition().getName()); + assertTrue(definition instanceof ReconSCMDBDefinition); + + definition = DBDefinitionFactory.getDefinition( + RECON_OM_SNAPSHOT_DB + "_1"); + assertTrue(definition instanceof OMDBDefinition); + + definition = DBDefinitionFactory.getDefinition( + RECON_CONTAINER_KEY_DB + "_1"); + assertTrue(definition instanceof ReconDBDefinition); + } +} \ No newline at end of file From 3c30b469dfe1638cede56fafcf741f51fd119af8 Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Mon, 3 Aug 2020 09:20:28 -0700 Subject: [PATCH 098/165] HDDS-4029. Recon unable to add a new container which is in CLOSED state. (#1258) --- .../scm/container/ContainerStateManager.java | 8 +- .../recon/scm/ReconContainerManager.java | 61 +++++++++--- .../scm/ReconContainerReportHandler.java | 2 +- ...econIncrementalContainerReportHandler.java | 2 +- .../AbstractReconContainerManagerTest.java | 45 +++++++++ .../recon/scm/TestReconContainerManager.java | 95 ++++++++++++++----- ...econIncrementalContainerReportHandler.java | 63 ++++++++++++ 7 files changed, 236 insertions(+), 40 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index 5a22521d3dd2..e575c60566b1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -325,8 +325,12 @@ public void addContainerInfo(long containerID, Pipeline pipeline) throws IOException { Preconditions.checkNotNull(containerInfo); containers.addContainer(containerInfo); - pipelineManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); + if (pipeline != null) { + // In Recon, while adding a 'new' CLOSED container, pipeline will be a + // random ID, and hence be passed down as null. 
+ pipelineManager.addContainerToPipeline(pipeline.getId(), + ContainerID.valueof(containerID)); + } containerStateCount.incrementAndGet(containerInfo.getState()); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java index 72d1548d5960..dff4709f56b1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java @@ -18,16 +18,21 @@ package org.apache.hadoop.ozone.recon.scm; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FINALIZE; + import java.io.IOException; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.SCMContainerManager; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; import org.apache.hadoop.hdds.utils.db.Table; @@ -77,6 +82,7 @@ public ReconContainerManager( * @throws IOException on Error. */ public void checkAndAddNewContainer(ContainerID containerID, + ContainerReplicaProto.State replicaState, DatanodeDetails datanodeDetails) throws IOException { if (!exists(containerID)) { @@ -84,15 +90,34 @@ public void checkAndAddNewContainer(ContainerID containerID, datanodeDetails.getHostName()); ContainerWithPipeline containerWithPipeline = scmClient.getContainerWithPipeline(containerID.getId()); - LOG.debug("Verified new container from SCM {} ", - containerWithPipeline.getContainerInfo().containerID()); + LOG.debug("Verified new container from SCM {}, {} ", + containerID, containerWithPipeline.getPipeline().getId()); // If no other client added this, go ahead and add this container. if (!exists(containerID)) { addNewContainer(containerID.getId(), containerWithPipeline); } + } else { + // Check if container state is not open. In SCM, container state + // changes to CLOSING first, and then the close command is pushed down + // to Datanodes. Recon 'learns' this from DN, and hence replica state + // will move container state to 'CLOSING'. + ContainerInfo containerInfo = getContainer(containerID); + if (containerInfo.getState().equals(HddsProtos.LifeCycleState.OPEN) + && !replicaState.equals(ContainerReplicaProto.State.OPEN) + && isHealthy(replicaState)) { + LOG.info("Container {} has state OPEN, but Replica has State {}.", + containerID, replicaState); + updateContainerState(containerID, FINALIZE); + } } } + private boolean isHealthy(ContainerReplicaProto.State replicaState) { + return replicaState != ContainerReplicaProto.State.UNHEALTHY + && replicaState != ContainerReplicaProto.State.INVALID + && replicaState != ContainerReplicaProto.State.DELETED; + } + /** * Adds a new container to Recon's container manager. 
* @param containerId id @@ -105,18 +130,32 @@ public void addNewContainer(long containerId, ContainerInfo containerInfo = containerWithPipeline.getContainerInfo(); getLock().lock(); try { - if (getPipelineManager().containsPipeline( - containerWithPipeline.getPipeline().getId())) { - getContainerStateManager().addContainerInfo(containerId, containerInfo, - getPipelineManager(), containerWithPipeline.getPipeline()); + boolean success = false; + if (containerInfo.getState().equals(HddsProtos.LifeCycleState.OPEN)) { + PipelineID pipelineID = containerWithPipeline.getPipeline().getId(); + if (getPipelineManager().containsPipeline(pipelineID)) { + getContainerStateManager().addContainerInfo(containerId, + containerInfo, getPipelineManager(), + containerWithPipeline.getPipeline()); + success = true; + } else { + // Get open container for a pipeline that Recon does not know + // about yet. Cannot update internal state until pipeline is synced. + LOG.warn(String.format( + "Pipeline %s not found. Cannot add container %s", + pipelineID, containerInfo.containerID())); + } + } else { + // Non 'Open' Container. No need to worry about pipeline since SCM + // returns a random pipelineID. + getContainerStateManager().addContainerInfo(containerId, + containerInfo, getPipelineManager(), null); + success = true; + } + if (success) { addContainerToDB(containerInfo); LOG.info("Successfully added container {} to Recon.", containerInfo.containerID()); - } else { - throw new IOException( - String.format("Pipeline %s not found. Cannot add container %s", - containerWithPipeline.getPipeline().getId(), - containerInfo.containerID())); } } catch (IOException ex) { LOG.info("Exception while adding container {} .", diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java index 0bf63a217021..228a65793099 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java @@ -60,10 +60,10 @@ public void onMessage(final ContainerReportFromDatanode reportFromDatanode, containerReplicaProto.getContainerID()); try { containerManager.checkAndAddNewContainer(id, + containerReplicaProto.getState(), reportFromDatanode.getDatanodeDetails()); } catch (IOException ioEx) { LOG.error("Exception while checking and adding new container.", ioEx); - return; } LOG.debug("Got container report for containerID {} ", containerReplicaProto.getContainerID()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java index b538caf4b26a..9e8887213f7c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java @@ -73,7 +73,7 @@ public void onMessage(final IncrementalContainerReportFromDatanode report, final ContainerID id = ContainerID.valueof( replicaProto.getContainerID()); try { - containerManager.checkAndAddNewContainer(id, + containerManager.checkAndAddNewContainer(id, replicaProto.getState(), report.getDatanodeDetails()); } catch (IOException ioEx) { LOG.error("Exception while checking 
and adding new container.", ioEx); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java index 3114c023c107..783f42ca3929 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java @@ -21,6 +21,7 @@ import java.io.IOException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -33,6 +34,7 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.recon.persistence.ContainerSchemaManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; @@ -40,6 +42,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; +import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.CONTAINERS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; import org.junit.After; @@ -129,4 +132,46 @@ private StorageContainerServiceProvider getScmServiceProvider() .thenReturn(containerWithPipeline); return scmServiceProviderMock; } + + protected Table getContainerTable() + throws IOException { + return CONTAINERS.getTable(store); + } + + protected ContainerWithPipeline getTestContainer(LifeCycleState state) + throws IOException { + ContainerID containerID = new ContainerID(100L); + Pipeline pipeline = getRandomPipeline(); + pipelineManager.addPipeline(pipeline); + ContainerInfo containerInfo = + new ContainerInfo.Builder() + .setContainerID(containerID.getId()) + .setNumberOfKeys(10) + .setPipelineID(pipeline.getId()) + .setReplicationFactor(ONE) + .setOwner("test") + .setState(state) + .setReplicationType(STAND_ALONE) + .build(); + return new ContainerWithPipeline(containerInfo, pipeline); + } + + protected ContainerWithPipeline getTestContainer(long id, + LifeCycleState state) + throws IOException { + ContainerID containerID = new ContainerID(id); + Pipeline pipeline = getRandomPipeline(); + pipelineManager.addPipeline(pipeline); + ContainerInfo containerInfo = + new ContainerInfo.Builder() + .setContainerID(containerID.getId()) + .setNumberOfKeys(10) + .setPipelineID(pipeline.getId()) + .setReplicationFactor(ONE) + .setOwner("test") + .setState(state) + .setReplicationType(STAND_ALONE) + .build(); + return new ContainerWithPipeline(containerInfo, pipeline); + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java index ccc1c80569a6..9f47779e3b33 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java @@ -19,10 +19,9 @@ package org.apache.hadoop.ozone.recon.scm; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING; +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -32,10 +31,11 @@ import java.util.NavigableSet; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.junit.Test; /** @@ -45,39 +45,57 @@ public class TestReconContainerManager extends AbstractReconContainerManagerTest { @Test - public void testAddNewContainer() throws IOException { - ContainerID containerID = new ContainerID(100L); - Pipeline pipeline = getRandomPipeline(); - ReconPipelineManager pipelineManager = getPipelineManager(); - pipelineManager.addPipeline(pipeline); - ContainerInfo containerInfo = - new ContainerInfo.Builder() - .setContainerID(containerID.getId()) - .setNumberOfKeys(10) - .setPipelineID(pipeline.getId()) - .setReplicationFactor(ONE) - .setOwner("test") - .setState(OPEN) - .setReplicationType(STAND_ALONE) - .build(); + public void testAddNewOpenContainer() throws IOException { ContainerWithPipeline containerWithPipeline = - new ContainerWithPipeline(containerInfo, pipeline); + getTestContainer(LifeCycleState.OPEN); + ContainerID containerID = + containerWithPipeline.getContainerInfo().containerID(); + ContainerInfo containerInfo = containerWithPipeline.getContainerInfo(); ReconContainerManager containerManager = getContainerManager(); assertFalse(containerManager.exists(containerID)); + assertFalse(getContainerTable().isExist(containerID)); containerManager.addNewContainer( containerID.getId(), containerWithPipeline); assertTrue(containerManager.exists(containerID)); - List containers = containerManager.getContainers(OPEN); + List containers = + containerManager.getContainers(LifeCycleState.OPEN); assertEquals(1, containers.size()); assertEquals(containerInfo, containers.get(0)); NavigableSet containersInPipeline = - pipelineManager.getContainersInPipeline(pipeline.getId()); + getPipelineManager().getContainersInPipeline( + containerWithPipeline.getPipeline().getId()); assertEquals(1, containersInPipeline.size()); assertEquals(containerID, containersInPipeline.first()); + + // Verify container 
DB. + assertTrue(getContainerTable().isExist(containerID)); + } + + @Test + public void testAddNewClosedContainer() throws IOException { + ContainerWithPipeline containerWithPipeline = getTestContainer(CLOSED); + ContainerID containerID = + containerWithPipeline.getContainerInfo().containerID(); + ContainerInfo containerInfo = containerWithPipeline.getContainerInfo(); + + ReconContainerManager containerManager = getContainerManager(); + assertFalse(containerManager.exists(containerID)); + assertFalse(getContainerTable().isExist(containerID)); + + containerManager.addNewContainer( + containerID.getId(), containerWithPipeline); + + assertTrue(containerManager.exists(containerID)); + + List containers = containerManager.getContainers(CLOSED); + assertEquals(1, containers.size()); + assertEquals(containerInfo, containers.get(0)); + // Verify container DB. + assertTrue(getContainerTable().isExist(containerID)); } @Test @@ -86,12 +104,39 @@ public void testCheckAndAddNewContainer() throws IOException { ReconContainerManager containerManager = getContainerManager(); assertFalse(containerManager.exists(containerID)); DatanodeDetails datanodeDetails = randomDatanodeDetails(); - containerManager.checkAndAddNewContainer(containerID, datanodeDetails); + containerManager.checkAndAddNewContainer(containerID, + OPEN, datanodeDetails); assertTrue(containerManager.exists(containerID)); // Doing it one more time should not change any state. - containerManager.checkAndAddNewContainer(containerID, datanodeDetails); + containerManager.checkAndAddNewContainer(containerID, OPEN, + datanodeDetails); assertTrue(containerManager.exists(containerID)); + assertEquals(LifeCycleState.OPEN, + getContainerManager().getContainer(containerID).getState()); } + @Test + public void testUpdateContainerStateFromOpen() throws IOException { + ContainerWithPipeline containerWithPipeline = + getTestContainer(LifeCycleState.OPEN); + + long id = containerWithPipeline.getContainerInfo().getContainerID(); + ContainerID containerID = + containerWithPipeline.getContainerInfo().containerID(); + + // Adding container #100. + getContainerManager().addNewContainer(id, containerWithPipeline); + assertEquals(LifeCycleState.OPEN, + getContainerManager().getContainer(containerID).getState()); + + DatanodeDetails datanodeDetails = randomDatanodeDetails(); + + // First report with "CLOSED" replica state moves container state to + // "CLOSING". 
+ getContainerManager().checkAndAddNewContainer(containerID, State.CLOSED, + datanodeDetails); + assertEquals(CLOSING, + getContainerManager().getContainer(containerID).getState()); + } } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java index dacf29381779..1b42f21712de 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java @@ -19,23 +19,28 @@ package org.apache.hadoop.ozone.recon.scm; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.Arrays; import java.util.UUID; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -90,6 +95,64 @@ public void testProcessICR() throws IOException, NodeNotFoundException { nodeManager.addContainer(datanodeDetails, containerID); assertTrue(containerManager.exists(containerID)); assertEquals(1, containerManager.getContainerReplicas(containerID).size()); + assertEquals(OPEN, containerManager.getContainer(containerID).getState()); + } + + @Test + public void testProcessICRStateMismatch() throws IOException { + + // Recon container state is "OPEN". + // Replica state could be any Non OPEN state. 
+ long containerId = 11; + for (State state : Arrays.asList(State.CLOSING, State.QUASI_CLOSED, + State.CLOSED)) { + ContainerWithPipeline containerWithPipeline = getTestContainer( + containerId++, OPEN); + ContainerID containerID = + containerWithPipeline.getContainerInfo().containerID(); + + ReconContainerManager containerManager = getContainerManager(); + containerManager.addNewContainer(containerID.getId(), + containerWithPipeline); + + DatanodeDetails datanodeDetails = + containerWithPipeline.getPipeline().getFirstNode(); + NodeManager nodeManagerMock = mock(NodeManager.class); + when(nodeManagerMock.getNodeByUuid(any())).thenReturn(datanodeDetails); + IncrementalContainerReportFromDatanode reportMock = + mock(IncrementalContainerReportFromDatanode.class); + when(reportMock.getDatanodeDetails()) + .thenReturn(containerWithPipeline.getPipeline().getFirstNode()); + + IncrementalContainerReportProto containerReport = + getIncrementalContainerReportProto(containerID, state, + datanodeDetails.getUuidString()); + when(reportMock.getReport()).thenReturn(containerReport); + ReconIncrementalContainerReportHandler reconIcr = + new ReconIncrementalContainerReportHandler(nodeManagerMock, + containerManager); + + reconIcr.onMessage(reportMock, mock(EventPublisher.class)); + assertTrue(containerManager.exists(containerID)); + assertEquals(1, + containerManager.getContainerReplicas(containerID).size()); + LifeCycleState expectedState = getContainerStateFromReplicaState(state); + LifeCycleState actualState = + containerManager.getContainer(containerID).getState(); + assertEquals(String.format("Expecting %s in " + + "container state for replica state %s", expectedState, + state), expectedState, actualState); + } + } + + private LifeCycleState getContainerStateFromReplicaState( + State state) { + switch (state) { + case CLOSING: return LifeCycleState.CLOSING; + case QUASI_CLOSED: return LifeCycleState.QUASI_CLOSED; + case CLOSED: return LifeCycleState.CLOSED; + default: return null; + } } private static IncrementalContainerReportProto From 029fa0dda90fdf60b3ead16534ae9cb533dd2cea Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 3 Aug 2020 21:25:05 +0200 Subject: [PATCH 099/165] HDDS-4058. 
Wrong use of AtomicBoolean in HddsDatanodeService (#1284) --- .../java/org/apache/hadoop/ozone/HddsDatanodeService.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index aee0f030d01b..cfb22e30dcd2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -99,7 +99,7 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin { private HddsDatanodeHttpServer httpServer; private boolean printBanner; private String[] args; - private volatile AtomicBoolean isStopped = new AtomicBoolean(false); + private final AtomicBoolean isStopped = new AtomicBoolean(false); private final Map ratisMetricsMap = new ConcurrentHashMap<>(); private DNMXBeanImpl serviceRuntimeInfo = @@ -531,8 +531,7 @@ public void terminateDatanode() { @Override public void stop() { - if (!isStopped.get()) { - isStopped.set(true); + if (!isStopped.getAndSet(true)) { if (plugins != null) { for (ServicePlugin plugin : plugins) { try { From 3a2aa629544f8e43180aefdf402f8237498ff6c4 Mon Sep 17 00:00:00 2001 From: prashantpogde Date: Mon, 3 Aug 2020 20:17:31 -0700 Subject: [PATCH 100/165] HDDS-3446. Enable TestOzoneManagerRestart and address any failure. (#1279) --- .../ozone/om/TestOzoneManagerRestart.java | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java index 6058fad61d2b..8938cfa48691 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java @@ -41,11 +41,10 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; -import org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; import static org.junit.Assert.fail; -import org.junit.Before; -import org.junit.Ignore; +import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; @@ -54,16 +53,15 @@ * Test some client operations after cluster starts. And perform restart and * then performs client operations and check the behavior is expected or not. */ -@Ignore public class TestOzoneManagerRestart { - private MiniOzoneCluster cluster = null; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omId; + private static MiniOzoneCluster cluster = null; + private static OzoneConfiguration conf; + private static String clusterId; + private static String scmId; + private static String omId; @Rule - public Timeout timeout = new Timeout(60000); + public Timeout timeout = new Timeout(240000); /** * Create a MiniDFSCluster for testing. 
@@ -72,8 +70,8 @@ public class TestOzoneManagerRestart { * * @throws IOException */ - @Before - public void init() throws Exception { + @BeforeClass + public static void init() throws Exception { conf = new OzoneConfiguration(); clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); @@ -94,8 +92,8 @@ public void init() throws Exception { /** * Shutdown MiniDFSCluster. */ - @After - public void shutdown() { + @AfterClass + public static void shutdown() { if (cluster != null) { cluster.shutdown(); } From 3e905813629241aedf32341cfbf4fa349e886c86 Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Tue, 4 Aug 2020 14:54:35 +0530 Subject: [PATCH 101/165] HDDS-3994. Make retry policy can be set by configuration. (#1231) --- .../apache/hadoop/hdds/ratis/RatisHelper.java | 112 ++++------------ .../hdds/ratis/conf/RatisClientConfig.java | 35 +++++ ...equestTypeDependentRetryPolicyCreator.java | 120 ++++++++++++++++++ .../RetryLimitedPolicyCreator.java | 47 +++++++ .../ratis/retrypolicy/RetryPolicyCreator.java | 29 +++++ .../hdds/ratis/retrypolicy/package-info.java | 23 ++++ 6 files changed, 276 insertions(+), 90 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryLimitedPolicyCreator.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryPolicyCreator.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/package-info.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 51113b2a294f..8325f0963885 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -20,19 +20,18 @@ import java.io.IOException; import java.security.cert.X509Certificate; -import java.time.Duration; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.UUID; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.hdds.ratis.retrypolicy.RetryPolicyCreator; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.x509.SecurityConfig; @@ -40,7 +39,6 @@ import org.apache.ratis.RaftConfigKeys; import org.apache.ratis.client.RaftClient; import org.apache.ratis.client.RaftClientConfigKeys; -import org.apache.ratis.client.retry.RequestTypeDependentRetryPolicy; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.grpc.GrpcConfigKeys; import org.apache.ratis.grpc.GrpcFactory; @@ -50,20 +48,10 @@ import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.protocol.GroupMismatchException; -import org.apache.ratis.protocol.StateMachineException; -import org.apache.ratis.protocol.NotReplicatedException; -import org.apache.ratis.protocol.TimeoutIOException; 
-import org.apache.ratis.protocol.exceptions.ResourceUnavailableException; -import org.apache.ratis.retry.ExponentialBackoffRetry; -import org.apache.ratis.retry.MultipleLinearRandomRetry; -import org.apache.ratis.retry.ExceptionDependentRetry; -import org.apache.ratis.retry.RetryPolicies; import org.apache.ratis.retry.RetryPolicy; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.util.TimeDuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,10 +65,6 @@ public final class RatisHelper { // Prefix for Ratis Server GRPC and Ratis client conf. public static final String HDDS_DATANODE_RATIS_PREFIX_KEY = "hdds.ratis"; - private static final Class[] NO_RETRY_EXCEPTIONS = - new Class[] {NotReplicatedException.class, GroupMismatchException.class, - StateMachineException.class}; - /* TODO: use a dummy id for all groups for the moment. * It should be changed to a unique id for each group. */ @@ -288,74 +272,17 @@ public static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf, return tlsConfig; } - /** - * Table mapping exception type to retry policy used for the exception in - * write and watch request. - * --------------------------------------------------------------------------- - * | Exception | RetryPolicy for | RetryPolicy for | - * | | Write request | Watch request | - * |-------------------------------------------------------------------------| - * | NotReplicatedException | NO_RETRY | NO_RETRY | - * |-------------------------------------------------------------------------| - * | GroupMismatchException | NO_RETRY | NO_RETRY | - * |-------------------------------------------------------------------------| - * | StateMachineException | NO_RETRY | NO_RETRY | - * |-------------------------------------------------------------------------| - * | TimeoutIOException | EXPONENTIAL_BACKOFF | NO_RETRY | - * |-------------------------------------------------------------------------| - * | ResourceUnavailableException| EXPONENTIAL_BACKOFF | EXPONENTIAL_BACKOFF | - * |-------------------------------------------------------------------------| - * | Others | MULTILINEAR_RANDOM | MULTILINEAR_RANDOM | - * | | _RETRY | _RETRY | - * --------------------------------------------------------------------------- - */ public static RetryPolicy createRetryPolicy(ConfigurationSource conf) { - RatisClientConfig ratisClientConfig = conf - .getObject(RatisClientConfig.class); - ExponentialBackoffRetry exponentialBackoffRetry = - createExponentialBackoffPolicy(ratisClientConfig); - MultipleLinearRandomRetry multipleLinearRandomRetry = - MultipleLinearRandomRetry - .parseCommaSeparated(ratisClientConfig.getMultilinearPolicy()); - - return RequestTypeDependentRetryPolicy.newBuilder() - .setRetryPolicy(RaftProtos.RaftClientRequestProto.TypeCase.WRITE, - createExceptionDependentPolicy(exponentialBackoffRetry, - multipleLinearRandomRetry, exponentialBackoffRetry)) - .setRetryPolicy(RaftProtos.RaftClientRequestProto.TypeCase.WATCH, - createExceptionDependentPolicy(exponentialBackoffRetry, - multipleLinearRandomRetry, RetryPolicies.noRetry())) - .setTimeout(RaftProtos.RaftClientRequestProto.TypeCase.WRITE, - toTimeDuration(ratisClientConfig.getWriteRequestTimeout())) - .setTimeout(RaftProtos.RaftClientRequestProto.TypeCase.WATCH, - toTimeDuration(ratisClientConfig.getWatchRequestTimeout())) - .build(); - } - - private static ExponentialBackoffRetry createExponentialBackoffPolicy( 
- RatisClientConfig ratisClientConfig) { - return ExponentialBackoffRetry.newBuilder() - .setBaseSleepTime( - toTimeDuration(ratisClientConfig.getExponentialPolicyBaseSleep())) - .setMaxSleepTime( - toTimeDuration(ratisClientConfig.getExponentialPolicyMaxSleep())) - .build(); - } - - private static ExceptionDependentRetry createExceptionDependentPolicy( - ExponentialBackoffRetry exponentialBackoffRetry, - MultipleLinearRandomRetry multipleLinearRandomRetry, - RetryPolicy timeoutPolicy) { - ExceptionDependentRetry.Builder builder = - ExceptionDependentRetry.newBuilder(); - for (Class c : NO_RETRY_EXCEPTIONS) { - builder.setExceptionToPolicy(c, RetryPolicies.noRetry()); + try { + RatisClientConfig scmClientConfig = + conf.getObject(RatisClientConfig.class); + Class policyClass = getClass( + scmClientConfig.getRetryPolicy(), + RetryPolicyCreator.class); + return policyClass.newInstance().create(conf); + } catch (Exception e) { + throw new RuntimeException(e); } - return builder.setExceptionToPolicy(ResourceUnavailableException.class, - exponentialBackoffRetry) - .setExceptionToPolicy(TimeoutIOException.class, timeoutPolicy) - .setDefaultPolicy(multipleLinearRandomRetry) - .build(); } public static Long getMinReplicatedIndex( @@ -364,12 +291,17 @@ public static Long getMinReplicatedIndex( .min(Long::compareTo).orElse(null); } - private static TimeDuration toTimeDuration(Duration duration) { - return toTimeDuration(duration.toMillis()); - } - - private static TimeDuration toTimeDuration(long milliseconds) { - return TimeDuration.valueOf(milliseconds, TimeUnit.MILLISECONDS); + private static Class getClass(String name, + Class xface) { + try { + Class theClass = Class.forName(name); + if (!xface.isAssignableFrom(theClass)) { + throw new RuntimeException(theClass + " not " + xface.getName()); + } else { + return theClass.asSubclass(xface); + } + } catch (Exception e) { + throw new RuntimeException(e); + } } - } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java index dc07fdd6beca..7db60598e10d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java @@ -185,4 +185,39 @@ public Duration getExponentialPolicyMaxSleep() { public void setExponentialPolicyMaxSleep(Duration duration) { exponentialPolicyMaxSleepInMs = duration.toMillis(); } + + @Config(key = "client.retrylimited.retry.interval", + defaultValue = "1s", + type = ConfigType.TIME, + tags = { OZONE, CLIENT, PERFORMANCE }, + description = "Interval between successive retries for " + + "a ratis client request.") + private long retrylimitedRetryInterval; + + public long getRetrylimitedRetryInterval() { + return retrylimitedRetryInterval; + } + + @Config(key = "client.retrylimited.max.retries", + defaultValue = "180", + type = ConfigType.INT, + tags = { OZONE, CLIENT, PERFORMANCE }, + description = "Number of retries for ratis client request.") + private int retrylimitedMaxRetries; + + public int getRetrylimitedMaxRetries() { + return retrylimitedMaxRetries; + } + + @Config(key = "client.retry.policy", + defaultValue = "org.apache.hadoop.hdds.ratis.retrypolicy." 
+ + "RequestTypeDependentRetryPolicyCreator", + type = ConfigType.STRING, + tags = { OZONE, CLIENT, PERFORMANCE }, + description = "The class name of the policy for retry.") + private String retryPolicy; + + public String getRetryPolicy() { + return retryPolicy; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java new file mode 100644 index 000000000000..fe92f3217b10 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.ratis.retrypolicy; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.ratis.client.retry.RequestTypeDependentRetryPolicy; +import org.apache.ratis.proto.RaftProtos; +import org.apache.ratis.protocol.GroupMismatchException; +import org.apache.ratis.protocol.NotReplicatedException; +import org.apache.ratis.protocol.StateMachineException; +import org.apache.ratis.protocol.TimeoutIOException; +import org.apache.ratis.protocol.exceptions.ResourceUnavailableException; +import org.apache.ratis.retry.*; +import org.apache.ratis.util.TimeDuration; + +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +/** + * Table mapping exception type to retry policy used for the exception in + * write and watch request. 
+ * --------------------------------------------------------------------------- + * | Exception | RetryPolicy for | RetryPolicy for | + * | | Write request | Watch request | + * |-------------------------------------------------------------------------| + * | NotReplicatedException | NO_RETRY | NO_RETRY | + * |-------------------------------------------------------------------------| + * | GroupMismatchException | NO_RETRY | NO_RETRY | + * |-------------------------------------------------------------------------| + * | StateMachineException | NO_RETRY | NO_RETRY | + * |-------------------------------------------------------------------------| + * | TimeoutIOException | EXPONENTIAL_BACKOFF | NO_RETRY | + * |-------------------------------------------------------------------------| + * | ResourceUnavailableException| EXPONENTIAL_BACKOFF | EXPONENTIAL_BACKOFF | + * |-------------------------------------------------------------------------| + * | Others | MULTILINEAR_RANDOM | MULTILINEAR_RANDOM | + * | | _RETRY | _RETRY | + * --------------------------------------------------------------------------- + */ +public class RequestTypeDependentRetryPolicyCreator + implements RetryPolicyCreator { + + private static final Class[] NO_RETRY_EXCEPTIONS = + new Class[] {NotReplicatedException.class, GroupMismatchException.class, + StateMachineException.class}; + + @Override + public RetryPolicy create(ConfigurationSource conf) { + RatisClientConfig ratisClientConfig = conf + .getObject(RatisClientConfig.class); + ExponentialBackoffRetry exponentialBackoffRetry = + createExponentialBackoffPolicy(ratisClientConfig); + MultipleLinearRandomRetry multipleLinearRandomRetry = + MultipleLinearRandomRetry + .parseCommaSeparated(ratisClientConfig.getMultilinearPolicy()); + + return RequestTypeDependentRetryPolicy.newBuilder() + .setRetryPolicy(RaftProtos.RaftClientRequestProto.TypeCase.WRITE, + createExceptionDependentPolicy(exponentialBackoffRetry, + multipleLinearRandomRetry, exponentialBackoffRetry)) + .setRetryPolicy(RaftProtos.RaftClientRequestProto.TypeCase.WATCH, + createExceptionDependentPolicy(exponentialBackoffRetry, + multipleLinearRandomRetry, RetryPolicies.noRetry())) + .setTimeout(RaftProtos.RaftClientRequestProto.TypeCase.WRITE, + toTimeDuration(ratisClientConfig.getWriteRequestTimeout())) + .setTimeout(RaftProtos.RaftClientRequestProto.TypeCase.WATCH, + toTimeDuration(ratisClientConfig.getWatchRequestTimeout())) + .build(); + } + + private static ExponentialBackoffRetry createExponentialBackoffPolicy( + RatisClientConfig ratisClientConfig) { + return ExponentialBackoffRetry.newBuilder() + .setBaseSleepTime( + toTimeDuration(ratisClientConfig.getExponentialPolicyBaseSleep())) + .setMaxSleepTime( + toTimeDuration(ratisClientConfig.getExponentialPolicyMaxSleep())) + .build(); + } + + private static ExceptionDependentRetry createExceptionDependentPolicy( + ExponentialBackoffRetry exponentialBackoffRetry, + MultipleLinearRandomRetry multipleLinearRandomRetry, + RetryPolicy timeoutPolicy) { + ExceptionDependentRetry.Builder builder = + ExceptionDependentRetry.newBuilder(); + for (Class c : NO_RETRY_EXCEPTIONS) { + builder.setExceptionToPolicy(c, RetryPolicies.noRetry()); + } + return builder.setExceptionToPolicy(ResourceUnavailableException.class, + exponentialBackoffRetry) + .setExceptionToPolicy(TimeoutIOException.class, timeoutPolicy) + .setDefaultPolicy(multipleLinearRandomRetry) + .build(); + } + private static TimeDuration toTimeDuration(Duration duration) { + return 
toTimeDuration(duration.toMillis()); + } + + private static TimeDuration toTimeDuration(long milliseconds) { + return TimeDuration.valueOf(milliseconds, TimeUnit.MILLISECONDS); + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryLimitedPolicyCreator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryLimitedPolicyCreator.java new file mode 100644 index 000000000000..5c3b06a17191 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryLimitedPolicyCreator.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.ratis.retrypolicy; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.ratis.retry.RetryPolicies; +import org.apache.ratis.retry.RetryPolicy; +import org.apache.ratis.util.TimeDuration; + +import java.util.concurrent.TimeUnit; + +/** + * The creator of RetryLimited policy. + */ +public class RetryLimitedPolicyCreator implements RetryPolicyCreator { + + @Override + public RetryPolicy create(ConfigurationSource conf) { + RatisClientConfig scmClientConfig = + conf.getObject(RatisClientConfig.class); + int maxRetryCount = + scmClientConfig.getRetrylimitedMaxRetries(); + long retryInterval = scmClientConfig.getRetrylimitedRetryInterval(); + TimeDuration sleepDuration = + TimeDuration.valueOf(retryInterval, TimeUnit.MILLISECONDS); + RetryPolicy retryPolicy = RetryPolicies + .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration); + return retryPolicy; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryPolicyCreator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryPolicyCreator.java new file mode 100644 index 000000000000..8057baa02013 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryPolicyCreator.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.ratis.retrypolicy; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.ratis.retry.RetryPolicy; + +/** + * The interface of RetryLimited policy creator. + */ +public interface RetryPolicyCreator { + RetryPolicy create(ConfigurationSource conf); +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/package-info.java new file mode 100644 index 000000000000..657a2bfb3cf7 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.ratis.retrypolicy; + +/** + * This package contains classes related to retry policies. + */ From 63ee000568a90d71c9e99a9a683997eaa3806526 Mon Sep 17 00:00:00 2001 From: Aryan Gupta <44232823+aryangupta1998@users.noreply.github.com> Date: Wed, 5 Aug 2020 22:03:53 +0530 Subject: [PATCH 102/165] HDDS-4035. Update logs of HadoopDirGenerator. (#1264) --- .../ozone/freon/BaseFreonGenerator.java | 10 +++++ .../ozone/freon/HadoopDirTreeGenerator.java | 40 +++++++++---------- .../ozone/freon/HadoopNestedDirGenerator.java | 22 +++++++--- 3 files changed, 46 insertions(+), 26 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index aa8282e57a35..b9b59ef2ed27 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -301,6 +301,16 @@ public void printReport() { messages.forEach(print); } + /** + * Print out reports with the given message. + */ + public void print(String msg){ + Consumer print = freonCommand.isInteractive() + ? System.out::println + : LOG::info; + print.accept(msg); + } + /** * Create the OM RPC client to use it for testing. 
*/ diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java index 62a49655f3c7..348aa244d344 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java @@ -97,17 +97,24 @@ public class HadoopDirTreeGenerator extends BaseFreonGenerator @Override public Void call() throws Exception { - - init(); - OzoneConfiguration configuration = createOzoneConfiguration(); - fileSystem = FileSystem.get(URI.create(rootPath), configuration); - - contentGenerator = new ContentGenerator(fileSizeInBytes, bufferSize); - timer = getMetrics().timer("file-create"); - - runTests(this::createDir); + String s; + if (depth <= 0) { + s = "Invalid depth value, depth value should be greater than zero!"; + print(s); + } else if (span <= 0) { + s = "Invalid span value, span value should be greater than zero!"; + print(s); + } else { + init(); + OzoneConfiguration configuration = createOzoneConfiguration(); + fileSystem = FileSystem.get(URI.create(rootPath), configuration); + + contentGenerator = new ContentGenerator(fileSizeInBytes, bufferSize); + timer = getMetrics().timer("file-create"); + + runTests(this::createDir); + } return null; - } /* @@ -139,21 +146,14 @@ public Void call() throws Exception { created. */ private void createDir(long counter) throws Exception { - if (depth <= 0) { - LOG.info("Invalid depth value, at least one depth should be passed!"); - return; - } - if (span <= 0) { - LOG.info("Invalid span value, at least one span should be passed!"); - return; - } String dir = makeDirWithGivenNumberOfFiles(rootPath); if (depth > 1) { createSubDirRecursively(dir, 1, 1); } - System.out.println("Successfully created directories & files. Total Dirs " + + String message = "Successfully created directories & files. 
Total Dirs " + "Count=" + totalDirsCnt.get() + ", Total Files Count=" + - timer.getCount()); + timer.getCount(); + print(message); } private void createSubDirRecursively(String parent, int depthIndex, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java index 72d096c227f7..8bc8a37708ce 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java @@ -72,13 +72,20 @@ public class HadoopNestedDirGenerator extends BaseFreonGenerator @Override public Void call() throws Exception { - - init(); - OzoneConfiguration configuration = createOzoneConfiguration(); - fileSystem = FileSystem.get(URI.create(rootPath), configuration); - runTests(this::createDir); + String s; + if (depth <= 0) { + s = "Invalid depth value, depth value should be greater than zero!"; + print(s); + } else if (span < 0) { + s = "Invalid span value, span value should be greater or equal to zero!"; + print(s); + } else { + init(); + OzoneConfiguration configuration = createOzoneConfiguration(); + fileSystem = FileSystem.get(URI.create(rootPath), configuration); + runTests(this::createDir); + } return null; - } /* @@ -109,5 +116,8 @@ private void createDir(long counter) throws Exception { Path dir = new Path(rootPath.concat("/").concat(childDir)); fileSystem.mkdirs(dir.getParent()); } + String message = "\nSuccessfully created directories. " + + "Total Directories with level = " + depth + " and span = " + span; + print(message); } } From fe89c19d6ab8ddf44bd6f86f7ce8202385fb9fd2 Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Wed, 5 Aug 2020 16:36:13 -0700 Subject: [PATCH 103/165] HDDS-4063. 
Fix InstallSnapshot in OM HA (#1294) --- .../ozone/om/ratis/OzoneManagerRatisServer.java | 1 + .../ozone/om/ratis/OzoneManagerStateMachine.java | 16 +++------------- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 2a98db6d40db..d6d2be6ed8e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -407,6 +407,7 @@ private RaftProperties newRaftProperties(ConfigurationSource conf) { StorageUnit.BYTES); RaftServerConfigKeys.Log.setSegmentSizeMax(properties, SizeInBytes.valueOf(raftSegmentSize)); + RaftServerConfigKeys.Log.setPurgeUptoSnapshotIndex(properties, true); // Set RAFT segment pre-allocated size final int raftSegmentPreallocatedSize = (int) conf.getStorageSize( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 3f7429ab7dd0..fd703159811a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -363,21 +363,11 @@ public long takeSnapshot() throws IOException { public CompletableFuture notifyInstallSnapshotFromLeader( RaftProtos.RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - String leaderNodeId = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()) - .toString(); - - LOG.info("Received install snapshot notificaiton form OM leader: {} with " + + String leaderNodeId = RaftPeerId.valueOf(roleInfoProto.getFollowerInfo() + .getLeaderInfo().getId().getId()).toString(); + LOG.info("Received install snapshot notification from OM leader: {} with " + "term index: {}", leaderNodeId, firstTermIndexInLog); - if (!roleInfoProto.getRole().equals(RaftProtos.RaftPeerRole.LEADER)) { - // A non-leader Ratis server should not send this notification. - LOG.error("Received Install Snapshot notification from non-leader OM " + - "node: {}. Ignoring the notification.", leaderNodeId); - return completeExceptionally(new OMException("Received notification to " + - "install snaphost from non-leader OM node", - OMException.ResultCodes.RATIS_ERROR)); - } - CompletableFuture future = CompletableFuture.supplyAsync( () -> ozoneManager.installSnapshotFromLeader(leaderNodeId), installSnapshotExecutor); From a2f408245ab2be4271a2396acbfd9ddb0565d8d0 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 5 Aug 2020 19:46:30 -0700 Subject: [PATCH 104/165] HDDS-4044. Deprecate ozone.s3g.volume.name. #1270 --- hadoop-hdds/docs/content/interface/S3.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/docs/content/interface/S3.md b/hadoop-hdds/docs/content/interface/S3.md index 1be0137942ef..2324fcba049a 100644 --- a/hadoop-hdds/docs/content/interface/S3.md +++ b/hadoop-hdds/docs/content/interface/S3.md @@ -24,7 +24,7 @@ summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact, Ozone provides S3 compatible REST interface to use the object store data with any S3 compatible tools. 
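To make the sentence above concrete, here is a minimal sketch (not part of this patch) of pointing a stock S3 client at the Ozone S3 gateway. The endpoint URL, the port 9878, the region and the dummy credentials are assumptions for a local test cluster, not values taken from this change:

```java
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class OzoneS3Sketch {
  public static void main(String[] args) {
    // Point the AWS SDK at the Ozone S3 gateway instead of AWS itself.
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
            "http://localhost:9878", "us-east-1"))   // assumed local s3g endpoint
        .withPathStyleAccessEnabled(true)            // bucket name in the path, not in DNS
        .withCredentials(new AWSStaticCredentialsProvider(
            new BasicAWSCredentials("accessKey", "secretKey")))
        .build();

    s3.createBucket("bucket1");                      // appears under the S3 volume in Ozone
    s3.putObject("bucket1", "key1", "Hello from the Ozone S3 gateway");
    System.out.println(s3.getObjectAsString("bucket1", "key1"));
  }
}
```

Any S3-compatible tool can be substituted for the SDK here; only the endpoint and credentials change.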
-S3 buckets are stored under the `/s3v` volume. The default name `s3v` can be changed by setting the `ozone.s3g.volume.name` config property in `ozone-site.xml`. +S3 buckets are stored under the `/s3v` volume. ## Getting started From e0197fd3580159ee6e09066c1dbd94f5c3b0b599 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 7 Aug 2020 16:28:52 +0200 Subject: [PATCH 105/165] HDDS-4073. Remove leftover robot.robot (#1297) --- .../dist/src/main/smoketest/robot.robot | 81 ------------------- 1 file changed, 81 deletions(-) delete mode 100644 hadoop-ozone/dist/src/main/smoketest/robot.robot diff --git a/hadoop-ozone/dist/src/main/smoketest/robot.robot b/hadoop-ozone/dist/src/main/smoketest/robot.robot deleted file mode 100644 index d677ef3c743c..000000000000 --- a/hadoop-ozone/dist/src/main/smoketest/robot.robot +++ /dev/null @@ -1,81 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Smoketest for Robot functions -Resource commonlib.robot -Test Timeout 5 minutes - -*** Test Cases *** - -Ensure Leading without Leading - ${result} = Ensure Leading / a/b - Should Be Equal ${result} /a/b - -Ensure Leading with Leading - ${result} = Ensure Leading _ _a_b_c - Should Be Equal ${result} _a_b_c - -Ensure Leading for empty - ${result} = Ensure Leading | ${EMPTY} - Should Be Equal ${result} | - - -Ensure Trailing without Trailing - ${result} = Ensure Trailing . x.y.z - Should Be Equal ${result} x.y.z. 
- -Ensure Trailing with Trailing - ${result} = Ensure Trailing x axbxcx - Should Be Equal ${result} axbxcx - -Ensure Trailing for empty - ${result} = Ensure Trailing = ${EMPTY} - Should Be Equal ${result} = - - -Format o3fs URL without path - ${result} = Format o3fs URL vol1 bucket1 - Should Be Equal ${result} o3fs://bucket1.vol1/ - -Format o3fs URL with path - ${result} = Format o3fs URL vol1 bucket1 dir/file - Should Be Equal ${result} o3fs://bucket1.vol1/dir/file - - -Format ofs URL without path - ${result} = Format ofs URL vol1 bucket1 - Should Be Equal ${result} ofs://vol1/bucket1 - -Format ofs URL with path - ${result} = Format ofs URL vol1 bucket1 dir/file - Should Be Equal ${result} ofs://vol1/bucket1/dir/file - - -Format FS URL with ofs scheme - ${result} = Format FS URL ofs vol1 bucket1 - ${expected} = Format ofs URL vol1 bucket1 - Should Be Equal ${result} ${expected} - -Format FS URL with o3fs scheme - ${result} = Format FS URL o3fs vol1 bucket1 - ${expected} = Format o3fs URL vol1 bucket1 - Should Be Equal ${result} ${expected} - -Format FS URL with unsupported scheme - ${result} = Run Keyword And Expect Error * Format FS URL http org apache - Should Contain ${result} http - Should Contain ${result} nsupported - From 90dda6a273356c804df2e5ddf08b276955c79117 Mon Sep 17 00:00:00 2001 From: micah zhao Date: Fri, 7 Aug 2020 22:30:30 +0800 Subject: [PATCH 106/165] HDDS-4066. Add core-site.xml to intellij configuration (#1292) --- .../dev-support/intellij/core-site.xml | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 hadoop-ozone/dev-support/intellij/core-site.xml diff --git a/hadoop-ozone/dev-support/intellij/core-site.xml b/hadoop-ozone/dev-support/intellij/core-site.xml new file mode 100644 index 000000000000..862b32177187 --- /dev/null +++ b/hadoop-ozone/dev-support/intellij/core-site.xml @@ -0,0 +1,27 @@ + + + + + fs.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + + + fs.defaultFS + ofs://localhost/ + + From c1de802fe3b40f38c89567ed305c6f1672f22777 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Fri, 7 Aug 2020 18:35:36 +0200 Subject: [PATCH 107/165] HDDS-4042. 
Update documentation for the GA release (#1269) --- hadoop-hdds/docs/content/_index.md | 3 +- hadoop-hdds/docs/content/_index.zh.md | 2 +- hadoop-hdds/docs/content/beyond/Containers.md | 234 ------------------ .../docs/content/beyond/Containers.zh.md | 203 --------------- .../docs/content/beyond/DockerCheatSheet.md | 88 ------- .../content/beyond/DockerCheatSheet.zh.md | 85 ------- hadoop-hdds/docs/content/beyond/_index.md | 30 --- hadoop-hdds/docs/content/beyond/_index.zh.md | 27 -- .../docs/content/concept/Containers.md | 47 ++++ .../docs/content/concept/Containers.png | Bin 0 -> 24775 bytes hadoop-hdds/docs/content/concept/Datanodes.md | 5 +- hadoop-hdds/docs/content/concept/Hdds.md | 52 ---- hadoop-hdds/docs/content/concept/Overview.md | 7 +- .../docs/content/concept/Overview.zh.md | 2 +- .../content/concept/OzoneManager-ReadPath.png | Bin 0 -> 81030 bytes .../concept/OzoneManager-WritePath.png | Bin 0 -> 96696 bytes .../docs/content/concept/OzoneManager.md | 63 ++++- .../docs/content/concept/OzoneManager.png | Bin 0 -> 13327 bytes .../docs/content/concept/OzoneManager.zh.md | 6 + .../concept/StorageContainerManager.md | 102 ++++++++ .../concept/StorageContainerManager.png | Bin 0 -> 13336 bytes ...ds.zh.md => StorageContainerManager.zh.md} | 6 + hadoop-hdds/docs/content/concept/_index.md | 4 +- hadoop-hdds/docs/content/design/ec.md | 39 +++ .../docs/content/design/namespace-support.md | 6 +- hadoop-hdds/docs/content/design/ofs.md | 135 ---------- .../docs/content/design/storage-class.md | 28 +++ hadoop-hdds/docs/content/design/topology.md | 29 +++ ...ume-management.md => volume-management.md} | 0 hadoop-hdds/docs/content/feature/GDPR.md | 80 ++++++ .../GDPR.zh.md} | 5 + .../content/feature/HA-OM-doublebuffer.png | Bin 0 -> 77661 bytes hadoop-hdds/docs/content/feature/HA-OM.png | Bin 0 -> 60888 bytes hadoop-hdds/docs/content/feature/HA.md | 115 +++++++++ .../docs/content/feature/Observability.md | 224 +++++++++++++++++ .../{shell/_index.md => feature/Recon.md} | 15 +- hadoop-hdds/docs/content/feature/Topology.md | 108 ++++++++ .../docs/content/{gdpr => feature}/_index.md | 12 +- .../content/{gdpr => feature}/_index.zh.md | 0 .../docs/content/gdpr/GDPR in Ozone.md | 42 ---- hadoop-hdds/docs/content/interface/CSI.md | 15 +- hadoop-hdds/docs/content/interface/CSI.png | Bin 0 -> 27210 bytes hadoop-hdds/docs/content/interface/Cli.md | 208 ++++++++++++++++ hadoop-hdds/docs/content/interface/JavaApi.md | 5 +- .../content/interface/{OzoneFS.md => O3fs.md} | 79 ++---- .../interface/{OzoneFS.zh.md => O3fs.zh.md} | 6 + hadoop-hdds/docs/content/interface/Ofs.md | 227 +++++++++++++++++ hadoop-hdds/docs/content/interface/S3.md | 21 ++ hadoop-hdds/docs/content/interface/_index.md | 4 +- hadoop-hdds/docs/content/recipe/Prometheus.md | 5 +- .../docs/content/recipe/Prometheus.zh.md | 7 +- .../docs/content/security/SecureOzone.md | 3 + .../content/security/SecuringDatanodes.md | 5 +- .../content/security/SecuringOzoneHTTP.md | 7 +- .../docs/content/security/SecuringS3.md | 5 +- .../docs/content/security/SecuringTDE.md | 5 +- .../docs/content/security/SecurityAcls.md | 3 + ...ityWithRanger.md => SecurityWithRanger.md} | 5 +- ...hRanger.zh.md => SecurityWithRanger.zh.md} | 0 .../docs/content/shell/BucketCommands.md | 100 -------- .../docs/content/shell/BucketCommands.zh.md | 98 -------- hadoop-hdds/docs/content/shell/Format.md | 69 ------ hadoop-hdds/docs/content/shell/Format.zh.md | 65 ----- hadoop-hdds/docs/content/shell/KeyCommands.md | 177 ------------- .../docs/content/shell/KeyCommands.zh.md | 176 
------------- .../docs/content/shell/VolumeCommands.md | 114 --------- .../docs/content/shell/VolumeCommands.zh.md | 108 -------- hadoop-hdds/docs/content/shell/_index.zh.md | 27 -- hadoop-hdds/docs/content/start/FromSource.md | 38 +-- .../docs/content/start/FromSource.zh.md | 7 +- hadoop-hdds/docs/content/tools/_index.md | 6 +- .../ozonedoc/layouts/design/section.html | 2 +- .../ozonedoc/layouts/partials/sidebar.html | 10 +- .../themes/ozonedoc/static/css/ozonedoc.css | 9 + 74 files changed, 1459 insertions(+), 1961 deletions(-) delete mode 100644 hadoop-hdds/docs/content/beyond/Containers.md delete mode 100644 hadoop-hdds/docs/content/beyond/Containers.zh.md delete mode 100644 hadoop-hdds/docs/content/beyond/DockerCheatSheet.md delete mode 100644 hadoop-hdds/docs/content/beyond/DockerCheatSheet.zh.md delete mode 100644 hadoop-hdds/docs/content/beyond/_index.md delete mode 100644 hadoop-hdds/docs/content/beyond/_index.zh.md create mode 100644 hadoop-hdds/docs/content/concept/Containers.md create mode 100644 hadoop-hdds/docs/content/concept/Containers.png delete mode 100644 hadoop-hdds/docs/content/concept/Hdds.md create mode 100644 hadoop-hdds/docs/content/concept/OzoneManager-ReadPath.png create mode 100644 hadoop-hdds/docs/content/concept/OzoneManager-WritePath.png create mode 100644 hadoop-hdds/docs/content/concept/OzoneManager.png create mode 100644 hadoop-hdds/docs/content/concept/StorageContainerManager.md create mode 100644 hadoop-hdds/docs/content/concept/StorageContainerManager.png rename hadoop-hdds/docs/content/concept/{Hdds.zh.md => StorageContainerManager.zh.md} (93%) create mode 100644 hadoop-hdds/docs/content/design/ec.md create mode 100644 hadoop-hdds/docs/content/design/storage-class.md create mode 100644 hadoop-hdds/docs/content/design/topology.md rename hadoop-hdds/docs/content/design/{ozone-volume-management.md => volume-management.md} (100%) create mode 100644 hadoop-hdds/docs/content/feature/GDPR.md rename hadoop-hdds/docs/content/{gdpr/GDPR in Ozone.zh.md => feature/GDPR.zh.md} (91%) create mode 100644 hadoop-hdds/docs/content/feature/HA-OM-doublebuffer.png create mode 100644 hadoop-hdds/docs/content/feature/HA-OM.png create mode 100644 hadoop-hdds/docs/content/feature/HA.md create mode 100644 hadoop-hdds/docs/content/feature/Observability.md rename hadoop-hdds/docs/content/{shell/_index.md => feature/Recon.md} (52%) create mode 100644 hadoop-hdds/docs/content/feature/Topology.md rename hadoop-hdds/docs/content/{gdpr => feature}/_index.md (80%) rename hadoop-hdds/docs/content/{gdpr => feature}/_index.zh.md (100%) delete mode 100644 hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md create mode 100644 hadoop-hdds/docs/content/interface/CSI.png create mode 100644 hadoop-hdds/docs/content/interface/Cli.md rename hadoop-hdds/docs/content/interface/{OzoneFS.md => O3fs.md} (65%) rename hadoop-hdds/docs/content/interface/{OzoneFS.zh.md => O3fs.zh.md} (97%) create mode 100644 hadoop-hdds/docs/content/interface/Ofs.md rename hadoop-hdds/docs/content/security/{SecuityWithRanger.md => SecurityWithRanger.md} (97%) rename hadoop-hdds/docs/content/security/{SecuityWithRanger.zh.md => SecurityWithRanger.zh.md} (100%) delete mode 100644 hadoop-hdds/docs/content/shell/BucketCommands.md delete mode 100644 hadoop-hdds/docs/content/shell/BucketCommands.zh.md delete mode 100644 hadoop-hdds/docs/content/shell/Format.md delete mode 100644 hadoop-hdds/docs/content/shell/Format.zh.md delete mode 100644 hadoop-hdds/docs/content/shell/KeyCommands.md delete mode 100644 
hadoop-hdds/docs/content/shell/KeyCommands.zh.md delete mode 100644 hadoop-hdds/docs/content/shell/VolumeCommands.md delete mode 100644 hadoop-hdds/docs/content/shell/VolumeCommands.zh.md delete mode 100644 hadoop-hdds/docs/content/shell/_index.zh.md diff --git a/hadoop-hdds/docs/content/_index.md b/hadoop-hdds/docs/content/_index.md index 52e190cf99a4..9bc7a7ae695a 100644 --- a/hadoop-hdds/docs/content/_index.md +++ b/hadoop-hdds/docs/content/_index.md @@ -1,4 +1,5 @@ --- +name: Ozone title: Overview menu: main weight: -10 @@ -29,7 +30,7 @@ Apart from scaling to billions of objects of varying sizes, Ozone can function effectively in containerized environments like Kubernetes._*

-Applications like Apache Spark, Hive and YARN, work without any modifications when using Ozone. Ozone comes with a [Java client library]({{}}), [S3 protocol support]({{< ref "S3.md" >}}), and a [command line interface]({{< ref "shell/_index.md" >}}) which makes it easy to use Ozone. +Applications like Apache Spark, Hive and YARN, work without any modifications when using Ozone. Ozone comes with a [Java client library]({{}}), [S3 protocol support]({{< ref "S3.md" >}}), and a [command line interface]({{< ref "Cli.md" >}}) which makes it easy to use Ozone. Ozone consists of volumes, buckets, and keys: diff --git a/hadoop-hdds/docs/content/_index.zh.md b/hadoop-hdds/docs/content/_index.zh.md index 8bdcf5044454..689490be11ad 100644 --- a/hadoop-hdds/docs/content/_index.zh.md +++ b/hadoop-hdds/docs/content/_index.zh.md @@ -28,7 +28,7 @@ weight: -10 Ozone 不仅能存储数十亿个不同大小的对象,还支持在容器化环境(比如 Kubernetes)中运行。_*

Apache Spark、Hive 和 YARN 等应用无需任何修改即可使用 Ozone。Ozone 提供了 [Java API]({{< -ref "JavaApi.zh.md" >}})、[S3 接口]({{< ref "S3.zh.md" >}})和[命令行接口]({{< ref "shell/_index.zh.md" >}}),极大地方便了 Ozone +ref "JavaApi.zh.md" >}})、[S3 接口]({{< ref "S3.zh.md" >}})和命令行接口,极大地方便了 Ozone 在不同应用场景下的的使用。 Ozone 的管理由卷、桶和键组成: diff --git a/hadoop-hdds/docs/content/beyond/Containers.md b/hadoop-hdds/docs/content/beyond/Containers.md deleted file mode 100644 index 13a66d801f5d..000000000000 --- a/hadoop-hdds/docs/content/beyond/Containers.md +++ /dev/null @@ -1,234 +0,0 @@ ---- -title: "Ozone Containers" -summary: Ozone uses containers extensively for testing. This page documents the usage and best practices of Ozone. -weight: 2 ---- - - -Docker heavily is used at the ozone development with three principal use-cases: - -* __dev__: - * We use docker to start local pseudo-clusters (docker provides unified environment, but no image creation is required) -* __test__: - * We create docker images from the dev branches to test ozone in kubernetes and other container orchestrator system - * We provide _apache/ozone_ images for each release to make it easier for evaluation of Ozone. - These images are __not__ created __for production__ usage. - -

- -* __production__: - * We have documentation on how you can create your own docker image for your production cluster. - -Let's check out each of the use-cases in more detail: - -## Development - -Ozone artifact contains example docker-compose directories to make it easier to start Ozone cluster in your local machine. - -From distribution: - -```bash -cd compose/ozone -docker-compose up -d -``` - -After a local build: - -```bash -cd hadoop-ozone/dist/target/ozone-*/compose -docker-compose up -d -``` - -These environments are very important tools to start different type of Ozone clusters at any time. - -To be sure that the compose files are up-to-date, we also provide acceptance test suites which start -the cluster and check the basic behaviour. - -The acceptance tests are part of the distribution, and you can find the test definitions in `smoketest` directory. - -You can start the tests from any compose directory: - -For example: - -```bash -cd compose/ozone -./test.sh -``` - -### Implementation details - -`compose` tests are based on the apache/hadoop-runner docker image. The image itself does not contain -any Ozone jar file or binary just the helper scripts to start ozone. - -hadoop-runner provdes a fixed environment to run Ozone everywhere, but the ozone distribution itself -is mounted from the including directory: - -(Example docker-compose fragment) - -``` - scm: - image: apache/hadoop-runner:jdk11 - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - -``` - -The containers are configured based on environment variables, but because the same environment -variables should be set for each containers we maintain the list of the environment variables -in a separated file: - -``` - scm: - image: apache/hadoop-runner:jdk11 - #... - env_file: - - ./docker-config -``` - -The docker-config file contains the list of the required environment variables: - -``` -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -#... -``` - -As you can see we use naming convention. Based on the name of the environment variable, the -appropriate hadoop config XML (`ozone-site.xml` in our case) will be generated by a -[script](https://github.com/apache/hadoop/tree/docker-hadoop-runner-latest/scripts) which is -included in the `hadoop-runner` base image. - -The [entrypoint](https://github.com/apache/hadoop/blob/docker-hadoop-runner-latest/scripts/starter.sh) -of the `hadoop-runner` image contains a helper shell script which triggers this transformation and -can do additional actions (eg. initialize scm/om storage, download required keytabs, etc.) -based on environment variables. - -## Test/Staging - -The `docker-compose` based approach is recommended only for local test, not for multi node cluster. -To use containers on a multi-node cluster we need a Container Orchestrator like Kubernetes. - -Kubernetes example files are included in the `kubernetes` folder. - -*Please note*: all the provided images are based the `hadoop-runner` image which contains all the -required tool for testing in staging environments. For production we recommend to create your own, -hardened image with your own base image. - -### Test the release - -The release can be tested with deploying any of the example clusters: - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -Plese note that in this case the latest released container will be downloaded from the dockerhub. 
- -### Test the development build - -To test a development build you can create your own image and upload it to your own docker registry: - - -```bash -mvn clean install -DskipTests -Pdocker-build,docker-push -Ddocker.image=myregistry:9000/name/ozone -``` - -The configured image will be used in all the generated kubernetes resources files (`image:` keys are adjusted during the build) - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -## Production - - - -You can use the source of our development images as an example: - - * [Base image](https://github.com/apache/hadoop/blob/docker-hadoop-runner-jdk11/Dockerfile) - * [Docker image](https://github.com/apache/hadoop/blob/trunk/hadoop-ozone/dist/src/main/docker/Dockerfile) - - Most of the elements are optional and just helper function but to use the provided example - kubernetes resources you may need the scripts from - [here](https://github.com/apache/hadoop/tree/docker-hadoop-runner-jdk11/scripts) - - * The two python scripts convert environment variables to real hadoop XML config files - * The start.sh executes the python scripts (and other initialization) based on environment variables. - -## Containers - -Ozone related container images and source locations: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# | Container | Repository | Base | Branch | Tags | Comments
1 | apache/ozone | https://github.com/apache/hadoop-docker-ozone | hadoop-runner | ozone-... | 0.3.0,0.4.0,0.4.1 | For each Ozone release we create new release tag.
2 | apache/hadoop-runner | https://github.com/apache/hadoop | centos | docker-hadoop-runner | jdk11,jdk8,latest | This is the base image used for testing Hadoop Ozone. This is a set of utilities that make it easy for us run ozone.
diff --git a/hadoop-hdds/docs/content/beyond/Containers.zh.md b/hadoop-hdds/docs/content/beyond/Containers.zh.md deleted file mode 100644 index c06902e04a36..000000000000 --- a/hadoop-hdds/docs/content/beyond/Containers.zh.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: "Ozone 中的容器技术" -summary: Ozone 广泛地使用容器来进行测试,本页介绍 Ozone 中容器的使用及其最佳实践。 -weight: 2 ---- - - -Ozone 的开发中大量地使用了 Docker,包括以下三种主要的应用场景: - -* __开发__: - * 我们使用 docker 来启动本地伪集群(docker 可以提供统一的环境,但是不需要创建镜像)。 -* __测试__: - * 我们从开发分支创建 docker 镜像,然后在 kubernetes 或其它容器编排系统上测试 ozone。 - * 我们为每个发行版提供了 _apache/ozone_ 镜像,以方便用户体验 Ozone。 - 这些镜像 __不__ 应当在 __生产__ 中使用。 - - - -* __生产__: - * 我们提供了如何为生产集群创建 docker 镜像的文档。 - -下面我们来详细地介绍一下各种应用场景: - -## 开发 - -Ozone 安装包中包含了 docker-compose 的示例目录,用于方便地在本地机器启动 Ozone 集群。 - -使用官方提供的发行包: - -```bash -cd compose/ozone -docker-compose up -d -``` - -本地构建方式: - -```bash -cd hadoop-ozone/dist/target/ozone-*/compose -docker-compose up -d -``` - -这些 compose 环境文件是重要的工具,可以用来随时启动各种类型的 Ozone 集群。 - -为了确保 compose 文件是最新的,我们提供了验收测试套件,套件会启动集群并检查其基本行为是否正常。 - -验收测试也包含在发行包中,你可以在 `smoketest` 目录下找到各个测试的定义。 - -你可以在任意 compose 目录进行测试,比如: - -```bash -cd compose/ozone -./test.sh -``` - -### 实现细节 - -`compose` 测试都基于 apache/hadoop-runner 镜像,这个镜像本身并不包含任何 Ozone 的 jar 包或二进制文件,它只是提供其了启动 Ozone 的辅助脚本。 - -hadoop-runner 提供了一个随处运行 Ozone 的固定环境,Ozone 分发包通过目录挂载包含在其中。 - -(docker-compose 示例片段) - -``` - scm: - image: apache/hadoop-runner:jdk11 - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - -``` - -容器应该通过环境变量来进行配置,由于每个容器都应当设置相同的环境变量,我们在单独的文件中维护了一个环境变量列表: - -``` - scm: - image: apache/hadoop-runner:jdk11 - #... - env_file: - - ./docker-config -``` - -docker-config 文件中包含了所需环境变量的列表: - -``` -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -#... -``` - -你可以看到我们所使用的命名规范,根据这些环境变量的名字,`hadoop-runner` 基础镜像中的[脚本](https://github.com/apache/hadoop/tree/docker-hadoop-runner-latest/scripts) 会生成合适的 hadoop XML 配置文件(在我们这种情况下就是 `ozone-site.xml`)。 - -`hadoop-runner` 镜像的[入口点](https://github.com/apache/hadoop/blob/docker-hadoop-runner-latest/scripts/starter -.sh)包含了一个辅助脚本,这个辅助脚本可以根据环境变量触发上述的配置文件生成以及其它动作(比如初始化 SCM 和 OM 的存储、下载必要的 keytab 等)。 - -## 测试 - -`docker-compose` 的方式应当只用于本地测试,不适用于多节点集群。要在多节点集群上使用容器,我们需要像 Kubernetes 这样的容器编排系统。 - -Kubernetes 示例文件在 `kubernetes` 文件夹中。 - -*请注意*:所有提供的镜像都使用 `hadoop-runner` 作为基础镜像,这个镜像中包含了所有测试环境所需的测试工具。对于生产环境,我们推荐用户使用自己的基础镜像创建可靠的镜像。 - -### 发行包测试 - -可以通过部署任意的示例集群来测试发行包: - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -注意,在这个例子中会从 Docker Hub 下载最新的镜像。 - -### 开发构建测试 - -为了测试开发中的构建,你需要创建自己的镜像并上传到自己的 docker 仓库中: - - -```bash -mvn clean install -DskipTests -Pdocker-build,docker-push -Ddocker.image=myregistry:9000/name/ozone -``` - -所有生成的 kubernetes 资源文件都会使用这个镜像 (`image:` keys are adjusted during the build) - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -## 生产 - - - -你可以使用我们开发中所用的镜像作为示例: - - * [基础镜像] (https://github.com/apache/hadoop/blob/docker-hadoop-runner-jdk11/Dockerfile) - * [完整镜像] (https://github.com/apache/hadoop/blob/trunk/hadoop-ozone/dist/src/main/docker/Dockerfile) - - Dockerfile 中大部分内容都是可选的辅助功能,但如果要使用我们提供的 kubernetes 示例资源文件,你可能需要[这里](https://github.com/apache/hadoop/tree/docker-hadoop-runner-jdk11/scripts)的脚本。 - - * 两个 python 脚本将环境变量转化为实际的 hadoop XML 配置文件 - * start.sh 根据环境变量执行 python 脚本(以及其它初始化工作) - -## 容器 - -Ozone 相关的容器镜像和 Dockerfile 位置: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# | 容器 | 仓库 | 基础镜像 | 分支 | 标签 | 说明
1 | apache/ozone | https://github.com/apache/hadoop-docker-ozone | hadoop-runner | ozone-... | 0.3.0,0.4.0,0.4.1 | 每个 Ozone 发行版都对应一个新标签。
2 | apache/hadoop-runner | https://github.com/apache/hadoop | centos | docker-hadoop-runner | jdk11,jdk8,latest | 这是用于测试 Hadoop Ozone 的基础镜像，包含了一系列可以让我们更加方便地运行 Ozone 的工具。
diff --git a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md b/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md deleted file mode 100644 index f4f5492cf177..000000000000 --- a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Docker Cheat Sheet" -date: 2017-08-10 -summary: Docker Compose cheat sheet to help you remember the common commands to control an Ozone cluster running on top of Docker. -weight: 4 ---- - - - -In the `compose` directory of the ozone distribution there are multiple pseudo-cluster setup which -can be used to run Ozone in different way (for example: secure cluster, with tracing enabled, -with prometheus etc.). - -If the usage is not document in a specific directory the default usage is the following: - -```bash -cd compose/ozone -docker-compose up -d -``` - -The data of the container is ephemeral and deleted together with the docker volumes. -```bash -docker-compose down -``` - -## Useful Docker & Ozone Commands - -If you make any modifications to ozone, the simplest way to test it is to run freon and unit tests. - -Here are the instructions to run freon in a docker-based cluster. - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} - -This will open a bash shell on the data node container. -Now we can execute freon for load generation. - -{{< highlight bash >}} -ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10 -{{< /highlight >}} - -Here is a set of helpful commands for working with docker for ozone. -To check the status of the components: - -{{< highlight bash >}} -docker-compose ps -{{< /highlight >}} - -To get logs from a specific node/service: - -{{< highlight bash >}} -docker-compose logs scm -{{< /highlight >}} - - -As the WebUI ports are forwarded to the external machine, you can check the web UI: - -* For the Storage Container Manager: http://localhost:9876 -* For the Ozone Manager: http://localhost:9874 -* For the Datanode: check the port with `docker ps` (as there could be multiple data nodes, ports are mapped to the ephemeral port range) - -You can start multiple data nodes with: - -{{< highlight bash >}} -docker-compose scale datanode=3 -{{< /highlight >}} - -You can test the commands from the [Ozone CLI]({{< ref "shell/_index.md" >}}) after opening a new bash shell in one of the containers: - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} diff --git a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.zh.md b/hadoop-hdds/docs/content/beyond/DockerCheatSheet.zh.md deleted file mode 100644 index 0a37f9ba0714..000000000000 --- a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.zh.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "Docker 速查表" -date: 2017-08-10 -summary: Docker Compose 速查表帮助你记住一些操作在 Docker 上运行的 Ozone 集群的常用命令。 -weight: 4 ---- - - - -Ozone 发行包中的 `compose` 目录包含了多种伪集群配置,可以用来以多种方式运行 Ozone(比如:安全集群,启用追踪功能,启用 prometheus 等)。 - -如果目录下没有额外的使用说明,默认的用法如下: - -```bash -cd compose/ozone -docker-compose up -d -``` - -容器中的数据没有持久化,在集群关闭时会和 docker 卷一起被删除。 -```bash -docker-compose down -``` - -## Docker 和 Ozone 实用命令 - -如果你对 Ozone 做了修改,最简单的测试方法是运行 freon 和单元测试。 - -下面是在基于 docker 的集群中运行 freon 的命令。 - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} - -这会在数据节点的容器中打开一个 bash shell,接下来我们执行 freon 来生成负载。 - -{{< highlight bash >}} -ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10 -{{< /highlight >}} - -下面是一些与 docker 有关的实用命令。 -检查各组件的状态: - -{{< highlight bash >}} -docker-compose ps -{{< 
/highlight >}} - -获取指定节点/服务中的日志: - -{{< highlight bash >}} -docker-compose logs scm -{{< /highlight >}} - - -因为 WebUI 的端口已经被转发到外部机器,你可以查看 web UI: - -* 对于 Storage Container Manager:http://localhost:9876 -* 对于 Ozone Manager:http://localhost:9874 -* 对于 数据节点:使用 `docker ps` 查看端口(因为可能会有多个数据节点,它们的端口被映射到一个临时的端口) - -你也可以启动多个数据节点: - -{{< highlight bash >}} -docker-compose scale datanode=3 -{{< /highlight >}} - -在一个容器中打开 bash shell 后,你也可以对 [Ozone 命令行接口]({{< ref "shell/_index.zh.md" >}})中的命令进行测试。 - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} diff --git a/hadoop-hdds/docs/content/beyond/_index.md b/hadoop-hdds/docs/content/beyond/_index.md deleted file mode 100644 index 2a29a5810aab..000000000000 --- a/hadoop-hdds/docs/content/beyond/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Beyond Basics" -date: "2017-10-10" -menu: main -weight: 7 - ---- - - -{{}} - Beyond Basics pages go into custom configurations of Ozone, including how - to run Ozone concurrently with an existing HDFS cluster. These pages also - take deep into how to run profilers and leverage tracing support built into - Ozone. -{{}} diff --git a/hadoop-hdds/docs/content/beyond/_index.zh.md b/hadoop-hdds/docs/content/beyond/_index.zh.md deleted file mode 100644 index b7f6775674e2..000000000000 --- a/hadoop-hdds/docs/content/beyond/_index.zh.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "进阶" -date: "2017-10-10" -menu: main -weight: 7 - ---- - - -{{}} - 本部分介绍 Ozone 的自定义配置,包括如何将 Ozone 以并存的方式部署到已有的 HDFS 集群,以及如何运行 Ozone 内置的 profilers 和 tracing 功能。 -{{}} diff --git a/hadoop-hdds/docs/content/concept/Containers.md b/hadoop-hdds/docs/content/concept/Containers.md new file mode 100644 index 000000000000..4e46acc5a280 --- /dev/null +++ b/hadoop-hdds/docs/content/concept/Containers.md @@ -0,0 +1,47 @@ +--- +title: Containers +weight: 5 +menu: + main: + parent: Architecture +summary: Description of the Containers, the replication unit of Ozone. +--- + + + +Containers are the fundamental replication unit of Ozone/HDDS, they are managed by the Storage Container Manager (SCM) service. + +Containers are big binary units (5Gb by default) which can contain multiple blocks: + +![Containers](Containers.png) + +Blocks are local information and not managed by SCM. Therefore even if billions of small files are created in the system (which means billions of blocks are created), only of the status of the containers will be reported by the Datanodes and containers will be replicated. + +When Ozone Manager requests a new Block allocation from the SCM, SCM will identify the suitable container and generate a block id which contains `ContainerId` + `LocalId`. Client will connect to the Datanode which stores the Container, and datanode can manage the separated block based on the `LocalId`. + +## Open vs. Closed containers + +When a container is created it starts in an OPEN state. When it's full (~5GB data is written), container will be closed and becomes a CLOSED container. 
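As a rough illustration of the two-part block address described a few paragraphs above (not part of this patch), the sketch below assumes only the `org.apache.hadoop.hdds.client.BlockID` value class; the numeric ids are made up:

```java
import org.apache.hadoop.hdds.client.BlockID;

public class BlockIdSketch {
  public static void main(String[] args) {
    // SCM picks the container; the datanode resolves the block inside it by its local id.
    BlockID blockID = new BlockID(42L, 1001L);   // containerID, localID
    System.out.println("containerID = " + blockID.getContainerID());
    System.out.println("localID     = " + blockID.getLocalID());
  }
}
```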
+ +The fundamental differences between OPEN and CLOSED containers: + +OPEN | CLOSED +-----------------------------------|----------------------------------------- +mutable | immutable +replicated with RAFT (Ratis) | Replicated with async container copy +Raft leader is used to READ / WRITE | All the nodes can be used to READ diff --git a/hadoop-hdds/docs/content/concept/Containers.png b/hadoop-hdds/docs/content/concept/Containers.png new file mode 100644 index 0000000000000000000000000000000000000000..3d2df0f313bc83f5d78223c0f3fd54863c8f4ed1 GIT binary patch literal 24775 [base85-encoded binary image data for Containers.png omitted]
zB_KwDmY}JX196u-`mhLG#}5mbmM(w;trI2wkX^XdUr- zS$FsHIDiIoHSzT8!zo67@B^=MZ$DFp2cV#=VCe!@wC5mvOEZl1cjJ|!!d>`=xJ5ah z1(3qRdUCV1vZTC^Gb< z3eU?8zcQ&5qnC&@_+9z6aEj3Ya{KNIYZbU%6B->%uK-dUAtMpwF$nsRu1AF@R^V+U zQCAHF{}+!mT10C)#)eI$p9a8=ad}lhG6Jlv^JoCCMGo> zeqiWc`s|IIM$i~Ab;N`>v=aIzN9;~PEd4YEX0)8w$N#(GS{vl^dLRy2!MW?IH+e(H zk#g8O20TDZ3tU9ti)pBGk0kW*J_J`4IDlNU(T18HFsT5}wH#5>?ly - -Storage container manager provides multiple critical functions for the Ozone -cluster. SCM acts as the cluster manager, Certificate authority, Block -manager and the Replica manager. - -{{}} -SCM is in charge of creating an Ozone cluster. When an SCM is booted up via init command, SCM creates the cluster identity and root certificates needed for the SCM certificate authority. SCM manages the life cycle of a data node in the cluster. -{{}} - -{{}} -SCM's Ceritificate authority is in -charge of issuing identity certificates for each and every -service in the cluster. This certificate infrastructre makes -it easy to enable mTLS at network layer and also the block -token infrastructure depends on this certificate infrastructure. -{{}} - -{{}} -SCM is the block manager. SCM -allocates blocks and assigns them to data nodes. Clients -read and write these blocks directly. -{{}} - - -{{}} -SCM keeps track of all the block -replicas. If there is a loss of data node or a disk, SCM -detects it and instructs data nodes make copies of the -missing blocks to ensure high avialablity. -{{}} diff --git a/hadoop-hdds/docs/content/concept/Overview.md b/hadoop-hdds/docs/content/concept/Overview.md index 23fcda2325ae..f478734124ec 100644 --- a/hadoop-hdds/docs/content/concept/Overview.md +++ b/hadoop-hdds/docs/content/concept/Overview.md @@ -2,6 +2,11 @@ title: Overview date: "2017-10-10" weight: 1 +menu: + main: + name: "ArchitectureOverview" + title: "Overview" + parent: Architecture summary: Ozone's overview and components that make up Ozone. --- @@ -29,7 +34,7 @@ scale to billions of objects. Ozone separates namespace management and block space management; this helps ozone to scale much better. The namespace is managed by a daemon called [Ozone Manager ]({{< ref "OzoneManager.md" >}}) (OM), and block space is -managed by [Storage Container Manager]({{< ref "Hdds.md" >}}) (SCM). +managed by [Storage Container Manager]({{< ref "StorageContainerManager.md" >}}) (SCM). Ozone consists of volumes, buckets, and keys. 
diff --git a/hadoop-hdds/docs/content/concept/Overview.zh.md b/hadoop-hdds/docs/content/concept/Overview.zh.md
index de16738a423c..042651ed1b2f 100644
--- a/hadoop-hdds/docs/content/concept/Overview.zh.md
+++ b/hadoop-hdds/docs/content/concept/Overview.zh.md
@@ -24,7 +24,7 @@ summary: 介绍 Ozone 的整体和各个组件。
 
 Ozone 是一个分布式、多副本的对象存储系统，并针对大数据场景进行了专门的优化。Ozone 主要围绕可扩展性进行设计，目标是十亿数量级以上的对象存储。
 
-Ozone 通过对命名空间与块空间的管理进行分离，大大增加了其可扩展性，其中命名空间由 [Ozone Manager ]({{< ref "OzoneManager.zh.md" >}})（OM）管理，块空间由 [Storage Container Manager] ({{< ref "Hdds.zh.md" >}})（SCM）管理。
+Ozone 通过对命名空间与块空间的管理进行分离，大大增加了其可扩展性，其中命名空间由 [Ozone Manager ]({{< ref "OzoneManager.zh.md" >}})（OM）管理，块空间由 [Storage Container Manager] ({{< ref "StorageContainerManager.zh.md" >}})（SCM）管理。
 
 Ozone 的管理由卷、桶和键组成。卷类似于个人主目录，只有管理员可以创建。
 
diff --git a/hadoop-hdds/docs/content/concept/OzoneManager-ReadPath.png b/hadoop-hdds/docs/content/concept/OzoneManager-ReadPath.png
new file mode 100644
index 0000000000000000000000000000000000000000..5e68f6fc1cd6a414ac48df5c2e1d95965ecd301e
GIT binary patch
literal 81030
[... base85-encoded PNG image data omitted ...]

literal 0
HcmV?d00001

diff --git a/hadoop-hdds/docs/content/concept/OzoneManager-WritePath.png b/hadoop-hdds/docs/content/concept/OzoneManager-WritePath.png
new file mode 100644
index 0000000000000000000000000000000000000000..924b61c31a231abbecfca6216e34603a4ba8d561
GIT binary patch
literal 96696
[... base85-encoded PNG image data omitted ...]
z9yEg9P&1-oYEOAn&ktWuL%6|@uP-clVQn>iHsy^GP4F!yxL*TO8yJ6X)C&YzEW3A{ z{58h&Wtv&xI+JL894yP{Jrp8(L?aDa>Sgv$rLF5U8}G&C0)}r^W&_<#j0@7+D<*&lGSZ&$(V$*`{T4C@Ek0o8xckWqQlOPwBk};Z7ODX&UB`g z<=p^%c>nxq_D?z(V8`tD-RS~3L+@9Q50C&DBZ*0u=OZ&-S-;<;I*Z(FSjfNSxL8zCjU~=97T+u!AeKRgnYNZBsK%bdspEGR)Dnm%pO=Sh`Y*tA0(@JO-J0;YE5g}Ns^#im z9tQo%lFaD4Db;S$@{j!=mU`F$>i|S6hRhA1AO; zRHZ2tkYl$WfP7jl;*PPQ2tTr>LRwp$kHP9pWdh> zgSNGQ6NUX#wu341Z~Yt}ez1smn46ihXSpZ{Y5|Y<7eAMSSdDu9T(re}4btH=nt^OxJ zawNA-S9So&Q9zu`;(cx}u++#W29>U@sH_rea=+kx94GIk!LWI2_R~Qt6DYXh-xA@SCLy2TVxpzd&0cArveQF)EXGKXo`SBcSXU9X`D616I`2 zY&GZ~a$_a)aK0*cOiR^wh80(xv<(P-gF^ThN;N)p%=0+0%OQ;(6woX~W61=hd<2ht z??%=S?UphtLhuJ@4TkPaSAgyKX*I$}SNrKu!pc@|q3(z|4PfY#(IE1Oy;0g-pnr620@t{;3NNp{U+HUErjc+N8typsrEKQfg4XG2*?9Kzi z1&HVS??JsYzxBbq5vAZJvzH&{J40;a_?GN|vw%T}Dk@$7q{fc2MWyQtKob;7MG~mt zmW5N8*0D!Hk@4k|7G83-70rW@{-pZ5Z-Y-paF25wLRT z^(U`&{t(X09;_3jUE?7Em0s&_{;ZSYgtj}W7Klj1l=yK=I4S`-X{GE7z~yK5w2D@n z7Ag~K+8oL<$UN3!B{QnN2%S@%FXD#P9ysalo?U7lr^Pp{iM zTX2wu-uyhH?jMc_Wf4tj&!rIV^Nu-@5eqXOtE0A;H6vcK~!s>|Di zOBq*4UE;n>P#rn7zcDX|%&cpGNzJ}9;0C%;5eE2yeCFUrMQ!=^NN{i8cf{&Z17lVc7*W}AV`mbgG>TPmsl<1kY!bK9LeMtOl4x{`K6_@k_6-}8g2 zC!Y31QXQ!|2NQC@7Z8VEa*8c4NoleBM)Cs4)I2wHJaXSU8y1S(S*j@p4 zC?qW?B4*=P9e=;qOPT1rjb_Hu_8=u70;Pznc~s)6iDJR$sNMp(eh|64a>@gAYG zs#lAHq-XFsx*iy-e{nNr_SW{ko%~D8a}2Qew<6IqH`9O)q4t6D83@ee)_4M1FTt-M zm~z<0r<^PS!Blr7eX(p`pG1cr%Tp}2EkvEMWZpTv$WW^SSH>xY&4-A3AiaxC00FHx zVbV^~9HI--A;#LQ>*H^r1Lan1L*DIL0|+lWEezft;XTY%)?4TKYbO>4jxleK#HprH zhV>$?WtMSGWNpcJewju+GVvYKh^;dABeAvifEL^cN(tU)w}CpL)0tt`@dNe2y1wyJsf9e*{5I0}Xct-2Z_j(BI+J!^%m)8%cS^ zCj@AgsJ+ne^}{yI4SxiX%dvhQEroyOa;{Hy_8m;`aq|%vJbPkiz(H3p{8JjTt8)(f?;&6>0#8$Z*?vr=zBK0 zh3mgc9I);l#-If&NQl`8h2 zYaItIpgr~kD|PU`X_oPUN7KcX69}sZ?OFoxW{4ELBmp8@D3t%Qv!GL47YN89;v`>C zUUaORPEp7! zm8cc2Mn9m^WhHeKbS#HJ_9|mvj-wywA$E)xL-5ykODagOl!7%gp_*}0n%*Q1D*M(| zrA4V%KeH4$!&-0fT<69J^z;nUwqS2e-1;tHe-Kz*pepFiNX75*bSZy6u`OhU26W%7Sp0Osp^vjJvBKmxMn?c!zyqA!+p<>JGDA`(QG zP%ebfup`AgO2C{K;edIowFi#AsAj4I{zD;+rEmIGUx{`pj(gb3 zP|J30f@&Jd9#8?|@G@qCq=opKmZW_}RS5J2fLl1R=gP6yzz8;hO7~M7E9$t5E*VAB z=ZOMCR#`s%MWL_3eO`z!+nU*=V|svjqjXU~H5`GScLIcq5|w6Ebi-AaW{&Q^&EP$LfnKmYW|gdyi`KwS@u zZjz>fIrJq`GI02x5jd>be}LfCp@;t@9{=n`L+EM+0Kly(~a0?)~JpG2?C66FH z6GS9om>;N31b#sr;XYt)#UAw55ciz)ol|1*4Su@0SFoq($681!~wjx5{8PYrqO zB70E3GOiIgU}hi$BvAD#L$&_@|u#gJ>eB zyh{ek2ni)qpt|-tz6z@+%2rHRE`_1IT=L;_bI!H_U30WO`f6ZU5Eej9(s%2&fvV~H z#BCt8^x(Y&pE|q~I0mmF;J8%np5PIJb5|aw21y$Y)@^h3dN{tn!j_9Hlak;K1zBpN zapjm*H5OtCK!nQuW!EzSw4k$OEva@UB<&zRMZ>;E-GVn-=!hPJmYa%sAM2@Rx+C~P zg5p?3OZ*SHg|h0!FXWEc1@1e37)8+g_43Bq*aE4@tuUnZK8Z(B=Rn}y#1PBd8MCwm ztde0ycWG&a*qbP0dUvl=(iTByph4~2W4;mvIIG^;&*qh^zh{F)&0W7}Db#%GQJO$+ zYnLNYMOGDPMb}C9Jeoft3Zei)&sKtRJK!J(#RcFEA}?@v!Zn$qemHe9stC$RC}3^dDpEswNNYi^_p!9hzs2HuME@)*_Fn23f%P%Uu7 zAgKx3V*+zM=+r8x!$KcW4FDQZVK02V$F4~8wD)R=z&i%!aYMBJH-R^hz_V=nx_~|& z{!^!;@Ln$gK6lr*QfkE;t)zti!|3T?o*W@IsL|k;rb%F&D@? 
zrpEF%eDgAd83{aL@ayXw*0m8>$Z5pJ6p*>Uge{XB{33%OvU1Z&AsJAt&;W582M6!T z3{Wod@`r**`(ZkASO$Ij+CC6A-uwn}%fK}%dyh6e-5e|de(dkLi_+66n~KpO`( z4r+ZBP?l+L&2Zn4$*0>(7+6%w9P^`tOZ~ttY`{^tI6TD?0{wA45~}ugXVtK&!rf#k1-y z5S**R{1X%k?__&g(2@^S0KpF$$V8;6^*}v*3A#T}m;(i=@y1u9F%aCZ%;hc?`m&4L z=6#6#xBMCeXtUc_N>Sn}9rVHN=}9F|3~6QOGg2A>xXla4phF7Jgo5jqh;X);qK_33 zAhYI9tbT-$68BrIy=4>n|BJVGkDlMp>-u(=^~fHrB2u*UB9v8Y?P4yIOd*0z8GD%%U^Xi=bAl!GGnh#slBRRpWlrQoGikx~!U zLo0pd``cx)7m54<80L-0FekrT+*(MUMP?WJW;)YUwkbeZXuR6mS4+BFzRCuFCBKZirkWnK+- zLT&Bbvf=Mu6akS6?GpAXq>#XJL0#so1c)We^1xE_`Ou?QuFkUq^&iNcEWypUS6FfG zgabKK#jfM$e%Dc`cB$FL40u<*Oj!(2jK=E$hCsunC)*t5pjzDp5p@y#;ZkxV`E%py z0}*)JS@bDG3I!0OA|NEJ*B01I33$%rV-`LqKJa0z3WrD5D=aam&#T>3w+!3I$HF!$ z4oR9G?4Ix%Sxi}K0!{#S_lrX{*^_ni&HLqf_xxxz?jenu5|F*7Q}86~9>&v}HFq-3i5r4;4*7_0zOi}66+22B`CLM{{!BUshvPPbrqMb* zor4R5Rse)sldRc^U5-TN8mCS`z@cRbN3cNR+FT@d9gwYhylZur>BO^qhz>r7QuKtf zM+je*Y2_<#w)>m~go|o84A!sN7P3v2_+h4^w=avlH~aW^K)f`{6TJJeByJYXqil5Xx_ z2qgYlUMP?u!7>Qvs%@N1{Ah~RYwpyeQ*OFRil_RjcZYE-eFxpJfzS5G1OAg#H9Egg z@dAUR0SW0t>)^K+k-a0b;|#E?6Dz9@ghmXAtb)=VENhegz*S!oTmo$^$Tf-&jU+o# z)*QJAk;Co20$czTK&(Bb{g}-XG7>O)kYaksM z!me8=m0ZfK4viF;5}Dwv_+BKGi|{J8=7^u%#UBNE63!ic&+kXNHOYWVR`4rhoam8& zFkpa5GPY+hJ52g{n;7nic*n|UWEBiA^piCngW{bDMuNNhQnBo$7x_e}6Lq|f18#uT zi61MmyV-(2gHlZzE)+~{uj@+FLDvg0;>3e?eQZ1|PbpfvsCq{~V-K+iS$cEGeKO4uR=82}Ld+ovAnDN3MYY4Zy z)ylrGxp8Nr_T55ex34mk>5e)J-qCowX$X&nZOfin$LA8dRy=?Y77yzdAq1khV&b39 zdp;rO_}X9WkU9*A$GyNWYhzhJM!ADdLc@atfcw4M-#z6uf2@~wgL-cmRH{ysZ zt7JUeB?_)%PmFZ%*z+jM7&rU=bUFQ8rKgV%Yk;}noiyq5i;uo&v)@D8ySV)SIjr9U z#8eQVfDYx&Rx*I?HhDfkK?gofe5BK@TR2N#k_jj_5eCJlF*R_JNO9+Ku1fx*QXz~- zhQ80ItU4Dd_VRFHvRh>Tt?yG0Bi#u8!E)MZz))(NczD{u0ajR9lJ3c;MfACCGoQ}F zQnu(`$OnG`TT=eeQK!J;14aY21hj7(R6MCr-uEq3)U;N4HJ$hp0Wd;4wg3wVE=Yd_ zk_dJ=(=Wbi=LR^I%ZU0F-b3Lqmy>qt_r1u;a|LGhh^{aqK?{)kYBL?2h30^l5w3c0 z$j>9-D({aEH9Pr1oQ%I?o_fL@$OILyXeaZ(J_PYJwGDk61ui zj^03$A$hm{73_K1M2r8nRb z?U0!2gf1wuv?IE25UM-Vp!=#O@W^&98C59?NUb7!QP7w`HC<*B3O_T;Bi=U%+UaRd z*G%qLNL!vxHyJd}6U4*E6`_+@5*&Zg1kKDT4$0X!2RU=6%i$#Y?BLSp7)OV$%HL3H_Q-jeuCdej%fFc7! 
zWaf`6LDnAHKV6vhlqRplO@a`}A}iz6>XES$#>){{%52ie@nf9Klc$fu zsAkNSecc>V_UMP}?DN=t<1>20@fa6zyaX_4KhE(UC_F1g)2yQM<{3(j8ievIg^^VA zg0$&4&jDu7I$%8&eP5I8_9h(n>8&luaMG42H|`oDo`GXIkr;oGc>?3}wYqv$|*q!F?{eyn0!Q%nA)mY|QRz7SI>rF6J$IgQGKM z3#8J$6E+c;c3%&4<_8mqX9C^yh(R^(;5FdBOqg|*lk+UHaTAqGX=P(K;Ft9Mgi1Tsl5A5i_Z zL2cEwj4slabs>N|c9Wipj>jjz_Vc8MM+Hoqa0%Mbp!T2u!|MS)TVVB-kx%l87>ST= z(`LX564CfEpDR1E9|^d|6Y-^j)1!TpmQlu_d)=(|;0n8ZkO}LNqKK9NWAj&giE^F<8v{4!g_M3 zba}J>{LA8J_PFum3(T&9cP1zy=-r-L~R6RDcIP& ziM#gw$auQ!cf#a4>~XYod3&OdyIFOi0W93+1Y{TDHRF)jWc}i015v0uJRRl{sWuC& znR$fG-?0G@j6hr;Cp#_9Df@i5hwuW&2JS*om*DV+p9`mgsct>FY6i0mv&jY0GyF`S zoJW2pFTpVcC{e}0RNHP&ZbLO1US)&N=mlWB2wh>x={X9`u7u6-!!7UneO43#zv5@$ zS`-exa>M?uRC&wGO0xK&ZynCNq_A{N3d@3)fcIrTt%DjokctS@CBsQkWIxf_px<(p za;$l%pCDVj>#12s;7GIaBk!J^fre7SQ+2nEgT$(-XBAG-XBT2WyTL+oky$Uoc%6OT zdCsM$)9t8(*=Qt3`h`#ID|6yJlJ`97-9rY%EvyGpni{`P^&D>wujrUlmvg?p(Xtuf zmn%e96p^e=4A?qatjd#TB0WDxscLZq!kZzgMoBwOP%d=jOtu*Ewyz+!r_lP+p@`B& zNkDJ(rWA0U)hlzn-XF0!+srd|GMb(tvOD8SjBS8W@~>HW4yB-?zbwus%i!w_SVFb2=di7t9lYzp~z%7wk!;316-O8TK^i=b+ z#^e0LJ=wj;a4FzP6tYHy(3zp}m8}rT&fxs>Jh?{Onh3)R;^=#ISDWxAhhtZ_9v~On=!$r~jdc5ycD&2By@)p2 zvt@hdY=Tcmq(1jdtAdoqs1aSQ90RwVVNlNQx`lzlKhAo4dJb~Fq_-s?%c}5Y3On8M z8j=T|Wb&_0C@ZTeYdM`cEhH4W?af!z>BznbZ*Ig@;=Mf(^F_!fHMk5C7={YYV7l64 z4?#$pLlWb04JqaAK+F#yV7Z{f!X53B0^^uq+C9^Z#<=PV6&?~D>Rn|YR+;-r*VLre zecLM3?KR@wsvj0CXs1~Xfk3+6(yK;ClHb=mH;d`O;=TVL_Rto?j{SR!Ehom`ms6#= zqDR%SrBG~2E=KsFC>OUlHH{$6*OWT|{&OE49)#enZCry*v8qEHJ(5F07@I8aQ*xo* zScNFp@jj5M=fCWYIdlXFX%l6l?5o|;G$i}`2c4flnK-6a&;l{lV9=`Q*Km(J;2>@o zABHl!X-&(n09kN@7&3bs3#s~72QMjN#LWsxNhZHSFqFTo{iuLH#eFo$az38@h-d># z5WQsDrvMCc+)wY3lAMqh(s8Bts4GkZc7GmVTlUHqFt-V2dV?1;?V5KGK*V@df&WYw z6`(A$_p{i~IYN7<{mz9qgm94)dk<}6YQTGbVS%T|o}N8n!XZ2cGHnq>U$KL9q)OTp z^PYm42D$7r>@;Bvi@FEcB!GG_r7{*~unT|0b6AON$dJ zM_EEv2b=xI90w6Qau?D7i%feIJYePD;n><{&d)l%*LoQdYER5`*VjaX^WOE0AFo&5 zU>b}7;3a$sYs&)z6nseU4Ikr>UKzRK=0P_*ym3y9-Cx7kbMyF42eppX~+0*pQTr?M(Vs+*y8p}uZLt11^zw0ay1GXhHJ#ohha|` zu@H3tnuoeFK;Heiwp*V7TkdNe6h?pc>Sm*tmoH+g!5$h`hr32)_i z?E?<#qjL-b5~SA-9vDWF+JnynEjH6mc;*Lg)%w=my>zV~vG+0Oq@zvZjfdYv(AZ9 z;oWrg?63kg*H~OAS^~95_~_mGh$+`JK_f87`_v(oi9PP=w@dWculls@(-BB$Zpv74 z#+42NNHQaGtb--ZU;{dUks@+ZgByNIJFawM-Y9@HVdH5&5Di!-j{te52!}q&<-HK>=VKS z_n5(=rBWmx3<{n)Ep&lCM$pq7$Iuh{;}uBfqX$%Bo19C;kgKkxc9#+ZRGKq&^Q>DbC;wv!*OX!BglSr#VzJPT-PtzoqBY+=Hqrp-4iY<)#w$ z54k?rhj!{w!D=ut1*Be?kE!~t??ZDg}`HpagIQZKtQyhdE%HG zynVS;!~X0)&Lb44r+5P5B!Y8?RotP8=ll6}6shQvYDPnpCE|0G?bH*ptW4D{ft~}^ zy=V4_lH_Wgv61qIIlvQM*{YVbyW2QZ$*K?P+!z`XUC1p=s$_J06)?|$3D1$xoiRga z-fD>OqRbSFG6#W)Q;jHzp z44XeKvc|>&3rJ=m7V{c&{fZj-WpMZ91Q^QqQ9AJk6-TwaoD`;Z=02tr_MoXy*?G0_ zqD4PuV*>XKM_!X8CV9*beR=;kE?N=CW|tPrgmfXV_7 z`*6G>HNYWG1u~Was5qdNL*0^~t?cL;It#4?=j!~Z?m?ID_BR4dibv3CHHeK2Gh^pr zMueUP$Cox&wMoiXgr0lroeZZtqpGgM9@>5Rjp#(^bEug0_&Wt9?+d_Jx?ZsR--a@&wdj*XX`Hd=HT5)K0Rc6f^* zr>Vr8){8(<_5QdPOQ=zKZgCjB6~(ZK<35fXjR9D^mpxzbkF|ES98pn>%Y&hOJ{Te` zxUkz3BU=ldBcQd-KPeTiSXUeuRWTp+Wh`IXOw# z5D+3dw#pofr(9>KtNP82Qd#!m;cHz z07?Xv!7$%tmpH?;|~oU%o5US)=u|fcp@-juXN!!j5*z^^Ux=G=E6o=nlFzo2X_jZxOQza zsx>#Jz|MtukT0p$a{Cp%-dqjr4Ck488v+Oi*IIEvY zJY3@Sp*80QLPD04o+UK6fTiwZ)B9UYxi_I17bNO(4yYUZW*-t1>*#f;JGMIE^Rk=a zhNRqOdK#b1OJ}dTF@>7`w9j5f?3+>V z0|z_;=@H^GPrZkcDFeDA&|n#gG0O*v2Y9J~Bg=#>Cf`7;|4=MPdRD7(p1i5a{O);y zRSF?Eu?XZy$zVxhCB02VN4?v2qA~Zw?B8$u^PR=Ra#812e3Ag(+8-S7^#c>=5cX$= zO`lLO**lBfFIX$<^Em4<>{6iUzlS4dV9sRAEsmkb#$?Xcc{*UyO)?9!M4e`7SZGXE zN_Sp?NHYNLm%Np22lZwHf>~nZXlvV3W}&5?qP7LpwP#g6qr+B}$!{ePZYiLP$}})k zhuaA}39y?#I9VlKSyPoU>%jSgm_t4hs5nrPU_g=fR6!hi#KmRY(+;&Iaads*VXfQ_ zC%M639lbg5N{HYrlJXuEvC%xM+8F1yw-tJG-IVZvs$*GbPr(Hq2pI=YxFP!Lq06Js 
zTGUyo3Al*;O(4~kVqDPhbT7}D*he1YR(LDIk(Fr=ymJc(2Xa!i4_Ei#c^wd|6|i`C zO|Gt>FR*p!at zb{!fb0j^FklR~sGFvSQbm>nQKht;S{t`oR|Cv~g#XNoi#;-02Jd7cJ-IRf$bl}unF zJVSRj_a$lh%LkLHPgv?LTwx!q`uO7TTs48m9sxl5`kZ|94O11H@2+%v(K$AXPAy#U z4kHLjkM?C4Jp^{Fj?;xf$Jpgh=9HY->gJFWw zY{1Ld%E7bRy9x?GIEanh)rLE3r~Y$@GO^Sm<>3nVYS%q$gdBF!h^vJzDS&AB zGsjH8g=i;9g8w@XehTmlB6bnzYhWDrjAXR6ko@uzPE1{K(Dnq1R~`*SO&?~bEy`2M z=(Mte+mP@&)XL4`M8Vo!3lwu{o?v?W`EBR%_tZ&v|GzICCB*89!?%(rl69AMHNP<-UtP>!^$dvFV);D!Z-V^B%YcUPXLr}`-`yT-P&&TIuj zSM?adJ-dS)U_Zopr`!4w6Ap6rhCKdMi?hDs%#FJ;ohL`ZF^Zs#%M}_8oH@JAUPq;s zJxttZrd&lX#H!cuoS7ER%^kvFgS1f4Cz&goIZN%$%2|l>Mn}XmvmXD zU5oWG0nH7C1JMmVhGG{EXh;uo@HBuA*zi{E&H^6S*RqE2s(x3bR}pCHRVBvHFr4<= z1Jhh|l&t!c;Ny%^3E_kjNI#Tda5d(-!l5yshpRRN^b}{$*?`H0)`k^OUTzA!FDNsL zq%Aia_`vqPD5e_vSup@{1v!sWCl=l^T@BHJ8!YWuqSQR$TRQtf?;J_{ivaA6O*o8mzsqV7|U@2Y?d821&Z@65}d&xRj` zkZDG548?A5k;-&=?9CnI2UWL&{l<7|yH1i@@;)exMiAzq*-N5WX5s81R)`tiA~vA* zu+4kHvv2i3XLfq&sT*a6hzB&({ABmpC@UVZXc^qbNfXVvJ1Hf0@%S| zU@8Kg@SxnBedl&PEad{b0_eTL z`}g~K1L&=~l$h&4*%4`G9uC2ju%f|rneQ*q$CUW%^+c+CZvrLg(&18CQl%;O1VsLJ z);PSHvxved&t5D7UZS{XW$yLWgadW6T9omcvscJo5Rw!b5B#EZ%qT&ka8Dv_<^C3# zt_|;V-VpKEVov* zrXo>T&3QFN0{*S?#dS}Sdu-F;#Tfvy(cYaw_V%Sr}=x@0YaURN7yml9E7(b?5jCiRvs;xKd})b2lO0V z+kDdT;TbXY@r+Irf09UY9pS2-?7h$fO7r#+Zl`Mx`D&5CYm{J(Bjcwn*^s$u68r^u zh06(g30{HlmT*|o9}iUv0{x`sf}nWG!bLh>)+@n{5W3w10ePGW(&3EIVm=bi~@R_QjRH^Q~3A3Smcz)>NY{!tm7#j``a9S#o4Q!7A2|ZV>Lyrt$Q&O)& zxU2Oun>0`2WF?72w{Jp!)ysA;_bF74@FQatpnH%i3W}LCzCQ%WBv8wPd@!s~HL=y>1N zvk9MaCX0jS+GL#ISd_VZ%E3K0_K>J#5MHwP3lk%DLhXb6j-iqhTSvf*V&>LaLQK>B z9>EB7)v60|*H}XD+X;TffV@8z`UZZQ!Lj5lIs;TA4kj7_n>G@rL2SZNA$aV zjYN;9xyRxRngBFs&c8tw#@Dwj)B5C@9e8=uU$2Z5V|+PXbWP6O?1K^vY>{qq zn&Uckb|mN53y)E$2AJMzC0lvJ)51L9JRoGs(8^3}(xl-ku#)H?x*U@htXCInk6R#F z-VN3%24t5##7IE6&zd;*Knq5lmj#5s+W_W+;)R?77rBQ9jTLwX6-)u)^v;f!TSJ;< zD$H-1Ai;?tKmb)=*fo7qaqwv4mUJ9Y8iT6_P>U|WXA#u(633j&l2S3kMMwTgLgf|? 
zDP2eN>FPHK=$K1{ywIqU?l1fT(zKhhjhpPXbNrMNE4W;y;i{!~TGKE!}cOQ7?E*gjJKS`h4B?T^+6Y z{oQI5$Rh%5MC17Tkk2)NfaVwU2GFR2hBPTXa*WCEv*n$>;Xy`bT^dFTVYoon;+a5P zqRnujstdEoeav(VUI~cgOH1!T22@whTOKM#8b$G3jn^tmLYSBAR2_EHZiK9_+>a~a zglrZOqoCGeH|!-ANE^V~D$K8pQF`J|v*1%!S+JDALH#pgZS*3~=g_=+^qV>Kr2YK% z0mT1D5=n7%$tk2&nh!dhi5p!^cnm+b#04>_9e$R2r92H*Zf-3GUE`EeOv|TdJ)!H` z#X_8nq2}BE4Zuaf6+(}!C)>yFO2z7-vS;Y0%IiURC>|1j%Afm}Rd7H9rM6A`9BIgia+M zh++V@O`HuQ{&Y0c_LYE(_#uU~E?J~@=0!F29GpZY55UJet$uZBpWv8f_Q3nD73BGJ z#?32hu%RS7Q5Kx3*H{znp5#sAJ7r1{0zcv)DJJ%ca9Ut)B~!Is9L*%LBfxGIQz><$ zZ=?2ft|>Y8aVnUvM+qj80zfB`2YITM$|2i;L)YbdzfV(Mu!WMokz@dUfPh73YF^PC zH)RvVm6SX4q@kWX761$KGc4i&Z3JVzV9!qN7R4ikwvh@F0HV4$hw7`o#a7^-^2741 zq^5rwrX%cAZrTXy!myy1*plG+z%_NaFeSl39?7J(tQ$M@mF_oTOJov#`M7-EBhwo# z_g7=P=@ULOd1114^g4pI&fPteI&7cdHHyMMl1V1QLgD3_f??Jp=<1GXwdqvlLgQKk z!P`fRqJDZ$?j2a68mPGinyj`aWcC1<2uEQ7SNtaV27Ja5KuPecqbK`%hGykeR=Lv| z=#{)|G*afCjO<)oc{kuLfctC>R<_TV4msP6;vU{~s>0MWbe@8Q9DH?e?1Z`kWm9%@ z)q*5!7L4SVVG`X`r@HZelN9-qaJG8Ps~hL`{WEy2DJXqnUT%GY(sI9fZ7g)?F3DIH)7OB-+a7td{5PlqQ2{bxgx z!Gri<+(AWOd=8Fsr>tMZTK9d>T;YxglCZIR2xRcU3FfpX!x0&dh#>59j;|}2D-(XW zAdrFAApdSlLZ$7fL)d&Ko5{GI(1~% z;d5Vd_m|A;8n7umEX0|X#8>EQ9WZso>L3v%EVsVt)q>Iexnw~F<~TiwEwC-ycVj2m zB;y${MbiT-x$+Utnn^R->bg$5mng4RK*?qd7<3?mF;i3orklVh|bk{ZXSBlwkC zLMS5C&5BSfYZt30!wHV{_}v4I1gtd&Ch@LSr6eFHWcf=z0p!S;7k!QFk@b zl>Gudcoelvqvx=+zpR4Pt$Z z8f5NR+$%6Xl4xnTXYquA>`ATOB(+9!TcJ;kUp?TTj8N8SIphKZeMRktJD4^BacY|S z_-aeKGG0^2gLYO`K`%iXS^M_%2ueG#5L!1E&eX>V@yd?AT@OcUhE6&9P2}cy;7s=^ z6xU{e#wiT!s)7Bu8c6=sqyvm-S4RixKs*_7emt?|8MQD31bQL%h#>npRWKiybMky^ zLQlI8Wt+4NR zeGPE;sGQIGM58R*bmU3o4&0lw*f8p#*3{4R!ax&Lu^;=}YkfwQIm=jR;ZIiqjCFN-aab zx+f4u_lS?NW9VPG$5(Z3U(I^f4)eamKukbq6gnMCe-@61{*iVWcOPYJ+@8>Y@8mKp zUcXTM5`i{}-Xrc|f6}_?ve7v~Wgr8^_J_F=R~Vr}czY@n(C@C?2!@(ENz4|5mj{~@ zKmixbx-B~nm=9TIHJ)7_v_;HO&L5FEiwY#l)DDP9%5#oGl~cU#)XVnqlt})X4Wb)B zqB;;LY+gj6S&%_*_W>&dp&xz~DqUbH&S`)Yd|a=<&&rqQG5RsN;hM4<-=&k(tWfMsA{0OH5vH{YI1{cfiwwJ7O=tlLro|5oM9L)Ftq8TcCon~l;MjZ|!r&H0J zp5W;7eDNqpt@EKhiec3W4GyM?d89TerB*Os18lV8AdObx`4#n+Ss@PlSjuns=#h8k z0Moj~68EGu&WL1(i`*?K_rN#{yIl1`yvz#?DZKWS9D-_l@ct~m>h5eUImi$VOMo=3 zfbtZ&&mdqT_X@%w4Z{POp>%auBR-OG<^UI6%qZfzWG_baH<^ioDfu}R!3{%Cl6y%U)4X#Er8%)UDIM6wxPA`>v?;fG zi!V7{-zM2Fuh9GtR81vRbdY!maRK^52}>U$tPw5{d^0DV=C=lUl*skA<^u`8O6@Dq z;(>i}wH6HLs>@^}du)<)H%W1PQW~#nzSOj*LQF2j6aW^5LoJWuRVa z(p*OLRVZEiD6Yvc$Y&f*PFalWBnAX{+h`^pnUy_4; zD}ygN*T?<5Pi`xC$^qHeFYOyQJfktb8FZx{`-c%an}iy2e-IaS#NikZdOcmYcyVzY z$qaZ`^|f9(_}6EQGucK#P-WMkEVJ z8LOMX?kH*@y@NBgS6VTgG1iRcK`Y17R3mw#;H!l-T|EV;kNE7-ke>4ldT4mljXPeJ zRk<;B^p!ycw%2+_?e1eSpqL!YwZ|AQ(v^DJu*Yq#y z;FIKol;ZYk1p64lB$v7Q8N@i~4RYpk%RvDcSOE_=_H~TIWK+`f-qL|RJfC2#z>_zw zl=+E9v3bLjClL>?GC!8^q|uag*M|h5`{TvYL0yLvzVR5(&%Plh$cOaPeGxGoKq=TV zCbGxG=Tsxk9R^UAJzpm9QY5hX3}hS>s7XTQ*qwWUtYl{P$Fg8hCLLs1^g$B(P%}VY zprEWoV52WwNrTV8-ED+)&d0U$0`5PfC>E(#mw;XdLI6e!s(8e9220{Ct;iAg5}4l= z@sY5igl7<^kH-~ra-7KO?CidvSp&CENC(UANXSxA&g$1KteQlC@=vVPGp-6!#Q?m2 zus`4GV5ly1xescdgZ*D>QAZKiv2hH z)dHc^vh=x5m3Aay`@ulNWA6=4IGG^`AX9w9=+p2opLuxm*gunTX?nQO`;hqfpb0&^ zJE00lpCX}_mHu8oJ{D7r!1;SKFy2b7$H^X8oXRNHw@V=DO&jJGdM7+CKSGmh@uESO z`yO6G9ROC$0ym^W+te@Dc%#c<2KONtUk~m+|0?s!^W;3h&JF%m4)7+G#Jv)~ma7Eu zi4A4o3i@e8_k|a!4C-vHk7o`~K?*~4fFs3%RLtf}G{3U-&|-YMKAtYL6~by)C3fg#)1=_atu`3TmF>pjm?&p$+NYD573bc@KZ0QMlndD z;mxyyHh0k&#tuWnDW><0+*5a08d@{D&W-ZMIJgBU^2S)H-D`i20P%#6_tZKd5M5aK z&6`+g+|XbTAP1AI$U5D*@H_!pT;zd;7d&t+BHO9tMw3w{bVNE)*#l3I(RmtRovuuQ zuZ@>%$F@Tkj-+kv&DCFy4ux)I-hgOC*oUqj`-6JvhoqdO!*T9?eTFC$yi6+#IrdJ_~oMat; zX-n-S`_@T1-(ubd`B{0=tWMV>&T?7{$qh-5`Fk?0;0#BvkG}G5kf9wjuj=@^bQ%^y 
zgSa#;ynDeWwMO|g0pDCnb9=DX?0fgZ*uNX0y#Qi&e{oF;|5`FE2RWZJC8U9ybV)Hk zAFhFbJqo}6qVy8_R0(A7SW;jN?ZfC3dy%CtMp#l1xa1JPzS)$xtzJQGYUdlcqGQ@_ zQ8rc`An z!_JBcQ{=YfZ4Vkfg?gdXzXd5>2aoS^q;y~i$SkwAx)yES7OA_X_W zdF2XJmYOMP04*pUy$z2tch4L3Y&C0FO9V9pf7i1^P2;o)I~CbNdW)df>(y%06L)b> zs;~4pTOV#Rccv8*q2#I27uKMY+(YLBaRRnXam^I-lCF|H0a}f?FOOz9fg!oT5CudZ zFCJRC-l66IwHcC{tKllXW_bE;HQT@z&AK%i?B@0w8Y9wz3J5D^AKG(`aYeAb=^tSR z?w#3J9N~${5?eq~dK+)Bdz)LBJDELOyj`&CVl6H;!!JhY@Zwv*RdJwcFG^!nh$CBx zd(cHmz;6ZyHIS{_t|$Mwb{4$Zxt;@qQv#bkDP|?TF4+#1z%@gmYZ_qLx_+!e`zA5>ygvVSp;Pd3N9KXqeWWa4^lP!nH)^~2Pyg;z)DUR&5o&?o_$PzK}3 zd!P#cU3LLXa7VH7-22xkzd?20H}#eH!Xu_4biw)IbopPk{RpvY(O)sizYc8e5QTEQCWLzLbczeLOUZ5DO@>&hI-^- z*mslYI)F{0C!lpH)bKWFh)jf6xkzXfx;@RYsa!h3Hx)>GR~JnpfXWWo>a9nS`jWtm z%gk>`O=*SjVIMfhI87P1N7s)fPq*ZFk8qI21?g>mq~_|=(LO;h_dPEKkcB}KLVW*C z%TLKw@MO7(9>L)%fJO_syH@+)SM}Hp2c;Houiy?*wDm}xTBUL9nKu1S785cBo+*a7 z=CV$LZ)E7NS<=^rwBgdv4{-~)-}}cW?oY%<+>yLQt}pYKFLIJAD?+1ESWDiT+Mnqg z--b!_6{xhqDIcGtYCxn2HLegjAaIWvS!60s> zLYy-9OeJ0e?!$zK78_{Ady1)g!{e?KoKKN&wY4h%I@K^W2|aqC5ip!lMBQ^|JU;O~ z$19w%hft>T1a$&X9N1eWaJ_KA@0RzR8|c1x&F=}UC8Ni`q82)d9KgJBycUMfgy!W0 z_A;4zf#yC!a~7oF1$H}^rzdmH>yt*s=RJjJoQCa+y_(m92L1i2(^ObK4dwJ$lf_kA z&T;s_;T$06xx&d_j@iIfT4=8dV0-UHvvHb~6}|U>HY+U2ST=X?8o%jD)ORs(v%n|R zi+6V}PA=sA4O+=Jfs0;(8gH5)38iy#5iFb4JLT~_ha#qRHnVdr8D}`? z019qG_ssCi^(aCY4{Lu=VM!DmaZ!Pp`mn~MWiAeB$dgg|;&iDM=ysBJUCu&##t|No z!zx$MSK?(tAjS;M8W}F^yH^C(Quswiv#Q_~h78!EXg}#C#i2GF=QjH4KsHEZ!mvW~ zwQ_v$BcV!}Co_QXoT2maa~XHeia^DCZ&lDuku#1UP{}qCo3K%C59#`ZOgHmTEvRu6 z&j;`BLGMr}Ji65*!&2oX5m4i=8?EIjATCt~`ATh@83A0&BXzqyiAO^0XBCbHGjOG6 zhL`nn!Y5#qJjY0HA0B<*qd-D+s8e< zi;D~F))njbRc!!#hAUIye0`HjfVrr8y!7pkq%ACU1sP1&^FboRFka{^L8TqCDA*W0 zX*U{2(tY>+FZ8#4e7l%GBuNv=ivS-uQc2eKd!Zlh^v{ZfO~|dh2ZPCKpDF7=Qn}{5 z@NZ22EyN^nn+vg~B7a$4VwqHp<%NO*jjspp#**T>w8r}uuK|WvZWR^{3evyyG->wX zLc4{yLah|aAW*0;bo2^eJ>s)!fdD^qn{z=P4-1OhNj^OZj83+N*7c-vXQxg04O$0}aO`LQx*_U<%s}gT-b6%y?xXj? 
zzW(3*gz);uD=q2-F5G|jp8x6ZyzOnD`8Gh;Twh=K55D0SeC?-+Kk()M=y!hZXGx#= z`rt=@?8pA(cYNnBT)yU`zvrW${+qtzb3f}(|Mu@xKKRi;{aN2p{j<;iqhIzL@bCQg z@BF;a{kG5kyMO10-~WB%AS!GlYjj3J>T#Cjvx7~w}0wqzWt{@^`HDI{=;AL{-3DfrC;>P?>+oP_#y9| z?|PAM|I)wsd0+e`^yk8BKJ{&H{|ey?e<8f!mwfhj|IYL8`~!dccb&gG`Q#rTehgFo z^!q>fws(E-eShYY&BJf}#qTlHcf1>3@r9rC?vMV{cm6p2xBmF>U;du_hrj2`Kkv)F z=j%V~UGU1+XThObU4c`OT;b)Hf zYkt;;{Q>cP--Q0-AN=-gzhs{{w@;k#AO7t3e&p{>%3plf@BaVoLi~~7pnv0U^8ebm z{MKLpyUzp$W!S{t9`Ht>K zg#Y;mzxBgE`h8z#|1aPF2Y>B9{fWQ*yMORo@vDCHyWjsw-`4&xL-#-WAF5xA zoc`VGH-GfE+kf;aeaZ*^xBub#N5AElebrA=@A$xf@hzYJ6@T`Fzv0*O@t^za-}hMK zZ~nT^c>Kus{KlW?hzI-M;Fu&vv{No#iyCl` zfVKBtYtFf5J#BsGX8;aC9e21IxVd?^(9mJ zFIm@{&XOsl>*h#_6fVsv*ABmk8Q%r-#_?#k(>8KX&?g}A&h*yzDfCu|5>Ti#Uy&XR z#G(Ph%-IdgJ{~QQtup9BR&VueAmnvdBI5Hb^v02jA@M6FoX-?|L8JAf!fI(Q(lib{sbHkryN){Rd0I-zc-sJc`%nmy92i8|wF+2;c)J%8XAQ>h zNPO0KwNHuk!2D9i%BZKM!0G(Xa*$dp3R$9sB)m!kso=S5N|U`GCo9{A4Yqd`cOI8@ zB4KZ|KSGlk8>He?9Xxq{m zzT3!8Ee8(1!jePdl?E>~yD1hcmr#jC5yn!>XN&v6y`*qESvxsi6>D%k&L-q>mAh}U zo^L!EU}~clTrYYBLxiDX@>)#Ln@yCeXDSY*aaG@)PwMWdg+qp$$f!ZQHqu<=PsVyA z+pe!^eRnZybFv!EU;NOl3|?qFsdp?<__WaKO<6m08NXA;Ep>&c+u`_ld#7GkrbWT} z$~j4PCScY?_)43M?yvW^~ z0E!X#RSaonZ`}9k!kdP>eKeo|Do$WqnoU<3XRh>q5m}9R%S7RFFlYa8=T@ZK)*4C7 zuVc^?W%X*SKY?zfLSOi&cdc@XN&@h2+8wFU(j&CnV)8o@t#MB{t`w4q)GI$I&P&2* zLu!y0O^f%v#{JO}(wlhG&dpc+Brf|i7(`=#Tu|>;5EYqh+l5 zYXh?QIlDHc*-nkgeDg?$zN$1aQ%LolSO3@A>NT^u z+gWq`(S{%jnd=s;NS3xJ!`P76hVwE_9hNJ%p^P|M#da%gNWBgHEqhslegud4!aUqh z?K;0!Sr{I;u{V~S?hum*@%C&RTY$$s&a^XHPdFGgHXN7kRRhBQKoZlqif|I6b{w0< zg!p=Y0&XWEmwj)gVGrEP#?Qk<>kVUQ2_^48ea)O%O0!$(l-Dl@JaPgm{)ek~2>Rjo zjzDZG)#>4sP2Bf+v$9)tY3z@HJAYzZ2^TAwEctO^o4c8086aCk+j;D3tm|O*A6xM+?*9fg4iS{%5|CQt>@!h z4i}0FlTz3$ig$4*%ZuJ|+6u^Z?jJ4x>X(Z0AnFdjxWBzr9q$CBa5xEni@E9*xXtJD zptaHsEtx>vWY^B2luD#NJgqSsUHcV+9B;WT2rQcLV=FO?Mh*U3m$+Ivy9TH0Y$F=APwNi}rPBffL3bKwp6#793MHPs zUn^I8_e+x2F1sAo?q@kPdN*HP^I6DrJyu@`Eaur`HaH(d;uL8~jhGBGJlc^sMoDQtmc)}sE8ek2v#z;642{^#mHlSlKdWA+!zvN8 z$4yCCU#3QKRLFkFuMj3Se-{Bk{K4`qMJjI0%;?D9iB@Sc4*%Qi!s0})?7I~4q8VuJ zeg%?AfzWHSUs^PjGIxO=l(A=AGm(nq!LOuWXXmSy>zt^crUW=_{{A-9ne#rZTO#$TA0*`1hQo3?HOdtT=|GCsX4pSy|> zH?Y-=lAG+YKpPPL0Y?>~k4_^(-W`ZBezrYYa(FNCg(`Un-T!q%dRN3-RgZ_)c+Bme zz#Xh8ENG-JV?(V{(s@1dMW}xBTF-HIJ%5FXMGiq_vmp#1v%vUu#j)XevL>P4>{>Zi zq$u_-nwZtry7@IrB-d)wVKo1PoP|P>0;PJBbJ?P+?6;43%~^;{-C7t5C`>fJ!w5ei zS{}g;txh+Glc=Oq3N4=d^hCUswS%aAa25Jz#gkckN*LZ|rbYtF3v5BjOb`F$aLAn= zTxDqO3mdKSt4e`kv-7}f``1rFdkG4U8V2$G`3u=wzU##+Ygf0^4dpELtMj)DqH_Yr z7IfBkuCg3~3~sB51PJw(@t9@1QWqVR>pfg_rHyqY?&hB*DfE8@O?HSNNOe(_-Osdn zUMQvDikFMb4wuwR{W!wBTWIoLX4+P$tjO*CD(bCKE8X&1ki)@NRw91I!|ZrfyxPvO zL_>@5+efNVm$aBGiMW!>hkpV$YaX=W0t+rmq>9fkEn@?Aj^JM?^S3|6RB3yZu|z9qYLtn0t za#554oHPo#q2ViExm~_zsG=>fyZ$=5N5%|2l$$5fkW6AMYc)J>K?%SM;uK{eQnU-b zT*439#LTL>5$5Jea4poWvrInukqmku7WMXiymt}Atj-%nm%#_E%^iBPtMV60nQ$IXhk0{_$#09J9UQPbO0K5Q~&>r_*Ki04{q7?6$QOH+EPS7k@x z(j3-n?5gY*m_`go%H!J7Y^c*2K^C>Uq;v()aqsiT!i8NvJL}AJe(gQL`*c(a8+XNL z@kPOa7P$UdU&>dAV-2q5{JQu z>}SxKEgF&E?0)o7^yU%6cGQ{I{VXQK`$htZDiksw^B5HK8%V^8F7F8fk$D`^iFo9e zV;W7t=jJxsaQ1-#8BP)R_w_^h2! 
zC*ZQ*@f&UJZmM7}>HVq72auM;0Mgk@<6u#88Giy2>y;m29sfK(k)y}&P*5z8*>~2o z+H$B=23u=Z%2A!0zt6MaNM!%jt?5LwlYN}pDPi3GThYX7GivL-#b@c)7oJB#`Tc3y z?_8FzNE1d~m#6gI9y%we>yzLPT9pCejmpk#)cw;FXcyt%pft6I0*; ziX{)N;C3Bb!Ur25as|D8?emFO-V_D%xjb}^*R}Z*N;U~0b~+0pcHoa2j+%f?$zi{- zmq>vab#=^a_q?w7%U0m?iI;34RmKCf!}4+_v>?pjoBjI5;=;K3CYKTuj6B3IqF4J% zKLZllt0+LEVA6O@qVVHu4sFL>L$L*lX1DzOHiRss!vbpzmR_^Y#l&XqMlsxUY1dBM z;f^2osRu!0qU0i+_UHbrcT>K$8eZ>~oJ3pO3h;TM>2=v@=s^8fkI5V-W0YVDFL3|X z98qlT%hg+Gudc6N;3dq*SkOh|nWpG!*!--q-WT&Y8XHpOvo6r6D0%o`q;~yiDNS?I zk%5q>asS&Z9or(-%@|Xbwm1i|nvU1!HUF@cEeWr0>{B=;reUd{`rgUj+U)Q3RXtuh zVWpYQRof*XGX)h?8jQ00$-Hz+a{pSw2y!9~V;<3A!(6#Zs=;HHqy?j1?Kams{1OPh zmlInzk-59~rF~YjJgI9m5u@|LbOF?-kXgt!#XuwlSX!lFzSA|pNPw*=F}S>JT5G8slv0F9?-Zvq>>nw8{80; z$6q3aW8UAL4x5DBzJ=nUY+&$vd$JzpKk^;Te`77M(Q(uR6lI6n{Du0=pIJFn6sOp) zqS5`lxWAx=0u;L&oXyfZFY-WnK+x#>U9~*jd{-Atwui05qX_sC(&7kQy8Pb=?08Ha zns(m|v=BwdMahNqFs;L#xV-ONeQAEBBI=h!K-Q9FGWfLJ0%FVYzmlT9AsWn_p#EXe z_io{{%>Q0B`(fLLnvOLI3nJnKx0m-%P2r=HAqsa1W7*5k&0FF^dxtIJxoh18l3Ok< zD}WB%!$(S*kDO6X)Wtu%UC%-iYE1gbLgJ||DukjT<(bdXt znnt$ODRDWbX50K0$ys@|O1!up3S`k(3j&evW&4$luOuqU0 z9&-grM{sSV*7}|@L!BQ%h?rF9EPP@j6ygeh%nRjDY<3Ukn|t($@qr%ObxfYQk%m2i^#84E*W9ZNjrUg04Ai}yX-siuHCO%Y2jv)$<<#}xybXs`I5+KE42KMx6qd{R7o=Ei&j z?QqJsnH2}5xuv~$k)<~N@r}dtbIb#sHX-2M@U8k%W?XuXG1-{oqCGAac^yn1vYL2$ zal=HsVC=$u$<5f*B&J^f9MwVEM6&wh5Ciq~harNW;gC>XoW-B%;SLw~dB|s9LB%bQ zh~tDi*;D3S6AAE23*BegV%pLprBoHSE{}=dXjXEzS<0|dT3LsR4Q)%!kM(1RGrCrk z2Ix7as43YT`=g;kdb}PN@j$%jud|v}5(%OdCL$M7$Lx2PVZ$!sqU66(o`-sq$Tj!FkYgHpax;~El}sM!GYe)HOS`h9*FQDJc*n$(U%lfIN$sD zYiMpdm`~n&UU`Tp`xtiLG*YSM?N`__fgxR^)k+OBypqQ6(r0H%r>hRWfQ2uJ#2}?! zNZfVy0bQ&qhf1>t$CHP7&9b;{K8ZM5=;HOky@T{e^Mp^;W|~fCX&-c3qOzH%RAGhZAJitvad+ z=yW`rv@~#_b^72FN+V75-xg3ZT52Qzf5ctCSJc8G!@h!e43C(};9ihIf%k`5n)&Ud zPqm@z7d>1ejztR}yhAR}#6v+|_!@k@yYga|Z;Ob>f(N?wf-6`!H?eIW1&*YNb=ZC$WX^UqZzzwj8{mMc_yfy#HU@UoWBPS8lB z&M}m7mWzPwNr4P~lLf6-lucdfh1g3aM7|c^RPW|(i?6qX$*g(C1BtDR(KFW-_piEx z*!(ktu7U-|(JX@ZMKL}`VyJ!Ph=6Q*2~A@^=`lt?90|`&r}|?7%#ryF{HhJCI{YmW zwhuz7`_`@l-F8?{nmWer9Vn1p>j?XGa`nI)c56kpeLp}Zyj-*;zc_%g(1HxrkZcx_ zSpR)aC+XYjajWraP(2Sm-kYB1OPGQ;lX6PEYTF!n^zrnnPhO;omy_gTRIxqng^tM8 zPI7>U(rcq;ps%47kcjpEXT3s&5L?6XrJJ9vh+t}5EvVRwB!l-xb$=I8*5u2Au=4L* zYr!xGVYT;frDWdH=zc%K7W0zMyO~LBy`j0YRuZ+;4XO7)7man+<-^rD(gR|6_Y(d~ zr@&Yy(ph&~YYUtg!r$kCoF~<9&B|$AxurfbjSMrMmn47{s2@v%n@jc#>$&M)C zsz}8%gh3Fw1Cv*50;i+i-R9ZXiL#I#iBf+Mp%7u7Nm0=p zzcdTX?81w!ciSGJOxO)&)^laS3`Yf;#|HpR5(}vH8Iqb&5?^!+LN{`9AmP;{YK42_H8{{3cS=pQ{yGy1KXT~|V9N|;Ud zI3;5YbUW6}Sg*dJpi2U6F2>Y1zY`JaALM)1Y1VUsY%fMTc02kj5^EC>Fg>WF)OR(_ z-wN3e2P>NoM3X6-OH{Z18sELN)=|~eN}D<@NKCqtLy>^e^o`^uoj|?i42H)1v7wyK zH@?<0KTOQJ-0TbGlyP?b@V}*+`Q*&7DHvS!Ob<|c#41d9Dfn}y06qSANNb?w?$7J8 zWzv9ljT+O~%wk&m#!5O49M@WfPQ^#GuT)YJm#W=`Vp zRIwU6(px_NPqWTr^z|bX4GQ2H$UnVFF;=`4Y)G8xzO6%j$*iA+Jj)&KsD;ZB{wJ7? 
z2tJah95#xFXci9f(DMJy)dSQ&2?#Yn^`lU=kp%79hj9Y{01ng>X?o&?<`-Wt#LSPh z&0$CJ6J?r{hq*vgOhMBxQ|`4rlXS$|RKe#x`O+ebAf{8OBGQk045@c~3;z}adO{ZXkO02%PX%l$y;@{tQgAg!Xd4iMs_ z4h*x&7WSXO;Ae1efG;OG>ku&tFBN zb~HaMravJ$(&V~Ab?5C96ngw{y*ZskUkZsLvUv*k!uJ&`lqIiEHg5Z8%2hA*Xx{|?P00uHs*Z$v902C|r znW&*@`Lo;ie}O2#)#jMyn%3;oo*qpA%Vwzf3t7^mwa;(b5Vtiz*I3Bg4buP&*Ut8Yt#p61#d#{ZF%7&C(j^I@mDFYB4QB$HxU@`8_Sdxlb-r?l~w?N@mQ3; z5EFc=;Yope01{@&b$ogpi5eWRE`MGm^e0@?43Gh13lAaTldFN3`s->c@ZA25DE~j2 zWRMz|h}-#7v1&OZo$B{^ptT|iK-~fFFrMUe#P}q%<_N^kQgG=2Fbq4Lh-W4H`}_1s zA$pL*S}&>DXnq3Fsv7|MBLJqoJF{Xb(SDY5#PlB*0OL7e!of0~0Z$rA1T-|jh=u@p z(oirWU{ENrrgzIB#ADC$WiuGBE`dVS`2OZR_*K`__~sBmy`+a(Ka=BpKR*HKgHG$E z|NefHtweMc0IlGCqKJ5l#amsEg{qAD$lh@~$G?8}(G^|$cl`go+EbLXG2J2&rGoSZ z(|Maf?LaeXeWfcxq25}%dNSbMyFXHJeUbtX5!`!mI3gK93cucd9VG{(0Q^3EvJ~*J zkqnx3NQ8%rV6EMVOH!f0ssT#DQyZVEFWWV} zZQ_Yf;Jb-gQDO=nGK8mHV_A`{vz)%`1#lqgY>_ZBlsEK8JSl0IfZc~8?akEEby`jT z(}D;zzeF6LFYAzRn(hk%6C%QKd;NrX*)l$F3WG@fT~H(uZ{uQV%p#(V(QKVn^5WK9 zt%(iA(g=X(YP-AbedG*>G_5!`eUS$@&d1rcKkY&iXH2NyIt!I}jKEP(o7F}6tT9! zoEVX#0-x4QC;^P@iM>Ca`@1OM{|QWH`@;)t=taLFs%7Y)O^x-jEBMI^{UHA%g0#0i z06*l92+}115zNYhXIKFdWC9`>lY*WCyN~3#?jI3|{yXkp5hMWb)Fd+0!~43p_3{3feAb;0zLjmVa8Pel2VrK<>&Ff`(ZSYD4i`9O?0yG z!mRHO(nP+F^v9JOKS}1tpdJ^-fA@GUEHCs(z^vEq!^g)L$Lrx7gzC>C=nuaWor8J} zEm19hBVF;a7$rUFkKRde07oAT=ji;OYS}S>8L*e6-&`*GlpH7&$XE&2cc*-;W-=sS zk^j5&IxJ@U)yi7*x<7&E>A4le&&e;X`C+b?2iiZ01M@x(q%Ds-PH0U5O{J%V5oP%Z z^jC2t&t1s%5=@)D?;AU#-p!IesZZE@72tJHs8<;sbSJJ-{!wrRU}4aZrN5>L{Qp;t z>aNARsNcx{(2B5Z#hwIxz{wQ#hfo6y;z%>ksPB^lhhq_QoHoJ@2`gfnfk{cm{E z;G2_tV+o=!N0tZwHIDXkKzJ^S;8&!eg0k{0_mX~0t^|SI@ZF$nSW)kEaw?#VRl8Ff z`>Vg}rdX1HvSu0(4TyIqyZ+LM8niDRzj+FVGwhCx^}fI90|3lC!=5N_4_;}2nc{xF zLkZMn3E#Dw+-GXIi2v2l5griV*y$-d6t~T4o@a)D^GwfTg4V!e%NAB zO5!Nbp*}v|oLgL99p~IUZoz;~k5Bs7e>bamJ3AaH7LWRqA- za)|415iaciW(dS*EGST-Cjs7eV5Zi>7yvAP8sKwSr!{+A$^?<(*A8d!OMPY5k0c_Z z?U4ruBRePU#%IIU69Zq^3&I_-cS8wO=TRIZ@K~~WJg;JOAaAvpUWffV`Ae)DN}yJC+8)m~b=+RJn^^)V zU%@Hw8n~V-+K{(ITDN{B{L|lw@m0lqqA(?Y5ItCEDM5YRqQpjhE^pE~<+*ZKnb+6f*t`=Lb12{vvEga@!)I};~DLk%6 z0zjxBizOFxz77JYV%L=R{m*DX-~5xmxjy?l1EPw|G`K(8!*>z`n88MWg4D)v29w!H zZY)4AkN`^YHNX!btDUU%8K3Wr1E9Cc`J-$)Ppn#{fkcz@{+)S!mX@lK$7NN(LqUj3DuE(4&w90me5x=dl2!}p_ zU8$$m>X+H}w4)z*S4(T=jK=U(jT&SH>BV-%eC|phFO55{kUX#@iZWdEL=XZTA=~p3 zk<|u=O$x>QPqb%B^*~NSTW$a>mco3j(Eegiosvj;ND16=tJdUfgzXiE&=7zc#`;Yo>{?)v%bepuai3KV@%pd zPi-i{o6S_bxQ||NG`e(6IJDnn62>Q?*)YZ_H!Lh$55PQEZha2s8U~FWZ^zgEHOl57 zU*+t|&%^2czz4tnc_nHMpm1423Z!q67&NJlARUZ4&D4Eyl!|TCyURsKV3WbG^6qCx zxO{Do4^1;|y8u~B68ywIO5u1UbJlVrjSTO0cyA*S5Qj}6I&)R!>C)cP>nIr7FkzuA z-iUgP#JoSfS~oDZ>?0mf7nC>Jj;vyMZ9Mj7eS6l!@awphG=Zn2Wiji43(O#3#gdA<)iZT-F7t=RJdR~wh%^Mw1ETdu0#ic#h-8&P&|{a4xi^0 z2_lLt0Jj}~ITBBz^gToaA^Emv-8Uim|GVfpe}HRuwLSV?0zj)R7hAQ1@^^wU#(CUM zdI6w&Jbwsaa3uiMT|a=_o<>=Gq}A(IiO;A^qn5&Irsjj| zkzO<|^a;j~AK>{!H0MRt_HB#q7~9L(*>209*?0ziCfzv0Xxw{|P+rkO!#n(BXK3ah zNYw_*<=V=R;f%*G(ZqZbfuYOvIG){TR1X@bXdTV7- zYk(eT^wa;PH_*AxN5-YiEiNUi8CT?mF0nPw(yFr^W}XpgD;IP0Sp-)|H1wTZElbhp zvNnApZV{az-eo}g;<@iKcv@hS>zd++#cM(_?QHEEh(rJ>-oR&IN^eW(@^Ow^V0hC% zz4|C#F0Y(xe?MoCGU|G~_pV6OQOlQY^z>cjtKB#664_AD2b38CaXYZH>-35oX*ZcR zCgtyD9-LP4!SrnMEp@;-`7i?h;4YMhq20@onmHVayr5!(Px9HKXV}m$iEK%#Obh*U zNZe~Tc?dIUWKxM*eYvBV$Z;fg^lhgJLtv$JK6gq4YlOMBGNHQVldT7ow##&0Z-%Z` zViHQv4-%U{`963rLAa_mtD3z|b+D)+7@RcU$Z0vE&=+8?nrY~V2f}y?F*Y%}A%pl_ z(5d2Qw>cZX?~REd-Q9*-(LiyrTP9%|8Jf)kF07XQ6Knipu$bvV!ZUvF>sdkR$ge!= z)03i{HVes^sbA#~z{`f!yvQ^7mZnhW&VHTM&m?I&ZkjTD_?5vzmPQ{@p37Cz>fC)u z$Z+jo=zU!I*5%2~ICoz0@1N_>hS$`HDzK_O3X!3 zf^gM_^!Ep7j1J_hK zd@lKMsyF#W<#o5f0+DB%qN45W{b8<(ljH)4zObZ>#b>Cg=i@;RjufxJ()6sIS%og% 
zHgSoJ_@YXL@z~xgg&&{XX;OB?D4)Hje;}hrsL`b&NFqv*cK}h_fJ$|akL`~K-_fY> z1-Q@GY(?hn6j6?%C32+v01uh*)STLwDH`UVcy_!nH>cYqioxVnNyR-Z(snJ?`skv% zVRMiPpTd?O0dal5oCiMFBU{9F%aC$*zQDFn34>Vay!>*m_zRyeCq4dqT$hdM``0>O zHux0t#2ZjQnCOs4WMih!ZSc*geBj714+_-~?B zT+23QIk&TqCS2y=ya_0Od~#heyctC!Z9D}i5+ zb#A?qj+D`c2`S`D=|SMKzA4vWOy$z7^oO^!&gR5R@~aW?x{_)R-&5=MK&^4Pw83@z znp6TWRYMg+l@QnAD81@e5yMPHVBfS=U!@|M@;b6*>@GRG*rKlZptpk zNl2wiMZwXOro4Ua9BlfO+0#FIg({fVw#J3(Fp5r*MutW7ih$qod>pEC5o znaBFd?$J=H-8aOsKi?%um2F(-^@R2pyGr-f1W-tXbAE79O>f?Mwo@~HYe;Ks`I zo$a)v60qNBVgQ|UJ1q|H1YbUNg%)-{m&1UjRRqPb@SkdE3H2%Sm8(+$^F3VV#ZNRl z_2P~g7 z;8V&LIX~yyF;N2V^pMJaP)TF*ut`Zr(PR*+=l~^sqyHSWfLP;iAx9lhoNmqp`^xS> zrv77SGPO`sE*n!vb4|D-#*oWap$tPC4VX0^UidPQqcV#a8j(+u0@MCzftf;{Wr`|& zvzPiOPPS_R)1JU$WEtaZbj|HQM%$hq(C6mHD*dMrB-5U`QC2s7{-=)&7xda|C8Zh^Zxqd)O!S zR>eA*eF{qZ>tM&vgCUAW&XCx@d+SoI>ReHmOqd*Ak((Cy?P zFg!xiR@uMWkOW+bl^QfAv|G2fjEV|vKDSwepcXkVX?Xri*=DU3xWkmO<*{DAWuo_F z`qua!rlUbQmO;^Vx!Ak%ZbO-MpA>{i8xb>o@QY4j|IJId7~jul7TD-(38$C2i7KcF z{%?d%xvjJkQAK?~@oCg94{oJMi{H)Z&@4oAqU*}Kc^Pmnj#}hRxWWscrSePYA`$bI z4rOC%hSzK>Le|YD7(d8Zy{yucLXe1MZ&g+esunQ>Qva8?c_VYF0swb;;HQH^<2*du z&zAwIlK1wB^=ft;u&dbx)|0(;f+9^S)zG))leyy`%ojGE?URe4GKIBtRIRs{(+$rvBHOF*i}b5#RqRvW@-uKMQI~- z4{Eh-P1D|CQlAeJGHyy0Mg7ls359pe01`(x!W*kv$tsDO4t^kDA!3{mj2W-c!%WwH zng3cJ%$Fgr2TN_9us_aL*eYARPPmM&5^tfa77G{ZqmNJtMB?yG0davG|9ao>kGp03a zDmw?&W9nxCozK|RNus_!;YlSwVpql{)Qi}PnO$ZJ(bbi052H_Q=W$%BAIysOk?;ji zt%(-A)Ri;t@2;!w9L{wqef5D&$%!r$^ci59_bUy61bPvi?iIl&vp+#_eqr6aE<_UH z{a%z(JDrkx`yU_b33bP0tGtPAJqTQ|pcys87K0t%a>XykfF|5(<5;|mQv`y*g7F?Q ze!Xj|5>vXgO5_c28X_ekuIF!|6eGa=w7q_a2+@K_gGs`l5oZsN)=V90`52sxhFcDcc(qZ>QtKbrnqK|!tzFx*KRthAh|mmEf`Kwi=+vXEC5QS z2z5e&o1WJ2-6XsoCjMfitSw8dS zA&C_q23%yLxYnV5iM_A7-WWpfqchs#NUOcS)N;%Ae+wC2OD)f_FgWzU|EJ17R~%>3 zhVW|PYokFD{gk8J_c`}MA=%aLaK6}kJ`5KTCU=WBCE5P#1!(==_hyNnhmvP@gfs-jS_N=Avf}y7l%1(C0ng8 z^Bh@eK_$+Qq7!?Zq+Ry;u^N;Pru?oBku844Lp zn%=z#qa04b1dI&PPKjOj)wPunceQB{pW;M+XhWOSA(gSQ>ui5<8xgi@pX_OUZ+yB_ zt~8X)o5GQ0B2$Yfng2)FM%f)bo5#>U@CO9fUIqwA#Ehkjl{Z`_=Qh+swCk$nzkVgx z-8`ewCCO2DHD4^UFuqD#G@cH9qo9aglOKFqKIQ!BTid7Bvz@Yb5%NxukQ=EHCWGzL zWP04)ET?zDDHG^jS67>9;F8VJs+M}Lo7?;ILWX_wH*BfoqB+=nmV1Be$ zGkw8>ZY~Mi5{%dmECRq=E)eQu$6K4PR#{mgn)xy(P%SmK-cM)vm{6zN-*J?w(bQS5 zewvuq6Y<1$uDcgW=J=TD2&#R^;IqG6{Cr|L=^BPZ+Pt`Yl(^67E|1k2MuV?Ng(`r! zvUMLKON&4tRGvi@f5Xo3UU*gsSpiH{J|#xP>qYri75z1aH_&oeRA?IhM5#k+0DQq! 
zPb#oX%*0}WCvY((#Rs-U&G;ailWxeBt&O`q{^OIcK5&3)`ByjNUkl%Z0E&DS4>XLbLn|c* zdlVu`u5A=v(8pq1xxL-1lXLDcox%WQ^)5XB>eS1O-lb#$XMsTb)9;u_~;eJQHLV-5gY z96sR~Ktl=(I7COF67YYt$zvwa?qx=f z5FYm!Bh|h|atQN7ejQBK(ZcHIkDe?}^ewc|GXjpk z;%w22E~K^@MjhK~`8iiMF&lh?Gl7YWxKX2OF#h@u#}s1mw-g-oaZl(v=`}JE{FX6w*iXxJWvVj z&ct1UU<9s27Lrn>_jo$FMQB|FvN{lE8w9lu(_Td7F>w%9%P4w zvngn8IC(?4z9}<4?sgvm5wnAdl3N!GYp5!bHXU`d7ssf2znkK0C!_(PF>s^*fj_Wp zF`uSR4-fdzBc4JU(HsPZeC&J!TF|oS9*5I6k~J)#)~E`ase4mMJtI3y_F`5Y4;PB& zokotr@lGMg>CB4g?9@@bodLdh%ZTpWAHlg z%Ih`zg_JYS1l;2~p;OqfF%mpd!hln7wsg3p*ZR)B&-z&0LmZ7VS@mgV6|J>QJ`tdG zyNH_*(< z&<1V~s&^Jg0auY&Hz$B+o%m5$DQ{Z%)IAj!E8Dc@I;Ru+Z|27t>l?fI&!brzI8~L) zcIDYLE|3f|Jtrqtsj~NiYc$n5-?S@QFl_v4?MytiMLI*WK=PfWDl!z3hI0GHU;tI= zO_?H9L81#D1h-u9wedxbgz<3c0hT%)(F>2_b+!#lPN6U!SE^H^JDKlh>Fo^wYtE3w zy;TZNtR%vU z$SM=8VI_{*(@#cQlx&zwJUJaoSZHe~i3~ju7IYR#K$!Fvq_N zh@M^u!4UGwFO6*jGf{VvHz!YNz|96YzN;6?FJB;svDpRsgQr;SHd1&;fCg!++pXY- z+&h(v;q9Wvpk4xAo9fbkvn8@%^a9!+6HG~(1IhGM%N;k!-|T*~|HlR3|H=rucRvs=IJ;yAwMJ>(!*+k{&xdjkn62+0ojIkvl|r_A`4)66Brq3Xy2ZKm|3BABY8L^ zNr&k5pVQ;w?En-xoP`5zwiIuFiHLVy(b&$+6{rXFP)@L(1EJ#2O9p`Z9*DH}I#2_r zj?1Wfb}FB=$LoAT1ZD|`plf_0$QODF_UwPLNZXHpZz-eu##tQhITCQwxYS+X%(~+} zn|(JT0P6@Qd;Q%iQxShn!UMM9Cf@!E4saCgY3b76M1ZMEWNJ4PghSrylDv-o+gAYa zZoq!J1sFm9OXu>ejl&YM#q!>^`!cwveMRJN>=pt*&ZnzLL{@9gafO|pPrGeln`+zf ziW+V)y%1Z1JmKndrXLAu|Fn;Q_WF#(%BWdKLa)p@q&nE z-T&b2HftSRf1`YiZwWBpKf@fycPA;kMPXZ51MPx8FrRRFj+ECSlK6fM)(@tp2TnR^ zZru8Tmu~-;xRtHlHb67ub_QJStj99Gc>6KDD+6~ri^qRpy2oFs)X0O=x!t6IE(;0G zNO#hDQ`0M2#A8T{N>J_qM#i(AL85x?{MV1dpc)4HZ?9A;Za`Q653nZPugvzh-Q-+# zkHwx3XX;_yS))7RV_UIG4&jj0ijr=?xa%%ESYv!wk&ZN+c?>W95F-HnUr^|+El9&m zb4>3J+a7bZHuO)H4p^`EMRdb5w&a}eizC@{s8QIP5)$BE~rEYNu zu8*gik87(TvRI@BoDn%zst$>1JdQ&qFhnLQc*82CW24SN_mt1an$-^ni*8i+noKGy zl}34kAM91U!XeyV22fA6{V`lwVYt_n?H1Enu5R}K>al|RuZa_glBb{_)=8=iaZ|U5 zsV3}5pQL=N{Qm&F!QZ-$!l?Ow1Kx;!u+eEt%3seHr)5yWH4MjM$j&IR8>!kj+3C_u z=GG6da;xr!k1^lWo~HyHO5kb7rWHx|FqwvqZpk~1v>^`9`Z?6 zj=g|SZ+VyW>NY6$p>iki%&JxmGGt8>G?1H=UF59RC zu}!Of7wo*U#AbHDsvI6aKw`MTRz(e3?z1{z6~$hbtrZespfHTeo%ke84JzyC3Xt&4 zj-C8da6Q#r_)j(0n?VpP)Vh&9(R-R%`QROF>IB#PIxY#oy0El5{T%kfOMqwKb>1+xC|7= zjqoNE7&GdkD1Oo@WZz4tW~EzHY47#i&X$5Kc`tK3-*Gf5uQ|e|>THsq;cAh}kSdVI zqr{)hN0C!xV~V;sYvWr|WuE^t?x>DMQ%*BDHab-*WoM@C6r9HIB}K(x#mI%6TVbxl z`86A6Ze+Q%{uDEt2{8-)cE$zZ7)76=$a0Y%eR6HK-TVsqhaao&wFH)5)QHSvtRGvovvPQPJTO?;wBH)kc#N}@{I2J>Hoy70!o!%;p%iC;>TMB zNsEP#YslGCL(_(j;yE9_4BErnX!}qk*hj^GWI&Ou*T*&feYO9=c#3$$!3ei#dQU1n zhw;ug1K`r;`CkP*jYQ!PAx6P50SvY(WL+e!L3$P{_csM+q7= zk)c4TzjnuVLXiY_SVVY=7I1y51 zIS~#?d)1{PL)?%hvYNl%1AqJeW*9TjG!l;{jec=ddL3{;!);#r%$Vs ziaKZ^dqFj!^fIiJ4WJ{29lP4Rf11{QB&W}R)1v$v=ACh%ZRVQNpFoWgZnf;@xROj6 zP46{_EoMyvVOs8wHaYHPvFp{B12;J>HIjw=65)q!v0dDr!gNm6A*LdQ^gX-@%N}Xd2j@hc_IX>_X9pty@ zRL|dZEVKO=e663-;^3DkVFkzGb#D!Z{P|1v*_`j3A?rtIw|U>EsbdU7{5i)#>+&Al z?1vgc8ke+IUra^xlf}?=dw5`C8h7gewL_k7v=ulFDpUCT1j}LX6VPsFna@ndHM*IqX zg3687F3XVF*{_ya^?1TD`ZrGGwb%AQ)~xB*LF*^C6(+kpQEG2y zioln`m+=VssjjAy=5!s8w%QQiu`X%-C26w12`DewJbhl*0*Df@YD}K~QW}NYz!Yy9 zN%cW4VUUhF+g6lOij$r$9eu#q!p3+;fgb@wI}lS@GP5Ul_z(GVRSG=7jo^Qyr||(&w?4Vt1=q1yB*y<`SFHl7 zLhp_E|7X|zxK%&mXOWc=-TqA0cjg6J8V~(rM1}^}jQ))a&Z<*Bxia1i#5h`kG6`Vo zy&hiu52^dlQ1KB^!Yd!7PMm}&y~FUbf83n%z`f56LDSob>TBzvf0p0@*mi=u?!#mv zbxUJ*?*%RJ=#6H7EC>JE>vOJ{(S6;?ILU%>8ZQ*U(W8hSOgHcc-^C~Q3R-?Qkf)VL z2fOQjKh+lIf-S%EM0lj2<@;EQgfD3f0)U3SVH+U3kaPVVTA&Z;_laXrD_p_5E9A7U zZpRMfvE;hLA}$d`Wi)x9ExV(_nHUW+ImH*GcXv)4cjvoCkPrq3(Z=4#)}q`$?P;iGN4u z0=abU-ZFx5hCI`J5&+ohfdD{o={2^?S_k}c-+ro>i;RQa^ui@r zuz(K=t7Mf^>;^n*La&G*s&Alq$>Ct7$#JpY1Elat8FVs1dv){(`}faSB@ur#A&*Lw 
z3OuMUUc*(Yx69R(9s%i$SId~dER-s?d_YIQ9^g%Y0RpIi)UO%7!oID#{RShKJ&z2C z+!2j9_Oyek! zkW@w$nJFFt;V$cIb60b1M$73U?kPtDzF-GX2AKs{Z+G!e$OjO!raG<-qiZ$%NjYPG zy|4(``(0ug6GYA2_>o~&}(m_0q(DRMh=Ve+_vN6i7mkq%Z~1LKo52CNVx5G*Uj zYee*H0%0(DNwrkFvg6R2zT~>Sd z*5&fCC0{v7^nvZPG*kOjn?v&{Wr^sMhUGzo&@((@M0YLQ5Dglq83G*k1>nB~$etKD zP&@LWg$euqnJ8Zxw@`|QFIoXJ6Df;;kSdp3kMKF4pH>iaXg>uqQS65A_o9T~?RTzf zzbystC)r@6+%^TRp%_dh#LR728!2ErEf3P{NI?4xr<1x-=cpLXq4GTLSDI1;9*t-# zcuYpu!_7B4^GXhPyq7anQX#(K<9}ru6yg(cD6kyQY z=udRA2WL&^0c3tOh5iV9XtssMU40N|i0SdCr&*f0Y=Cc#O&dTkAK)NhzZhB>{F5f{ zX3kgML8;d(1iEOp1gZ1>rD@y;rV(AFnTJBb82>m^< z@FH}+y`XqBHQyB%RX_6WWk}Fn{wF@B;GbIllzvS>l#4aQG+Q$_(n$FIaWSDVkk@#I zrit%;v*>4=`)oN{UT6iBmLR+qsnWMGQR&J_>O(7ls;ePNJ@9o`HDA%N8M_hAo{*?0=fX`@Jg|uYhT6TUFV|7z)%Bk5tCZ&vdO%g*;9`SuYx4) z^Kb0v!|U}G0MS|!BR+Tl|Eiv3>0>%LA+39Z(3PcNzG;9XQEkaG>*;#{6pmO3BAJyS zOyU?1{h0hPf5pqb{W7`c`Sx@rzb=yOF+UdH+i6;^gN=!NgYkmrUTg(Kbds2YZ#TcN zRi@namI1<+kwYH%yd~hUWw)Uhwq}7|^Sa`B-+R;N$^ec*2~b?fy6~y@^zfE|N_0xz zUs|YNX%*`XX9^sxd8g&fUw3}IB2FQ$PQOY#{sVY8+9l-pf%phetPnNfcG4BZEBu;8 z+-q}U>W6Rkj^oOJ656?1`jt+GW%}#>G$_bm-}0#t=lH>qi-*ZuUA(z}2CNutb?M_) zNBai9(BJDyQA%V0HCV~bO}s!rtQL=# zSfnc^G++VpC$Py}&rcrpAFgcK;NxNWyKYW?z*dniXL-X)H@Uw)c6ZhksN3aWchygF zdX@8#e8A7xd`36=z_`)RZ9!7#Ew@UftQa*0#d9QB;^Cl@~NbKl~;$(7q zK0yyjbKL5*&eXH2W>lik(xkyU9z3Q?&}u&$Q8wG&o_9+xe=}9PdOVW@T~iA?F+JKn}vetGv+s%!45YKHxzhxx$ISQ`TPr}akD(aYCm&R@-MOFjvE z=qt8~OJ{>eoe5Oovaqu%4|>B{Bw87PSkrBM+H)KOnYR%?Zw9O^(qDZZ2&}`c*TTZ# zo6}Ppz+CdzpKL})fx-rB0t{}g<-SaG1?JV@Uo}8ns_z76JpilQ%TJJ=AD^xOKXib+ z`tv5aWPQWkg$aCmY=WR?${?o%Ks7vM=F-OZcRD~3cu&(Pncyf{TdttKjxI|HXC;XK zQNb5!2I0Q*nu1C)LTXv>{^c3$;M;i*y*X|=iC{V<{&Y!7<|%IdW_angBNK`emlX$oi`><^q1wKUTXXAG}t_mL&FGGud{r z_z@wm3u8afu~r(=Fu8=|ZO~u1)!D4gkU4o7(0z zLm@!=hzC>I1n3T4tGOS#U=FEVz7Y2gku*A*2ySeRN_(gOoy0lm9OuXr>%eD$EKyD0 zL&E+9D3GZ`B^SS9;Yr$rz4QC2y_>rb+c09K4y6woyr_*h2Si2PunE5{I&Z%FUUGL6 zr@!RgWOyvKLJsKF^gV8u<%FVt>$e+*Gt8y6)harF%zhtxxjo`vODGm*(|cpgUgFaO zYm8j+rvEq4eXr|}?GvX&g^Pi}sJ9$Ctlxs-=c<15g0)Md{m(l=sD-|h@I z_r4-^m1344xje{SN`T(`YW1fKD@dgL&ybQ?9j}+$K*4g42!U^aWg~sg5b*tw-#eJT zPn`_GQt8E(b?C+8-4_~&#r(lv4q-OVXz3=1_-Q;sK%( zVuv=KWU4GJ%{P2l4Z7?cj-?@6_nkt<R_XPd|5242uCap) z+vwFmx||5It9?Ikqx(AxFHV& zYf~)}d$*^L^mI;N-dh?o{u(cG9LCQnIR3=eA# zR<|r!_s_1dPDLm@!rKqqy2jrgCK5BZ7IzSSU&ktQq{w4BSe+*3M;Z4Vo1zQ5c$4_y|KLwN zaP7Gz!hE!9ui8FGNRH%#1P?jj^uki9SV&f8Ba8aKwc%;FsYqta{&IC|vC?@5yCvn< z{$0%?d(e^;!r){h8s;if7c-bYuCRZ~%BURH7tS0}iuXx@juNYQ=4PJ41)t*V*a+SO zmtY&*D1#u`1Il^sFj;KS>acn~Q#jWd%Fh90ehHhxuj>iHg7RGtobUdRVwD}vHk&?s zWs^CV3e$@uvZi!LTz@L=O3Gmeg^rR_#aqQuCzVHYo;nN=!#dY{*CP%hc7GFgC@RT2 zeA8z6hYN7h*G{Li!>n~7E6?q{Q?TTKe5^5%6%j4XP+gC|k#A5PL9wEHee=r=UgZGU z6s-?)GylBksU}G*=KawhqAp`qH-j+6pDI08SpSt?^*f@Lc0&Rg?tCo;!oU##w9Dk=nS9Pmk~?Hc zNX;K~jUInTf<)#0x(_xAOk5}cQp@aUCidek3>n;UrFefX^kLi!1xqB{Qt>8a`Av~cP z1)vDX{XiuEN&SUwbFCaY#2vI7qH!j_|lDzyGqYu=b_ZYM`gvCJZjFOQ&)LQ zqU`Wm{iw6M0uK7Pgf+Pms@QkBQ{lIUG9f%_*eHZ2??F}^JYt@@a229 z9cND6Nm5aUX|+xB7ZcsgSFlUUR84($gW_R+n9CSoLf$K@=pk0}7VF0*CBVb0rUrVqEuBpt{W3Ndu7OD7Xpc3E#{_ikJh;#2EHt> zz2c?3qyB(W=x`5Lt3s$XYW=o{KkC> zd^pRUZy;!8690JgzmXcQ<-7B_e~+gdAT1;A;} zT)b)*{8s`m>)g)zw0m!oOVApxh26!r^|!?EB%f)hczTs2ZGjU4pk@Q0!<1;|-82oU zvR?{scWA^TEU%@F_o|`}%8@ds`d6XO5J0#)nuvhr53%H9U;KMB*>&aH?QeCy@c-G(ri*2TBDOVaGMEsKQ(d~*k#dvUbfto3 zZl%08w-UnvB*r!_3Z1BpREhQOijsxKGqGV1MluS*o_I=MZBuCbVv5fw9~|&S{zg~^ zE%;C2`PH@z)36{9Owj%P^d$T2UHs>Sv9;fcOA3Z#3!*VqBV!tSn#wkHc26`k5Cxv> zv&7-pDqI?Zu0p||ZrWWh zY@wzP@H1Fq_cQLMO@1YB>2qJ=3H~kb3~|UVVH9-w=so~UqMrEMD`DX!2Mpn2yt%F- zlZrngERL(9nWIey{CTwzQCdkqAN!wNotaV>1u=B{U-~*S)zZ~>iNlnwPOkL+kmL!C 
z12yhd*U8{{_)q+JRbPL{Ka)KPJ@#MuEk|p+1ysXXJ~y8}el3^u&9p6o;%S*d2zz(-*MCFc|13Ke03q>g z8_~I-Dcx0N0;AMkHAZJDw1>WlbCqGkGO}5|xmh1xeSScdnJlw%Ukg8QjB?oNiLPeC z;(fWz)6bTwRjuam(Y)eah>Buu{AS8yNTKF&P#y!0?gf933R!LL%a4i;(ihe!L>eSY z_DrBN;&B*TVTXZ*WdqA|8RvyA?%B!lZ((_pkbX|e{^iWX@aFiV2d$@wsb1+CX(oi< zu(2p3_PjQ3;^KJoj0;tytQ{UV10J0n@1|A46;&|;C99Fr+}NaaviD7@9Fbo3DhuW` zx57PiFd}8YVLD&R0~|(sgdOn0Oc+gO^PtlYw5{gq6TiVAgB;`cK=GGzFl{Q2!Kpye zTT5Ka^E5mfm$wLm8F= z8$4S`wBA$KzcUuq+^8^P`A&f^>`1M_bfYxl!Q##h3kiCs+sIyhl~|kJ>{wEapQ!_?PxB>Gxw3UeEly>7DOU*`wCi($)f}_s5)N- zBEo_&goAu0Wv#4Ci6)#qGEv}-0v~06X|EsBXZO4=Xz3(@Be6tZbA2sa21hJ9CqFI9 zCytVTaNiqBuR+TCtwwGjTGm=oW|4_Lg>{WS^j?TT_{U*m0eoG*7o@rRdDx_9aDyYe zinr^P%(Wu@nvivviITM*LQ$7(bzRYflJw|1b^+7F{lVyq5Nlm@Iy2o(qhGj7cCI-) zSA0HqgI>oj&2B|a9&UeI%iHVkB3rv1*KPV4TDO zugK=<9Pee&@(4qAe2@h}9oZZ)Y~Y6jb5AZE94bEk68nSSc-b9poPlk(T0r}xCc3GA zJlNXBBAaGEclsx`6OzCVGhe@kQ;0jL^w#n^XA9D#W+_}^q+>I#fI6J^`7=GLq4^Z| zlj|rGZKWu-Y%(nFMlw5yuhJ_~bhgl;5nH>n7m~S)X{vLQ-To zVrPEwohJ_^2Ub#PNt*cyZ({hcQhP+j zHIYjibQpe%WR2^-*RD~jLT!VpP=2j6`|qml&p)OrBt5o13FA>@V{(&M(HC_uqdnVg zi%5^O)u`C4fG~kr{uQdTy&yeT~^whL_Z0;}OF-!}dOt!q6XRk_J~6_%1O=dpL01p>SY^hyVY%t36Sw z;F9h#|7Y@5Azfo;5^j9R-q9{3ZvY~Q((J8BHkmkF5l*NQ9 zZuvYIcd#P#qgXy%f8 z-Z4!9j7HDvSq(IgwqEOxZA=N3HN>G@+2h8jDz({i@d7;HnsF>{b^M&OZ49;_G2WQx z0AAe&e>y-e7~Oknd)P8xMxYLK7J2Zpn{uf)#TPX3VDp3mC|A8Xn2J4)-zFN!N3;5a zbXtu4a-YJu^8%Ic%G9yIJV1=Md*x`_0D?9Lk~G`Xf!wgMN(ZynP&%~8{)pfZgCkIT zd0;axEa<$QItrTa00AaWkf?}${^MhtB6rQ~>oXvs*6MNfn(4v%n1%ZQfIjp~tU<{0 z;Ps@qPKia7dcMJ!V>fVcOhNyTEM;@K=|+F#)31_jBxN8cR5}w{d(v>5TON2uNT5x$ zT!-%21tg0UfP%}NhYKH6z%t&(Md#N))PtA??V1dkbPn9Sg=SrguUZuBcf^=ng?qEm z%kYAhIWX#bPvRGUroKB^R{$zLNaLn}a4~l`3aELg7gTZ0y9!#Q-gs-+8y_yi7GSo=WJG5$=!L^|VE@s+smC%b@b~g+B9H$F3cACkSW8S`BfgEU-$aYF>@!6=cNq>P;JDsq;=%fKAGz%EQo0a zxnV_+g)#+_IjCx9JH2mcZ4{uC69t0t4Ev`q;r6e@r136fBvLQChuB@%PyIp?Oluby zN%;N>s|;wJ0s;54yx9PCjc2znZtGYB4WQkP2}5K|vXQ|&NZe|qJ~XI)TWTo=q@R$5 zl70?w=_IhZa_B$$1w0~4oic9HPVJGD?Pbn+5V(TwmcoXJYT7}2IxpMV(L8dGs6V?o zmy%#W1|tmQ;Gg42zx_VUZ5ZrR1Et3yvD@FC?_$?0_3o)@a;*VfgEA^S(9&Gry>3=T z%K9;*WWuPiE8cM7R{*W2Y?>96pS`m5=4#-%kTj%F01rDkGUINMue;QhHN_;+_vmz{ zXDc&(RDSh#fJK>IZ%Sw-l>UwGw-;UJTOkw=W%2v_kXv&xhNPWdrdzncEIO5MXQ3T_ zBa%O6FV z?)?<4RD?~;LR)vbt%#W;@9Q(Pj5|1#^N?;c{ z+$1e6$l<9WOWs1k(r>jlNT* z#xA4ELI01E0JihM3=?}F6gd@L&uJoRks`Pn8-1i623$e*YR&3!-zo>XeZKu&ipNb; z=r`Qp5G`AblnEE0NCFDTvSn0;95a~WY`0f+t+;HGkwkWk zCQSjzWh7MBE?pIpJJqRsd5Y+vCx{2N$a!4Nb*o$gJupPaQP)@Wz%U${5b{&fZ1*(P zn&|n+xSl;_kF5-s?c{DGC6||LqJMZi{ZRW>2_Q*WB|qb2$%q(mXAR;#ze>sR z0O`j*w!Fn}2DBC0SFi@<`Uu~r{_h5@0;!Qn8f3c4e_11AD_AE81^ZpG9;~T1-cYqu z?)l^Q$4lv@%$2df{N7`uS5Kx&%Ewp+KNhCa>Uz;2d?~BnA9ht!eT@TeJEd-c^tr`_ zyw%kQM)gFj9aqZ6Pe3|bRdMH2L? 
z_%R$01N^t7g&{dzTYy&p*ur*-UF33_59V{OtgFrhWTHdNIJdivWsS;{*3ndqG1m$*z%V*P|lc5|Y3T zaeD&Jeh|gAQkk8T!&00-R$pqq9fto2D~VA8W@em*!89xT*D$CobBSRloi`g=rC8>* z7CwJ}@JO2vrk4L&H ze$S&{g+NIn#fXyk#_%2s#O1lTJ$naLh{?)6sBv+W~2C-h=HirwN?hS2r9^SP>E; z)vG|SR~!r2UJ$ROLO<5a(ar0pLYw2Mx}ksES=<{KpXB@nLx6{*8Mtj^Ht2c%HVhro z84GfpD|w(6CuV*OT*?UB|1+ZAF*&-kcfci{;{X4|B_6BnXx=@0LfSLT6%kV$VQluT z$|0}nrk;+LsdWmpQpn;zu{rS14`YFe8f2(HzKlvK|5$n&m`O(#(cI5kI|5yNp(h*) zi4Vf6HCa-i7J9IN$nF=*PligTGDa)*#Ipw2jjFo$lY$i%K0ai4c2H4X$Yhq)82<1= zQMF8lhgFBi_Xm$=Kzx*g3PfqlI4sd>mTop7)>#Ai+u0z($ffUcxim^be`fDY95P{{ z`8ZKIVQ3({%IQ4n+r7N@yaKV=rC%>mc5gZ1s9O7=jc^bz$~QD_&3^OArI$BJ%gIEl z=2qi6B#FAo>{|)uEuS0p>7fdg6h=03i3QnLwo9P6Kp_#(D2Qix_6Lc!@Qs1Dh~MW~0%A^j%HRV;!B2Mv`PPjSwi;XrI18*N&t&(f)=Y!`eh_ zmRs%d;Q6*|EMu=>-;!Utaw6fi+32Vf{WE^6wdYH{r|cwKr&~it zu#}NmMABca&w)o+vXBO9boM%ZR<UctA>@BW>5$Nx?0zrABJQ>Fp~C*TtLm#~b?mo@ znxX&>B|qbcMDE@dWC+4keX;NaI|9IU_(d$I9gI#NVp6U1+rv3zB-`{D)rwJX*V?U} z84jL0FU_d~bwZlE3R);XKl5~#UrgP0g~;bC;oIBna7+j8xGF_1!*T+;CWkgHA@w-g zfGEp_2x*1s4&`jTtfK_^A2N?#nWD<|6yw%VZ=+3}c@rL=Qu1!*x4iJXF)yYZZHgAs)QTLGe6XH3@7fn_kgFN`AG>30z^>Rfgc&QE2);+h`iw1`RAc3f%M2VIbcZ0>G;Py0xtfXk+LG!@2~V+)($3CqrNAQDa0j4rJ78z;kG0_ za@2WsZl_!{FMm%V;@<1f=3Q=N=s^EH1yAWM(}Aofut&KpnhGW$d5HutYW*DCcTic;T zw(0roeF{-zVfB&YcNRH$vcBiK7wN$U_b#4Qu?Lp!N0LE56lZBOAO;uofZ2U*ck=2Sk?j#I91 z*%Dn6P5lU6A+8Oov`e+^DywU)e`2WUHIT2E)ayV`YxwA8deCZ#`uLKr!6Uqkv~xt# zd*Lb{^^C1pI(VZaoqU{3?}B42g`QJ4c+0?1V*%;oC_rYd{3T3%N#{em?w3DlSocxp z7)enm^54;S^I~a$kC~ag&z@Z=l-^tWB*!m6FiQKn!+G;&pV~|K=;@E=OTVYtOTKh1 ze(s`&QmDvClTEA`YUny-n+HO12T1obGdWj@GI_Iq>G6a99+~4w95d5IypIg8r5N)1nKrT?F{TW}a zvp>8iOXtQ*S^!ChyEw`UGc>;3u$ebA{mD@F{apuE#6r)rh+!F7%k0l-Q8s0J0phNq zeD1bUd?B`@@zJVzx`~@nMkQEjXKb0r3fdxj_2b`Et4G1VM`_%6ITSh6owh%ZSX5VLANH43wIfi+k8u4ADpq&Ei04z~h5WmAA+l;)Xr$LoOb zWaei-n0ZCOpF~46m+Gd^S+w%v0qL^MWN^mkzZ=T~^E3`^y$%nvp~;H!H)V7e^|}J! 
zl3Lp97CUE}+{5<(KoN#u$CKaM&Tma^vewwR=r65eiP>Y=+iKg+E{&FHTB%IydhWeq>xb*G>C`PIo2kdBB0J}P>IK5m{Q{1knwlamlb(K# zHvU0Im?0TveFH1U4W z@_oQ^gDf>f6i&|mr}F=gao@xSuADo_XZ_TpLl{sv81$uJ<^a^d_~RXlNa%4ExwH9U zmUYe_LG?ejwtffrU>7P7Rzzamj^|24c)~00P)!m1MxfN#YBQ`ddeT`L5mqgy%pE#(?Zi;ny$F?_`5_0`*() z#}J^8Giu)GIk2W>|Bt*oI;Y2SIR^mGm9wcds`{jX`t-=bLJ*LO0R_9(2ALlKhL%6v z?D9W`25RyD4DCpW+b_@D2P*2aD-XoY83CJoS5Oc=H2!d$e~!>ge}??eT(hM1nlOC7 zeI5Eif28bf$r(WTVgO(lAZGR1V)pBl26?r>ztYV0dyPh#N|MvhQMl~K?f>JfC5O+7fUBkI4NbcYhY9XqYx($JlP&H6 z`tO{#$eVcJJ&2>BM8iP2i<_T=0TB9dUejPUMon>&++>20+II!1IP^{{(a0-nSoOz)zycutyum$ zd6)v|84rTWK4=AzHPPgcp80nHAhOmIcF2^^Hvu^3o~${^p`uXEBKyie<3hix`}^l| z@E}P@dQMK$2}|QG`Zz`zUge6!{Re3r8jm z03I*%`@c#4cp`s(%R)q)@qlq3;N;QlPAq3T$#Yw6TaT?A`0Ma|F8C4^>Bf+rY57^i zdFJ3|s&ck7s`8!8QeS@7?~Yr@UZ|wOKRXt^b?ZaGcbXf&4uZ`@_O6)Dmm{{H#B8IS zjd){tHQ!r*dRAkv$jlAD*VO(ZXj< z%AC3i6hs~ACf9k^6*C-1Z)5%YX_e8YyyZQFfzDbT5(3=oSgrHNMe~pE1(xjwsbg&g zrct%ahdk$HpnPa~8uGw-Ru_J5ptHnR{&B=a%%RhTWNF0F(R*ZJIB9cniil!tK}bi` zJtn;2+4TqVrIbsv?uEu}KHmmSC-&90ciYsM5I@qP(IBSC{>6$&E69!j4Cbyu!ZQ-E zT6@bP<^Jy8Keo&B(srdNz+LxOKdX1}OXi(AsA*md8=}nQ*I&!%V||-@%#A*6kt6Tq zG72^J6xg!(cTS$DemU$QaJ$%Ddgy$*=--GSYcaE&u(_}1HsUZ`45bga^}I7)1Q8V5 zs~*TvJNa~CE|A~MsKp0H%|6(DFXbg4NKZ~jz~H<1G6o<2y?*t9>*eJCo5}`?@>7%D z*JV>&SdGMM8Hg&I;GBB8Qq^A@x}voqzjwpOba-g5=*@)hwsCW0smUvFIFi`iiDqn@ z?r3K4V$@vKRxLZ!u6V!lf#WnY&UMJKmhyNmewRv3ZXAU1s|%nNkiAN5Bn{2im}6Ht zthIpaRdZEGRdbQi4cJ@WebH_VF|j)_9A=Uu6~69gYP!{17017m3lhU0>8r-;<)k@szd^-^u8llk5p>|gtQ_M2JI?lfQ^9W%Vn{KXPq>c>=fR-ZK&n|UfO z{=Bg$Dl%D_j1<}LVQ<#9tw%Oq8Pwe)F`#0MvY3xrm$Gd4{+?~xGeERGc`oJ5v$KOr z)n{#z0U`i!>36#QpBM{^vDjuQjjd8&|J9ZF&(S4M`}A`2#|xp#t$(O24bLi;VBI10 z>b>oquG;-wHQScJd1uWNRoku3q`RkA^GfK&+4dRi4qcSgJziBYo2M`0PxltYwq1_2 z=Eld|(Vzj^OATk(2*gEUNt~Fqz$u27ERPPpGfZIxWUus=Ag8Lc zc%Zpkoq2w0PpIU)^&%UHtDY_dE!RAbb}f;5wCgN2yq~p7DkvhdmzySIcB0&p`XZ$P zja3w}ua@%-ZWj<1Gl?A(b$Qv#K{kLIC9FJszAE#heTd+E8P?;r0jDJ%Q=3LA9Pey5s>y3 znkxY6x~f@7vKCat$_(wB0YfIr4kzj)_StX`e6-{UZ%N(#V}L6?hJ|5t=Qv%8sP=rp zTji9Zr1mWa_qI_7{m3sBYW8qxdqNf12^RDGJLB<%!MLgs(=JYU-?7G{oh$wEVZY*F zQIC$Sl};QM;BYm&qR8xMx#lkTk}8Ct&05VaoOz1B*vD_U-O}e_#V_8x#AQ-W>VI13 zpe;1MIe;yhWjm?h(!3&i-9R}4w{2mfI69ofE+h7){i*K2`-8aC!xJN*bT_c87^PdW z;V(apR@y4NJ>ru_x+FuhcRy2(xsT5QEeWJ;0dvZP<%35@Bc}90AWkT2T|yliTx1bwzV?X+O&{8kY`!)v z1Vr%0WTEHxd>>&I|CiVn0xexvQF%0wz$J$lALBn!9M*s=s_f^c?37O#X?^>rrNT#(lC=D6dSf)E=IOW%#~70+oSsxTtukC!#?n<%Hz1MI8b$ZY9(6$os#m)>7{r zSAWnAZ5@+@+sBZH{M#|cVnm1DDI`SjQ`m@w@$@DyTUH57@9S=3m}1dXMkzv;gERHu z!tC+HhUr$$TG1OD(5kmDglQ!kZZ?&+*$1luWSKCwz{ua@43>&1rAYcvaZ@N#A9H)Q zR}uA8@6Juh+r6F1B^?^ylf}B&-MPqm^amTu(uZL`^x-*~*4efSiaulS;XywrWpb|Q zJuq>1UauHRLjoCX`(-VSu3uK_ud0W!O~;S9!g*glDqF{frRbHvz5ibaFjU;*a+wx< zlAlok!q+#DVk#q+R=V$>c1_*oDe;$Lzu$s;d%Dk3=h@oYP})nTrJ=F_R(NB>gTcJ< z=mav^5ln}J=owVuxP9A~HM2t{Z9Ik0`+2#zRy)0QXBbO7Iccz{{@dPU(}9ABAI$vW z$!0}D#kZGAv`25kbB6dZM|{7Ovm}TX5+8S9tO@Dn__Id5QRdT9jcWg1!l#MoC5+!B4N;*&DRUcE(}hrr z_BLi0s%IPi{7Q1LArguGy96y1F-fkql(H@h85xL82Kv-63w~C5fP<+x{L!b*5k$cf z=9?Hl@QD%HaTaD-43mX-#5*J~HSPFYt-aem%me^_QngI(YJfQdN^+*rpUFM3mbq^2 zQ7-AB#B5Q^&saje6X(aNwo}Eq4_?*7+O(kGihu2U2W89fJJ|?+)l7O1YK7y4_R0v2 zh?V6T^1RfiD3Yu~v-Ys1@tt8a?w}e6`q(w+?LSg(moZEd$km@8h$-PF9_dt_B@6OI z$+qIBVr_=ST5y}WwBTQr-o0&n@Z-Z(T-I0tz`&HA#XAu(c;qgHN>xrPJ!}2M6h$bO zn!%M+rx)sWJlm$GL>i&+nXO_rA&=x)DbAjWVG|lhelM03$?iTz+-f>Z7NJ;6|2g$l z;BcAWHIybXOsO!A<2 z7z-lf`9!8S`73p;yeK`XMmpogWpO1ZmRWP9|P39BND4`f?uf= z<%7x48|y=bbN>zwdWnoud0b?}sQ=I7hajlSHuV>%OZbaPgFqB{xk*d(zvF3XWsk7t zMG792xu*(CJQ#tG$_twRE=gMZOD{)+9>KwQ<&tOg2n)n&`TUaE^}p8Y_q4%aJ(I4K zx$d8n@6-k$*zlmS1a|yL&lC_2`gu zl%c+-;$5||3pCYsZJC5eBw+!VOUx@DvidFc2y?zbZ`60yGlzSyW1MfJ=9V1a9=lY( 
z1D`t$xgxeO5CmeA%N(n#0?5EA9_PeiVt9DeRogpPQYF?JT)D#(CyzlVcO_{DRQ6}P zfo@2Av&S|XzN`q%uoohXlR_Aw&4no+btn!MtWE8yRG__rbr|^Ue-6$AW!@F4_Wz?RX?|30y?#M=6@RGYec+(PKLbu2 zyy-~-R)0A<*~wQ#o?xU7*+R8}P!|6bQ~1cu0`RT$UP-@Eo|+OiGs~TuEdxXq4x*p`qcUGO?xC9fFbszBa*iW(z9X)Fa64Zj!4 z{M8kZ6+YauBE-T3yU0@3A7zg7azGN}gE$z1T#7V&H|6{0b2V_$B;aUn&HQG>$3ovc zi16>SzyKxH3!)HbLlYMZMyDG{9(>UU<2C>~qBQxR+v));$l?Ev^Izx_ECPM488!=2)Yjp7b++;bd^$9!KT99Ge z-w;h1Muf9jt2#3|xnzzPghpnDD{h8=Ap@L&)qnQJ%S32C+91NgOe+&PycYo+iFlpc zw=mHbkOmR10vFyMaRyZEhi$PB6FA0E4^`xvc}2VpfpQVL1yoT+k^r@hXorp0NIK;ghnK+d~3R# zao5f3^Y%#IFW}s$JjODaUxl0Y1*PelZe901<}$2iy(Yw^zO7y1C`uvnjRM&#b&yln z8kIgWwY1`l#m^0x4wDjK;_w=Rrd5Of=W?Nll7%{(<|B*jyPq7rw$gV#86(84d0RG2 z_U3GvDs%ix=;OOp2Ab<9PYpUhp&UDN?;aqHgTIF}>bU-XYZ}@ZK&7}$O2b}eBNFN( z<`985(j6mJ7avh;vfK*N0RCt3%_hEFu!!v$hNr(4ge1+iIEFYUx+XZ&i-|S;?xmLQ z-mU11uQO#O8txW3T}eVsBYOIKNPYGqL8h{j5^-`s4a>HOV62MH3gBaUOxZ&Uk1x+a zjMkiqzi=;A=%%bo-edhR%N?sziIYjUWC(j+H6G{e+kvojG6R`v0-)6KI`vtV>&Sjp z+~9_SkJe2EWUMn1qyrv2-(>s!ZO>b{`~p1MX*CqC1+(o{G_`~DCTj9BD{ydnVhj3` z-(`*eW$vvy3mCQkdm91vV2ErR6?_zM8SJgMk-C^S0>3X_Z%|`hFv{#N_XD{=3Ao|Q zEW+I5T!S*IZ*E6!OdmtrzeYeZllWO4J~=<$p>eJ`&3?17UZy1wPSO^3AafJ+ zqCYE|DZaD1TXK3b85VR_1YQZ9*PQH?X+OXdbWL~O@EWABE{dn>!S}++&`wjE(~?smYOHmUp-e(yyQ3F69bbX8D1UfKLj~td;fv}3%$^Ke6gB)&((stks z+T>sRk5&F>#$69LOfomyX(dc;er zahI#*HQ#^JYIgnoOy<|qSj~9Q(Z(PWyMHGmX@vXHjKyAGd&S50_exxA3()o9Q(yZm zF)0~Qf~zImiZ3=oRcJWuw5!uqek;!%hSiy_S8~CAqGM0`NB%6x_n1UGB0L*AD_vvO zMf1^6i!-(aXAg_+)+Hq*Ea9{??W}UAxHwZ#r(L6iVP?MAo)zIYTbe1T5V3i8SW?=0 zuB74f=u+%c$XEkODC%(X`!$77+S3lAlW+lBV~|ut5OY1#ULMG0N9*PY*gi}5tCE6) zzoWHiK*vmfq4cAs!tv;4K$d_3U6a}Ins?iPYkRA?UN4!H=*zfO2cT~;xu2gdQ;(F{ zBrU)Df+mJ8TvS4GY3c4(mk=qXJC;}pK@bsS0qGP$ zkZuJL77zr35)h?Z>fEcpJm)=UPW^G_%*!wgGyBXwb;otz*Z1?ikhRg5TsVKtOWS#R zo~lg4KeB}v`xYc(=C|C2A{d?BX3uf#ts5${Oyut<7rbrK=(A}Y_(Kc#~r;l>)m)PDz+&clPAqoHN)P5 zG-Tkb^7Nmdm>Xi=yb!E+>P$}Lc3S4XjgGvCUfZc;vLiI<YHrfJ*Pv1PZ+x#BwYcb=b1{TL1?LfbkESUu%N>;IA zn(L0|ZT61}Vbfiw3OU;MpgjhqJMRO%yegfrId_BSS8+m|Xb0%~sc_=);_v&jj_#Tc zKxsVx{GIFrkU>%h2N}{g^EMAN~Gm;lBq| zuzw&(x0;-5MX%X;ndfVSo;xvJWpS1l%5fl5<3^>Q&5rB;49gY;w?0~QJj~%5;~d*e z)V1&{lZNZMRnBM+7kK>mL8o8c<3drm+^WP4fhwfpKtw9@+JczQ7s-wBeB5+D6u7CC z({a9;n|qoyKF4}7d==8i9RM*XiUp}&Z2_*Af;N zgREBS{zj)sUWNIK64#HwZ5Q||nLTejRZf>a-WaV0_(4*_svp=yCc9}JMG%e)T@qD> z|1UstGsCLQ41Y3mZ+tST>&zpoFil$v^qzOtKB$@8K|9~VOnP=2U^6b+oO{$NCX-o| zUX%~jvlxg$w6pZLQ#1E0tzzIRXmaEeNG`Am(Ew~Ltj|GIEl>L?&Y22VGx%_&{wwHI zFTxah+CUp}`rPE_xjC<%=AQ|2fjn}!JZic%iOycOs9m`C8r#X&XlsZMpctP2Yz2xG zqw(I;Rshk0y3ZEM5}*=0_gd4|{Z7Bi=G{!G|A38{Y`Q6&cDD~A?8P|~LTWYRs3w5% zRSQDQ!;MM|!+(mIK=cd`Ye4NhiR^&gAIYYZ52e+Hm2FxM7MKqJSpgeBcF7t|Zvp%88bQwQSiIc%C!B5Ek(#n>S)nuFC17(~IQ%hu< z%AtDwtD9kIW<(0IQ&rL`4&PmyS}ty6XcR=pKIj?WO>u>76-K`P?8fUfv`9W0 zj##DBa2W04-~lk~mSvmylCoe*MTwA~J1; zqWEr5q#1|KhzB3}Dl9*<=5S0J0F_j6b)O3W*s}Qt{$WTWjA;d`>*uVA9vugOI!LEr zmEyDO@)^F}M~hX6rxPx@F9)Dg8bRahQ?RA#lokNglr|3K0C-5<)HEXkKI_}ZTM^Nc zOM#jzGkZP{N|>9}Bz#Ah)BtW50W8?Q*A+1jdhnnbNY`aSUE*5jF^v~{C&!01_T40A zt-;Myiv^nLlO=E|m8*E;3UbMrAD=3m^_xRqF$@7R`rM!A#Ol&j?oHv(s2aWi5QN5O zs80_)888zwTQ)z4(f`!uPpzro-tL&O;<0H>OQtd1ZO;VR|7MR+`TuN> zaOLZjIwZ2|A?(RdBcm-ia~gAH9sZ;GmUzdvj|R(MC4hA3MWQ|+ff@}16oOBZY9AFw z=EQPa5}jO7I9boCcS*<*&<;#~TW_zVnJNe+b;J=dc4>*A41leV^qc*CVrZ@-E<(!@h-#<)b>%0z9J&xA(G$>8aNG;bexfBP;L$JcWr-9)Gq?Vm zdg)5ij?ZD24V<{}YM+&WbQ9mByW|NUL>F-pEGAYUGe^LqZ7l8?gU%tS&Fc=)Mb!jJ z6|_|K+k_nR0Pipp7>!xjQ_jru3K6DL^d?v$B(tPvK9%Eoy~zIQbg`O2eyG)Ob*$ut+Rj&J3 zazg)wq8g{%4=TO7{m}~9#nSF#;@&?Jir``kxGv1a%+5k3=G$D#z1NghL;}rJhGGO}Fx){EtCF z<>ki4)MYG@$0(QKPYI68sN1l(s3KliWRj-P=Gj=Ei@-?EAbC*Gq@t+S9H9$c2`6&! 
[... GIT binary patch data for the preceding binary file omitted ...]
diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.md b/hadoop-hdds/docs/content/concept/OzoneManager.md
index 1ebdd4951d20..f0711ed21d0d 100644
--- a/hadoop-hdds/docs/content/concept/OzoneManager.md
+++ b/hadoop-hdds/docs/content/concept/OzoneManager.md
@@ -2,6 +2,9 @@ title: "Ozone Manager"
 date: "2017-09-14"
 weight: 2
+menu:
+  main:
+    parent: Architecture
 summary: Ozone Manager is the principal name space service of Ozone. OM manages the life cycle of volumes, buckets and Keys.
 ---
+![Ozone Manager](OzoneManager.png)
+
 Ozone Manager (OM) is the namespace manager for Ozone.
 This means that when you want to write some data, you ask Ozone
@@ -55,6 +60,8 @@ understood if we trace what happens during a key write and key read.
 ### Key Write
+![Write Path](OzoneManager-WritePath.png)
+
 * To write a key to Ozone, a client tells Ozone manager that it would like to write a key into a bucket that lives inside a specific volume. Once Ozone Manager determines that you are allowed to write a key to the specified bucket,
@@ -73,15 +80,65 @@ to the client.
 the block and writes data to the data node.
 * Once the write is complete on the data node, the client will update the block
-information on
-Ozone manager.
-
+information on Ozone manager.
 ### Key Reads
+![Read Path](OzoneManager-ReadPath.png)
+
 * Key reads are simpler, the client requests the block list from the Ozone Manager
 * Ozone manager will return the block list and block tokens which allows the client to read the data from data nodes.
 * Client connects to the data node and presents the block token and reads the data from the data node.
+
+## Main components of the Ozone Manager
+
+For a detailed view of the Ozone Manager, this section gives a quick overview of the network services it provides and the data it persists.
+
+**Network services provided by Ozone Manager:**
+
+Ozone provides a network service for clients and for administration commands. The main service calls are:
+
+ * Key, Bucket, Volume / CRUD
+ * Multipart upload (Initiate, Complete…)
+   * Supports upload of huge files in multiple steps
+ * FS related calls (optimized for hierarchical queries instead of a flat ObjectStore namespace)
+   * GetFileStatus, CreateDirectory, CreateFile, LookupFile
+ * ACL related
+   * Managing ACLs if [internal ACLs]({{< ref "security/SecurityAcls.md" >}}) are used instead of [Ranger]({{< ref "security/SecurityWithRanger.md" >}})
+ * Delegation token (Get / Renew / Cancel)
+   * For security
+ * Admin APIs
+   * Get S3 secret
+   * ServiceList (used for service discovery)
+   * DBUpdates (used by [Recon]({{< ref "feature/Recon.md" >}}) to download snapshots)
+
+**Persisted state**
+
+The following data is persisted on the Ozone Manager side in a specific RocksDB directory:
+
+ * Volume / Bucket / Key tables
+   * This is the main responsibility of OM
+   * Key metadata contains the block id (which includes the container id) needed to find the data
+ * OpenKey table
+   * For keys which are created, but not yet committed
+ * Delegation token table
+   * For security
+ * PrefixInfo table
+   * A specific index table to store directory-level ACLs and to provide better performance for hierarchical queries
+ * S3 secret table
+   * For S3 secret management
+ * Multipart info table
+   * Tracks in-flight multipart uploads
+ * Deleted table
+   * To track the blocks which should be deleted from the datanodes
+
+## Notable configuration
+
+key | default | description
+----|-------------|--------
+ozone.om.address | 0.0.0.0:9862 | RPC address of the OM. Required by the client.
+ozone.om.http-address | 0.0.0.0:9874 | Default port of the HTTP server.
+ozone.metadata.dirs | none | Directory to store persisted data (RocksDB).
diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.png b/hadoop-hdds/docs/content/concept/OzoneManager.png
new file mode 100644
index 0000000000000000000000000000000000000000..f71bfacc4121d47423a2abdaffd6bf45400b2b31
GIT binary patch
literal 13327
[... binary image data omitted ...]

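Both the OzoneManager page above and the StorageContainerManager page added in the next hunk end with a small table of notable configuration keys. As a rough, hedged sketch of how such keys can be overridden programmatically (they normally live in `ozone-site.xml`), the snippet below uses only the standard `OzoneConfiguration` setters; the host names, paths and numeric values are illustrative examples, not recommendations.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class NotableConfigSketch {
  public static OzoneConfiguration buildConf() {
    // OzoneConfiguration picks up ozone-site.xml / ozone-default.xml from the
    // classpath; explicit set() calls below override those values.
    OzoneConfiguration conf = new OzoneConfiguration();

    // OM keys from the OzoneManager "Notable configuration" table.
    conf.set("ozone.om.address", "om-host.example.com:9862");   // example host
    conf.set("ozone.metadata.dirs", "/var/lib/ozone/metadata");  // example path

    // SCM keys from the StorageContainerManager table (sizes accept units).
    conf.set("ozone.scm.container.size", "5GB");
    conf.set("ozone.scm.block.size", "256MB");
    conf.setInt("hdds.scm.safemode.min.datanode", 3);            // example value

    return conf;
  }
}
```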
+
 Ozone Manager(OM)管理 Ozone 的命名空间。 当向 Ozone 写入数据时，你需要向 OM 请求一个块，OM 会返回一个块并记录下相关信息。当你想要读取那个文件时，你也需要先通过 OM 获取那个块的地址。
diff --git a/hadoop-hdds/docs/content/concept/StorageContainerManager.md b/hadoop-hdds/docs/content/concept/StorageContainerManager.md
new file mode 100644
index 000000000000..68953ced24d3
--- /dev/null
+++ b/hadoop-hdds/docs/content/concept/StorageContainerManager.md
@@ -0,0 +1,102 @@
+---
+title: "Storage Container Manager"
+date: "2017-09-14"
+weight: 3
+menu:
+  main:
+    parent: Architecture
+summary: Storage Container Manager or SCM is the core metadata service of Ozone. SCM provides a distributed block layer for Ozone.
+---
+
+Storage Container Manager (SCM) is the leader node of the *block space management*. Its main responsibility is to create and manage [containers]({{}}), which are the main replication unit of Ozone.
+
+![Storage Container Manager](StorageContainerManager.png)
+
+## Main responsibilities
+
+Storage Container Manager provides multiple critical functions for the Ozone cluster. SCM acts as the cluster manager, Certificate Authority, block manager and replica manager.
+
+SCM is in charge of creating an Ozone cluster. When an SCM is booted up via the `init` command, SCM creates the cluster identity and the root certificates needed for the SCM Certificate Authority. SCM manages the life cycle of a data node in the cluster.
+
+ 1. SCM is the block manager. SCM allocates blocks and assigns them to data nodes. Clients read and write these blocks directly.
+
+ 2. SCM keeps track of all the block replicas. If a data node or a disk is lost, SCM detects it and instructs data nodes to make copies of the missing blocks to ensure high availability.
+
+ 3. **SCM's Certificate Authority** is in charge of issuing identity certificates for each and every service in the cluster. This certificate infrastructure makes it easy to enable mTLS at the network layer, and the block token infrastructure depends on it.
+
+## Main components
+
+For a detailed view of Storage Container Manager, this section gives a quick overview of the network services it provides and the data it persists.
+
+**Network services provided by Storage Container Manager:**
+
+ * Pipelines: List/Delete/Activate/Deactivate
+   * Pipelines are sets of datanodes that form replication groups
+   * Raft groups are planned by SCM
+ * Containers: Create / List / Delete containers
+ * Admin related requests
+   * Safemode status/modification
+   * Replication manager start / stop
+ * CA authority service
+   * Required by other server components
+ * Datanode HeartBeat protocol
+   * From Datanode to SCM (30 sec by default)
+   * Datanodes report the status of containers, nodes...
+   * SCM can add commands to the response
+
+Note: clients don't connect directly to the SCM.
+
+**Persisted state**
+
+The following data is persisted on the Storage Container Manager side in a specific RocksDB directory:
+
+ * Pipelines
+   * Replication groups of servers. Maintained to find a group for new container/block allocations.
+ * Containers
+   * Containers are the replication units. This data is needed to act when data is under- or over-replicated.
+ * Deleted blocks
+   * Block data is deleted in the background. A list is needed to follow the progress.
+ * Valid certs, Revoked certs
+   * Used by the internal Certificate Authority to authorize other Ozone services
+
+## Notable configuration
+
+key | default | description
+----|-------------|--------
+ozone.scm.container.size | 5GB | Default container size used by Ozone
+ozone.scm.block.size | 256MB | The default size of a data block.
+hdds.scm.safemode.min.datanode | 1 | Minimum number of datanodes that must be registered before SCM leaves safe mode.
+ozone.scm.http-address | 0.0.0.0:9876 | HTTP address of the SCM server
+ozone.metadata.dirs | none | Directory to store persisted data (RocksDB).
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/concept/StorageContainerManager.png b/hadoop-hdds/docs/content/concept/StorageContainerManager.png
new file mode 100644
index 0000000000000000000000000000000000000000..605c48c355f87728692a7e1bbd5a920760a010ba
GIT binary patch
literal 13336
[... binary image data omitted ...]
diff --git a/hadoop-hdds/docs/content/concept/Hdds.zh.md b/hadoop-hdds/docs/content/concept/StorageContainerManager.zh.md
similarity index 93%
rename from hadoop-hdds/docs/content/concept/Hdds.zh.md
rename to hadoop-hdds/docs/content/concept/StorageContainerManager.zh.md
index d53090646cc0..da29869808c7 100644
--- a/hadoop-hdds/docs/content/concept/Hdds.zh.md
+++ b/hadoop-hdds/docs/content/concept/StorageContainerManager.zh.md
@@ -21,6 +21,12 @@ summary: Storage Container Manager(SCM)是 Ozone 的核心元数据服务
 limitations under the License.
 -->
+
+ +注意:本页面翻译的信息可能滞后,最新的信息请参看英文版的相关页面。 + +
+ SCM 为 Ozone 集群提供了多种重要功能,包括:集群管理、证书管理、块管理和副本管理等。 {{}} diff --git a/hadoop-hdds/docs/content/concept/_index.md b/hadoop-hdds/docs/content/concept/_index.md index 8f0aeb07c965..1441b00f2115 100644 --- a/hadoop-hdds/docs/content/concept/_index.md +++ b/hadoop-hdds/docs/content/concept/_index.md @@ -1,8 +1,8 @@ --- -title: Concepts +title: "Architecture" date: "2017-10-10" menu: main -weight: 6 +weight: 3 --- diff --git a/hadoop-hdds/docs/content/design/ec.md b/hadoop-hdds/docs/content/design/ec.md new file mode 100644 index 000000000000..415796d57597 --- /dev/null +++ b/hadoop-hdds/docs/content/design/ec.md @@ -0,0 +1,39 @@ +--- +title: Erasure Coding in Ozone +summary: Use Erasure Coding algorithm for efficient storage +date: 2020-06-30 +jira: HDDS-3816 +status: draft +author: Uma Maheswara Rao Gangumalla, Marton Elek, Stephen O'Donnell +--- + + +# Abstract + + Support Erasure Coding for read and write pipeline of Ozone. + +# Status + + The design doc describes two main methods to implement EC: + + * Container level, async Erasure Coding, to encode closed containers in the background + * Block level, striped Erasure Coding + + Second option can work only with new, dedicated write-path. Details of possible implementation will be included in the next version. + +# Link + + https://issues.apache.org/jira/secure/attachment/13006245/Erasure%20Coding%20in%20Apache%20Hadoop%20Ozone.pdf + diff --git a/hadoop-hdds/docs/content/design/namespace-support.md b/hadoop-hdds/docs/content/design/namespace-support.md index 0317b46b29a0..5dbd289e9d76 100644 --- a/hadoop-hdds/docs/content/design/namespace-support.md +++ b/hadoop-hdds/docs/content/design/namespace-support.md @@ -1,9 +1,9 @@ --- -title: Ozone Manager HA -summary: Support HA for Ozone Manager with the help of RATIS +title: Ozone FS namespace +summary: Use additional prefix table for indexed data retrieval date: 2020-01-20 jira: HDDS-2939 -status: accepted +status: implementing author: Supratim Deka, Anu Engineer --- +# Abstract + +Proposal suggest to introduce a new storage-class abstraction which can be used to define different replication strategies (factor, type, ...) for different bucket/keys. + +# Link + +https://hackmd.io/4kxufJBOQNaKn7PKFK_6OQ?view diff --git a/hadoop-hdds/docs/content/design/topology.md b/hadoop-hdds/docs/content/design/topology.md new file mode 100644 index 000000000000..edd5a90662ea --- /dev/null +++ b/hadoop-hdds/docs/content/design/topology.md @@ -0,0 +1,29 @@ +--- +title: Topology-awareness +summary: Placement policy to use rack information for read and write +date: 2018-11-16 +jira: HDDS-698 +status: implemented +author: junping, xiaoyu, junjie, jitendra, anu, nicholas +--- + + +# Abstract + + Adjust read/write path to consider rack information for proper data placement. 
+ +# Link + + * https://docs.google.com/document/d/1HsZqlBcEmlezU6HriUaIOFE9SFdcBoaiz15Qt_ng0P8/edit \ No newline at end of file diff --git a/hadoop-hdds/docs/content/design/ozone-volume-management.md b/hadoop-hdds/docs/content/design/volume-management.md similarity index 100% rename from hadoop-hdds/docs/content/design/ozone-volume-management.md rename to hadoop-hdds/docs/content/design/volume-management.md diff --git a/hadoop-hdds/docs/content/feature/GDPR.md b/hadoop-hdds/docs/content/feature/GDPR.md new file mode 100644 index 000000000000..47424844d946 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/GDPR.md @@ -0,0 +1,80 @@ +--- +title: "GDPR in Ozone" +date: "2019-September-17" +weight: 4 +summary: GDPR in Ozone +icon: user +menu: + main: + parent: Features +summary: Support to implement the "Right to be Forgotten" requirement of GDPR +--- + +--- + + +The General Data Protection Regulation (GDPR) is a law that governs how personal data should be handled. +This is an European Union law, but due to the nature of software oftentimes spills into other geographies. + +**Ozone supports GDPR's Right to Erasure(Right to be Forgotten) feature** + +When GDPR support is enabled all the keys are encrypt, by default. The encryption key is stored on the metadata server and used to encrypt the data for each of the requests. + +In case of a key deletion, Ozone deletes the metadata immediately but the binary data is deleted at the background in an async way. With GDPR support enabled, the encryption key is deleted immediately and as is, the data won't be possible to read any more even if the related binary (blocks or containers) are not yet deleted by the background process). + +Once you create a GDPR compliant bucket, any key created in that bucket will +automatically be GDPR compliant. + +Enabling GDPR compliance in Ozone is very straight forward. During bucket +creation, you can specify `--enforcegdpr=true` or `-g=true` and this will +ensure the bucket is GDPR compliant. Thus, any key created under this bucket +will automatically be GDPR compliant. + +GDPR can only be enabled on a new bucket. For existing buckets, you would +have to create a new GDPR compliant bucket and copy data from old bucket into + new bucket to take advantage of GDPR. + +Example to create a GDPR compliant bucket: + +```shell +ozone sh bucket create --enforcegdpr=true /hive/jan + +ozone sh bucket create -g=true /hive/jan +``` + +If you want to create an ordinary bucket then you can skip `--enforcegdpr` +and `-g` flags. + +## References + + * [Design doc]({{< ref "design/gdpr.md" >}}) diff --git a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.zh.md b/hadoop-hdds/docs/content/feature/GDPR.zh.md similarity index 91% rename from hadoop-hdds/docs/content/gdpr/GDPR in Ozone.zh.md rename to hadoop-hdds/docs/content/feature/GDPR.zh.md index e44957f537be..af0684dcfe08 100644 --- a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.zh.md +++ b/hadoop-hdds/docs/content/feature/GDPR.zh.md @@ -22,6 +22,11 @@ icon: user limitations under the License. --> +
+ +注意:本页面翻译的信息可能滞后,最新的信息请参看英文版的相关页面。 + +
 在 Ozone 中遵守 GDPR 规范非常简单，只需要在创建桶时指定 `--enforcegdpr=true` 或 `-g=true` 参数，这样创建出的桶都是符合 GDPR 规范的，当然，在桶中创建的键也都自动符合。
diff --git a/hadoop-hdds/docs/content/feature/HA-OM-doublebuffer.png b/hadoop-hdds/docs/content/feature/HA-OM-doublebuffer.png
new file mode 100644
index 0000000000000000000000000000000000000000..a71adce40a6355625fdf9b9fe136e71197839437
GIT binary patch
literal 77661
[... binary image data omitted ...]
zc^nZn!llbeBCpBKQ7JSmq7Dn{E4C>hR-v-kcvg-B;25Tm&IfzOkh9H7f7s?jymFaF z-1)&K{Y8r#96U*5$t`Tn&dK3`?2y!eFHfoC*uxzVYL3RspZkGWVeHf*p(0~62 zXz`iTawx4vjhT$0(ENTG!Rd^uSs*v!0L=I|_-Fwf;1)`Zp_~>VkVq^s2CFs%i9T9@ zgAoJ%1dDZJc}};_EQ~6hUYUj_r(0Qceh5#JIlNlFCt&3BNhUwmVz$!pF&aMPiW)I| zJuQUwm>gUS0dSd2Hs9jaTM?}YSRq6QdE810;*gj*7L_q0Fx$p~*fgiXprmP7VYxsi zQDET!1!C$G zWJN?cHG?PdP}JBcNr#v5tQNb75z&erA-^P|74qm%6f5CUEpiSL)Cs+8j>H(U2Lc4N zUiaW>%qTM$HAu~TpN{KxB0*2k0Vq?5Le!c8d=V%yh|VSCgcNcHUm)X?uws}e^Vo3$ zo+Vy2pGRq}AoR%yS2B{$a zq5y_NbVuk`9n))-$6{i-fg||`KqV{ zA*cj;T7Zf7&>RjS4a0N=v^WpXp?4AVa;Jy})9rT18GyX{h*u;d@a2G&@#-xmgVnF~ zidYz~+@cE`t!k>2O^G^%e6dGv5rzG9u>=!w5ed9V#7xzQc?wue;__@UJek4}Xe>k} zfueL+*@Tc(X@zVqZ@}fH3#1x^g5j7D1se52e7e?!0T)z>Ruwhii2!2b$X0?*Kw)|X zpjrY&CCAE9k|CBCV(A1Ls+6Yo$b2CNU12pj9buwd!z4I~B8nlTVUfbju+|h~ais*l zk3x~qd0~!S8Wxc_YIOu5V>LvZ-KlcR1Wc*X$K!FmB$by)beqv?)X0vS#b_d=L}6Zx zN&p+cRAKo-Kg|ova3PG5jT4HM7_NuvC+MY!0PE9Ql@N-9^%}7hCzlCyJ`r9khjbdD zAWZgym>srz^<1BXEjJS63Vy(DKumrIB4CRI;Rsh~47zP9b;t;yr2?;uDT#US$K zKDS6Oh%)`|Xi&!x$ce!H#CA8R>>#HY%La0AFhQd?idRJ;vRW^&2ANtfh67Tw)Xm2R zEl5hQ2OoN98JU zDvOP1$AbYLipB&w7J{(GBA0T|`Q=QGS%3#Lgp$HwOXj5*g*1NGD!w8HAo~dLS(Ev zhBq+1L9rkT*ai~^62+`0lw{XHngE-?HX1c1b`aue2%M-GLwCsNFwMsW2otjSMSwU0 z)sIKZS4Wk63XzE>+n556z+rwQD&g=LE}l*q6B;NArZWb(kVpsx+X}Oe&ryj1b%Qc( zLNO;yVOo?SKgEX=*u^-SC_;u9dXI?}quaIYAdpuFwj1K&taPq`0uV~{163_CqV~85PM%i_P?8IDiJWo}Ui?C`l_F$QAv4EJ4yXa~ zimC!JBcPmtG#(Wz?xeA~AtbB~n=N5kwv$>1`}FghP~JSVOl55JZWJ^`baF6PA~UaK}r_DQHztmGdEpbhB_ z-ar`U=*$tXImn?o!XZc0pdgupLKfDAa|M)Io>0Z%_?-$M9cXJo9E(K&2ON%|6sHZF zb%0i7qgf1KO9(?oMQ6}qoID0X6zX7>m_2OKOJru9UC2<7g>#t4~=0t_!~WUv7Ob|S{2lzutG}@tLR#x zUqVM?6NQEI6LeS(KVWmw6c{0as1I8?RtgVql&Y{8rj&`3T9jTR$^+6t+b)7Qco-3y zXnj0a(8M*8{9H(;Ht;Y~*y^!`jZqF=#|$#DSelfopfd4fP<8uQHe4*A^r(b-sgWwd z$Pt6hF6QAtm&a|Vm~1$$iK3N9tOh$#PvvU}b_$r^W)l*4R)2&NVzI=`m|iT77bdW^J(gX}DEey~>lHu_ESQ;?e#3Co!v4Ytm9|k9;^C?PxSQ?~|{3ZoW ztl<(kcAs7mQ$#dEfDj2X&@zBb92OGugNO*pG!B8* zWW%!6dR>?g@->f(*xYzCU+*OQu?W}TDw!7#sC_Y!YV|4*SJ3EGu^|vc zIanAbBN_~a#B6X}^K`c1bT&3mx_ImA({F!0*~!;U zz|O)gKg(t3Ft*G;y=>S9%D05SjTCF@MEYRa!|dqw=Bd};7OiW36M6IUcE-eKciz5A z%PosiuN9U(FWz;i_O+%=`)pU<`;t@aE;CxiC$?&xkeV15*Id{9VWQ*C!Du{DHYENs zyk3)M&j0?%tROP~PHL;TwjHk0O7Hjk{00L~g+Bgj_?b3?VQTeM(6)6ZYb$2Zni;8* z_%8ME-%0pS^R|kc&>=B(>g7#4c67TNUZyy6rEXOB)KbgM##jB~TITA4Z5_`()^2^H zyvxP9Kj=SvMKYiLdV6|F@jiFOcRfFB?A~QYsxB@eda%u>@vUZ-CL~r3qKl3XPnwY} zT8w@2MSNVh)YA6xFew4ZiB9?+e4@$1op*E4jO+_*&pp8cncn@g(pGVcyQY@1=B}MD zW3(E$F3E7YXF~ko8QGl^<~u(x?p^YX?6((|b<1Oc5#v`~F@P_=09yJqdlHy+QK#`e zbAD_EO@L2q8rda*-D==~E7LMEyHm`r_4-E1>15M_)UR6%_QlZb z%kZi@$L6Gdomsho-o7v1{L{7E^h#}kyWfgkm%jqOFc_>a`>=G>XK9Gt6X+Kcs~iu7 zKU+Iw{rxtf!Rq4HbQwkldOR>sHsJHPp7BS!5E7!_j;f76`aC0R>g}Dvro_7|z+R3K zaX;_P`h+-L!Z%q{yS7Vo4E{83@Rm)|gwIwn_p?WKqRq}enR`!|*r|k7da~l_-p+BE zs9Neb5k6bu{KPJE*V9US>ssw$LM`K1dhUohpB1eCJg!Bo_apvCtp8SRt=#)XL*?}5 zin*N!xD6+N-WR?TJ$XGX!{w+fKJ;b(aW}ppIo0tkPU^zPn*et!X{xOsYH?zcC#4z9|0{nRw2 zFzCDR{`p^>N#7rCrTX~3;bof@-ObE>_us#~P{0~6jd0eUG<+;-V*jP-*m~609eTUg zweOnp&AkOR$aMXI=h6@|_#YU!O`|cf(as@vv9-y_3G5{UyokI@=uBP`|8XQi1!; zCDwr78n5J8M9(UU3S1>0-n@CRB3-*(xzl;#r#@R7cenVf{6NB*nW+mwNFXL9wfXF? 
zx)yCpN=3h7j?C@UN8hp6sCP?;_ux#g*;iJ*Hlx-zsk-THUERm<%X8#geJrd0UVAC& za9T!Wsjj4;W6J)+56`c0-nSLuTRSv))wet)`>yYtR8+tBK}mMD>i3SzlP{(GJ^d2g`=%;=nrVFOm3KkA z@_y@a^GeI!Kf6+wFfbjT>`zS;0MTuGhEALLDLM>BQ^2oB+y`GUdz*4c;J|yClk8Zt7(?@3jSfnm+BdLNza`V*bN*$aRe{;&@iL@f8=?+rI9z=HDH8I`d?| zFlG0ygyJRfMH@fNKk@8r;l`>FtS1Lwp3mFNTcI3Tl_OZC{BzuBF?(<4e&1c~*Qa#N zt&+EQGP>3ux&2qoevo#vK-%4u1;iOLcT?X_DQ@OqG#}@!%xWC6V)DJDy5&og>O1yM z**mX$@^=F#TF!s#xV?Y(_!q~zU(ZdS_|`JSwOM6Mo34sFKDdi&9&UV{yf(7&OvW{Z z>gM5@ZJNFF)^2>TE&bNrMdRP7$JXw@KlzUCuek>rw=XEIj6Pb^_Ka+mqH;>O!n7{4 zaqF6l+5_&zAitg^FY7pP!0MjVf!V!4INz&YlKD^m>xL%lv)1os?_wWtuk_s9V>(VR zJe6jt8Zvd?kVEo;xvu*8drKM~uBz>HD61gr)zz)C=BtN)Uo@fIm2&~EYI;-iEdLm< zG&jH8Uc2Ma`OD9ohw{sxNhj0{9wtxAY`bmxh4F9qwjVI{Vy`i2e8z-dGeBzdgVc5z zg&RX1mwW${1&@Nr>y>R@BVyJi)u-QO^;g@HfLMTetuAZhmdu-YY`4qh#%s{3grV=!x;`w)B`= znYV9(pg)wJD8_@RbMB|o)}IqL8%=DsgVn-!E5@YPX1x-SE2=hLYyA2B-#-kfEgn?z zaHZqBj?2GUT$b0EHKiz2Q~kOuy+A;^x#`~Z*p#e-#;4nhJZBxekKxK`kMhfS-;YdR zMRHZI@)Y}^w7B#Y4_$>%wuCOy+TXmUO0VhFnpAH6eb$2~l_Khc9!(%rR5ZNXHKnM~ z^{&t>vb;c+epoVu2h|_8&Tk2nmu@W44(!qsh%B$#+WT{$EFQEjZotL1(QaG1)o=P9 zc+U~a;*N1$hVcOs3jZ4k&BVkDw1esw_t`pke%JcrlZJLGNlr>X*e?UVy`lcQwoq~8j_%!f@N4Yz`1mDY%5}zPmqMTSwL{iGYnS>C;rX2i6VbqG zIqZ9!6kiXv+t>bcMot>klDS!C;fSx;Et$K0mgqloq!#3{^l^uq_Q2Pfi?T2L()rRP zFw=iTqyIA#2srA4OFdk$E~)DN_lojXGwVSlT(N?@?=!N`W+ir_ z{dGMy`4dJ=s`nLqS2KG?I>5Ij&)ZbpVu$~geQlDIqSzt-nsU<31!Ib>f>orQ-qn?vP3N?Gt-5Xv-5xr2 z?)zm*X2S!L>2!}td##HXevLiaKlXTF@*9=bHYB5JF>42Hb~~k}bbIOp72)Hfb<^{| zL!RCMQdMW|{dn+ruSqxd`|j_M{LXxHf6>J94#}&(7ft-I*LCp4iCsHH6B>>IfI%ww zb$vx#U+14Q9fptVIxf$7Ur~tUEt=3+dUf5}@fq5>JBM6NH3bD@O5R`A?z5Zl=CkZ; zi-w@8%sCA|?i6naY(Z|lpt8d@^zm_Ip(SRbSHCYw1%M zAJ31i8>L=bzN4|W@L5XJt1F_K+kp$ng7}1n?~6A~PH4EhqqM`!y@15YIlko8KRi@6 z%9(u6ywWEKpGK}sJ@d=xk^L_V-h4M`%-`EbKiT|6)U{N#>BBj4Wzh0?$iX3-a)fId zbK0Sf;vPFKC8@H&lU^_+4ax*nRlrh%{E}vQw(nZnih5fzZ9{$iNbv`NWOK&9yE{8S z@V`r z=Ju+$9|uKB-2__BnfkrniwC2Da7yO0odx;5(;`PsEE?b`n-X3#C8K6v?YQEkk6;;r zjcNBLL7A&u`MVbnVwF#5s=nW=O<}msJz(ndnMLDr#nHkmsSRII?zSpGrsu2)`6o5K zUY%bt@#R^3Z3AI5=Kb28uWj{R8slcJ1ayVN%m9?iC+Y@D0#Jo>-xNMtGPS{3DWiDZx$~)%O=Wy${uqE2jF4XhI=)K zyLVyZ05V;&lXdH#MB5>`1+T3r8{Vb<_{*=*+)c?$pS}?#mhNx7z>MxX< z%Afk<9q4~01zW(x|I7ri(tnGdW_W0L@>A+xbHA*~yD;J9Wygn~ zyVR$Nr3vxf05QAnYuP37Kea<6%4`;&;SKP`dbE!#{?KFKfcD!jw@X+CNW|iU2e zpKC=FY%WSazUI^C{zt6;m)g2Kq6$z3;l2~5?qAq%?Dad*NBt&&$i8!0(;HIc`S*?+ zf9fRXujacZWqfRSw9{QtSbL!H^|M>MifZj;>C+DpZI?IwvFGUN_s?=m$~SSA?g%_O zuTyl@9B2tFAjV+hlRzh8QT*8D&_85+BCtc02c0mj@y4qsS6o;BkmR39DZ25)fJ|%e z^r^cI6wjS;lcEonw;8)UYie%Vp&h&xDYd^1$b5ICU8k?_o?bU`dgfjK(X)SqR!{%f z@aFOFi{@$sgH1o6B>R75UjS;M*dcFce);LNsV`s$$O3cjzkhSfbA2P+acSuaOgg}k z)%9n_6bzSbxv>#m;5_zY*EbjGovFJ^E^|lT9JFYh{>yI&f1ZZY+KwpM_+n!7`@Cyg zWheV*zSAXddi`j0{*9w27WTCOP`qcr^!Jy9l)vLM8lU9208R>-rq!QH4&8DU95^0d z)9VJ{BPaKL**y7=aS!UQ?_34aJ8R3wSJgMK#NO7r0NZzHTEl}C*S9G~uEH9a+cHPB zs8W#vSrWyAKw?<uQ9Iq4ryGP4NziT&8&_oe;x=tDzCnoR|8OR zXk%J)3Fzgyc_mp*AKq5hh2n2dds{(rTp!g0bhZ=l(!0jF8paH+y7m})z}d@tmma(X zzX91+v#9_0+Z$(w+W)~%swXx#z8VXamYtpVZ5MWTAo|K3oOC@abmQUL@Ii5qVJSthqjfzdVJ}3SAEqfQs5y!xoX|idQR_i*Vpyj zI{o!E)s!LggCkc?zpk-R|K7Rt+5F2Rr+s)`CQKMHD|c8^zwqt9j}<1p<94f_|7PSj zx%g-Emu&xb^UL#NFZ_}+MY}V59m%CVQolEK;H3${$KtRtG3;)>lTinkwhweJxE}}l za=id(>ITGF_Vp6?v86NWI@rf$aJDBtXT`l6-(S+Hb>+uoHU@{OQD4#@Y$Sy{df@1pJFM?P<){Zd z=;~{L%QOzm&8!6lOl|jI;V4nZ_TPQ8CGcXB=g_`G&yRK_WxkI9T|BEUcjO|EUi5sC zp6~pv`|EC%-Rf^S^A;}rmUsdG4dVH6MSoto2>U8Q$hDh9Q})unA`E=hQoDOqtfq5rJZ-#TD6 zjKV+9Qa#F8{c1|u>gOrx#kV^=S>-M~1#f@duK^U3x2Lf(&k@Ulg3-jJT+8bYfYL0# z34)?~m2(g0_|Dgb2_9g#;mTjjetBP4+;rZRyW+62nB*$IL9U#7KkHz8amu+{!7GBA 
z^|Qa)vpu)Cd~}?1?C4qUrXmx6e<1J`1pOA7@yrxOo73idLVpdRtbKytQsGu?TE{x$Mp6SGV6%o)jIF zHZI#|3DVZ?=anv<)%AHz&QC>0_Al-DubPinjq2d!uGzp-6xF;CUuUrx~OHYZ><9P2rM;N2F*VW*w4 zr)5oj@ylEEQ^#-302nq1^u1|P_M4L{XVmpZc8ulSPeeP{+{90>beM9lYr<7_3qR&y z2-_uW`WaMs$k5GnU(($sxL;23rCtd=k`8Bes~k3WMvGY;e>LY|mjo_&1y|c{h-gB> zrXRp7!HM9tt&H6@9(>8Tti}Z ze<0SY8%x$6%SOfORK#A|B9VEoFIq9dh4gHg+t?12MH@rJQbjy6b*Ud`(+!vqBj4MvRYI1(3pshE?@rUu4D^9ha zwrFYbnKPRtkAsinr#210?|wY=_qc>(Kvl&{IPT&GGfLQ0WYitnjqmm4wv~rL!Cv#n z??2BU(jVQa&=MwXeYe&_m+fa@TaG(Ry0qc(A$A{M8iS#x(x;1C5z4_(DW|`#fmQ`*$piiPTBcH2h)%D(Ya*L!#JT&NV z7LKrY>!=$oBFdp0+2ZakhQD~#vUaKKt5F%@6iMV*SC+c6c-k3vs}ZBtrh?vsv!EiB z2ORz2y}x@Z)ki>(W`PbR?M2(U@3Me}{3zILO^ATmFZVso?@`ra?n8e{O+F)RvBO^~ zvg8RfOFJ0TO5Z-cb|w?BZhgmwoK+{*w2XKwe~cBa+L3lo~oq7CBhYjGQP^ejF*zB;hF$3YOT3y(*;#orUA8g`zT zJYb%$tUY{sM(UmfqqcNWU-Eh8&)?brDii#24Ogzc=>WK9C~XxF^dffy4)6x(2=(QS zn2`L()3-b#PyYz01-7eyK|cqQ?J2y1FPjjiqHHN?2<6M#o&L?4LO~a zda#30Sz7Y?`Xm%Wt)d3Dpr>!{C^sTQGl-r$x6u|iVCxXZ;Ctk9>!4fyGouZ>(nkxv z88~1PK4*(Aq2cPUI7z=3RAEw8cT*#uOzKopcX7k)g?;cRo|ffaxJ|lp2Y!?XCFVhC zvnx`Hs`b4mdudANn#Y;;RYuUot_H|&*W;5b9lQE8U5Ia-Dj8JOz2@jV>o{maS2K6Y zplLrZ1>ZQpvX2FQszFtaf)uGo3aN?%shBY+FYG?;a;x}iHR9g0_TXe(+|jV z)5hNdFvn0CTQ|m@HMRQOns@DhaGs-#(BVgvdD)^(9gTY~wZO5ZyBIlmeqsR1Y^Z6;wrKyZ76kl-j#l1LwiBLe|aAF>8#Y9 z2}WUQXK+Md%EL{=#ti=$Z_EXG_xyRlwS3XFq`DzjpNU85exr2BvT88QFC^e%8AWfIhif+y3&;$KKT3T2VCbw{2s79FJ?8 zb$;lq)Gb->K?mB?*K=_9ho;Krd_W!!Ihv|*Z>rvWrutRfEdV9^&EGWT?!A}C!BMID z+zsQVu}W9`fZz9iamz;*g@?>Z?NT!F^;H>KFHC=@6-gUc>t6u)Zoi{q!%7N zPb3*wJEJs% z+_~*s#`^C{EWBy#i`$1^e@l^n>@PO->R@D+HosigOn@Vn3v1WFW0QAX*i#02&NYDV z47pTdyk^8VAP6u@bB+$dU781FUihWadbTd(+xc61UtLLz`>kueW5@2fhrr2^>wido z-*J<&_`@|)<1Kg5xivqe>*n-a{iEpUqlY8@deb86<>lS4S+k~YJiC15iAK~kxB3=b z4R0EOY9VNTJN_%Uth@bux0(Ee1~iDny{8y2jC;xd#kuN6@#(a}yKup^kvj{YZp(kR zt7zz+i4)R0B{VP!Q#XFIFk7_jfb@!b-TlGP1fqL! z?l5p7F#an!XThd8@De!WXEV^v>h2{yesp^6cUUy0UuZhH-<$1!&_|Ax;|=D+yx=lcdwypa}m zw@(7p@fVHz>k6kWSFOn|d$(Z_mlh(Os@v!N4jh$y`@t}w;mH~3$*7Z$D8r%RGYh)E z{tu^;h=(fCGfCm$)pw7~-U$v)fd3h9xc0B}NKXp38Jz#I_MiN8b@Glk??#a;MbkW6 z(?Kw;s9V3R;-`kXJ(H)Gyt~&C*jr6C=n@rxbbR3Y*Z!JS07ev*a6eAwZxb|sac${C z-|t`NR|$H}yxOth!I>3BE0!H|)S49A=DoW73~bL6=IcK$gJVcFps^qB*qtUT_LjA( z*rCBUy*N2wWdE|eJsG8sR&?K6yRBZ4eAoZxGBtXWM=tXe)#Ssf@G8ko(2Yt158CCRcdB06xQYRjIUw@|JP-AS5BV_ zSb&u0FF};-@xY>Zom@6yG~-D>S+(iJz9;Dei*5bMQ{VM49b20Ebk2KViAJ=lc~Y`Z zaIHghr+wt<_aGo?XSe%uGNWPhinKkTHE{AyUR(3PJxlvWNx~aD4 zLdwDC5L((#xvD-Y8JpWxu_!cY^Q=b(v;ILbmPct z-ci0}rha25YU|w09eXcxcXh&Ehu|Kk9xWL774#8!lec*AbhLU}Gr$SB{}hM-dSV5s zo7VK((+rOJJlNQ}e&-g!s>12bRZB%q5TQehwSso9RxPeXYb^JpW1uZnclIjiuPmip zm=aMhuY6kiQ?JeLk`JLrxxk860m9?l2luOLkdjO}>oN;RijK-Uj;(K7QQ|$?A^Aqz z-jlya8KIXMQ?j^ao((c(g;-Zbu71w?nQx%9yd(*4$?rvA6zrR0#Na{Q=eNYmvs_S#A zaGSdF!SUnj&xh-_9QQ8VdSFv!PH}b8;@t0NpXfR7FF;`Je_Q=y*Fn`Ae(3L?Okj?$ zFCFBr-ewp6{ytXs^zE+jdjMmrXSHw4tKEL7NRi%rNuBF^19%^}<0;$wZ9PDB4JNIhrRu7!Q<^GnzHPV@l>thOIG!koP}etud+%f84+8#DC7n*5=h zy`!cSUUj+3l4cys{+I?qZCt>w;~oIk2buk6QhjVI@6|T`q1*c^IAd<2xXQIq7B6VC zF|GK3oL%ZLrQh4;DBHE`+j)ZytslAK_Z2hav$GZrjq9fG_N*Jfn^nJl#fyq?WlgWI zuRa8eIyZXqyn2nD4CDGBKiu#g5pLRt`1 zKwwc4($Xp2U?PIj9b$kq$f8{2c?as=``)w9IrrDSKkmOT)>@1?=a^%>?^8n;Z%EQ? 
z`=PMKp7W^`phRLv!^13O*}@CP?sEmiKWTtMy#mU_ns=CTy)^CGGujP9D@8urtpeUT zhvpAVk2N+jIlQG_7?kzu9hYs$k({l}BNkGXJaVNoU6vZZRj?YbpJ&s%4BJ|5u?#ug zLFRkcUb#%G9KOOas78LjTK=70p^hKq>E# zM{I=Xss*LbGUa{oZITEv+erQ#>@GBW^b;+|>oTgG=Tgy=!ZmdIi>~$RP+`scQ8JRR z?v`8db!NJ~LQo|AkJY69g>xaRjCndqKe&O)9s(rw(=(1OXU|ChPI5_3Shz~twb3$2 z{IYtqrM*a5e|N=uJFoCNfTM$^qP?3&E?QrdY&#^3Dw^wCd$@?R z$Iqx=SdCgH#;4HVic)r4S9z?*IWA(~QK!Gi@<~kC=b5;3u5}jS!FYzC7*BOuPY`LU zh>L4HzH9jYaI&|&!4kVh0X9Y&0e|26XV{F&KonTZbJbRBV)<4zPPkd=pD8o0&cZXF zX^zrR&nD<|T-7u0D6E;`;}%_!jXJB_rJ_xksv*}8C0t~6wP76X~A)u zj<<{BsNco#&YOs)`x}-%=Oa2 z+s~?XJnMPr2%1;5`11^%3zVC!F-K}gj^^*;lXppPi3(UK%2}C7#0bYVOUY7Yr^K4m zh>Zzt-KfbwvW)oC3u#ITF1m_Dvu1)U&4c%V=eNW0cqliwhPo^9TOP}nTLC^@pLM*v ztEjnRAw{E1DcF9)Ykc8(c8h&ENeV+f7q4;<#yU&d?yG`#?%Ib+k>Xze7qG<&7P&mM zxWRs;GL3{MxC zw>O#NCCS~7bq7Ya9G#wf+Nl(`=)6^XP`$x>Dl_om+bFqcqPU$rm3vNg(nbqWbgWW8 zVs8@4kBfAYL~yilG&vg0-1SR(>`CSR3da*KdFP9C>6HfXcHk;}o-kr65Wra&o$K3&)$ ziqi51f1{3^GVgACJWq?gNb3AR@$=bWBCphj8L`YUMYCdK#pcpF>4# zUOrED`$%K;<2?fN70Itqf4rcOzR>G-&eXzFYT@fGO4M$jTvwR;er39r_>qw{SyXB) zIBEo*3V8sU4{A4g>mzQxYME6h(Di_1S} z^=s(Tkfxh7$IL#dds3ugUM6B<)K9n6?Qc98P%gkz84ODVmubWCY= zDRWla?&FY^H3_Dt@iCm&*7P@Rhi|t_u@~{kEC~9hNC^srTKHZxda-gEBtdP>2Q&1% zBnzTuCkhIj-3MDLIY#cvnuI4qftXxKMQWi+$C)XjWZ7SsIJ{Aqu6TdzYOMsW(FBtm zV{gfprC1ZHGp@tR>DZky?qygVj8if^0=g z>TIU6Cd*VIgNzF0{OaV-<5{MSuVmM4mFP6O_)9_^YbRzFU)lG_>ynAAv7f#S#ivIr z4LzI6vXWAk{Q)*b#z(~Y;yhxe7msALjg98W+m8}I4{#;L-x%Aj;m+lQb)#bIqJr9T zBt5GWhmcrU-aGa%ph*%06q(YJJw^&Eg57dJ~fo!$otF2z5)DOE-*z)oQ2|gtYN-CO>LuWysIb z)Wn|{7d#(@?I+TXlq;B;wFpd8zzuq`7TsjrAPS?u!?$^finZuN-cZ&UmMca4A?B4p zv8m!oYwFA9OFhh-mBT!B5QNHRv-W+ac z)1yBEM3Ihw@(D11jOpgG;fh<~tk@vaKfgoZz?sJuz<&sefy-C2ch25d9A1-ceov%1z3tTybv zit7qiV^d(TQ+lEjJ3A(}s{%rq7qR0a(qvAs0g*sO9v(Da% zucgxr9=xc?wd^$-PUfI;YU4S~j`ryz#?;_h6iO3$V!@a!ddrbD9r;)FzM1Z<$dr% z7LU$?1de7AhaV|ebr-FaU?oL^TV}!!?S}GvZ71?1w}%q*!)?R4zS!phGQi3;^!}5# zUea_XV~E5}pvmfBG~vM^)u>uC6GD~X_4r_mn8+q~VkY4RzGN46E9U3tp8OCQa?3au zC|J~c_ezN&QDf5S{np^8pR2V~MC=;qHPL;1P)~We>T59d#M>}wF8?~aw|(aXjlanTNf8|W zY?zPzMd5x)valY9zwvmZZ)4^k>+6>YO@c+DBe^JoK3kjBXQo?J(Px!u1!#K{>Ub$M zMCsnK_Tud+*?a;(1)Wz+jBzW`V)bW&Z-T7qWS1~w>gD(HN68wbE;8hEW=!H%7li7G zcam<6{Cpn~(V_r2mxo>y(- zf4H!YciW8qTKj}h`LL!oAR?>vCe|A&2N=e3za>dGGrSKTq57$cE}+Bdsg5~3<>jTr zQMfIt4ly0)hy1SJtCBTaO@rD=tze14f6%=74-h}sH8MWNlk9==BAq|RzRRZYHLB0h zZ&sZwP?hiuyI5{X-VQ9G??)0hiC#bB;fvV6qabUx`qSJ;@qep>{RdOlUquL)&DG zPxXKQV_fLR+`IsjkE#^uIpBMZ=rrf}DrrrK=uq?R_9WX+@gn4x9$yWoZXUD-4NxBt zS+edP%Q=p%v!PY1F?6|o;sJAOoH~f9spEf>N3&~2liK<)--otl=@5za7 zP`zEeYcy9>{eux~m|mhU<$78%Ymc)nwaI<`h)#2lgMrB|*3bdE>QKnNjh#uUKJ#NH6XC-2&zy^FyxqsP)&$FW| z@=WPkBzR0zv(_>LHI zrq{$9Z?<@iw?oR;;d|3ydB4ds9uGXC^|m+ZICntU%^ddFZ;xVP2_ViXrP~v)Hqfd3 zJVX?sg6Wnux!wJaUR)*p?f@(zQ-B+Lfv83F`fLARVrz{8Fs7&2#qKP1whdMGNR=;u ze5@~bKhA?Is$4cU=MfB&a2*iBA;1iyc^^87#u3f*vBg=28S4fv!UE}xQ{MXawhxvjSwh-MEVjl9H@Rt!QXKRxIyxZ83>zq9o+WXu##N~ z&obGQJ?!k-My<)k&@F%)x41F}q)p17lqxkr5}the!NU=z zF~rf_dT^zV0SrcammCMKw)Qi-(RwD9$ZZmL#(aoEVh(HhUQu?ETP18hIjF=)?5hkv@<)yEUw*{&!&6Y5m&Ta5^Io8DQ&dM}ywS}NksJL+ zW3)X3&e#JoDP%DGk>C0bf6r4ZACCRY?l+A{(brFr_fz0%Rfji#(<}Po6Bi7C5W9S% z0o)?RGdXZy^2HTyVl;plWbWUSmH>Zx$P0H#9>nuU+-3r#-9h+GKA^dO4!nhPF!$BN z%C$a6g3+-@6a?n_t6lJnbQ7PS+Dmd^{{4WMpU6_DLe#{g&{#4I)*bFl&$C#HOG@Z@ zwlrvn5&6+z*jS?I@N%Ft2?R5Lgf!eUBL2=xByXPqMfrIx)tQw0i-%wQo#Vn%=vE&+ zRhDJ9fgXO+p^K+dLC*UqDCRXQmK1*ba%`O1B~YvoiR5}~2oVLrg$q5TcLTKuWFzpA z!nc)P(AXj>A2a^lmc=mU4q*d#HtBnr1g>tz?XfG_c&o3 zC}G6nbupQA~qzr3^{p%I?Qk_+!i2-e;1^1M}9Pd{Rajv9kaa!|6a--diFI48*Dtj_#V7`C9`pR-ri1D-1FB@ zsd3ejb6~epy}-M*u-J1zm3g?*-X0n1BX5$rZf_$K3|Cb3 
z<2sk)hmM|p8=V@sg6v_SL-FNb2OqP`DoQ1&01H`_Bcq5Fohz-oB@4Jcn?BNg^GLXy zhedSL$N*pXC^*c>b~}E#fwFTjT!DGoA1`x|=@v4re@ePAJg+~~ofUQgX2Tjxg4*m& zAgf1Zt<6}A2+PTHfOrrjD_nwYA=z1W12Pke4~va-fdA;ck$Ix?NL6OBJ>-~FfFROv z`NyZxU%5-_Hy19fkL=k2fBV(AuSkHF8~LJDuiOi1oCzCBD>jfIF0B3$uf5Ghh08h3 z4Os^qupWTluPdMG=LMM%2{HNLoux3JvjllOVRA0sR(<(#v~Pp<)DKRPuYuOCA^ncb z_erxoIgt~SksbZx8nm4U4l@p1QDB-NS_eXD!GG)AWWsm38u~-RO1&KG?-)$W-~t8( zYo^%50!P4#Kf1O9OMms$HgJ?PZ-0RXd`fN?QOzKm3}o{D^;NLMr7L;w4M{weNPCx+G6lXhjRjn`7F4h#Qe%G69OH7>d`T6P~a>(DUO>;_D`u+vz zvl)6h08*JAb;VZR`6^pEfQKv4j}bH;VVee2X`y08rOFg}55cv86i}8QcVSwLoCN_n zIa2A8lev{wYq;?#yf(eLf}HI1q$}PXZ26(oRN+N#XRnPE#8IEaU`1i^EHjAOc^ib3 z#`)rUDW_S>E+Sm+blBhYQb<^1H za@ViQ_skwu@|yC4AaXwCS`_G_G12rMgu?@*zd-;0Q9w8Mcm{G4LaK!hkSIJt+MoSN zY582#EbH16hkbxjfy5vmyC=1Ku|fYKFaDL3(p2;)z$kq_DM;2jIO}bd|Gfrjr0=>a zY#bxN%eD(|OI7d^N5k^6rRGNR?pBuUPwU|c#QL_qoCNXhTI0^Dkk~67Ipatz+un9k zJmgN)WJgpyvU*m>wzg5km0faWf<$kkbpf|~6|AJTP@6~ZXT4q|#cwDAIQaVH)h-S_ zuThZpN8DlqAT?6rod+JaKa2fVsRHNqPY67y?hzS&H{|j8?TB)8}-b;!c|d z)e1=N9&nTYXzD)ADc0G_FctK2){#^7@)7EYFM8nP!1j;Lo*PZSdTzqMj_sQ`ZIMNT zonxOtYXVlUM^V-Tg1!X(($A#iDgbwCGAs}5PC98iS1!#c0Ei6y^i<#gVC|I-_O6vW4^3wiMyij;HK@Vg#+K^ z1WD2Z;j&fx(uIoI(MDNI@k2y0+)-7~xvAJ^cgKz`yEWSBdbnVS){26kQRJDxTZztN zumcp>k2R=G&Eh2FuW@WGFHb+o;))5nG_9o){}vv;plb`M+qT}MO!>U_@SyXOrdl&U zVd0ronxEnqiv-q#lr4=K+wOxnvXJW2nh=Db9!2QD0s11qAetn=D( zZ?m3C{pc6Hz1sPEN_;~xJScR9^fsyGs9}*sU6Y})awS+zqEC$LG&J2Dwe5m{n#vc0 z08KJ<_A{~7=*bPwJGXg*Bf{=-Uo02F7;MI3c6r7@nX_zKAsL2;I%MyM5?lCIYPg9H z7oDoq$c+D@>;nN}?BBx?_`sH0~EY#cMmA8@^)9L)71qs`vWCpi2)>GS7`!uIMBw^4z#QAxuuQB*wxX+;4RMxF}| z;hOBYk}073zJ{K)+}wh)rLFtY8sqx2fVI1<{XBr(Lw~Qil3oxAkGS&|bfHT;fWu z>EA#J12C(%r}a+X9VD^v%WcPoW3VEG8*@o^g@u1I_N+&<8s(v_VIT<3&w-{wJ%o3f zgy?U`oq6DW$=2uUxzmy693;2UTdC3~SWrZQ@2XnTE`e4f(pJil()TRPMCEMWZsz4vhG|!(My(Pd|rOiZt$Jf)@MVfm8JKrHAm+!jJ6?ndzeOy@_ldA8SGWbSH6|P=KI=zpG4MGdU zMu>#}IPuVBwH^)0?l6BqBpd-+jr9G{@SsX;aY?U7{fS{;HfTgrSGSDjNS)F z+KzpH?5Hm2K3Q*`SU=91Y&P5NO7aWl$NHOa&A-tHJic(bL>A{G?!g~>`wUAVjd6s3 zYF3rr|K}rv5J%EpIA1z)7d>J1G1f2O-}p?ap!_GvN;-uIzCe?*r1%Z~7_GgmIOO}m zKe3Au9R3pna0|?@lC>4>j_uSqg9=HnNB3D^c1s=SCH@}^+(7SGMR*j#$oQMk)6ZDY@>sseFHvo6qJ1~knQy@^_@mAkHENgrAuWVu*c%IL?+SthzYWN4nQko z1-C zEOXGxRzOhS0Cdhq8qIl~0WDhRXm(Hz3bL~a&{%Xz*Y08 z&Dy2!?`xImLi)#C0W>Fdvo%J#^^rzYNmoEaF@W04v^fZRi~SUP3ebaNx=V}ja9_~= zj4^hJ3r5b*|GBA1u#f^kzy{`H!P&}Pm=kb!s~@rc4iwe=SIFz$OqT9~rQ(6XWm)6N zFGr$Lq6oZ6>$WfN6F@D%4-_@kYQ2lGRNFrkcjgo@0KWDQS-J0zln*MB4X0f1fPj$y&>qx z75|TkbT+`>tic2ke|3Vj08|?x{qim~QFa;8x_rJ0Be`pcx=-2io;wREad@c!5$sctZVAxTH^KI0_4?DL89QeW=x z&qt*#TRrmy_kjQglc|j?+jVw@P$%$lIt&^kkMY0iwj+O+`&|?-==uP}<-_39nH4OK zr0Q`UA1=3^Y+xTtk=d`OkwfivVH3bXeF*azUBiPqq6nhsyyESmb#xuV9gVHB$QL*| zobidc=|v}{MBrxY`gOgxw*f*}5d6?BNSIjLK5A%MeO`d(5R2qU13(en17M`_k$^US zG{B=q5gsmlxZz6*4VIQQc^`YH9LZa>d;U0(=Jq>Q!B-fdVeYY-jgbTwVnyW1gi)j3 zMmpsr6nvlmm9BRSUY+pNb=^}CC>I6nXWSAI5ppM!xSc2bd&$2uj~5GWfZ>t{sDU%z zNnwxv&rn}lSVCj}@w*)r4O?=4Rn8&891dwY$>`e|T7SlK3u-j(H)>^Ff1@3IPr!{d z@0?jV`uopN{kPwM*F3{un(ohQeiX*~;RS8MiV7qg$8cxJV1nVl7?g>7{mHF;i=rU8 z9;O^MgNki=ing>RO8%REhz z9swi!fB$yew`Y~|+3`Gf!QJVyyTTcaM(j&4id&&QqWm#H0R+c{EwG=SiBU$e(oTU) zPBmVbcifPNnQ#Y{YsKjtf6KKXjz7g83do*?M5%BARtX{?_7&xJ-n00Wfwk*gF>s!n zI1#}IB6#LUrypYl6=0IE5IE)f|49}8;zRl?Nt%6pDW8Ligl!!3vD$gCrjOovb69Cg zGi`VZxNPA=jLC=scpDq^@K4S|mjdt(jI7C$EJ`=StY~mUnqv^%S=ph}=)En>_E;ww z2)Rwve}#=dLdJ)}eab|!z4!ZBRX zm8DSQPMtdODf_AP?+?2&|DQk1qtrZE(St&dbiwn-^E42Q!`YWVWyU{2LE|F?Q<5-u zeNWJK+`^+|1k|Sk%EvGa^~tF1B-nJcujt?Y7<(OX<1rSl&tUUzSOr&S8+ZWHTFB0n z4pskQ8jjx=o@V zZI-^htAH9~m^Ulo1`>pCPYC%GTXtv0Rg{}}t@ic-tMM|H1IszYAYiDy^bvMz8`wNk 
zkn>R_e+{CnyI7+A_3CIZoIMUTSlX|QcscwC$}fEA)WC}a9}U%2J_ z65yzL|0O`<@sAq&={I$B4c^gRNfxnwih`KoHtQI{tuBBPwE}SD;eq0t^T5VdCOUN~ ztpRYT$2*Qtw93&Pz^_VRPZ@3#pFLqz&*N;<0kd(|Q%$%ilmlzs<{)_k)2bDj#e0h;gPQboH(J;1h{|Nf=xUs?dA zMOZF)al>VnTARR9f$-pE%pF}#BGU0r!7YiB^IRaFMB;iq zkZbGf8!J)tG@qgTu?p}^#W^+kpkf$u8@hIO)*FY#XY;i>rTjg~j>ib5`@yJ%-ap*{ ztI5ddcB--LyG+Dcf$Cx0g`9U{QJ60?ABA_3{y00@8E!2l+whM{gqR@AMTcUd`MoyI zw}htSolDGB$3WUHyi#NzqHa{ra&}C9jQgH8r0lafJchzGTo6A1tC6zPS(>x55*FUcCkKzF_?8lmC3z5>nJkp$LY+V)AIQ8*fPf$!eg* zfk41Dd>4@nK;qtI!uAKo3sBSDkWLqt9#uSvPubnS{=~5ALeow^g`BTQ< z6F(9hkI@hyAa!HP>b&p|Y!4xj{wS$^124M+-@N1Mg)C%toj7ojzW_q0TY$?2d`vxH z=Gy$^zBV_#u+Wi~g|iY1rxyVII;5BS*9xy7n(1aA^k36W=SJ7B;1?*3*=nG_fg}>> z21iDjz&Y-q5Vm4J6q0UyafmnLG^_@edNK4mnM3y&g*0z8BXoNWlF}%TkX!YaS%Bf zQl-50DOqyn>Q>_A;i%iJMeU?BU8m*?JD3j7G|ENk=NC6I%Ew(wmhCAHP0;TiJq&eE z@tOAX-K4c=IcyjEV%XQa++~~{)=-yxectZI$6He@^IW6GN(u(d2ByRF%pymp~KR?I%h zsH&5i-Ah-CO;Reb9yeom7_a9{jnDP3qmRWc?sR0NY$VPc-v;yfsKGq}MjbW7{0+qC zHBg=QShK0zmaH70%cR7Nze!gaybfc0B>&X$4=TtBb+jI(n$b094%tX=McJz71I6V7 zj}SBH8DHaKEod%~wB-4*k&s$(of3$1Ec8r}bW=D=Mv~+$6?UIiP0<2D>}8Kq~8-}=5l zjMn?YqH;@Y#W55i?*awVZq7e}9>#2$Dy96X$ioEv2l((g$iDYCy{K1i*V9yfMUsJ>N-T`0W3L+0&Oo8AKgDa{l$pG+G0 zyNC#GHQY$p;u|H}i5FJ;en*ZjP! zWUAf4P3wHODs67VDYoI;Nt=NeK|cL@M>xJXM^^olcbwBpUZyfZk9(Z61$O4^uZg&a zIN6*@4ej{Ln?9ZZ(*L1_-ov zfMhFNI)`>DUw)jk$C<+93Lg1^5-%{Hymp@!2L!A&an0My)Xv)mN}YQ0q7u$CD_M{0 z1V3t(nY8my%L;qZoZ6+iV0%|@JE&!rux7;kl~d_2*t7Ni28d}tn&~dwZ`;`)N+>{c zOTIa{lK!UC`YWi1G<%E>KH>Hzi4v#4=Ok)kG#H`2w;Z`g22Jzd(~j;wB#Aoh+jrD0 zalTh~EI%PU=4EeIy;IUX1m^t2jvRIKYPn3ktQp|BFj5lAIkk7PKv{*fp4yOIfzbyr zA_>qdj`Oqqg>Z&5Aq#fwK!3=1KufETDLKthsMj9=CpPf0XW{M;XDUZhI*x|N>^Pr2 zIpjk603P?AAMHa?2$MgA&0z~CQjU{0U+r^GJk>XN`(NCoU`cm|lenQqE;eR)ihp0t z;YP4#3Z5=3?V*YOUqCtcywvi+<5vjuWHKb7qjskD-97=LDgFCRFJ&jDZ5jpTy3g4d zr}sZspsa;dFXP>^d2jGzpg`CWGgm{p51gY~DZ?X0D412kPg{~OaxCFbp3m-sxBow}5oZSh=t_ibXFX7vQW)Q!T0l~1la3mA6Jt}224nS{wS~@dG zj^^hDU$xoE$!|7Sz3kL#2|315Bdf-XIYbsiHd>Q^()Ycf8+lWmAE|giWZNKj=R8XA z{441bQ~BLw8{m@0*UGtAi;{}pHF&PAUZ99Bn&^o#gDX2>l-vL?ycy(;2_3WIQ~rlw zj23c4ULVIi|8$sb{6ECimz1KjhM^(DmBkyDsKI49Q>N^~b2LED(JxM!2I8|2Xdunq zd@b?;iQDx~`b`95*10X8R?Mk0UV!#xvxSh9PzkY)K^Yd%&0#HI2y)NDnjQhg#=>?r zC{(~VMOM<{;EKcysKqu{lG}PJA)09F`8XV?M1zP=jW?Y(uu>Sn>h__8p+JBIUQfKf zmX;?B|Ab)PR0Y{cBn`oiV;@Kv*S zs%C$Ys{=$d=v0Vn-#eXPXy?$-ijN2g0PyDN=B8zI9u4jKK|=yx4ula7x|hLWXo_%1 zmgkMj@)uj-tERWVXfu$*(DY+{FG*uM(a=O$LeS8Vn3GPWv&|8{8uRSz>N|MfB`I`M z4pS+L!Y`D}E6l>s{1MpqRztWHgEelv-L~4rp^oc%pd!D1_{)4W-#FkE3f52RCNjq_}ctg{FHKXX`zf>5m3pD_PH@M+(K zgwGQBVEQATx8Y{@L9U|J@~_#WoV|Z2b|%7U_LqGdD}zzYGMJMDY&hR5z2fY$4Sgi_ zM}TH~*rg(aB;dxZpzG>Kr$YJc025>b%!jDgh7(eLlpmQM13Jso13&`RG;cUj1~qX0 zbyCazhBUI0+I4gkzWV5?cAXF=BbewW3= zoX~!vXJ>IZ_E0$XV;YY#U66mjMw&LqO&+b&W|vv7uw6;>Ro1?h{p4tbTpaHTJ1M6Y z-^IJBTkYjr$u}#9J(BC&ncL2I_HVjL^`2`leYjY^=1}NNY5j>2ZB$G@vvAFzcz?sh za{}b!;PO5rW&%!#5!q=Ez`!crP)H0Qe3$XZct=MS%!MOe6m3I{X`1bkofxOyiArWV2lv!6?%oK|2U&M7bq< z++nV6ffWkMeX#lJ!NFnZ_iP|dtBsxIn4!<0S9)XKp&XvQQw7BOu0LYBjUc-H9zUA zHQa7;b5CE-uVsRk9KN1PWJlIE3GpM-OpD=CSI<}t>KPRG1sfn?G5j?nk>XRPXxCVi*C&jxHsr>(CIFvXb)*W@GppvpRva8Nm zei<357JI>M2`R%sbIHL8>5!q`p^_UGD0<1R%)d;}fe z+&VF?x?AE(aV@0u)bdZcuS{L1Jpc3la#JJB^0s4RHO|&XkgN*WrUyD3pjy`H-iHr4 z>xIee?I>!Z9K{Fb>g3HqVyPqXa<)}6$@ngQk|lG6*!O%WXhGCx#aidbu%{H)X4;O#Lw zgrh4*YVmCVYuny zm|{}9k6*$hXKJvzJvr|sG@f*F6R3gD$V`sSFPwYIiWQQW7b&6R;fLp8mQmLfO(>}$ zjWIpT4s$lkS&5w#-nZ-*V+_ppLuMz@ksr@7%T7RPjH!Not9uzj!U9%kU#mA< zaP88|)y~P!I`V6Y80RaNN{=BvtqVU=e9K*i%wGZhh|Pcb5fQ_)N2r<~br%{&($O1uB+Kg~R!W-EWN-N}VRdT{=pMfygxPx(uIX%n3ZoLn zG(y&F*{e|C86=ywChA5KYNo8fBT*?z{2WCn12=|+!`iY?6Z0NDrxO-I3q-hp?wa&) 
zA{-~LwkX`=n2-fV5l{HPZ_Kp{6v;T?D@9(c#BBNZVUSJm4Af>LF3~i{q-zX%vH+a$ zJgnvRoe(}^+4TF;l5$Nwn(5MHXwhN*Z;Q_KCJY^}8$+Cae++Kj=BR9ZGF|g?xQ6pV z$NJ0v<36l-?R|<4OQ`vB5r(sE$NEW~Z2St`Fy?nhU>GBZ6lj{wd19Gk2><@5XV!h` z&#qjZ{^`=A#Z{WXvP2(XM z9o@LlKYh(>`4yH&1@$hP=e}%-_8IJz@6!t=~!I^=(r@js7|uDKF^-;@9N&mCeM zf!H)Zo+`K{y9z6PCc>OiWSJ zr_=|jnhhwu@0v=0(~Btz1<^M#{n#h^hy7|lL6a{7? zil<9Kt}{I!{v6DDC-v8T@q>g1kKegZ&<-a~W<2NxNtF>gClXnzPTg#(y11{M-3M~c z&V4y}i(nq<#1hWkokN%+MhdJ)?CH#p$3T@9Jh@j%2;WC_6Vtu~_n92TFV;4|rgQ`9 zp%SQ2YcG=Q5Y;Try!RN7YloI^t|?!?IF0jJ`-|&uM#QIFp~+a-ki3>uo=%9rwVk>S z_3l&}k)ovq2fa@vm7*Rw`>y1E25Y}2PRo8~raGC*r?onieXNA~M7K>`0=Sn%=XJbp?`5~jug3eRR8^V)jY%U+y_Q=vt~Z5ycUw!M*86>rNz&PJ0Bs~r|}u&{GgXhrY;QCW3d3Ad2R5;Wn?`^ z%)SaIrU29H3FfNtz7JoC4&oqgb-I7tL8CJ(67izi} zo`B^(CSzmg=rgZ7AOR%N-nvQi>8%3U)bdyMn-+YUDbn!nl5;p|JmMuC2Vl#66O?pF z$i*wu0Hk-DhuZ$3N=Og;_RRrG^9opR&)(zG_JZb*qM7##D(wz9qT z2d?h_w4wqBhi&96<0y}x?SQgpzZh4DhL7wpSRM0{T>i*@zgiC6GE^)BvOWJEeJ-!%mEQru#QaQDg}HQOeJJxpxa z8pMoFFO>AfnH^s@lR9S+97T9ne)%4PuZ~n0Mv*!|k7MD3P!*{~*pY@n z|2Hoj`IgtWADR8aP)Ngq!H<`*SZL#M^@>lG$!j$TwHQ}n&V5360J zSE-4y5=Q;V#_sl_kKh(EF1qg9flj72Xmz}1f94WKJ!w7Y2>-f6Nc>+dK*B*LdkGV> z!szc(=kN1g`>12Do%4S4NoC@e`|I@&wpofy)hm!?Sn%Ll;nSNZ>c;L3-z-W-`Ua1u zo-P>1sd`x5uJUX{O2Ncu)~K=N*zdLnH=w$%`o_1qgbB;`A8Q0*s-RQyQya zoF9F)fA_jsh_u+d4MhSt-Q!WMU>GC8dG)2*TievVcNVHAYaUROO^2(f7Z6a$+*noW z|4E2(%vG6P;?W}nZm&ntF}!tSb;~FZif>tZ3s$F@+|n>O^=2Y|*;P6@ zci|--1bd6Px95)U`$9X)u1@S#HzHK`(~rq*JlqA&J+2yYeZ3BEgB|qKFXqvCxP{~I z_P;-VeBW}J9Hw1{$iyvoqO{@Gvdpp9tLN^gOfj3d_E9#&F*TD57{8M~j^92^PYNeq z47s)Krn8NXUaVX|;cu35ryBIVx<+08A6+g>)*ZYm-#|`z&45g8!Zjm1!%lj^WOIAZ z?Z@?^n`07h4_0io)HFDz+ZuZxoYmNssr{PK^+-@;HD6#`+v=u zy`CnG$-2)(yzV%fz5k_;O})hQ-BF*+?PwyLoX{O&)3i$)L|2IOxm!XWFjG9Ld;K7Z z{x~C2ZW4z-++U1J4JN{XVyJH{dO!AaVB4>rsi#J-W@mA^E+6rE_@`4I*WVl#EjhNk zo=~vkQGH?QLqWy0!Q;bshsFCAzIcl|x6!V>{^2oqtR*iWL3fAVfZk3^nkSZTyM0P%fa(}ZH zHvv`F9oEi&(ycXOf1<=vy{iPxbewQjPuAux)E34e4^d@t;i6Z7Hm3bZAf2EmgwJ0c z-Tyxbd+&Iv|Nnjb47-d%$~X?nNJazU;3P$4rbt$)5K8v3a*RmHkxC__A|oS}JxdwM z2pQQq+4I=@dw+Vp-tW)nH*UY*Kd+lt4xaPzxZkhqx?flG8FA+t8S~Z{L?rlbJfN&K z8m|(3`Zs;ZzB2*?MW>BU9Jjt=1+wvWWLkQOf9tKxm@(zW32*w*14-98q&}C4x(9J_ zWXbKk!CAxj%cU?bpQxJeou2QEKnpP{xx5mThfsj+ebSKHQd(127QwpK|2^sV0hUb0 z8s_Th+0`=*v3;}eUoHL(%(pncxgmA^H}%%(j*fk$8CO;gwyk|-&lo)V%c&)z?o!|N zgpLp!51negYl~-%iDf_NqzB15ESV|1s`RT&J#U>r==_bS-$PCeZR2!85nYQhCM-sM z1K^we9x-h7W36FGTJeP1*gO+k@vMl6ONyHKW`87sL%)*GXI zAq64lu&+<J70*OPuYny+ma7BW9jD|r($8j*`mQ23 z#Qa^&=aU?e1ct3y%m4GZ$q`xZ<@U4W5$|8b?5i;7hgU&##_y&9 zids6(E%Cj;d*Q}XsT(nVm5ob(KW{}{SIQK2?!YazFkJ-n`0L*QIGRH`uJR3(ON6{u z?IdCm^6ETRnhdz)7+Zg^>*TcO*>o%6$o2D@4?dJ0{N>*3#ap;YZSuS`>BUIP*LNJF zP1^UCo2=&^m&KP`_lmY<{^Tr*Io6|pJ|MoXRRoYYOOpCT<)5f+qh`jtE${OlfyoSm4yrek*3gpL_9#f-+R}j&>-#gOMn+xhX!0)FL2c;?hsujfoS9n6PqJ>oF3bL{!8TXO zt&AU|)gXGb7#a40NRT@E#re9s^dRbqg&)SdYod4PmwTyJ8+$>ESER5W?|!5`t$n*?vNjX=NDeSR`ZnE%erG|l-!`p#S|<|ufJ zurm202ur_O4hZZ7OxJi^-UPHbA9AyPgK_pa+r9>sC3D0IeiHnFJqI@UcT-qCf2Nz> zHiU+;{TS(rm!)BWFCF7C9>Nrv#12)QHh$V>(C)@HKu5}c=q562kprQSii|Glu8O_% zZbP~2bFsW+k*5EpwQHUEi{CR&EG~AZbVt`tMX9A#J@Ht^r4*hFuzKomQ>i*}^-jji z1THFbZuw%3nA1{tZsznQ+u!-+x-tc`E&B}c2@kbD`@P*C>Dw_4?=F(@z|t^)o(9b; z;-b#rTOCN)GnD0(ULF0B%|LB^rZTTZPg#+o?-ofg+{w>INno?%9NPqQ4O76wk>Th( zm`x~F!Ww^Iq>|SWh>~T|_=gw^YMNj}IzOJ^Jtkq|b_+?XK`hU|uW)@F^d`7WGOtvN z=>PIQICE|Rm#rGgV0mgCKXrlZ%aQqXO>qa+H9O6FXCy4nccJ%Ly6y;xucW1rW&D;wRVJkc{Kvwntn~m-lc}hKGN0>JtW7!bKf4 zW;sevde=kT-iHPojX{d`$$5iyC*I_oGg-Z)(c~EO@Mgff$8Kaf-)CrdFp^0+CS^!9 zwqIqgQ^u^kH#HikU`F9i>RY|jQk8&1@1&<;R`3(faGn{DV-;PFGDrv`LP;Hwd-UGq z^*g*+TGH?YWEq`y;|h_n_bojg!JFm5U}CU`g(@*NYT^0ZPRu!O7~q!C=FozD!K6pL 
zUxmJchlM@2ug6)a)d39!3EZ%N@=^xgK@ z7lPekIX8YCWTXvNN9H5;#yxI^xi`)uis#}LOlF~f^gEfK9-yVnMdm<}nC-X-#7hg{ zZhqVQz$<)<2X&?bgS(n|(ZyHl^hx-Z98qG)asuJDzyY z=|{*a-h8;W#0TV|!py1IBQ8`eg%l`Yt0VNu`l~5kVjHsHg}ltG;5kt+_;3*#whmaD zr-t;eRzpHVoE{({p827{g-{YYT4aK!In?LbaZv~cQJ@fpur;i8V|2930633#Ah{5r zvU6v5RSssH_i~ZIoH<-0o8Qso{RwZ3tN2#O7KW`E3FN_52tq!k3g|-hc3bnt=_&r@ zV*Gl4kCY3}`({JGnYIy6Kp*t1iX3kZhJhIy5EfkQuwnOnR|Oc--La$U>MR|6K*K9l_n zV->!Pwi>ogTKhU+@9SP)QEd-uo+)}$53K0aP=l*wftn#5pe?saE0>)xD|e*oRIl~1 zf$(r6Rz>ftHVl+a0v_XLg;3+l0e_rp9W*>RHsI{;6!_I|ivA9MXXW~{8BH?MKkn5( z_hd)Y{Ye=Q){G}zH@|=QHeL+k=M2oyyDmusT_P#?uPP-|4G+bg+j*j ze(4KX(qHJ}W5*R$WRCaBv+aGvohRa(KyuL{ii`!!tb%#TZxHH@l*IHd_bUClvuknF z4`wI|gF;Ljdx7=pc{w7Mp^M+jzSenC!-f*BE^;I2({5p9t-{3HP7~lO|0fx zOn43Zq7tPLfaXHKnqOHHl~*MW&aO#Z4A+8YdjP}8#&7na{hq;@r%GR5nE-;?)5s7% zjFR3A?_AylmAQ{6)A!%YXTc+}(n5C)p%CZyscvcP;LxocE(Y75sgLz?c@Q=mvqnO% zyW*?JOw*JGb(Wom$JWh>`o{)sITJM5k~ovAI3_u>vxlRGhfjYnj&J@U*`}Ii`El2z^=$B8$8vFrNFwz&$!wJCA zJ+lXUhIuZP^v%wRBypke`{V5bkn!fyAWeQRRGSSSmv!boTLEh2Q5h6V z>gSf}h(FjDLX%*w>pcE=OK3Zxf|qwM5RwVr7JNXDIQYkC*<=OjLzJ zz#@}=@jMdY(h~;oTR~$Gk6Gg|!~1U{jh`=k;Cj-b^7P=Tr2UkX1jIf^fJlOCk|v^& zLXoe0g|-2?N26cWy_ff#ayEKkWJB64q;Y3|y>0aEW&XL3Pb-Q3iy!>iN=6dw@C2&H zSJ^wlLFs^OK&>2iKYrk-ws8|;$)vhur|r$YOAdM@PkF7Uyp_WpEisBdc>VOV|J;fT zk){;=r+Hr+pLSF=id68!X3l5!82Xx_)QjZ$P^(Ho~`F2W`dA&mf5 zl^>MIzNTQVrevJk+bu)uU2q)@AjBWW2U%6s9Djc(vK1d`!Q=v@$l{l9h&wmxa3Hhb&@y7j_ zNL=~x!3~U(_=nIXAX@?$?a;kw3V=0B0W1XDiY^Gf6R@7SfBZd1nzFm7pAqLw`{O6t#F&{XgOYXa7)bv32{bD&08GU3|hlvt7Oy@Paqr?D$_C)6){R9+3panl}f}ch_bv9IW!e3tq~_HSV)K zMsT$}oasiO<(wa1`CD%I=5*E{71G!}B5qntmN;_c{AB4}PXQT$SEc++$LyQGl6p~C zYd(XD;+hi-iNuF0yBNH7-9z=G0%yrC6$dWbso?dQo{325 zD7JjmsLJU0Ac}>`{NxSBp5X$Wj&5qzg?s=Jl2|mPrv&Z%QoQ^`o#l9 zFvJ8`bABU^DE{QQXJpgV+7c5698zR&tr6)+9FI+PUUMKJbe9INVF@Q|&6AW0>mwUi z++}oca40T6AS;w7iAI99=hQlAdz^2fU|pHyd-@BC&MUPOZ9j1j-5jYBZ{sXc ze#^}FaKGAr#jbm?95oXAQfFg(2lJ}+GV0km61Dc7n9Pf!ov3t^TvgRm2RvGhC4`M5 zWJYnMFTZL_MeFIb&h9bO$EjLcja@*|PzMvw(64noO;-ze{>6=&drmCC;C2A_aWQ>m zLpzl;X$=K~3RYc3@qrg=Sw8z$$?rA`R4R2&5tVOT+OHjPuU*0L*Xpuy{8fCy4a?3z z!Y%=R4UR%0T9>8Uuv_MdKfjBY$NmH2Ev? 
z`{SihL?AQoQ2@9TFwtffZd`aL@;H5(nt>4~B3$@dE<<5oV%YxfzdZGin*tzIns4bE zfWlnx=GqPSpN7^zLY#hK>f5@%=sd+N5`zJzKrR1tP*5m!0^n)8LpMmZM-W=Z2UeKfhmBZx~2+H1>VP>Sf2 z1IZ;~QX%Z(gZjO}X77RaW~{Ru{UOgs(NDRfB0|5O3NnSr3lbzfkS57BbN)OJ2WQdc z@b}*@Y)f3ozEzJ_HtESF)4EB_Lnol0z-dSkV=yb2unZy0JAkC#zis)NOibez=iF9n zXscWQBRzg~aC*z54gt z&69|&a6vWw7&TlsFe}!-Es&nozAeNMsKmW-a^YMv(a_+kT|DU;g`tP5Xl4Cb?j=+q zHEUmX?$cM_7Sjl+!ygj4>L_)dW&IAkX}?+iQHh8+t~1FXzi<%L1@;Q{MY8a@9dACK zj>7nZ0A*Ut!A{yJu>$$WLj`x|Lf{WsJrf!{!$dv$^sJ1A7MynQgUn=YG>Mwa2&6Y6 zuhBrHBC?sVufFX$HvR?pZgeb2oq7$F27P=5oO z9y}Sr2X;tsT!ss?OWr>?1`a(B^hlrc60Rp8S8Ju+R0QxUIFqFy?qiG+2B+bI)M> z-a$bW2KR(=cS;!W!oemR>Q2*H!1ZmCN?qBO(`aWy4@IyBbB}l!Yravcm)Gs0vqr0v zae~-l53pAI4Nb4bD!JJU@k=|L)%L@q1gY79^mQnn_;7y#vIH|PgQ(N~M}=VICm4Pl zg%Jwd6qQ#XN9c<8rJX^1ZXcBeTi9}Vcwr2BW;zM|PhfNLKd6?kn z$gDeRc^goTwUw;d6XX-T-ax%KMnr#~@T>4P8^jE!J}CQWBoaM{IF%N|knT&>oJr{1b75AhT~BFre=N8uZh)Vv2mFOgFeut% zb8koy86qZ-TyB3o3AcUQq~zkK{=}=Wx?V^#O8)Z-MG|H~2FsC2V}U#Cvj~@)a(H8Q z3MQkkAu)p=u|?3*`Y(c+bQfaTqp|;e705b&f4EkMq#b+gVt&v9v6o;|U&_iS|3koe z#{JeNedl8mzXlF;W$&_*L2OpTT>~@2D~Qly#jo&%l@^y5#0_SEHBqu<1hK5OdGyq_ zfG3;V4lIxbK&H&rkEC0!0r5SF470y^sPz5weghUdY#EV20VSvHZm&oD1+zR{lLgqXBCcNcr!V*J7{&Y`NuM3~d2qt#Xw zL&)VMOya(u>M6d2SiB@4@>kp_K(>A8gSZvId=gjw+?VliK#*L4b!<7(iO4y2G#%gD z;nC$}aR(&UW-?C#n&y?<$2x%a8BuRd@cP5dNaGLQA1l@r$qE<*-=zZQSITtOJtOK# zSow-rTo-BuPP`Bs5TVf9!3ItDu#?(AFlRTKe(vg2Mo8sjkWec-T|7Fz1?q=xf*NQR z24zDx+0@pZ6+A%i)BOtN^9OE)OQ_%vwVl9&Fi75DHA;NULfuVWVdvbseJ&cX(EydI zQD`s|4!*mETw0rEh0GNtITs5Xv!1GAil;NxmpB`&}DOd%<^g(|>V-00LdM84}z<>v)*!rFWot2V%xB9Hj<$s=3wdoRuvGy#Xak;@y@{$wX;b zd56_cEf{LSsuW^;$>t2AJPb@m_1-F}Bg*5hR%F8~*an5ha1`?13h-C$DZanqDNLz; zS2yPydAh?Fiq1*Uo0w_71i1L8;xW`}$LYO@&#>jXd%#mgqX4O0HDY|X!1gxl4qmA& z8ABSZ1V|N7Y6=s@m&6;ano@>ILkvyTdB$_>Z4G*aOJv`@8L4twWFy!u zWs;q}Gk?G-#yp?rHAQwC4kr45Ho(b_+f8@nXaCB$*zz}>b4(ZK^%k$MrJE%)$1U;cy9-nF<#DJ+!py^41p{{%VOdk!E0#|< zM`3(Gc9gj#*d%&Gh!f0qb`W*vv9GosoZ3p_+X~nXA#W4UC3m=#O*`&#ryBKe`JJLgH?U*%{%XU z5L&FP+rNw7-Wx9)1W<0_cY6(&@1s_2!F~qsWqzT$`}078&~nNBm;{bnko)-MZo6V0 z`WrtU73Vd{JjqC?t(}H2h8;)y96#1)KqNfd^5{|IMn!hJu+cI7w4DI0Q{!H)i|wEE{q#YB+PT$!?Kyo~MF(^9o(<0mHS`(mkuhfy;r zh8z?nhNM%@93X#_IeyC*RCN}ImD@D-s?e{&-P4T!LO3zd+dX~T7ol$bQB+(N>B#OITh)U-uwu@dNv#XC{^nR^@ zh^OOkceVV%4}Lp{#|!)myCDyCXT0J#)B^pNZm4OjWXH)R3W^!*yx1=)#HGI6Ko|Pg7zwm0ZAz zxAFAV6c5W{>{5=Xjxr_fMos%p!#3eck5SA(ttw``S{CFUS3D-PN_IofS4Sg#^B$8- zP%tmfrTm1gUmU8s6%2FcWtX>5>ew2ki)?QINQF}&hjZb*yqH91uwWr|oJIVT>hZkh zv&qB;q=qmza(`wEZ!2jJ;5gelVd4mPzgYpyV$~rACOnTGhQ@wE3)8m-B9!p|_fPKU z!nY?FxU09YD~;e0{^ESPvBYi%_Bvs>9XUyDLiF0NfQ{pNJ^JJbRR30QX$~MWKq?nR z7%hg>8&{ZgPqI9Qnt0`dfrZU=-0k+%_4uM!AQ9kX;v?lVzs`kmH4TgWBj$bqA8X;#jyyw_Mathd z=POG^k-jqahCAlT$ZtzYnTu#YRqZw{eJ%2PL;w+cDi=>faB3FzTPx zh)lEs{b7U5RImytxs?jRFvUQ;4O{JRNCbD%S6xYp1Bsly?kqfT9|3-DABio#pwn_? z_>|m$@^ncH6hldWvW6Ps9H8LKKb(KoEUUd3LKTqdB^7YAuPTCP{=X9C=Vu+_Z1cNr zD#M4Dx>sE~x93t!{g|mi*JSGRkT!kCyZqD97=NTsVU)CEGfI5GrNu*i2l`OZ4(EcO z_-_a$EyXqQV5lY;rUL=_>LD!2n3d6%lrGrg?U3rw?1tJ*1$MiN0wVDW;-KOL8eaQS zo;8DufxboKAk^HJDmT1EUP$aT$hn$;fV1rYK}goV>{uSC>K|n;C$q|TR3N1Fgc5US z7iNXB&$zBO4es~L?`aK5L_FLX#xG}IByrV5VnzekAuSy=aW`FM*{%CCBj{J%i2g9N z#+HOIPJh1fL)tq+)O8}#P!Ke`<2Vm|(Y;{bsc;xkcNAQScyRx=4eG7?2 zp6y1;9VCdhdmY?)wk{&B9f;w*YW`hdn>rQxdJ z1gHA@p$^N3*{uWD%83e#)=njrXbq@{nk}ahNj753gH+IYK>YAzC&)F#Dm0$rSOFC4ONTi)Sa{470@{2y5~65!qP zn@X!mIdD!~5J0Tv3^U`0NZFvJxLFO)qRn5l9#4K)jB=&iNpu*+77PjaLc@s7x znZutpfyVlTl-z#w4nRtT! 
zkMV{JWfh8scdo5SCPI`KdY!B6jqPN&{vz7KMz0D)&}7c78}kjIt2tq+<+J z=y%+T!H}xw)fr$U%mv{Zyuhw*%c&Es`=B~fnw0e?`)1u?p-WLC`#L)&Ki^{s%fonHUq#}jI5pfAcV$8uO$W*xIV=y8~{H)x6k7tiHL)Pq# z!4n|NFS#^|NQ)|Vv)>k`Febu!aZMRsvYt6s-eb;}w@`D<$vRH&GV@%l7#&A#zXUUn z2S~*a0oeT;j8wb3?fDrfp?9DU`HfUNY?uJIFR>t`oJuT%rIC$F{)uR~N<+m2^_AjB zjOVl&seLdW@W&rIBlnbOPiu-}DX3oELfQG8K1I#>a_*~mb>>h_ikZyZP(Vm!_*JRr ze#eiYsc1J|d2^_Mv2H|MkD}}=C+Mp5Qh>qj>0E-dbG8}|pzFElqqJ?*3U@SV&Mq-w z;A2SqjV)Nw+iy}ZwXpFycxsVHBq+P7Z^S|`9HM`nOte>_AwCCKYniXA0+EF8nDPJ_ z%5QMiDTc>?5{Piu0M9z9{0?9fEXsr3vE=w_`LYgy{_q%2M8w3P4B}slRF#1RuR1Ge z0GHee^}+3&+JZasfQ*eQwuyP~o-GCG4v4^;P*)P9z<*0M!g&eQw3W%6C>qyoKDir@ z*DNQ>qwDWu@4`~09Hs)j`~B*dDn5C0#w}_%k`Z%FZ3@W{1Ksu;?1IL4velO@iPKE; z=)49$i(X2Id(z%CPlZYH#BQTp@)dFKkhn`zf zFk~2z67=G{fkr0Hl6KH46gU#@Lv`0MOXO_WfO4&<7V8(FxYAppFz9@q|JB$%_3>I; zM)TTj!`L{_gmv$Vm;M2lOJ>0n*CXDYjV+7;C0tuy)zV-ZaAZzscYv%%*7HGGG*L7| zH4j|_i&;r2XnoG8&}xV1#UX5`h0PL?_wZCT%2;S{P~Nb2V`Dyb3obgjt$qLCWLG zi02RXQl#ayHybBwQiV7vkFMd2bM`{s9B70nHh`{Q@WzO?r;y-hD{Inw-h0G5Ns zOU|=^*q1g@`HHF6i%r0kGneklyEZWlNth|@MZaP*+G`jF+C03ITDB(-8f;zAv}AOW zb9&nO6+^P)lGnQnndW5U=J@4XZ(cyr`d@$@5k!p!`kp!Rme0(}NX>9l70+HVr}NCr z1+LrA9)V7EHXMoU>$arNaUp&Z$5XTAaE%{5dYZ)TZPou3zp>L68#>*6B&bE$Dq{8M zafwv9$2Ub(B2#NVHYnAdVwVS+PLW9a`HUx@2k;W-($;V04?>TI_VI;1IGevqPCH2~ zEl@*zcTa6!>7UBc#dBdmafKLMw9!H*K()9Oc^O686p3owN?fzeO-{H+ESvvLzftcf z1k-Et0vwwww#C)-w4s|XgkG~9f8Ycal2lpp+e4na>2IA061f5|7B^B2QaAV;;h16(ruX;@!9_LMt<=;{q>bxFY7oX)cVYK+~F! zZ(r?Dj2-TI)`6a3`t6yeBq9^6!)w*4n3Z`)A&S0!imd7rDJsCc1(IKK-#RW85PQ4p zkG6gt2)}qEZvtvlHf^7!smYS>0s9oD3^c3XojjTO9QMlAGH8p)*G+@RGrsC22hHZ|&`(8;WurO=#0 zVBK5|HZrN$@M9=x0c?zuS)N|E%t;70jU>1`iYMN$e$EB3|1pBmN=X%WncZF0q~Y0O z<~jUil$l0JCGXFWDuU)i{!t^kGcnW~m&ba)3chwex+^!(u9aCh`BCA(P5S zx02vM!PAG9ee|{fk#LXP4BNipJ*AwFpZs&f$4WTz*aCin%9KiEr!2Ab7}apgBAAcl zksUu?snA~q)$u7FZM_IyBJO?C1$KioQu~MvL1y87f6j=mzbM<5xGW<%Xh@LSLj7lq zjsA;cwWI>H9&vAE36Gin27g(@lalq9e>hOb>GIH3XNOOf3T1x_B;an1jd%(D36Nby z4dzHKuA0TuJCgg5vV=SJuK~v5c*O{`7m~sgJx9L2s^K;<&(XVMcaMReVqhI3>*?qQ zWwxTzN--!#60u<^Gt#*f@y)t*zzwO6$^g=uFy2$VJ;yHhkoTBfKij@0l`=F9G0d)w z7AwZ?X*>;^iitYKR>t{cl!=|<;`F>9OtuvU;n`eE90I|8(gun-ZN*66jh(w|hIKA3 zNYY~xp*BUGZsww={l@B%kxQ8!LG`5n&@+fxSvV0-ap~`t+{}0Rt37;J^eiu6PL?;F zPOKs3m}Di^M!9%9mO{)Uq?QXZMD6xU8i-0U&G6)wp?4bvl!Vw@{O9SHtl-EwbEv&G z$qNT`o)%!yyv{7$n3vBRBs~(Ug9ziO3VW|;#bDGAn@fZm*VKycKkfqo!Q(*y9WgAa zWPSYWEZi&EfJS)(#K<~*+(9H*4M#1!9m*H@XIRr1%P6D4+ywBiQi|BOXtL`~aoWW0 z;ypy1RH>rNOoiF;vszFQ(9`&tI7UCO^_pt%yuZ$-VN@?-rRD>KjnRb-h-W~Ek$|XU z-`u$QA zW1U`_#8rJ}!g)3Gj3m5Y)nKKhNRt)hC0ILNHm#pM5$}GPU6`q)>O@20i6HZ3|8#h5 zY$^Zu*G8Puj&o+YxpK@wL4*Y+N19WguF2c zRy!a$ph}7RGx*rm49?u~N<@2U5;jFpGcw@{obX+w(f@L(Wzs3pxtjJy9ND+*@%eJd;($gQYrS9hwg5dfDst*>%^nwI zD^fmjk(B3Sj=#kg&+IzXhyc##9hQhVKeBVc&PHjWNw43t>5n4gcRpL`JRw9V)1OjF zJe#!$^vUR#JS0HZz$2_No z3?6>#%y07X_A+I(glVghfy!KJ>qIS!&Q5UjWjhMpt~`%+Ibs;HPHNusCwucvji7^Q zcD%a3SF@tlfQ2w0jBF2A-On}?T)Rbld2q|CT+vJDU?3Mmudcfa4zJHeFUMjR{3SY6 zzX)ZI=E|*X(Kq`F#wX8rQ%U8(12tq#Fbf|^&95lBR}~h=_`*2D^rry9`8W}`0U>|= zL&U3$iwC@gxoOp&)`3sw*A5ALH9c# zQqF+U#&4i7Lb(Ju0Yd7I*PT^R|1z5;hP|NvS;Bb{#QRFH1eo_?-6UQxmOK(UBMm>mX8CSEg(LKv*uD`(-Yn;q{jx2e5A#@18W zl8DZ*mov6|7{(P6RnwmWo7ztMc@e%=%4^g9A7Vg=9+M82IPvcNnNRezpK|_r6p_gP zUf>u_Rq>~7gk<7P1Q0l_xgboBfvG@M%Juv6Nyx|Vz{e?(Q@zaJzkftl-9%HfxyP77 zAngmXQHpiHEc&&M9Nxm&Dm4;p>f^JS&w=Sr2M z5l1D5o}%1$^7Y*3ZfqUgUQZmv&XDa8hW=@3u9?L9hZqnMO$nTj#)KI3gK>2ZjIqaf zrxu=SNB8u;iLoH9Bu}yZ1}-LvIq5J#`VkUl2zR zO&LzXZ1iobTmr37IRor4R!*MI*7<*W+??~HKLz*D;85-q0p_!E_+Bl5R1_%kZQ~gg zpGCy;{s29x0=Hbnh9XV$p=kgKbv%|h$N#Ah$Xz6WvkTHXA7BFz6ClVRY-8@3hEY|b zYD3cYCU#R*1nGkR`76mJZ88CAje}ChnwQ{_@w@dpnDfUT7i!d&$ps=ZNWE=(G#>$5 
zwkasq9lwU{nsTTm(9($Uo|=(3Cs5md;Ije9??TR%Q{$&mgOn?wRN)0B+9Wvn}x3b|tD`sv>8aK|gVU3i&BX6HLQR5`dq@cIaH9447hpzta@UK$MLB`0w0zbqV@^qXYrKh}d_uDjwkH1<_hvPUDnvFr)yK z`Bo}ZB>x$#T2xx?0pmn-j%lYIdpbObUo?|MCN%$xGrD!$_P zKJk&>b;yGArYz{wW-9V-zxbAGDZv3`hGmyfI2=!q{la!BezsWCzHR%Fd+c$)?I6AJ zgq{!Z8sGrvn^Z5@`3(^et^U7$668i$BYZRbE)l2A8fi;7pO))5|+ zMi!<>-kQ1RQ-F=w|84UzO2o4`_Zz>EJkav|B?1@BKj30QKynK2g*t6yiGIenA6XX| zTc!+Oxaav0!tn_*#PH^_7PGk&qI|-r+w}t!ea!KG4yGy0M+>>W}M- zg&rH}(9$rS6X$xlYt~xdzqoLfyWh2z`z2!j-I541kBYyb`8ucg>p2(GQ~0~qojM=u zBQP}BC92hz9!XJaM6B%Y+n1c>u0byR(evhwI^^|(ly+?1>$DxzIR~``X?kDqHtB62 z`bniBD6{^Tp~5cYoD*!ZX=@q({NP7d$29S}*4TY0SR?(ND;${c~aymXi<2 z$Pp404RnHTaY6?pybLzL@8%+Pqkw!qt5k zEC+8jDCIB~G6}SUwEq-{B0L#U$RLNS)c7L6Umb`d3Yx8h<7r%xGL)qVi6bwxkn?HO zx~XE$z;YEqCazf@W0jycf>pSE5vW4^EtX|gsssZDe+N`Q<6h4ZpEQIjK@TE67CJJe ztq)ZeO?|*k@1@B)Qeq(nqrisPfTZRVa3Nh+PQ=>=bb&Ow5R1;%g#i@wQAQ?61`kyh z7YFABgvtm1@Rlpt8;E}nY83I%3z&m#^$NrbSurAuE~6*Y=K*Z*47f(~86di;uJa}q z*ANpIr^3Haw*9acxLjIUKU|yue-+kn7!Fvv_Y!4${ScgfTmLD29AM=-~ugVneY%a07cn z>#$oum)6GF#5R2mak@COXX324hHAAY(+0GkW!EdiKj ziUYkoOzK}j%rxdIL=tYq-y_Cv*}Zi6F0lxq3x#s~`R;)H46)GJrXR`Qz^0M{{E^O9y%xT%d|KqveR3!1&4$=Dhj=BUt`(#i!&W*kk6awpDhc?Rew;zl_hW zRtNf~lN!KtD6Ywy4X5NeHBmCbRiPud3DoK2m#KRiiL(>n`v{y)BP5b1jSo17kdWH4 zX7<+4*C9xFPo@cO<%e1;z%_E)ufp@`lck2H{~$PDe(Qim-6JS?Vv;CjGN3d_`41=t6^i20dYx>eUbm2e}@+;U(bpcPW=N zfnXFwWBaC+X3D@$k*61-WO;lVXL|>eQ_zC}=E z3b97Q9_naliEo+yF|-Y6M4z99IKz)*4RU7Veg*X8NXpX2g5O3vDo-h zC3J07!cp6Y_gV0o31RdVe3@vHe-yPJ>jXId-;$Y;#?L1|Y&Jc1cjh=L!ix4wWhJ_O zfs)Y{0DwXnRj2P!pr1GruOVMvFee>qd&3CN#FC`lr!Bu#f(at3GIgDHoqvTPZJ4uA z{p@b*p84kqC+ILQkK!1$S)UCq5 zdt&b20iAC#7M)IWeUCO5i?#ibzA%3x0rH+a2IlTn*=7ZoA+l#_b#jixd&ZhKjPzTC z4YW&}H&nw1CL^a^tv~x-1or0M)&6wVFDlIWVnzL6&v~$)O`q@>8Fhc(OQ)r9xi7ly z5sNVwZd$F&{`95+KDz>VfY2zp`xO$D_QqtQs`J_j&Zki8R7BzvTIjEL?mIt;wg{gH zDc$w`OwQ*H<*SKHLv`*C`OR-voDSC(PHe0anJ(vcIBkT_XQ6WMh1OW^srUT{ z|6E0pkCC4Y{&9h%MVuH>#!+DxlHXmdiJz7^swt%zZ^zCe_o(d4?|Z`c7`@3Q<%_A; z2W9AKEIY5!(?m31aC2YOZ4F$X3*fWSy7)#svmLD991#tIPPwG6NPQ$Pp6i2d$}V@U zu|&_xooTmPD}CNm{hUdcO%QPZZ)jJ;A z)pa}_U2nPgMlim1eE5SM86J+HCvyL)&N+AJ=M+E%*Yh{$+$U)kT{vDzO;)O5!aL40 zKNDwn+$1zHuJC}tg>=2>1LVBFW2pDD0&>{|exw`cy#D^~!eh?-hn;ZfC?dmen^&>( zNBx)f9(PWB_buAKjw@J&0%ZQQfU)a>bneXpblB?gR*vulu|`>i=D)|i7I??|J$P@7 zJ#K|zowv7!(wV`yFljXkgGSW!#5eZeFG_MeZd%RY;SFS)*?T)zh<sD)XVirg1Ia=hs;1Aq8r)oIz~^+8iYX^Fq|{nOApTobd4H^Q^Sjx&c(FPbmK zoPZa8U${}UCMdGn%v>{xmgHB52sEbF@^bE|ks^~mZi`tnt}dr76XPbi)NeAJ$R$WRwea z;}fS`Orki&dVXLy$BMJpCmpToV)DL<)2{muB1zTWo1twY+bI;~IlkVx#3MH}>u z?oGV-K0rGDypD&u*rUDTPukETTY!M6)rYJ1)<{|oO^M_no0~=Zpdgy_XhGd{X;FzJ z;BNMBKn~Y!+zj;FN&yT*vyOv3Dc?fde0Zh7Z~C*mHEWp99F-$r#o4)nRUwD|3}`tE z4=^#M+*X-Ji~{;!mKB=C1kv#_f-Jpowi-*ys{EVxlJ920XO*Ln>xRU`mBRCJ;%M%D zej#r1$tS++WkQj@{dvKnhv;3hQEJX9`UzA1_rUQLtR@7vF4u?n8)8Z4GaJ8U844O+ zfW-`Tc29@{&(8$z!r-lq=&d>Bpe4`gwH&LZvLTh~gmtyW@!s|;Yx89nxV$a6PL^d| ze6C~oGj|0td#kz8mkmy+%4^~bzIE+uB{5QN^FG`Ux+B*HYtvi*oi%IuZ z$NN`7-px@=I!nCP`eY^lMg(z163K491UbG;+42_)&N{hIAhBCj!ja0Viuw2Z0v_{i z1M2xv6q0{+iG52d7E*4D-2#*Y<6o0z_iwJ85EZZjTWAsa(|I#ub@ZMp}=>@8gu~|M z)qn5EUZ{SQO$!o7ny!KVMVtZR+e8a1F}(I@b7k$zmm5VI=aTkN`UK~nZf`duT_@*a z0INk&*#Dfdy$PX=uMxEUcpJOR?RWjD8P6HclS?osF9r~LM}(JMok9hEc+bm%8T*wv zx|QJ*!bDL){^@xz#L6^E)@Hj(9z#;0xC2EF;jBd;`y-Z}D}%uagN50b@~4y(X-Ile z(3Jq+`H`P&o5k+qL!YpZ99mNc@QQn=iaWQZ`%W%{?8o~97KpgxVyt7ZLg&L60e2{J z>ujP~P>IoNjbuy|3NcTe+a9+##j|TZSJ8E-z@4@Un4Zy28Y16-Q5z@e$MGEgw9$L` z5jX@p1>$_AX&;~q;)SO9 zJ8>5_%LR8EvL{+nXZ`^_*6E0^_yfzGL3rEYt;835tFr zlehMN=CgzVUED$4QP_9v!VEWD1c1ZJPkahIVu;`V{m(m$gS7v3EY<>S$r7I(8OBwk z0Z#{%ZJg&uE=m|=5G~bWpdmZ&|mK7pUMZE?7%3QGkXb4jFB 
zcG~tVLK5-n_Fo54SMH3F1gxe|?|&EdKJ(9Uwljdo^gj;BU^zzv1Hwt|`kKanY8`+1 z(xD+6q*UgrS?i#^Km;I&3i$$HmqgVMu~s`|o?uAy3}oLl$JYRvZy%zQ?EgN*giDm+ zn6w<5>ea@k7g1bViA3-kJ<@gnDb5-@aNy6K`0tM$`M>;_4@GV8!O@AGrnt8e%T>_mBV(?4|Czt50MwMHhVf?0jV~7^DXcGtxc5?{8h_`F z;a_ausYu^r3QsOb{qq=Z0-Osse~Ina1P#G8{my^E;%H?DZeR>$CWRB=|ozoWDTanes+lQ#HjU z{pkN`?@Pm}Y{Pbqp%M|5c}NQJs+Y`jh@zA!QwdAvjF~A7lJF{$sYGTA%RE-7RYGN+ z=fpCE%+tQ^h4|N9R3rzcAcw@S60S_RG z3>g8l>`xH_?a50D5U>RbE0%wEj3IF}qFei#ODxeyxlGB1H?3c+vEU;*+ zC2k1o&?zFW{=Y+RRq4F4gWVFHy~(m$T=~um%rn>m1}aeh>+{Dhhe;DoGFX3a68fI+ zb`hca5K6EUbQ^bp|CBo${+6H82XgcKFnhkc?zWQnokRbv@3fEB1~X0p@L3FKTuTRr zN#JTODi7MT|HyT~S|D<9X z*#I<-yEAq{Qr2s+Z06&Xb4f6ZBKa9tc9&BMJqA4J#S^yes1W23W)G-{_hx zK!@(^o?D2%ISW$Di-5=v)n86Q9ifLmRV@;#WtI1YAQt9s4)4jq)iL@&pfYnz#PU(SH4F5)ejz1TgLJ3-;BT7U2BO^A z!}u}6JU!hU5SZN)v+rsFV%u9#->JEGF2EObruKJG;BRipfszN}*u5BYx=wKoIKpqD zs~A|{8YM{H&CA{R5VWAQM1vcX{aKrM*^z7|ZH6SJuk3O(+AUoU4%b`eW@4#Zf4>kTe0LT!%uNX&r$JK*{HEwE`DUKdil?BUN}o)pp^aJlduVv zHG~cd`G#fZNW}WwD#jqhcq3h*N;^@`9Woue;NI7lnF6-f3e5+p&(abAeZMz~lZ7mnJ8zwC(fllbRkK+rT+*->xD#7a+#rilt`1 z{~I5j>NeHYI@$y}G>A(O2`RxLHR;tX2e3{ym|E`dDG!&-45r8f5Qu1MaiFBbV^+W4 zq8f$3FGfa@S;ClBeNAqEw|gZpY>z1ccaU!GP!_=8YsO5gsC1Hk1rhB5Zw0NRFXEz6 zEWgUN1+?D=DE-{j2Qa-jj6dkMwLvjs2-OOdcd|+@z=L1f8OXb_yH)uy0MCmsc8aC# z&fy60tXNs~sqN!!Is%n_IFK~z0%O+}z_C1-suGR4|8s~9(Lzq=RG`2@NOZ?*Whz2< zv13&f+Y3KXw-9}PT!7LC4*kBvR^N{89xdyr`bR?HDV>?`SLj|L^=iGd^JR_(S=z?9@>aAXg#kr%=G%>N9XLjS?;jR0fPnx5k)9YKr6wI8M@|JJcJf3I4qtw=Jb6 zqRr`Bbxuq+5zf)27)9Wuzl4%ADd`QBx4#4dcqs7#Lo#wB34`QUP$2mK>USdTzay>z zB=PQ1cf=lIjzoO}2LLIhK}AWU5(nG?jKf}4h&jGe4suQ~0Yh+oL@shli!9EdC=rr$ zMhfkaz7geY3dzy=5cKJU7$@OlDI9%Jq{x9g$gA1`wYJ6+V&}%s1^PV8$}ZFh^oJ3# z<=^f3?-=5Zd!7qIMg830pux{Iq zS%4#PYMLoMXG-B%>jpc6DC+=`%(2)05Hs&Bo_&It?gg%`Xtz zqp*bzy>(Qke6(G;36}^`NK_~_asJE)j90SZsWVUGcmizZ}hdY6H(H@FV3Lw~6P8|gP zx*?E^+QD!YbNa7;M`R5BehK?TMFV}fueKf0d1nUa*oGxtgX-=%Mgvw`%-waksp6?v zwU+@D%JP`$J1{|0iAvYvmypE02F)Ca{BN*y#=4=%$OdK7AZ1SW@K5zbynk^M@g!0s z{MsHl+}CFU*MYWxZ9qR=r*0aiBbke?X@L6;6fItG$iaQ`N zK!PHqCTH)INPA4%0sn|L%^ZYv|6b+3;0xBbUX_oh5kw&O^)5M&z>2hi$Vw4_kSzvCK1+mXE?XA@imIF^z(sR?R_x; z-3B?1>!Nn2Q*?9=9%VRbA)~=3aOK1^Pdl9?#o!0cuL7TaQ{m=4!Ir$63ve|eb&G9=ZY<=ol0siIR zw}!G7QF)~N6&IQKo6&z&K@R&QU@5Ll3$W-)8s`&2`!exh&$U6ip*3WRNeb2eX~YxA zSL3ElKzzf)%y3A|sxfYSnpF!=aFHU%zW8yV?hWled#%6d=VZv~)m1?w)8ea?El8_^ z_^lbVly~Hez=-83&NEg|80%4y6t1Nj3cJ913hV4Cb%{TsUsnNfyy-#*8#es{>&vVl zA?)26Cx>s1k~w+NylL&uTlJGcCH~ zmY_c`W6yO+;41!0w8%{3x6CB}8rriRfLzt=_0%0=ihN3KJ$$42?M0Ly;_=iU)-3hP ztA|04E3jM1jBFI1+PK z2Nai{F1}WN_%dq_mYPeF0kdbke1E=W2;(%v9HjrbyDyx)^0p+Sf}%=b@m6`;5)?Mr zGpOT12%`mr6^G~Ym|L~Rr-Iz{Sa@XirQ{HC z&I5z^0>bG|ji7Y9Tu;mmdi-9}%3A?Tc+7pSb|tPVB(*ANrm~MX$mbP6n6oN85x7IW zt5?8Z`6-1v17f8$G_F7t^GEd_I7|2TKnI;*Sy5>JReujHtH5Ot!=;PIO?4M{gYLWi z*5<}+v;oh9cb7p^Reo=HDJ6vue0b*GF?urM7E$1Of#$Nwk%EE;S{L?=tzGfIXbGn& z-m$Mjk|u#HiaHQa^GwL_^jruGHvJ={{92;UsHn6(2uNo3N|B-9&d2AW|1p@4OvHgE z34DPxIS+|Mf@&}Nm!SmEgPp#DCxiU};EUG{lEV$Y?IY0DT*`yiNQzeHa?)7h>kyyNjAl7G8ey>7_JrazM-&$>efh@S)!R zc6b3iem>TV`2caS{h;ehwpP#?;>QN)VA1%G1fJOWsdRW)X8(2{NU4Khh{smY%7Iod zKOs@-!_aWEZ8tKY3Hsn0MgJji`_Eh>o_lsaLjQ`V=?v;8^!*Vpv`#{U`89FWNGS3H zNtjmb0vb;dR~rf04UbNjkz8=EL2`KaXOlHaxSQ^gZhzqSQBr|9IZf(8^fhRTk19+m zE{OiX_PE27yFV46AOW38p!=`e_n(?iqLhHy{vb430G(RIZU##5z|weQaZq4qT3M+t zMKtaC)dLFvJwWds9iOux!?oMgAP`&~oBfXwV)9Jn3v${S2RRtNB||0Zomqw_kM2CF zz{LjVbraS0kG#XcsQ>r6NW$cqustpT3T&_fKV8nJX4-CAI4wmQ9z*WxK(qY-b$Pv3 zyB7!j_Hd9;e1P3Nf9Ky(40v48#GoXx(~(5@)|wD^5c&4I@Z|oTC%NdK!dQ(VF>vVr zz!@0g|6UgZ9?LXsFAW8*r)a~UFbvu`Rq=bsu8!bHrHX^L9|-ETOtqDzqu!ZPvK#QY zr_8^jsBrGTx$dadU^&eH$HEEvKXs8yj(W1YxJgGxM+i23Iw>b4p|wHLu{W>-WC2mv 
zY+ktC<2fV+fG&Hmv-3~h<#j|vV@MGFGEtg;6@FS`ObpmbPcL;W5;IT1}{Lc z95W;zBT-m7xn-^iB{{Ze>pV=tS2(9J2}kvJCe<9>99JqX90*K1sIHB-b=I{N7g zs<)EEbL1cbv&KEjAq8p?U~faX`Rst0TO3Neq@4x=5Hq{)TR5F_{_%$-uC71h%~ODv z6`~j;sw+9$?!P(D=?#f-_Mp@@&0^x;W{Bwo=Y^pUcSZW@TC<9 zolHVPl*fm{>+kQ1+jabvc_p>+pbv$qhC=VclB}vwDEO25)2TcEfXIOn z$j^~aw;eiqyep zpHPY%2VkGeasJ?pWoO>BFW5ofYz_Yt!k5=9&66&m z$!9rwcz5#Qc;z&z`g$PH`XIG#L^UwIO+ux90b<;tc?CXBHz1hPwfVJ46d)-$g(SlY zfMif&P?#_W>>!hGV;%B=u&oifsveaTHl@y^c7UP_PeQ$rG^t6Eogu#$UrW{U&_4bo zs7fN$4}Ja))WG8nMY@wUuRx?nr)3Jr?XPbTexRIz6ri=gD~8sxA|Awt=qVw=_strxauj!k`Q zngP}hw5{XQ^eO;fE&`7G?|Ps*)b4o@Dz4!gipg$Q4XQ2Ej{q8svNcaA`d~N9WAYzW zl5E)%@-xy~#g;?D>AE_1Ka|0R8z(K4*UH532ZT4toXOd_C~BX9-SGBK3>(>H-T@j< z-7^c`nU8Y?5*RQH$r*on5ZW5R%yooh5w|5!TFb}`*Nzo$(2`xa3J2LIW8G^>-K-!j zZpRSC0vo{XuA!0}RHJBd*{VDJtdDLAR@a^9yTBJ9cGbkJz&fH5JjL-o46y#?%&o(I zmK4q8zMS0w41Gy01_j@_xUt^WX?H8UmmG>u=DCb%@vpj!ei;>KySmYj!-O%;F=Rc~ zSN?E?$-IF{=Gcfx$pG1sWW#IE!UUAvqnp}1d|ddwa$KQ9@ReYF$5XFM2wuU~bz`M$ zSI6~yR{bToMIHtTEUeL|U3-O9h+*JM%3KbX?;GDAz9D5APf5XN-39A>lh3t#jg)Q604y-kmA`t9 zgWF%l#%Go`s6yn20{!;o7q|tN-~8VSa2X_L;~jfSzIXOmcD7ZsWF&w2=hR>(cb-2) z5n@@MpO-n0*Lm=v6W@WmJ)t7=zHqpU{HmG z*Q6{}4WgA-DVm(fh_mHYWIfEV>UE%CalmKXuW!!-mCC0~){Mt1xns+X>75U!Nr{B4 zK`3Ujhn!&qav*fnxjB0#gIFT;4_uq(H(!wGOxJY@u0yB~Bv-RIH!yt2Ptha+K%!g=k21s?%W_^%WC)~5 zk!PQ2&-iJZ_Catk^Gjx}`3dfsUWsKnsl(YN6&l`Y1IGpyBy}73baZd)iy3?(wU}wY zX2};#-||t}>V8GW)?-EgrN4_CyKHNem{$CUqQtFq-?`Z-?yp=(`6R2ujpe0$X`~!0 zlHVc-0=I&uovUKC=hXNtLZX)!=Tg?kvns?dUj=Y&L(r(a*0t+(T!t5REyFpdbmZ>0 zpTXkf1?h|#Je(SMTXKcaBbD>Z&PRS3+!g07xN$7g7hnOC6V9i8J)&ar1g;|`{Ms22 z6-uJ5bp8UE+qIuDWnbyvB4z(q$D#dYOxtW_?T%fB3&ZznE?w- zmN-a-Z?-2<$7iy@FsR^yqD@8L+EmqMXRI)UNO|PEwL}iGG;d-;c>B@0OH1EX1L1J% zxq30m*KF2vnv_o&Gvem&t0kJAo=}^ZF~HEk@H_v0=KHv+)Ql<$yA#|;S?{BrDS9q!K(zb>u~!Dqtt?0ct`ZFHq6~7e7!jKP679;z23jS%~EY# zU}Y=A%DuVC+I8vs(WQa*rg8N;s5h9NPWc)1dYN#P@n)}nQ~au=HLq$`%W|&u_au~= zzEtt~F4kqsD?aI|U-Bou9KRckLIy!Tcln-;5-%!+b-1=Z`5!P;bf%|nEi_Hi#w1(T zMS+U?cQunbW2W0;39L{nZ~N`|?X11!LTz4CF^zJB9@v;t@tWZpGUNMkF9&Y@x={GC z>wHhV-|D#5oy>b)C%CbaO1)|qM_U$*}-vJ<>Psvb)5;@r2q_H&E>1WM9|2Z~byKYPxsDehj~@B70v$h{FmLx4n9d8>05` zg^!DwLfUYSPL6;V0H0SJ7vM6G63xyi3(Bvpc*=YtEMrrX?Xa+Mhwdm-h`<~}YnSom zI&WBB4mbJj+r4@fdic7YcNZwLihDH&F1eG-vPwz4x_JLOH}))D+Re&k!i#e!Hs6~N z3eUfJ`=l%G`^WojDWM1LM}A4RTq}H^sDFr%Z8UL zD>twgL&iom@%5%TA8q%ePmiOM)A&EVijy1k>F)yiN&*mHx^jzOiw!uS=pak^fk zX#zDlcOtc+1ugxfQ(ytxt(=zLv4ET5V2M$6ZxOjP_Um+DhEHA5T9%4f?fPYHg_o?; z{Vw0$E_K_A$ewMh+!u!|e6H`{Bs0p^WYG~llOga@Us*(Umw3C4dp)PT zBEM5|i0TZ^MceL|De1@m{aek_IL+sR4$q}on7uiZFEX#)@|Y^PanCFlQW00Uc$Cna z^sF!i9Zr^d#0d#dsD+$xMEBlQZz#o25NMi=nw4^Os;s zXu!-%%uTnvK|Hv(@tNpSc%>O$VKbQY4VM#Ilv5|;hG#_)Aac_5_`_{~%mSem^IzAU z>Hw*pY%BNfU^1B82tN>6=V$oNyn0VarO2Ej+@1=qFb8kyBRh9oHN)dzyqx}E7dsGM z=co0~-0ZJz^MQ|Byt&pF;D0DWWSb~p=aKvxE_i^DBFZ~*C0Ci(x>px+-)euW>!TWXVqP1?rcji1VBdsP zaxy3ZexYy`T*x^r#PVI1|7FVoKvjxW{}8R}&ky`J0~xVGn@frM9>u3Vm| z$)u+M{3P<|wO?G#-}al;55c@GHzf-#@{u#I3BagDe1#!o7q|OOjmpPF%z3EMlp4ZY zO?zc??r#rZZ2%15o=dnJ8bXVmB(h&m!CVS*s(^QG5ek2?T`ytT!4urz`)RxlMKsuo zvUK0-DaOoJrV!sEKJH&1(U4P`!|F*=gdw7ZGJoQL(LdsW`9E~SYW}KgUeiyCExIU$@;-Z)_LQ4Dao5L4s}0{+^r(0 z^$;|zD5(TrsSV1C58wQ~gnJ3L%3>$zVch_Ltr$SLHWqO^Ye2)p?sOqCKL`_@u8-so zi+uxDJ<(-`#LKB?SG0of^g$YMSKZPNO%ZlcHHa`Ho@NL{JPe4DYl+G@~A4gIxF{{@#`FMmbo?&${>Q-Iy9@Re-_8oE%(8EOZ(N+A5Dj!cG*AIw)tnu zWD?RwJP=pd#p-#6_Yg*jbWj6&YkVM^9J0&F9+Bp25Kh| z%OS*i@{qZs)uDDeiF>57#B~cCdjlwtodRmFe2C~v+o>bu7P)g!qb0OleWvWdZR*D7 z;(MUv>^fwrW%Ka}$@+M!=h8oO z8CvH!8jM`9uGDyQ`GrPy@%7JHZh0VA5QOL@^}yNEUzi5*QKXO^1>T1@L|^St8kFm= zOXVAj-BaMU-u|$Jc%OC00BKLv>qB)AOY^&~EM)UIJzqr(;!x9Lhl&d!$5P_9z-vzk 
z{KaNeA?;2?>JxL?G7BjR`DILc--HXx9pX^k;GbuKXL>x(r;bU#gaF}3%ZxTb=7v;m zMPaJo%{`F3)t~)7v%=dYV~$ zd`#~QQhGSf5RlL)stX43bm1^?&Rw>^`nHFZlxyn8;ZDXOLlr7Z*p+qvh&|gv36`GV zxMC1Z9O8eVSpofZzTP8;no(90%$1WT(>#B|oezl7D~|fhAO!L3VH%�xZzGhm^Mz z_FSa&Ru%_wNBgJ)JXw>h!hK9=@p8bCx|2ya@}fx)4{&_I4Cu)s0+u}o*nZWP3A|?* z%xKBrseoc0{&3s2tnnagXr<2mirO5COqI9?nC$2iS-_Bsa(Oy4`Rw>c`1<%ZO}(eY z^l9`dx6HOyqeJ+z#Y~d4`{%)wh=F|Qq!3R?6w!KQBWHh1eff)4&YGrzM%>awE8(w+ z*FoU9$~!V1zu@%To~8%PbI9nkw}r?B4%J)(cJV7?8Jvln74U<5RaGHU*K76Ad?BbJ zqh~cnT+B5Li4oUrwi!9HbC0*ZbZwIx{Swx6^K zt-l54;CpRv434!FM$7_?P*P}{03p}9x7vvgCr_Sq=v|F-ZculCRX@@wpSgYcaDqv) z_SebFd(%h78m7vt0(jBCKCePFxIzu42OCdyD|d2nHH}L z-@0F+vVZ`vS|3PLa(OF&G8zXy%~hGs1%BGy3%c&YQ7ZL^;H5A z(jcf5tSiaz_p$I%i%WdnjC0O^TjSoXeMJ??pmbHyO>#NTnHhzn0I&NsjV1hHQTG*L z;O2Z9Oi|>VkYF#*z%6pa58%c!vHBZ>w7qJQ%}ZhhipdxSR`w?&aGz>Vuj9OkR>N5F=&7V~Y)JV&2vsbrW7M|eUmLHS;+Kp)UY-N!}u z1Kgt3LhvwZPxC@(os~MC>TK#h2ffq>F3uEpXa_ia|6KMk-AELed;K%g*R0LmvE~Lu zQ?%|t&B|ZON~W<#Q4X2x^1!`D`&SKn9Mi_*B+_59`;gypl{;z7od=Dcryl2u-*o zN`X~-N?vJs9qtHsxH&l63OTrEu+^NMM`@MhKnUD%#~hSGCefBbLD}2;r?za5GkYf{ zd%aIH0fVUdQCUa5U-si%l~k3etR^U)Er3#Myx5cGC5uz-O>T>Z=K4byn9Q55i4cm* zDmB-cPh1+vJu~-O;FryV<7=gpzijTN;+AYmqKbG#>dU87a5GX^4jVZd>pCHiz@hIe zo?<5Rrom$=0@>H~Fz&ja-Qp>-ST=4cD=3x1xjF4U#6_AZP&&D0Ly_t-+%@t-*4sTJ zTwlyAvHd%_ZW(lp(D>LJ!p;+jJS;v$pI1`1t56gm++5y9yzRV7=$_Uq*8x(3h)+9F zwr%K=R?9cxxNy2p#~8SgD8O6RdoTBzLV|`vq*5gVO7+aVR+zB?jJoIWYa@<9oHcMF zhzizj+l%K>Fzn8_{*@en<@;!kg<2a-y8Oi3zqy7j_N7`(ChIQsh&|Q&JEC$<*T1`P zW~P?>=o==>I&%^3lOg34*=4Dk*E4U$X1teL6DG}0)I6kVr}=Ec2iNF8h$~;>@H8A< zSQU5Eh)#ojsCHuT)OV5BSqEkr8d0e}ld+ikJ`V8ad4w}7So-e2)NaQsa@m#(tVv;I zArsQ0P5ur=HP!_4y_H^zuuM#{|cud(B3kSJGu|NGXhEm%L{QxqYUOL}iPaD3tvDihCrkDMaP$BX# zKn~mf{p9gV5z|JRF#R+ukHHrzJR{l?gF)eR~T@Qjn3fCp7lkj~te@Z(bBz{Gcn}RbKzl zAjN!Pa@nVDGLjQWTsRNJ3+7xN+G}f?7dHAOrn_jm%i`wY#XQ<>z0Q=;92~nTp%E`@FeoBdS zKJFjuswi~q91i_yn0T@^j&kd+PP;)u&&uSmu1L3w{^oVg9JbLM`RNytQ?noECllx< z%1SuP)^B<{St+k=%p^_?{b0{_7ZZ3J~ja?Ex`5XtTTGmm2gH)Sl=Z2NaCM{o70m%ODx*uk5>wqno2wc6MwwZ#XtOd z3#0t(qXCWn_&y5IWlVb`C#EhzL_fPnLmJWQp9ug%#Y9NihbJN>Pi)zVNw_mNu z_~zM*a?KOZ>?|>d%kd9<+d4NUe>k^a`#juKcspcTV@dAZT%uLj-*Go(k4Mv1%&!HM z|LmuI+fC5q$ekgNrSFuidilGUsZZ~-Rl~AOA;HW2V@)NBRwtwAqRWmG25#9?vcNwA za~Ww>s~-(AJKP=L_6sH!@%T@Q@^{`0mBW#8ir*eG8Ez}X8pQCl8jG1Ko0~-~Q(;V* z>AS`ZB5_`n6wCLER;x89M*}YUH7^$C@c3^%SuOHhlk#Y=cQY%*`g!*6{VDri>*Uoo z)75-^f$W2){EOD!nwgJL*e#be;YAKisitjX%zH;<`=&GHPc7J0=X{P8tPZ86-nvgn zWxH>e>>sk>4YlbC=c=@^n}|-L{b)Su#5(erfdF|Ddom|J(({(vS*)dx0PGzk4u}JZnRlVaw**O?}EO2^xpJGF?=YW22?lz(h4(+5HC~yr@ zC$|@x&MZy)xp>>kHr9W{+HNUbS9ZbKmYIH>T)y;6 z{QdS)cf)DjJV)2I261ygy6$rSV#XmcjlTg}XP8UO_Ny^9@=1)p`YM^$TB3ecD|^ar zjiI0JYei3dgyW^0^41f5Updy2S8g!fIaQm9_b;$<#jpK(vcA?Yk1@-Nx4S>boVTb; z)$t-g@SO{t1&cbf$ffqxQS94}dg7>_PJ&a`Z_!9s{sGgsd{I~J+d}O`Y8M{Pm{E>(hISuZhI8~+11cfEf znchEnRf(KG&FFJm5?8jOyy_y)V6+7FvW{RzHp1Bu&hZ0KsIm75F56BXQHux=TIhAK z!Cc(K^aQWNNw^ew5ub`lZ;CyOgL|#1;-J`W6K|`&Gn^ndnTS0ga9z>`Q>Ez} zcdC79@QY6MWNXjjmkyeAlL^aoEF|c9pW`B~<3w;5H{#gMiAhY*UM_Cx+IuN-+zJ?{ zg9a04s2_*Heyl=SI`%fy`?padl0X&iZwqAy#Ql3Mgcjt^|M~z8V~g@iw7B;dpg!ub zHF#ft6x&99&^PZ2(=rd zIp(uF@#~te<_o8$>K7burZ1g;%>1;AF}KC+LbO-n2(gX-D>8-Jc#KqNVbe<@x8=V^ z#$XAsDP9KwCcQEr8tAmAI6QgmU=SQ#DhNZfIT5py;e?*#`8M8FROGZ#4!9n7_;SFz z58p{OeK)rZC)|Xq4D!qoP{OIAMvaE(G`g=nt&IQUBv)dPfbpXLrIXn2{IiiX-R^jJ zLW-Prf(%k0uaCI5ZsFopA`5#JJTRZ2fs);6fjfcE39sYt1?wzKctP%tj755)d%ITo zaFm~G`jVm?H5govIN+r}2B#T!*UWjXS2vezCo|1b4I6SO|A`BdkE(Y1(-)Aknbab@ zklz9|!k_hWj3xakKuZtW%YzmjJzMndIZYCHoh-k_j1{K$qs~kS6}e{Rb}&=n2`q%0 z6j8T!a;xaoIpD!)NrG!;#~#f}&irYi1&=*K3zaaE+?=&AZYqf|lB*YD4*t6uc)z_~ 
zYh7HxavFr@tVG(4ZW;?mdHMgmp9bsf7oh!WVSHzH8xkHwalmjw8os#lOK0ZZI?TKx z1F_4J(B-cOUC4J=2g7p`_=;te@WSR7s_$SkwSzCe0o>pJ?^`Y+|2Fh4964r=FK)D! zk6e7TP`k#%9NNd3+tOaA!#NXt%nzDT1I;+9T1T}z$N>SJkQI^T`iiCc_6Fy`Z<;`j zvFF9y??7wZ9yyngoHMd#FtQc) zOZQ{uQ#Jrl&7W66n?-?392%TA_$BM#ag!j`9Y&m8kej6W`}xPFxxLIn*1*=q5w*H} i{%8bj?}H4>@`(b4p7=gDpe-XIIe$(|B}eJ{-Twi0w~D|3 literal 0 HcmV?d00001 diff --git a/hadoop-hdds/docs/content/feature/HA-OM.png b/hadoop-hdds/docs/content/feature/HA-OM.png new file mode 100644 index 0000000000000000000000000000000000000000..b1ff506f78607df7f6eb6c64af8aac22455a63ac GIT binary patch literal 60888 zcmb@u$IkTHk|lPffgm){FQEAaq(NQKn<2gTCP_{TOnRI24w`AC*XXCwV`=k0RjN`0 zYIO@b$n$?pChcbI6)RRm@PD^W=|BF5|LH&c?QehkkBZE@fBV~i|G)k1Z~xuj{>T3T ztoc9w)Bjrh?Qj3-ADa+u!&NqY@wdOD++Rz7N1*z*ZhuGlzaxmy7umM_Mz9Fh55K%A znwRv$g z8Muyj?F2?-{cAM}MgjRdDu8j%o3vWe0sP+l%g}xpo~h;B`R5-$o|r_aZV9gLJ;IoR zd-`*>KE1)o|2haB*7=wIouUoFww`Dt=X5Os{c>}j{m%R$vnBJTJYU8gJIAslXEv(x zXWsfrnVrhMjJ*6i!m1a!fIoE4SSDm%;;+xI%Qq{=k}I^*43?3wnB6B`0YlXlP0FzS zPi+jaPsR9F=Nl>8LJTS|97vVpAVK+KevOW@fbUYTJ#SZoj6kQ9Ea>l__f@ftDvpL9ABc z`Hc+3>v_aD=q++o3ZwIorFUpHbOjU#P6UHFelVnA0p-))3Uzr--jEkfk>W@lc;NuWA&Ot zK4A{^`=sY^2mkV-cV9yYCb0OVQmV=<#h`L3v!02Pd`qK)WEq1K!!E9iVEY!)erK*_ z>37SM-nL*xpvq5T$%{{{!}T$jb)*RJK-ihWTzc-`A ze7=*7if7mCqMRePA3X0Rq2p@+0xO9MwC01KyHc}_wMCTaLF|Iml5LhjXxx#;EH7@* zVQb{|^F73ys*15NbXzFZo?L>v6cb(1C+U#zlTK$t(Q{221+ZR&^aPs_+_z=Tt-)EL zZ@ACEB6*2;M9=TyuyFDf5B+HBW6^IGxtPqL94_)(`uAdLZFvt6$~{d=L9E`VRA(c( z837X&d9#!)WoE#HV8MK6Gw%dz1288=_lIbtyq{(vi72rU7fh=4*pv5ru^oBFBedU{ z@HLiq=kENF(SnDOt%m*Ge_be4k`w}_9xxU?m=jD}&0{+sRGR^)gF}pV#$wh)sqku{Ux*1*U7)zR4|=0uIq z%03LZ#KBd!mP~HWx;=(oywdKvvtiroTx{i0vfaVx(VvMQ&9-&a2&dcJnWC-zRGlcs zrQ7${|3XI*6V|Jm{bOP>gNwbORfRhTU=F{APMn_2o$nPmP~Do90Kd!aLYx8jQIMG| zxrtydXCTIQD%7B!Y`2br#(_k8wOkgL6HXEX@IYWN8v;tY(RgZiPz{Kdh3+?!}Tq<@JV)#!)styXo-@Hn|?V1 z)uA4+xx>W!M-J!uyOzT)F$HPP(HM%ulCZ6LEeybsSU-cuhjx`dYj@4+UrdD9IAyWd zM|!?uFnJw4mvN#VxvHBZzhnzkm2!SL7{#`O)uxp5b4Jg!^?#GcTTg?El0C+i=$^zU zPOb(nO0YqECQ0}NS1=e9wpu=;yI=M~>3E8Z zWxjrY+m#eM=HQ`>qiTK^sHRe2(~j1>0tLU~({=X(ebCEYqu({giz3UWl-#01rLuRZ zE!2@U|7qPMxb~rzW=D3kjM3}U^4 z!2w&F(DB}Z`#ry-Gk-_kYL{t{*R_ongw9wEY$J|1_5s8JVW)B>jwLQe0bH-!cn zBerd0+<$S#|Lb)2iQoEQd5;vx_|1R5Ni+by{_phU-{HVJ%cndbj6eLOj2u~Lv2y!H zJ2|%txV(O*KMwIC+bRItvGvX0!3VEmmHj+K*F-9;nR zNsUyjY*{29F&PipLP;fA{ZwV`FsmihOxWoP!xouzC%@kE;@WTAz)7&{Hs*n6V!#4y zCaot;c#0Wy<6AO699NH(DKVb1%x0uoPTM52b=)o1Wc{hC2U3Y^(263yqPL$bE@$>T zhe?8**S1v10y2)-_4xBasxP%dzZd$gGXA`|$I0E-On8E@b82brs0`+is2yK^?Ex8d z;^nDg>7NJ$5P?1R+iq!CC(a~Yf6G-{bwKx-Y>5t()yxGY36_@FTF@#!>e?N@plud8 z7MT#+8a)*>i&@hnUG?KvV6M;5RC>}UAs85Tk}Z9e9N4wl^<^!-D0#g>h)NZs*r5Ct z1KGcUIrTjk;!#n>H=pNKno+SsW3%H!k&&IaY3x`CE}xx|IcR=-*dgea2%{uG7&bPv z>dRk3_$2M4nqwBP>*hSk9Vtvvs`u8jEf*u7+^a6t24E{L%z0RBJa$72;~qHdC6{Ld zHv@j4?ha@^dw~CumpDGd14l1_eySH$&#C>{)KVJtTp020}nRZe4xzKU0>FQPmRN1vf%7*sSeSLLh zsor?RTm!fx%eCMci{z&dZ!m|&5<}FHjo%9=KSf^MhwxiZ*S)CTwYl&^>+p)jw^ORB zI$&mwn;i=FuVD!77IhK^Hu*lC{&@ruO_9M8W@!L8z$SMUx<~nwO-Y$eTS=BdIDekE z=yNL$`xOq6hS zmav`jXo@z|3fXD#Zm`#<#9fx{IPN3s;qRLwHk&@lOL5{}tHC%ztdoB`q$VZ`7FcS7 zZS6BwS}cBxO)r%B5HB3^y@Z@7XjSA>h8y%~&OrHb>qMv!(p@3N(uqj+>icxi(%RRB z-)^um46}v^aPByE(Q%kS36&kE#CP_*CYts! 
z&XE4Yt5Lp_2sZpYCr>#$PrL50AO8xWLEi5$NAZdE&`B(gUQL$J+aw!ZhIRaf&<|NI zht(wVwI9;)AO%Hz&n)6w8TBhEjmvQl5Q}5Beainu9RF`%ZW>3(y`|LdFGt19`!(SS z{Lo*_ZE`b0IR(Jn?D5Apj*Ernkpk)mKK2 zFI^G2W`Sra?o*V=`*17l1p~lyJUP-K?V_6v>tm&cB#}lSwqe|=P4D85L6ifU_ zZp7%HZFmz+tYuDZ6;EZxoTN_YzSQIUX-3{gtd1Oo`7|A+iIZA*4AuRb;ialHpN#h= zoAC3XmlHYOlzpeC(VCr-hnoG?u8xL6yp*mE!U$eLuPtdK- z$2-f&+pm-F@JTKi77*1xzg)0)IZ-?un>Z6jMW5#u9A0(xJ8uq!(1o>{EA>d(Snl?& z{RmkKj~ONaQZN4U+L?e-Q2{C-IiK6GNH5kOFkg__?wnMG$C`SyvOZl^Yv78D%@4$o zVQr2{wkcKH)60`ta#`Q?8+{qeuz~YEl(ixV8PN~XiMP2@gR79=W&7lT$>$Qbv^u9z z!e`ETI`Vkv=K?|LrS#ty!IZ7w6QYRbW(KQx7%7h;4=#iq`GvO^E4G0S#TEq2zI=M$ zOwxCylZX5*_?ntv(C0TW=<17U5UOb^+kOh+RC|&^b(F>u1yW!3B21#jTTCS8Uqa$K zd#*Jn=7+O#L#XJGRom_9{QMAT78NbHMwK;(9Rx&U^+-jyXg{3U8{#ZOSrxXhn2vzG zqJh{^@aB_>c)X?LBzj6dCL%)C6Mi&D@}cXktN^Ex1&;hq z=K5XpdLZF(H7fW#z%QRj);btBt{t0Mx&~!p!LP;~2L7 zW$htc74P#}L@qcYL?EBy7xmEBEysn0{4dJwl3Ewh4M#X}=HMx{)*AL}5rIR2PoR2m ziGGfC(BHscFYtp4rfUbwV;>z`3xH0PDHlDc|1WX!|7*Y;ezYG3#)o7)ykkDa$#@7&o4zdYXcX zY<3z=E-={Om?BcjtD3()%&QZ5qxu9@PMr#WrO_#s--h;iiiLzmf zTT4AF(Zf1hx}jiXS)i&aWvA&cZ zcLcK$Dzs2CqV3AWsi@UFc3^aPcZagESS6smh#nK^du4ru#R3pU2v}B;aQc=z~)H;_L zp@WrLbWrU7UkoL$!WF@YI8{pwb2p!a3H$$IDET{2URAs(PQl#ec6GLG0wI>Rhc&ce zRHmpQin||X-N~}*n11(oQuW8Nm^h;NH&!-D{qhX#^TA3>oO%=ZE%eO=*o(Mktt+<;RW_8ND;{@K0CP>S@&oem z^%;(o5yPco5FjVXvjRq{;ijAQfm>oX3Fsc{&_GviRW67pS>egX*^7V1^m+WM;B!vE zu$TY@6~V?qO?eo*bf>6L+gQsNJ*E36(&Ip6Zvch%zC?m2CVF^@2!3l_QEyiC0vUw5 zm?73;qKS7I{irk5%kWIistlC`oj@q-hx}9CwNn3xcZIM$+aZuLlM7V0CZAG{zkY=^f2OnXtq zG7+E0Z(D0xqZnObmQXZK$3oowV9|uvD}t{oQz456tpj0!jVQHLOicW;9X4JoqT8NjHuxajC+!;xDql&zB&$H7y_W{S}Hx030&JXsQ^VR-K1mVS9nRD<0 zQ}Wb5#hk|o`ekxV5MEgxiAH#i72(Sd#%YhP$w|Mc>nyB^|B%}L8PO5xD>U>9NInRx zCTYqTnmJ@^yS_WSJpIp6^6AGcLT*N6z*^X&)?d+%n2WijOI`+=`Tn7~^HmIpt$W?_ zSXm`Hetb7VzT!)?K@@>aEcdvyjs^jY|wI2oq z+BHZbung`3jRNc*bdB5{0sTw9C!8C}BCl5U@~bp#Dq_%aRU8au`4#|mH%7s|Ypn6j zYz)JI8~9&v;8LdRkoY`1Y^5;3chW3nG|O* zeeS@0vtptv>kD*d9hP`j(pnp@6@jq6JV}>S87&`%I5p#YC5o3u!8*GXUO6hhZ^XRD zdK0;@#tS3;AdCFLNU=(NoHgcJUuX6gUTGAuNzd?|$EGe8ikR5NquM2B@D1>67IWpxtP0`#RHNGL zBmJtoBJ1-Yf)^)0V0xWWiI1u{zrl-|-ckWkm&#S}UirYxya+h7yWQM+x`sfS{ru(9 zpuE6Rm@sf(8*Vg7k#-o&6-IZRW!ESexz1VHZt9dBr|qa0zrxV7h9aXDZjVP&y$?=) zV_%ksovGEo%$MF%m6r;dwFx<=^j;Piq{5*w-WBG`-UtbSa~3>s@tQzzD}tdEFOcs9 zKY-7VL(Dv+;KP;$gP`^Klavxb#(TuZlXPFs4z||@P`|L0g{j(GnLVq82|bk(^P!H) z@)Vr>5;AxULJGQtF9!BF=lPSD{TPK`$FXCk%>QnAlA;3wfzS5bv)=1r=$5(I|8ut-zq=49Pd`) z8~2joEpbNJya`VF7rNGeMSZW{CSjZ}wKu}KrUIdx)`jW#z!VpzrtJ6DAGIlZu()-U z8v-h#3p)^)HQ<8Fm#O^;nM6yij9bG9WB^wnUY6mqtgNf8V6HS{0rS zE`I>tXNrqXKL)Z4m3C57|Gx2BuIoAlSoU$jy&0RS4e1ohQdY@w2J2yAzfW~7vDdd= z*pSU{te1~nS(ck;g*(XkEED&*3Pln-MlG;$h2$h{eJ`iRwTp%qSU4-8-yC++fHPbi zFwl_L>BM3`9An&e;b*5b>Bf^2t0i`8eg@|eH^_6k1V1hM>Ka@c>hA6K7_mZG0C3^X ze{I4qL7;0=nb4o3Ar20VF#8V5Sv4<9sCf7K{$g=OV`VB|VL=s@u^RRQ15> z3o51Bn_?dIN|6^mEul7J)t}&jcfAh*F#Dgu33+}bH#od#e>XH9JHGHxS)Wmr$N=4Q zCY)gn-vIwaN8AELYRtRYBpAxQLuYy~fx5vzQ1d+7)TuqFK%uj$z~W8GAttIg(havj zT%Iod=fYU#`c+{@ssUSoHh7f_xqh#X;YKES8=pAOEuJI|OS2f+n#qbwRs<7s;8i zbnqNxAjxOr_rS9yS)h75qt*${USwEY4F}5ZJDqmSz)7HP>z&< zokZd%y3fNxFd5-8AtpHfdeBY*YVH?SgTWT#e{xS!VSdLX|f=%;^8-H40y z9xABkdi3*XUIb5oCRFUKHBPPqn~_TwD9u}bdIw~lo$wox8&@o_s}f7XV&a+vX{Fv& zvr!;i92m5a6DixA-pC8mhTh!1{Q8c|Se^Dtw&&%VbVxCr#(p)OZnyXwrTsWHE zT3$H376D0FQ#9(hXa)VL{?w{nsjQ#2q=QiZZ^1Q{r%ie2T_f>x%tXVJMLRhGv&<5xmL@*Jh!xKK}T15e?&H|?MR{lNu!M3hjN+>T;+2IsdDrHsC z7}MX@a#Ic$XDOo^ssk6*y73r`**RYY;4^>sN^-@c z-#t=FAW+GbDw(MKDY*j|BIKrz{P%dNA=}dXCrh^l9wp|DPFv08AU1r8?MT?~qHQ4YJ$*ZA_2p$@ zQOrY`O(v@KwIhglKz^Fyu7?F-d_2xcVm>kJypSwckOO_FR0Ad)egOIQ%nPgz*KVH` 
zLu7TMm1PXU%7)cc>teH}BbEIju~z&NWWefow>Stf{ND#@+)eE~Yrmyg&y& z2HV>IFB;{)R>ORdM{0Gl6ousS2<4w$Ljtx5rr^IT*8M#I(*HAPM+H2b%A8OL!KNU4 z$LLPs|I{$Qm-5|K`uojC8X?MJQJfBFSU$+%gRudh{-{8H`S(pQP>r>fpL!UW2gps- zU_gw{121g_wtxFs8k-6iF6?@K1f4OD2idg}*PhSF;X$jiV&%TnrF1noF+5ZrgO~+U zzi4TN>~6*brHNXNc#buoz)@3nQ}f0$k0qyZWrS5y+>1Y=faW8N z*%R0DYO;aZz%n5Os;t<+SuIHm0auo_N_h8}6ozQpStfG`hfG zEI#*2&b~RWl_d!zaTCCTMWD?MJjIb|@#+Aa{eoW`#ZMl9v~A>CUo8@BA@Ee@XMc?l z?{YBbMke`c3;SL5J9b~2$$i||7Y-yR&*V4Ftzj$>W(v9&uJVvGw zQ%?d?;J^6MC3Ke%~oU|G-Q({UW=F*k7PFg{G% zOq{^1*B`~7f~NZ0WyxvE?=DG?4~3bfg)dam5M=tTJGt z)2hXhx^+?o=FjB)PA`iMpF)zEAU7cfPMS6N7hj?ont(#JV`}Cv#O(wx_h+SFrq4?Zb4*rC+4c8Z`tSjN6 z5TLZIua?L}mlCoe#z(i?O84vmjRtyMFr%_&hybp;!8GjhPS0Gm_2)hjxaNWLG6)0F z&ajV_o$hylFTN)I)Gm8T;*N(JiY}jM0xX9O>hqMzru;Bx4g_QIs5V_HK?-(3Q;bN0 zkoW<~onB>xqLCNSS?oOr3o1{{(9h6pUd=d=oPbW8rCl}($L5+(4;IGMH5ku!E&3ubR`}~yMP&_*3zk2pg%hyHm zMWt^Yu0E&^fS(1yC0i^_JU(iH#(idPLZQ7bXTW+_6Y}UI;bgxgCe>ITs|1)oty`2U z%u&?cZYFJ?|3*6VT0~kWu~FTDX*^7q14ZAPTPV7Tt}l`CK}9M!)7%5i8yq|~skC6@ z;joRUIw3gTLaPQ996KnwZ>jRZ0mor?RbEGkEcz_=d+fc~;(emvBT9gGvl}Q^nGVy2 zcm8Mpx0P@I=g#S$%Do52em&T~`Z%&j2irWFU-wVUEe;A(2O!G*7t!~d69)393~0{j zl%c9Gs6f!^0ZB;P{P@{8K-qQ+$+s2H`a)leDZQZOg;oIB;L0S%N8ml<+H`})V5zI} z>Ucdm`x}`MO&I5LY@O3H-eCK<-$znEs8Q87!>C+u)flW7W-(UMvSXbzSQZH+bC>3K zJA);Lq1!mBAb_?4ke2(newgh3ZqJ)W)(mi(pHplibhjKg8H0_yUW2U_eW>4k=om>jG0yrFFIq_u=3_KXz4=Y7ACD{(jxLdve{dUG>^Xo4#DOt+B$GyQw1Vjev>%kO``Tp|B*JhGG+p_?V#Ktx2euvC->_VP zqjSsx>MNUXt!s)dR2+tMwH$_gxYv|AIDHIuF?i#f`uEdk2CSG0nmuR)y`g4A!_=Pg zrk)?Zo`!IPA75Ws^1|9``fSP@Bbwk_OmM#jq&6`A+^81_vRHQSIQeUg=gTy+!gVIm z_&8XW&wD6D^oT|pwA9P&ol0BRX*S-A%LNSItjq?wn-~|Qw^vFIzDV(_all`}VZim6 z&1!?PDJ-GiJ8wKn*n=C?SYEzpWTXgbT6Tb9l+iKn6Va}7*KCMuD9EeNO@c`)1{{>7 z((BAY;3TWt*pe{`XZOcxMc_GDMmHji21JLQ9cjg-yxLUE@SN#PE6ck9`tbhw)9jyg zFu;!4@4M3la)#co9v>h9Fh&xSF3(41yt013Np%*v*|3nIL;t>Xx85%-S?6O<_FBEe zx18RTPW5{4A1yuq@CJLVkthPslHL7&_>AWPyS}|(0HKffzEkUix;}@>$gCy7q<-}q z9-0N8=uUbLtJ=g8l*_l?$B413tI6M_aiw%I@v0G(3_;ZS@KCjpaY_DGf}=MP0FgMW z?|p+JFwx^x{D~I<{leccJVj@`z8i)Nt_ixcuKWUZy{V6(YIfjlcPN{d&1s8QJeq?Z zDlNWQI6y3e4l`{nNmOTp^1IujmX#Y!#Wh8UWx3xg04cEq8OmbGr_e#>8Uh_tOJOfN z@!BNG*wi}$ky6L+ET|+(;Cc8D^aaV-1pH$1$zdQ{3lO>tacT=j} zq~(p^pi8}LY_8jh*X>*|_RIn?Bgpc&JBCHktzs6cfv&a&KR-@jqo_(#C?LmfKL7_Y z-G?<0vxf9tqTV{%CLHvH*@h0N=!3d(aDv~D>#FnZeq%w)m5jXZPwMJ%BnX!VD{*W? zk9fFLjlawwuK2ne@AU);?tx^#uaAO{eEJ%uA+;kSeIE7;Dx-q0I@3e_e6Bgg&EIzi z@W=Y)>}@i*Rq-Tgmm_pXpK(}tj*)tf4j)Tw*tx)j^k4gQ?rT4H(Sj1jo$%t^B{Vt0 zc;vE0ng`X^!MVow(6mp@5Lr3E3qyVnaiIL8($z(5iIJs6(02M?TYx2lwzYp3h5b{u zgDLXw{Tv^Du!wk=o0+m_xhM!~0gw0>KbM18je7lDw8eZ4(&00jfZ}npoM3_iY(mU- z4wgN3*I3YS1HOZg7XzN4hM6PhXbz-QwKtqre{9uI4Tf)1+o-GTIE52(B)3jib^ysy zK%C6teQqzX)W|0Wm9DL*tP*Q-zu5YEgV ztP`YN;~@c+Uh8lEtdrt|wmYd7h)Beg_;E`(DgilZrR)p9N3R=z4vOmzJj=jv^uPEOy_SOt05GA(cVMM4(lJ>u z0=NLeQw6s7Y0fcaJA5o@JL$uiHCYaFB-H{5+%X zB`dCrYWWs4oq?i7;6B6-dV}1hld3bY-q~rR0_Z9LWvcbEzw<4s%iDxY8COVM;=W8! z9XYkXF)xSAtZRTt&Av0>2D(uZ2Ka$|=HNy}ZTa>{aBtst!~;r>LC1|xmbm`qp^-BF zbWuW+V-7%On}N)hxIkuGDyjeDFjFjZ+nqZ`d4U+Zl6FG)qpWA&^Mk485QkrKiY+hXyi0EofXW5nlf;Eo#`+YmcVU2^TZBk|itk56)zbASS0Y#2-)L|i zvY4=kK4h>f*w)g%ArAgZxi>MLPQ<#@38%g|8hq|?I|Zw{yQKiwUIBI}BrPZ+X5&{K zf4|pDndrQYX2#O?ASECIvF_6^Mf&oYbpCz(5xB8<(No=UA`t!tjv#6ywmd9Z`jj}fw88~rr#O<5NmJQrCQ!KVEM4ho@-Z{L;P^$u0#wmr(hlqM0y^Bo%0j)P-(oWGFq6^X? 
z#@ejw<8Pn?jB=siske^&+ihmT^sF zZOM0jnMOS_@g35Ltupo_v9`KDDb;wPreK6zKV_GKVRWtOVbscRbuJ?4dp5d->%U4Iuf4`pi^8I2*@GgBwtWobgap1Ka9el zt_~hF>j}3OTR4buoHHqB7*zszdWfV5n8gP#AjmMq*|Z5OYfa%!QOGNms1>e8KcLcO zC3O^ZEQdh$Dq~-cqaWxYc8nK8@Yi=sDoC%Cf;BUtnsHK^-Xsnx`_@&ZMX6UmvlKbQ zT5s@N=f(*1^bFFrU~f#^`YvF95LjKHD(KBf#qaTSDStk(Eo6n{4O7wu(zF zqa9G%*v2%NPSB%e@_$K1fO)Iq!0QBF*5-q&1%Gh02adj|W~u}JLm`c&Z~9eViFPTDd)UfQ%XV#oY8uKO zPyyobGG>CLh4`D6qbQ$88Aa3Qi2_4bSw8(m zp|8PxUWhN-n%SgddVqPObWuPx9D$y90)&eam3+V!5u5{sLjLn+Jm|7))Sf;LgHAiXf$qmS(v*uRpTGNc&`OUaw;IUw6G81zVO#;Yje`7F*o> zTn0&WH_U#Itur_90)=nsl(78WN`+3&R*4EwBM$^W|MbX&A?IyCT@Q2F`F}=m3m~{W{f6Krk03k~L?mIDAE-?P zenA}JK45Oe9`x1__n^V+o(@6(Gkk2Z4xL<`fU4&h^mbv6EZfFU4SDP$dr-eJt`Rt3 zW*`J4SNsa|hQ~U}4_2-wB;0_VxE-rFQ=^gq)DlZW2v0FMp)cA4H2o|AlcM~qEblA> zv0SFa(3N@env%5S0Pd{qCn^FLDYC)m3GDmEFu8FYtzW`cu@#4n`UHf#KiLI!?Vs)J z=L;m|imUr*N&7{?VZo@dWLN4oP0^3tBCogT?qdrqeVdHIdzhc<6+L*oO9sja2_;jY zy7oH03acl|R!mqfg`vD$^5JuH&b9$vbF@ABYG7Cp7C=qXck8x+s_FW~Z6LMu;JpN& zI=mA&2CpIDxK!<);1PmzS01JYNgEB;ZFBW{IKIEamWwQtlHd&mS!$zk<(O497GeoN zgv$M8*E0dMptEExsdgqL?I1oy!@fq{f;U>|h#rHMn~HfK>#1hCBltps;#fsX{13T> zvg*Yzg-LFK~ClHJPG*IDkYP2}P)kvujwK@F0}@KA?_> zjfF+4m<(p*pXAIO_sjaRji2HPfd>!6KB;WJr!l zgV6;pas1PuBM-eYi=O_Ipmu|YHU~oYON<>vqn*9^5;+KFmVTj9&)Ewo}$Hs#Fe%)$WOtEG+KUZwUM~SY zch|R4YQ-C^q=f&&==af@XrZ15SU4OMJ00F#2+)-9LXkL;$ZpLs7sxxN#_~3N^D=}P z2|Qu&>+2lWwGmjzX~f19kh#BvEt4DkB7-2Za??p68Bnay0C5`!2k*%YP%iQEhk{7^ zVLEbH27UYGBT{LdA#!;P&wAf9+`bzx`G&Yi17z|D2F~@p17b&jF|h>m!Ub5+is;YF zQbeXmD*f^4D*{R-kE7qVN_Vh9OI%Zi2Ls7}d40@#38f|41v}$F8wWNHYJC+@mTG-E zV{IVk#w(y9z9)C?wY<@m(!it>O;Gq}L(zY+!s=hM7%B|4n%aW6?N=NTXx>mL5HkV* z{&$)B>^E*{GyY$%)*_Ss`@ULBQr`LIr~Xzo17gL#4&>sggA;eo_1%{=&?9ZNF|({N zMGbt{pDAmZy>fo^-y02EPlC(nYoYB|b`0=7hNk3Sm5=IxR(<`7XVqIEI9G-FCnyx& z$@a9MB_F5&f*&-HiAYoHfqM87bbp{Q2MSW-jju*yAh=(d%Uvw=Wf!;2`w;nW`85d8 zX1A}DqQq4?=!4tSlS-Z#(#pn-`8jhZLR(1=lSR;cPKQA1fk2X3d>g{Rkl? 
z?zdQb%O>>yCuMIEGEcjvd7W-VTClN%-V_LyE*lQSkP#W78#CsNIb+NSu&ZMpGh-fW z<3d4Dgj)1k1aT#<6_wsJLZMjeR=WRP5^ z57voGJ2VhN`dli*da6^kN|m3=vjQdZ?Dpb;S43h6o-!7>fP5W|g?rM!diFs%=Ix=& z%&6r0Z7$KF!$3MAyAAw0no6$lTd)&q$Kcf+pZakOL@IR1SVzdAfaQU@%smJYOV;&` zrPk+dM6FU?R~Py}P&?VecVHi|$0#J<39|49y zL()^6u5weY;f0902>x&>dz1XL^Yn=b{qrdXlp#d|h*1#`61GPV?4=AmXY##>z6&1) zaIXrtPcbSivF2aL?wWUjUH8Yvb}9}M6+fJ>Y|@R1jyrOaObn7sk%Yd=8rc>4 zp~C>LEEoZ0hQ&QQ^OFKU`1Y|AA6mb@>q?-N>j+KF^_Z}<8?_~ZdB9(6?NiEC?> z*=OIxkq0otM{SjS!jimSsZI1{(EYr&lZ}!)k1%!)cx=qgX)<-8PxukzO1_b@* zR3?IWN{^){qMl))!bhI){3qRF{@Qvr#^#?JNb$?-YaL#pBhlCD*Z+82TL{~)8tLPQ zBKYUjsDHaet+Qj55clFS^fCf_=@SKx@sBT2W0cRojWtufhQ;Zs7T5+>3d!rWl{^44 zl3~u24Ue=pcWdJY`M_4pWWze{2o(NVUZ{{M!wLxJnsZ)E{BDZPz7Fc$txw$~#YgMt z!)>1Hz(r4N;*0C@fd3>_jh{OzzToEQKtlS_26*%(vMD0F?gUw#*jQ~SbYe&p4YclH zTU+!Gt_G6e5om8gtx^1FBsq|>=E_xs+}`N{z=lT9O&6FDnX$(d&u^ojgvm__9+L}Q zMaQqOI02;<_+@C4xvph+*}4Z---1X#IrVIs*73RJrbBA*^)P_`LEizPA~LXLwtEVd z(GU;#723A&Tdfsikp*lGlmjEkx`kRPrNSD}Sb-&x37<;f$3ndcdv$hq{NZiEEXa%S z9O$%6?{sgHA(b8ASH?KeCjnu=0Fz|ypI~-a^yj%S+!yihjnT;}7+&ZvYkVffe-exY zXLnbz;-OdNLZ}OMcjgHj-anx5~A#dC6d+c4{H3H#P;z40oLc?`U$qHH62azGt7T8*mxj zDn7smtB>`L5du*>F$*5gtDaDDd>d?bMBOID=XUVR+Soiy_Kg$-R=IZxF_$LD2yJ*Z ztM1;zIP8M!=$KXOm`WjJeE>hO8mA>I?_c6MWujN!B&^#iwgXZUyhPKHFOdxH@6DQ5 z93=3uus~iOq3;)d5_eoVB;(s}ap=q=G1KAas-tXk(p~xK_W1K!Prtpb0p@}a(xR_B zAOHSo|C+WharyuIw*EB`Q$c_NI+UMOGK6%SeBPj;1Ft52^5s2joFy>X0u-AVgXYtm z8@Nd1xchoGHUCnn2<}IQPS;1#JgXereOy@V9=RS30_tPrC&7PMPJ2ukN}UT2S35Ys zDyzuytN65u0e7C}*WFmk7QGkw;a^}&DnB~vR(Nv5XrPvW_U(j@Cl$$8-a^G)Z`F?J zCa)NP5!$^6SU~tf#ygNiu-B8``KDhx;8<=m8Z`I_jl)vT`enRwk%#XI%<21H22(1ur9f>*1|@;@Af_B|_2mkiLT7Z{gg#n`v;jvU}zB=aM?FAXG$OhL}K% z(zib@m+UaX3UEa%pe#q7~Tu}i)KA`1(UMicy` z8E;=MTLHvg^Sh3a*SYI0^Ht!NJnt%2p{DSWsHD2HXW@Xc6H0cOWg zwg4NV=lbFeEu)r;edMcvQLM}BE^gz~gCLi&zdF!DY0@Fy5`;h&*%*&rkBx&cc2{62 zv&&;QNN}>uz5xn%HDey^{&q>(r$3&1tz+rQ7xana2`=Jz4Pel5UXv?O_%@2BSykh& zE3_Il2;~n7BdKnOvgy38A!e^SU_BN6+LP@5E!?l=t*^*v(YClWQVkKGK)BP8>$(t{ z&k~BlTY7-w^hyEoh~=nn8FAH<)`qSm>(~NAS+%vSE*e6ZuGLUByOqhT(80pST&`vT z-3f0q-|-KQ&R8vwN{?>T#bDaqKj_L&CJ@gAx*HLL>b%MCzKL>?UFk4@a#)Up%S0USm{{PVR76G6KGiXPaHSCwK1*w?!ezV@Ks?oyV>7izS>%Bc*OPrXU@~ptt`TU}v)NF>x02Xfd0J00QuQ(Jo*`V5OAPNn) zugiR5)ni5do$+WvG)8@kc(EE{}AJAm_=#s9gNurt4Fw@>$l6WbxD3yPW^ZVe48HmIW;VAIL%8hAns?6%nXQhMS|v z^+s2N{>4?wvF5G)f^0F>bF+=Xk!Is}-g~$c4XuK&>YXPCg;i588l0k^9>jijhehNj zvwn>6I{Qs|&ZC#d`>uo8XeMWR#}{@l+@y@ch#C3>QW6n$nu>S(blPdV#xQkgS@{& zA4s<<&Nn3kz0prAz*$v?xjX46F;|=UCT>B~D@1lrTuHDK5K4hl)Xzu?8w#AqCK2r% zOX|l^1LK%r`m556Cb;Se4IU8#>N|1(yG;7f zH8pGXz_u#&+-KZB^xKAo{j#bN5J-=A?r3x@1!H@0vzP%a-q-(OA8j-2BuIN~doaPZ zohqG!-c{F@BC)Hv1mUNu-n{D3b%L}yId=p6=XH1Z5Q6u%;e@ARHMb=GWVehkPFcK` zg^6z7u74WBcpAOlc_h%3j zeP{`ypDouCfI*G>>wj{R6Y@&BAM_PvcfNpr@Cf^u9Ejd!lvN|f~ndj%Hy-TE3uG}G*2lX>tK>-5#* zM9xu`P_*G`|Cr+-Vn7jFkW!1t`K=c*-=zjab>CH$S|JC5b z;`nydfSPNrUKA~XS|s|6UVFzBC(qCr%<%zr%Vpwf39zRIj`jiBs6bjt~tY@ z!w`zhh@9wPNi*1x4&lxa#?VoB{VLpq1NNRnT>(j_<~Vh}(UdQOr%E_pN4G7Vls=B{ z+w9W@FXkHZ^bP88Xz&-GhvaZt54bH33>c}&X-}Tt+-z$#))Z7)~%5hz5 zCXFx*tRI3J!)?eY_)yD9okO1o8`0--tY!tYxmD3o!27bF?gQNj9L52ZuWLbA;2bkp zyfumxfI-1mmyNE_&kTB+`yBbgcz1wwKKnoww#l_d47nM4>T)SDKm{tmTQuvFlLBG{ zKqDLv7-(mCO=p4eE~agbV;uA=AxHY^*E3a-^}}|Yu$nO%EUExX=^x~=banz0C?NbU zm8Q5U_}v4QC>^N=DX!oQvz8)9!Bgnnmo-ugPT;cRI;GFtyn?DGqew!a<*pH~7jZsp zOnia`qeTeDCoYJ9$XmR?dYmjFMn0GvIV+elQOLkDTE3M-_zE-$$}GZ?+N zFd@HrL!37UDVA{H{TGRaj+T9lX^}D?`9HBq^N-)V=s$k%R!ab?=(A7#*1^{brawe* z;(imE&R2itm&la6PL101^}F7!&H#eAt1PH=Bx@2>0*__JGX^yR0nviyi*sr4_UqM5 z*R}t7&d{J)fSg)erUT3=N52^lVbee%bXSqvPl0Y|=|iM?MvHAW9|_FBeF%zHVY 
zOK^G3-GJ{uU(=N-6~nuyxLhVnDI&_wX86i+`utKzM04fVS?9<&rT7W}5 zDrBxhP;tO0hqfm{TRG5eABqRTc{)F(xS_da1g+^!=DIpo=eQ*cnK8M zUYBdNMH*F>7KhPiRZW{XU-xmRF#wDAi|-EqI9g9D5f#O_GMwt~havKci-xN(a`rGe z0!G_R#ML=PAQC$31;elOR8S)*IZeyk#>EuxY3pMIF%kiV2IXV-;bu`sK#1ttDs!*C za$BID8gy?;W!aa9x8jKDHv)XJ;cK%FQ~ZqDJmr!vFo_Gi;~K>O!vc>0ln6S5bFNtc z0%Tm+)MFTCK(1B8A{L4|kY@{-xX}`g^D%BvthyZs^WX^S(cb`E0CL;<#jRGrWM-zs zQj5j82|O_ua0pqk2O4#KO8{T#X!PbG1lV;eRzx^)IUn|ZR|A`KNHQyVHa~>p`TY3& zt;54rLVC@*=spZjght}m9vp`;mv{4=#;hznNTj9p^%+WVr=XcoGxus$8KJ#LLWIMnZK(WZ18Bplf&AMFWR-^^Xb-Lr#` zkmaOr4FfJ2Ox>O#^=5zakgz&te?Yyt*NK3ay#hBRC6(!A{xZAH zI=Zn$u({c`yX`pJonXR-w+>i2lFi5DcK=WoG%P_SYoL3ah#PY=L3$sz4oE_wUC7KI ziMPpKQ?>&q$`~kSbI^iWsY(^Or~$>L?i1Gz!Hs9#_|_luF{DB*$E(pKXUvofr~-BJ z0=b2I2wxWo9wL^45-eCu+FKr$u6$hY2>fneUJ>olwBRJ^+_!#yrd9#4j_{rU()GN~ z8U;lF#(0=g3!Kv;0QL#!LLH(7ARGLGkmZy;OJmO$lIg#rjM#6Zy>1-v2;_H&%X~GB zVp9fmN2tLzlwei>6c6xH0Y{dJdQ5(USpTC~uJmmVCEOs>gd=?m*+ zeVtc5fh+}@{#Q8i1m;Y!y^{oHY%Jz#J(n9My(P0UYt-$wj)lQwwUqJ-M4BORzvQ#- z2k18&5X=%YM_cDrGK(zr5%)cyu6?Ts7#&horhL~xxTSzDD$~GFo!$@dBtSNQbBjiD zSW}fT>&69>SVBD!s5sD)U_g=fRY4qj$Hi^F$_}k1aad&+&2$_UmI3fDsquXaJdel9r1-OXgTOieg zVm#3B47=|x?59j{D|%Pa%*wP6-nk8g10|{2H^;mA{s6>k1uPzZSE`5a&6G3%Eu`?N z7g;~U&`tU3K?=-UW?x$C=50X29trPs3A0XekMtfE;;}-@=qhVM38k)v7IwP*TWFto zGLVdgK|Rg*N$*YU$M@IhbYA++H|-lsWAERGNLXFKKY_O7P2j5f93kQlp^!;(}~<4Q%7XL)d^Qph&Kk7 z7~uh{1H|XB8x5JWfGc=W&*r+ONRuJbJO|42GV$9Th`)QbfQj%4)7iXT(#kymlWIU% z>N`3h4^{(wb9t^=z+;a9ApQ6*0s6yK#n#l5-aB1lv*@#a5RfnOQtL*PF7@o&!V+f`P>gPzyXl)4qo^z9*)vPlI8?+U&r~*vsL!xuyyVKsZQ@ z($l7twe#RR#f4aFvGVbRw7GQ88X-qRJmYGmOA26`4c(<0*(@;=a3R`5lHmXDQ;-Av zf=E0B<{B9Ht0EbnJruwEj1zNDoV2Te;@5WrQS*;EXq)nt3OaAB@I7U`4!v@-dQh-; zw*tkyx-XdC{&ML&{_8f2(*NhyF@Jx}dWv^^BJug`K3i&Wa0Y){v%WF|FyF#E;{cQH zg5smprh2Aj-G@^Uhi@#bT!YF+esFl6UfQp`T^ifLda?}&UDbOG=PU&~z;%e{MYj!N zCYt2phdlmOtEYb83`~b)249ZDdmO_U7Y7CnJUP2B_Pf%{J|&dPW zec;pzOOg8Hur^$a!smIdkbaZN`De7gEU1a3;iV{JP1kKc^w^jXFx*hM5yLcMDE8uj zhV&sPUjz7n4S%YoEZ}hit>_59881b86N9E+RTBIOx6^fcV3~{WlGQ#kd|pv1Biv{K z>4y?dp2j!_92x_9xN0*%PjRoB4VY|bZ8`wu<>kQpf;OW_+H!Y-5A4cCG1btYiUEi# zsCm>nvGJDaX^0NoVCgEui*Mc+F-xFr^DzkwDyffRf5ss9NIJ)+UN?Wi3(sD%C({+@ z9D*ku>T^;sq~kJ(7{zVL38fUKo|%XI62T*)>s^F%WuOz8j>B)5IcY~)pRhrikGpn@ z;0K==IUvPT5770rMtEO??dj@Z>R1*=3t_JUfV=eq(*#(2Onyzg0%%`DBOp=vfKC{w zSVh7LN|o(1_cNdfT7`tz%`G$!6y^=d#56C5^s;Y{yk|A|oC3eSE9QX!q2qb3VA`=? 
z`&iZ-S;nm0JkuU_E6ce#>Is-*g&AkpH-gd!@I$oIz6-;6)C!|}0XoDys&=UN8hcB% zuO<2^L{Pwm9P%>7gF-~TmYl#-0U0n~JJ2|oudAL7KME1k&HfyT!!;t68Op?82dEFK z-T?W={OE^4l6x|p6hrC z_%#YQ-Z(3vEZFTX{GH zQ^Sr1-^-frppU8Xeg7g&c@2RQbm{i0EveBIdjTSUziJ%btVK*=ly7eq0Z&o9r?QUv zVZwoiRjumeC|L)p7lb6m#s_~X12aodD7=bDXMO#NO!BjIgYg&e^Zf4MW&%+x{Li=o zY@0xmKspbFN-sa&{)o1+z)!HuhmUonXA2)B2yT1%BFtmf`rm4Sb&-nro`a-Va# zz1be2CALs4Jintd^WzcEA!sqFzvv*JE4se@W`X=QXlpLBIlRw*6k&hEyZ@!-;(ckm z#=QJ>KS8MT+Xy?MyPNP!gzcEKWtH((1PdE8a!9YC)0c~mPv4kn?@x4D_=iN2#|+=v z!@e3lpfvA4(fM#js8@>w-l7C++?gQn$&M^dli**-9&HbpCFlU*E#t6kyx*E01o~Oa zg<-YJ!b`fp4u{}o2;I_9Kt4}`bU7pPc^JDB0uN}~Hh#q{rUumooFLBD~Rq$Q5y{*pNHO~cHQpXn*V39Mdk6t5c&qA9@N zcs2SNXgG=BzhOoH4l`X=0T*w5Dc6NUq%i#RiETRomAl0UV%LF(l?W(r&=ZfJqHd<` zKwc$Z!q-^>1z}*-y=K1BaAMRjrT9mQg6ZzwECZ;o-%wdj+|5M;`PBElAjJgV9*z#W&f)D-Aq3@D3@#Eo#=?F83pX(KJeHb)PxwHBr>+Ku-)F}F z%OnAdUzb{a#8jv{dhV*eVWbD}3d9yR5bB)n1N1 zOj#1_U}#kO{8IB}nKN9S=Drvz!gW)Efi2cePIEoCE$-yH?|6brEx_~+D?95qo>%4# z=KM^<54izZEuz)GTv=yF0@uwT8bD{g^e`7l|J1dv_!lprDDeOux=0xcMIcMAxA z?+MHY)s8$u4@o0~#wxsk4yJ%``e4V~yCcmy7uKI4NbtrGAb_en=Hx#e2cI_H*}ws% zF?2M5S_~n+ilDBSIObW`l!_4^I`dx=I=660$(hZsr@tYfV_q?`qj4jpJN^P`+FLp2 zQ}+8c`AV4;zFy1ZXn9I&dPKF4FDHAtF(zck5OCFaFTZeI0Tdwz7LutqyjVqo4NZ_> zU?2mt!nL)BT9U9r%}QWyH-vZ~Ftm*cZQuyYEYOQxdvu~RN{M@jz{Kf0R4V~Pm`&0U zE+QX&?P-~7*u6H%Cnte!(X`$J1~@u({Z6h7r@(sU_6CxH2941~dcX+MuI#m3NIRAc z9T|4p>uDnuK`m;B%S1Rd$Ggdt$p-A$rIDu;>9l$F2mn!abZGg zw?Cs^sUMS7nrDx})HtOU^ZM&sUzob~vJkgmsP%n)0&o#&ul46?iudA7MpoCn^PdSuz$tt%CKW?e- z)13up42%1;_1;;FN1>UJvkeP&`In=o$8=Us5am* zbvYfcWy%lFNYYP|3}Frsu;@(9ue$rGoPv0e@?btR)R*T9U_n8F#T=lG;9jrTr`vi} z^@(6?q=JNisIH!oy0`Dd3WG;^+rEv|jbFobg=@*pJ3-wT7W5Kl7JfgtrcN)WCOD`g znbcVf^MLO8brQ}@Ceb~><>wWdzG-=MjPtE8_{@}*$=dOL2Wy>oc_wv8pWr!)!hW(v zCZbB=<(7imY(&t*oAc(>sm6oBwFZLspB_bnJk8PptWX{F+(J!OdlM>q08E6Vuz)N6 zDZT-paRyKlJaqTvKrhguc4U=%Jb_-x%SI;^-oq%KmBV`>_X6B!Z?Llcd=1Fc?iBC# zGpPzwpD=j}3Uct#{do}D29!<3+fyr&uvsvYzotd>a-ACH^erj!E8}eSUN&#e8^>?B zA2}#}5?~3*@`lNBj7cPuX1*i#2XCHtk2qqpWu1dw_r@)J3%7h)nD^qmIsYX z-Ed_dbB?aAz>yFzAth25NV$OXYD)1xIW5xpEf_ zedH6Qu(a{RzxbZ#@wj~<=zkiL3?9S}<4r2M^CdXS-MZb0qaDYvbKr~!l5nzD2xRcU z3Ffpf!x06Jh#>59@4Ex$%8Z|02vp!TsJ~mqQ69S?@nLCl&+%af_FeE*NaTNv+~vbM zY*?k4B76}5?6O<|W`IQTV<3kCU&oqDcbV5UU{m;5gfl&fA28KAWZIb3K_W_6Ui;RY z4Wq|r&4LQdbw?0eU}tec8Es9z)pYe*g){*sU%{k5T_rq2 zAD(+5M7sHKjh;kuhOgUU>^k#NiJ&Y7tUIg0X8fS;%X z4@=W784$$c-00usfP(?CfkjO+cdyb0jE^K*8{SiVU?6)^o1dcAY@P?qi3yqy{F4#t z7OkgJU|_DO-ElY5CLm7D^8i0=NmphkhdStBH4V%Xq>*FnAD^(cGYg@0bK^{XUJ$)U9iv}2~6~j;_c3?!>Zm9fhP>!9Rgc711HMPSBd)5@?<-6jC z>d=AF8WG~A7ndnSl~#s%bzdNiUJ)N-CosSAim%$zcFcCwZtJzhKukav6uR8&U={AS z@ske)m(DUapI;cj_wX1NZ+8@b#h^{1uZVlPuC!r!opec11;{|L>tb%i1MW~EdViG# z=ywh`gQ2F)GPB3v>EY>y(7*+=ZqLpW7C@C*O;(QwZ4q;q%TH{sq5_37bpRrg@?Dci zxPwq&<_tq$`D$L`26u4IpeSFa1!2*JR z3R6jKRA+ef9E>spcmwoTpD@Px{aJr@truzDy(?V8RJZ_yXPL=byBD(dLNzN=LnymY z`D=fx^eg+Y6_l$Ki18WSW7XRTwgC7WKDgF?!jhMYkb7~&d7*x^Q%51#VRiXdTo0P{7#Mh6bcXboOE zsK2cWal7uN@{P|vnKCz6)-9Guv)Xtfk{xaGu;ttb<1Fmu=#}_dHyTQK{Uf`D&H3Sj zRovU*X>BFQ5DiO!GOd8}6nd>7U?TS`!XOR92brPdc!!bP$)s?Bi!K%v;cUguh`}i{ z3DZ_jQZbEtcih$%#_`#dgv@MrJQ%pl1(ap;re-4%uPvo~Mk2Uj=tc5gnQNLUr%;-k zh>J3SU5p#AP(WKsYqa>9)Ae(agW7@Nf1qlrp`(K&Q-ljJ7fRU12w{zAgW#Ka=rn(J z$frh}_fbwHJe1oG(Bgr8;aD4nbIoh9kt;UIhBwJ5z~FcufP=Q6G=hXh&SJ;Ga(6}{ zoGGw-o&5M}{+g&=&AQi&?v*n1pUTO;I?NgZCEPr0FcW#B_fe#WFIR9VapExk!U$nk zV|w>mB0_s514$0AR0iKlZcN7YTHFrslmoJF-1;9kJfksz84k6c1h*L`n?xFteu$Sk z<8X`*eLS2q**qLa3Ii^bu79kl4ho&-Tb769GYbmS=6(ONB{JDlc)Ukw$6tF>@nI)- zG1(jAT?Hq2BzOfex4$J_qSyo>0IkE*&UNSHup$X96ECDk^|?v?lSdQ%cE|YrJg)Ar zxVBM-lkXF6Y74$#Sg8JtLB0Zq{I93E8>0fk!E5W7Ju9t$m{l8Ua)BbUclmbs^jop9 
z2R$TpBNHgAEwP}D-6f-`kgdNEot){NIltLNHOWBH`uqeU&5qwq<4()x<@d{W8OglF%J5Mnz`I_&;SNjz{g7h9pf0qVB4I@K9f9ijd%_iKv}MOnZQf2z?LhJaZsQp35{zH-V3Udg*o2qib0ut zl4UUfN$5u{0C|CevJ!zrUvNl+FTmYxM0Cj~t-AyFpHUQx)SFjGZxbN^qXkub;(UT7 z@trs1jQbg^?~3@zSW&_Yh|}l$fw?746m9YJb{N*cT?^99vIi2XRFr!JZ4bL95u*GT ztM!6w!dx)`?;l>*w>lZB2UG6DmggY<%Ps0I;yRYkES_Y+3p*>A2g61QRH{~2pJPJ` z4Bokxr>XfcOt2-lft6#xO@khh26lSO<&MT2|+JIoP0%`N_*v zzs$GO&r#d2K~)QcQp?iUHdp$Yg!F@ffye$2PB>d32q1I(#OTNL7q2}0T=w5=-ntRK z=X>0KX{7Mc=1 zSKeXBwfNFt%KZpWp>6;xW`P^hpnV?HM{?5jw1WE(jITHET7Q-G;CXVLAajF%)f4=Y zO5%QvJM|$!d}2czxPf^Zak}v$RY0Gu5Aed}DJWs6E^wq+kc!!Sjg}7EPCdr=$LH(8 zSkdiq%lO0j0^zwf)ZEWHdKJ0G_CZDtZ%sgCtR_7@fgH+0ws$iXBlvhMJ#d|!YO7iDPS6%SmC*mfJa(`1y1T#?RH_Qn%r z{9Gp3ryEn?TVt2)#CGY*m9(>ed;0F`(CAjy6NpBHeH+?&ys58o%IZbBZEp~Xv9QZC zz-x$Ra5Hw+OdQHcvTgP7K8XVLUt@-Jl6eB8lA73jJ|xzvlU{naGs*6&JZj5%#m8YR zV{LV{;oC<*WamDSq(AVT9@RROak}yj;RQ3O>IFLmy~8f9c@MUt0u}gQZ%i&*Jft4WVbC--X zaFebn<}brF5U@`bw7;LfqJXM_?43vo+(XxG^ryMVHdZriDF}S!5WxN?8VLnB|JQ;GYn+^*Le1UK{oWgkQ@Nu>ZL z6r1%3-Um;DLXMGEv04i3KJ?%1{i01EO$;kPzOViNA+9U}i~zd7Kl1as1Howu1nB37 zvtU&gYGf`?xcvpVuY_dk7q;XN`mRH8z!>?cy?4aoU7TMYfR9BIv_$tUiBm zFYloS%2=}P?JaXpS|JfiUOIha4LU1*biEM|V9Qiap_s4ykn9D}YDBtyy6pjmY*_rEvSI7Vy;_zZ85G2&!71b zX6U8Nwc`k1OqSRRn$qX|hU{(aVeMqDYVrBPoXy%iYJ~@8nD7!gE zjr%Y~NxGJY?+}t|<^SYgH+1UhpxXH33vcc)y|%KIuv0=dqYTEC zuRs<4dtCyUFh#NYIR;K#zM(rGn0il`Ld;d9A*t8iFwBMn&7`e`nvH`qQhmL-D+7D+ z-z^9-W&lmT1e_y{f9r)`!L;&a?L-j4TYlT;N?%NEn7_47xjHfOP#oB2BXFZ|!&A%ut{qHYbjmb?y=3$W$PLUJcc(G}CNf_Su$Rfy z4x0NIEm@F)SJ?YmKfcVfY#$nxJg*X>@t95r+q?ZkgZ}=|X)3C}hVu9v+2*M|=em68 zaxRebJmFz)_hRB2EwT>;$0VJk@U=7f@QP%pnSg1Xr>MshF#gjtBDiPW)7Yu;|vEAK*3Gu zJsEy;?jlU_u&xUgwnV`dHx-zvA8Wo_=H`-yyckU&F0WRBX(#zGlq`&AoZ%9=9dZM6 zC3X`6F=pzH*zh3lJ`mVT;emo?Rl%+5~>44#D_59&y zLX`?%W&q*2!sO%cHeZ|-fl98~s-T-9R~$j0lWih4VWZp?(v2CJpXROEP~$E>Z+_~- z>`*tld(Ef7a%GnY=<&Cm*7Fn)ml}iIbK7QS0N3(J+n-V^?RNqinwnn<EH!=xkqkiSvubNN*$Bb1 zURtTt#e54@4wmBIyD!vdH=O6lnD(T;_UM@KRyF|+h`M%y=d9^MRwXz9YWEDzGZHQh z#tMotRO0&~IOXr^y#Vd}OvbYLdGwjkCsJI1H2`KmD=|34jMsKma{I#T7GvJwu!_*KG+F zh8QU~OTMRM!tx*HHu=sYe2z3xTCnXgIke^?ZILaCEh5^iSqEG>0A3U$q6qf;1IoCF ztpFO(d6!$d`1_kQh)SXS7uqQ^jPRROUrEvR4kYcHA@*aYAXOx|Wuzy~VS|xg?=7DR zEv$|eZR&i3^ZgTM4VF(Ea_q{_m{)n2IjxLjhl|z zIr%|bW_bV$G>WHZCqLU8z$L`U3DcSj*tEJnk1>I-zGyRB_B5zmoNig%(`FS ze{HOFg+~){9}>JfxT^mAc%%*1tPigI&O|nU7|pzwK6%_+ZP92BM*W?$ms8Ai2L}iT z(Vq|(2FXxW7(MIiW6Bl5W@Wv=0HJDGidT;oy9DY{I2B9)0zFki;FE#qmzPIblD6t? zGQ6Gxa2}YK<#(??cPN^(lj5AWX`#)#blh?nnk=}$#h7v_BmxRtx!B-ESz(;k2V7&? 
z=_yV!mq4pTfq1CX7_86m&T=xd#=G%2n2$xDV;LBC04Ut#WKT`bjoIuex^QzdPix|G zGXl1Kc{hz)u!G7niijKR68ESwqbo{*#@SJC(Ad_ys&#N|rTJPWf-GAp#e)X&0`xW4zoc(Y8 zZ~yu~^Iz!hpZu*q{QH0BcYar9{ulXQ!vBLm`j7wUcbNa<^+)gjO8Ue9>A!yen}6f= zZ~r&{@*n=r|NQ&^;Xlh)2=^ah#_#@L+vgt~fAGKmkN?B;-~KEA>wozN|Nfu+{lEJU z{^8&GgFpW7iog5E;U5Qo{Gb1y{eSYG{R{v7U;FES?N@*0_kQ(H{TsjfonQUcf9?1F z=0E?R{?jq?H~-5dG1toS6y{g)wAho|KpFzfiecJ4;6ujtX<3goNLhFI$HEfq$f$N>XsP{{HjrFZ(fT&-B=rty2z|BA7P_(%6Xn-_9LEe zG)IM=`oV|PTPe>(yTT+-1Cm|W@7EsBx|yR2YfX2FBhJsr!)wnlyXsc>&o5zCg;C*0AFqObC z_?PQ>ern^|V82H4yEiT@luklXzs`}Mo>Cl!t|!?a$yUzTD6V^c!#F^O=!$X%ce7#0 zmj%QN$@2qsR`ldIt7poeg|p^h7fA;0h{x<&#dk`d$O+J6?@qYW8*GsV(};wMI&MDU zX+rU9mKY>?o*!zp8;+~>#_=2 z+M`KJiE}IiCzHUIvz;bGkD%m-b|zn5xuZ{3M!&w1W!K0JckP0t@plnLfumzv|4cw5 z{9-9P72TvAFEWv@YHW?!bt~@KI1E}Cb%tuz#N(;yr6TfOm>_uM zoKLRZyq!wM@sdTf*cXpN4Gtk#jZ%?J!=-bU_8>+}BWScz4PfUEhZ_zX z@-^~CMSi8p`m6maGaj3x5qYnK&ve|I@3J%7pi%E^ZblWPvoD{GnN& zsS~l9u8ycq>@ZqTdtanm8NCLE$@Q#qJ+#o=`>l9ii~htem|95DL;ijDVe5oaJg>~5 z$L_ogyJ2Jf=?@@*jJ1)R0_%%u=Q}6@==tekw5WrHP($zW8z~z%u2iio5w;L@#l(&vlmJ{@)OO~=DGPjky2;pl_aOtwVaffhI{oW{=4LG7z*K^C~WN2I@Z!g9hAYTQ#QCi zAXVantA(dCrW-Ory_Xo9^(>CP1u>!`HAL$)oi_nK+{XF z+=O7PP$vp~Hg}iUz!)X$E#0z13EhZhu=n-088 zahfRB*T~nnFXT!7U0iwK(bRXp0ypuZCWnoeLTt&~CaUqw3ZX11kBQ;$>Bd~AJ~j=t z8gh_`*{@1-UU%A=r9CCdcZ!DEYsq4vLI}}iev3@EDO|K$KeWS8NJKJ;EGMMKwsWlZ z8Xh@|wjqV2TJe+82QC-q$4l-X=RD7pZ#2bny&{RLt8rKx_GME^RysT0A=2ChHjIb0 z?euu3svwm#gYt zFj9cmlatOR3O{V>Ih?Wr9;q|&u?fct!F?U|yKC9OTaRp+XC4uE5KmS^$;FI?mdAy; z97W+l#d|w_8Mr^{+w}=0M@n--nl8|8wfp^IV~J&L*;GYCe+E(vxjwq@G#o#o|H$E~ zmnCMqeBI-0tBP@j${V&&WJNI}bS&O?SJc18~40t7s?AW#px4$(m~ z7R|PUG-fC=@rYW?XAJt9*~WmUSv*}jzno7M^u)0v`o8+;gTfw$t;K6>`zKm~hWqt$<%UL!LlW`8PY% zd5N6?mNC!Hz0^4-SMbT=ZaxEKEA{QT(ELu!2v+2hJ~6U4CBpQoGuuZ~(!n1JmnvfW z#@|%w;e;K+zfuErjz4Th9wd{z*RbG6GpZ|<{7I?MCNPS_gr|Pjn?&j5(uHYd@5bbI z?MCt7j*i}*=ymdU3n9M5T2szjj6qbhR8_$D46?>jF0)f=wIOxMV>su()6l!M2Iacc zvUuW^;M)pCHAp90RAt36n{y5tbbDOc0esXl>dXV}3i8^5*J zpo71+^7~r46QdeA^Vx10;GOja1>NOnfT@g_v4PB8Q%hGQfM9Smg0I*RyEIh{Kd;$uS z1)Or>@LhYZozR62vWA;v?8GXcJ75nGuA<~05FGIO;t-D4TBHICP+pZ}@qGR+(<^%? 
z&WKmVdNm|TbOaP|ao^sf!VZ^X+8~8wLK814ZivgLIQp{sU*9LW`o^9bRBXxNPpTI(Vey6WO5M)ha|n=;9YOR=V|2iuBaxMCp!v{EudwYn16p~-uml_r38;lkpL|3 zF&;T^K)fftj|*z-R`MU|L@+&mB-3k&>q3DTyQ%v=V~UioOT7u$bTt|d=eTFiOZQ(} zT1173%)7i#mj6xE6(c-*8(QR1zFGafi-xy)vE4hAX6K9XtyUVQTBn`lcgEoyuAIU; z;fRW|v1-wdM_U$ktBXM`B~57E8``s#Kb9&TW&DF=3~;)F9=dp>ja9FA*F|vo|H|Y1 zx#pjfuVcS|``cJGd#|Pz5R5})*vW$B_X4<{iHTRQa|ZuvEPqQm4h-IFXmIvJ;07vq zUj5FXw?7t=MhU%Ve!uXc^TSupZ~R62ZCid!e%R2I0}#N4*J`pt)pE_*?h4O~Z-hyI zlo>fCkw2-(nWmj5`0x~F!&@wG6m^U}PTpQ-CPHF&FzhBuz3dw26Ocmf!V_bkzfX*B zXDF~pCjWiNH%(#9`G&6>`pN0UkDkgXgt3uwd2U}Ci1HW9o{@(uLKQXCPcFrmPuq1U6xIv%MI)-f^ z);)m`EYs)f68q}Xa79Ei-%n&&+mvJPL_wT2)I{=9a%Wx4VYvwep0gd+^^ng|1c@it zKQ^^2?}RhTMne|q4GcQm=&HNg9`hUAjQvS7JS|^t3Ju3?UYSP7UzaTN@uk3PkPo5G zyVkqV8T&QhH6M9XX=<6U{0Fj#oUoH=iDXyfTOD)npGDtG`>`1y(-=BH^;Y^sPnsO2 znXjQLW}_X?I!;0guZJg0Oz%c;jT0s#X`W~of~&|8#_3n%{lIql3G1jcE-#EjD`jYgMdBnB6}@!PQ=UFsHJFJWN+y55@W&Oorb_-6*BXPEHUYPd-rQ} z$X0JuaZ~X2Qx-RN2rG_Pk;K&p50GY`cR$_H64hBQyI$2+)-d~l3_$h>Ibf_u)Q`(3I5z7WF>-q)j!}{3B0op|Zk(XqH$1?K}X{_tjkFZb<*Jy%2ek8ws zHKBz|!oWME@6q>2ErJ&SHx{V1(={b~$&IKb`S zu>#h0Ile2zaMewBcl-bNqclDfjXPbOEJ4UcDWVKYX!&#Z~RA`99Wx;6=NSKA+6LGj9=9(MMXVR>$s!n5vRx253A z&fje%8t1?IeTfMA!jHAk_xk*sdpN|%?p=5#K6V@ZZWJTdMzkCjD7hc%Y*=DwZs{sH zS?bt%C1>Y{6-F{b=JDFIFov$@Xe3<+IWzLe1m zxR#jc=8hB?>c6M|zx(hTKee=O)Qv0@wT`XYMsH?&RwH(H&tA`mXP`E=GkB6&flw7o z+8K~Es1-6K$I_R$@p&G5Z*~1;3Rp=X!RAjMwz!6%ov5?G=bQV*QU{r2SEi)6Wgkzu zQt%MN`Hx+bl&+&<9?IES?rQPidNsM8$Q}2i>-y(x^Cfol@o%xw>%Y0cfUs3bYN?Zr z8p^i!ax<+@A4t+ue0_v3dK@y*eHG=fjg!IIY*;jX4Q=t8@EmuE8O8`XbZRnSKH#Hd zge28bcegz>QMJMbKYQ+KaX76?QPN)yCK!cAuu!F?7+ZkFB_l)=J~s7xypFtvEQqUW zHf%tpotdGQOID!7b+;Ud?gDA*HQ(f0ofa{;OS(HzL!?xO1;j`Wo|dww4@tos@yuBw zIFi>sKz#35NLS(?mtzJyZuyVF1`EuyRjpNWxX%^Z5Co)mRf=hHA0?>f9elGSD2{u$qz;|>#wcfap(%7Yc0elAy~b{`W3SXvBKLV;&uG4lgmJlh-^CC> zRC(JTn(vNp9a5ZDe-vR6Mp@A=fL6y;mZ0~ z^zd6iz;5;`7y+krRAIq>Q?5_qAPd4w(?aTNHM72)o(Z}1_@r;wawe>;9%LL9!8*!b-C_c@FxX)7!%!YR z7q2AdxSd`&(LT6zXfgCPPE^(JU~N?B*Qc@6xVowg6|IsS=wR`x1Mcg=qVi>v`XGm=^fJ6LSytMFVRXJyza()N>Y7?yCvcZu$6RcVkm zAIVIE`dX*GS;`*9HNsVE#VrU*_(%KQFxt<9++ex+5dE@9Hwy^;ZkK)yIqKXgk@7_3 z1c?y1JgCVKGiB1dY>+UNhf-jRmf<^;NG9puUrd};ZZA>CsO-XsIwT*!h^OZ)pV<-Ri+&OY@xwkke%j+ zPc6GLV3^87(eODp&O!pMwAnB))P+5>dRoX=0qvU@E#2Wgb)Gb1{rSheUTr3$vs^`O zgY7V;>)r8p>3m%@H*0_OjC1FCw19$eJ`8{{^#cAG%`(avy>En-uJzmmTW9X8jhv|2 z&z12;(r(X7DAFK_lMIAMmZ-gdU=o?R5 zh~MJ4iT5J(olaEPx3|T+cGw(|fp*8v7}edKczPQc2N{lq%K05Kz5tXZPblgKMt@tU zBr`Uk{2ZCw`i*i;sKz+aW13>`l^lnCjj_-i>0{alaub~|XA2K~@2Vta$A8ycYem4< z5U0oYTjJBCA18{J!ORjL)bl_1law&Z{PF|snR#V?u8r7-Ri4R8C#e_?eI`{E8_kku zbtZ}*b+;}WXB4ZZqy24X!j(1ms!MqOsN6h1hu1vId)(<+VY<|*m{}&>XJe@Xz zC^T?Do$m8nW#f;ZXXnUbrKN@%G`90(d^F7L*e^_0eN`{3K1)x+^`cKJ&%6~k_r#88 zX!9}0>OfyF)TSHCGyG2Pw<+%<{K*f}_0KRDY=_L>tEt~ejGD7~jsr@3GGS10mH?BW zfE%a?utHSxpuVep+Ny12!UcU7Z^;O#&$+Fp5O_%s-MA0Vs{9VpkGSZczQlh&h2YT4 zolw*qOtzSm);!W4WgVNeckwm(ap99}en>f1tP_|&tuD&KqLZ4<^TO-Y-u08g_Bi)w zh-TUs0dwt5K_>!M2#vUQNJry1vps=|m=-LP*6Q4yJm#J)ht*^)e7pAIhGtsfhvO}c zrP3q&wP9B9KfHl{Ss*L^898qtC8E)yv8ZR(pvY{R{BmzYa){rUOzx*r>Fub>T1 zn69HIgC1)@wifIw3;wm$J12j)x+9N5W!xwoYs9~qcUY=f73$%ar}*l(t&`v2k~F&q zOu%Ap@z(lSsnATkK%HxRPR$8j83&|BV0+~dF=959V;PW>zEe|>S;nJZ_9Q)d(Jk1` zTG*yCY~rB(ZU2)2sWG2ZC##(7up0jE&%$aspXzLurcd?--+xe9-(Aj7#Q$cY7Y8|V zX0sOTc_1#|A2Ev4?eTossLz>cMBlMU7eutre z_CA8}V5Nn*!u$j`js1>wxU<7D%jsF?fUKCw@wU_xk@lPHccFu}*Mdq*pdu!__M824 z$AifP&+aKFR)6PcoN^9X82v@BJ}@zxI&tQZy`n2c_lm0UNz$aSW=sb%BqzepfebOi50{;@v2E&(oOg+%XN!xk4jj1L`C4kQ7mWLM#1Sk0Nq{&j@KL z@|$%mA#=39!gNK7ON#Lf1-dNHot2qXT6II(wa4UDMs^H&(orY2oruBGrMu>cPdrd3 z+&Kz`du1=`9`1x#SV4VVi`KHmYs|)d7#Uv`&u+~)g}?7h(K_F@fbRAZe9lul7p|~9 
zau(vMH~#IfQD!D%JhD+KVm_(xOe2d|FB5&R@t)oOnWSP2dyF-7aDZPZJ1}YG4h9?H zp+y59X+V-vV|I0Sk3Uh$p(`mAJpULDaKIyehIDn-1Sg4$9SyD}SX6CB4vecNktdlA z@5YTL*0?;A3@gg_P$-zN&V&$YWTbf{-<5pMwp0`|YC@9S!=+h4EaZ{xv`WB&FJ`PhUzO{$o6A zi-noC!|eh^iDWl5TJ`+}@81V|yEiI45x3dx6l~_&Qd97>86jH|#NSKyFmP$UBijcN>1^$~ti{yUSUBbKA&fKI*=m;lj7T*$UP+_$$ChBGih@H* z8+D4+Ptz`KR?2}d5(UCv#LwlJ9ioHAkY8FDt(DTY#9&&M^e$1|urr674T?J!(D1Fg zCjL);VK75l`jP#bmo|RQEBHs0Ncm0sBNJ8)Yo3?!rw&%nh-P1@>aR=)+NeQy+X6`i zZ5HL$$BRC1Im_{v5@@DVh?2oU8YTyDd#XxG)9`88q?BnDduYnL7p3D*X$h{OBSr7t zMv?g^AZR}}X|cn*nvNd*8h{3`Kmpcca~qt`f5IHDosjr=-m;`Yo>NdQ=eaVVI7=ls z17xONfZU0|+b-g>`G(x9ZXoSJ8t`aW>krXP5de+i%NqFJAg16M&fv31{8=y)5CNe& zE)SIcdFAD^tD$!uMmp<$_MM+^f#<2`JQo(Lv%MlU`}#Fa@l^^@Wws23uSp4;yLiViXhu?3C5n09oR70cbm3YXh(hT%6NT9SO$M?;6{BhXPchga1;Sc zVl(_r8=(2eI9UFH>cNxJSNL)DAn z_Z5mUi)pW$e+F_0v)=!#rQy>4CK@_$a^6z4+$(#yU6MZg?mMnVr}gwepYi%&BaXk> z8HHW$lNOW>cWJ+7oi1;~xvQb>;cxjvx}{9&Cdr+u8Q9>S(1n5_ccRkTNY)sKWR!OE zCp!r94tq{FON||*Wvh2Qu`cyoEXHHS2eUP9)K2wgXI)V*?6p!K6+SE+aKu>I!TqZ-?{ zIqV^m`S46qw~McRUlA+(0MA=9*lhD-7ApqKfwe6k3EC+@=B^oQJvoZ?5!!)RR`SoB zG`(np=^Qq@Ug<3lUdPvk9}CimP5F>#$>#?%pVn@O^PaT&j%N3A9JHgJYSc_8*`Q~u zJxYGIMh~dg4709o5&g+KCi8Df=gt%1*6SPdWf#^sPT*hveKoESz5c59c<3T`Vo+y? z#gUl5QGfk!6gdTOv?$j;7A&b#)|MQs?E2%Qo%x+6XXXnwdNrkt_C|{)q3! zma91qcCz2O?(-pARhrZS*TY7Fs|+&O2)2n_{;VQMqt8Nd&3abj@-}H%b6@*H)`gsp zU$T?dc3ARt(ax!_JgwatM+t6)XhnuaUe{k(x$+VOl+O<^7BZz^LUYpmsYe11Y>Oel zvUv_>gmog~B8g0Z;c!8MG(6+!at)M^GQ9s)3*ZIt4x^8)8ZdXShX&As zhQnEfQMX;~$%}LTO1~1t)^8fSvF)bAhLL|;@P^^{72=!k>A0( zQ!M55*%nDHH`@Z_b|W`hv~8Ikk7L49R)~H_8i4rp#j2S^_e{C@n^L`_fHa_8#gGOq zhIds1g#Lju&wNmpQhorf11dtFJk=V2&zSmS#MO-pk}2gM#jn#G4aeM8lWy^-&eGWV z)t7s$>}V9OXH=NXPH8M`bBui_&3UfcSZ7htN83lAN2`Eu&{s6anWSjEeUENspgXXI z9)=G2q~#VY`T3B1ELU&bvkmEz6^3zYNn-sNJju3=vnyhw#{fyP6P~_;YVd|&?%N*U z1>_}u@!21@{OS4omDm{?rH>dhb@0o&7L{3F(uf%gbDg}?-;Cg+A3B^c4qS?%!|5FS zYNJkis8PiJ+w8=D3B*`g^ueJ1fS;`z-A_`*iuACBez@>zjm|lM3z`n|x>g^>5b)lg zb;rk0C9?z;7(%7fn+tDuf?O%0_WIx3WF>j2xV?hvYB4Ryy1)xj*Dvo2MgyoN?0FC_ z(3j=?RJi~53+_XVg3eko zU?#Gw(D~RT2V7Ui>;MWdEu+RF>6IoQj@r`Np#2tFQ_}p1^P17d&eysUPE49LITf{E z2FMF%P%fu$4&SlAtd_nLh7k4F%i7LKrhTOvW}0&F7=OLmWbl$7I-hyf2F z8n$8v^s&VG4v0yXK}>?f0il{q*Q86s-6?vKVXzikNLg>crnafdU9`HKH*#=pDsay# zQLxPvZ_n|Vi$h9jRJ};FNy*T9GP^CEK1OC%8P!oopC7`G5J~xoN4rz9Cx9V-&Ny;0 z^bj4MBcO-G?mgb7-aWoK4z>vGzhu!I5=@|G7=%FXQ7hjd*)PfRc${(FFB-Y0bcRNpwx(hkot-hZJKm!A85njRxJYVTT7F1ouP+ zq2aBYS#PX|zJzJ{%_vVPEuTxWq@@rbwLOD-b56D;#<`hhf#09Yxh$OF$qBzw-CTn`+AL45f zFnG538CkfyT2d+s9GIB0c2jN$QfB3%m7SFo%!u*S9_T0MmytPfo%hX#bJ^ z3qxp~{o|1T8s;g%sO^3sEBAP|@m?d$tXe!5t`gL{6P9wu=Uz?xsPw+czcQQ*p|tWM zi5LjFwtCC!*S7NIG3hT8G!O!WV}s#02VuODdlb+Yu;UG(N5El?wDs$Uyj{Fgp8E}e z7F(%*Q=~vD(8Q+~&XK`??}**NC7+jMqFofg(|9nh6y zPuTkTf-C?r%W(_`&`@8AWihDF zIK<^!z4H>YFPduu9J><_hui_*nMQhNgDC|G48%bo_$<2HV%rUinOY&`Wbp(j6k2g7 zV;k;G8ST=w@s^1uL}z@yWZH1P1YdS{Lc$ZHd7Ql;(2wwM?rx41P!P~deyMfXkcZ^ z%B&2e0_WO4 z;89%u$Uw2_@4cuv9>Z7%665g&&1(RtyYFg+(jzx8Tck30svjZ*f5h3^6r-)5Cb~+f z!9wa(Ed%mH1eiUk#at_-vJd58hxha0z>|kxge&PGIL+ee>Z$!LpsUC$SbtK(wRNxQ=>Lcr@QObmAnHPj;(N=I;ephEH3`R@IHU@;1qShmsQY#5~9b3xRY z=0y4*Dw6qrruN1TiNqgg+N2*7&XoUF2zXOBWAwpXi=ipF8vVP9+v^|D2BdB_n-o#K z-M{8{E@bCH>>MrO=7_0Lyl>PIY`@%f>u_fzM`Z+fqtPZKDD-2T^3`yqcUK%@dc&2z zEZ_q5<)|rWnOi0XHiPl@1U2fAL|Lt!)6xXU{Mxq$*TeK%%<_XL`zx~J<|Elz#rk!_ zAlB7Cplo2nSPNNvQH-?+{WS(m1QpOrJ86BSBb4r3A>XkD=^4yU_{yV{@-++o=y$3PlY(^H5E$jh<}OIFvlcJRJ%2%Fe*;=b zg-H?r1Q(DNTtKnu#aF-u-DxrWYbBn20x5HDK7tEJ@BGwf#Eg&`oMWkH;N}y1|6@Z$ z3MhbCO2)j3(Lyd69@LhPlL%t_O(7YuE9`jNA7uF>7CL~ryd%4-<;Mu713NkX>AOPo zjv->{32G}7)JzJx?7amq1m2Add^Z1rDjJ)@k^udQv5lXI(FdqHHs*HX@|oe(37GZ| 
z9&JsTgRLJ-SvL&<62NS((Ze3pGoEggG_C$DHL~AZek$sA#CZ;?2r-l^;I_!Jo<{i% z=cuS82$*Jr3fIsw<6ilPc7HO(UG@f*MP}qM<(-dwZ>kQZKfW*Phc7HoLK`QI--8N% zOF|*Ft3|Hz9B>vn0qqTRfC@6c#Zule#Qz3~sfw%cJa?V^`Ub}XR0SV^x>b&rm(?HM zu+G14JTYNi)&7Za@#Z|x-Yrcv!ZwRY@MXNqME;4B@}ypdJe-pS)b$nl{023|g+70{ zs-_<~&AQx;xv%cm6UUO8_)mhKvA)od0Tz;j*F5)$YO!UHr12 zuj-xoxL$n=mOR@OVAm;Q(Ja)C=!xZe23$z?VtY{UVC9?hRwAjvY~|0ZpRF6yKy7a1 zxA!xOLYCv1pnCNqV1o+UpcR5(mYfVO%q-4`NorIK<4rWvMGw;x6Iuu8?r5I+0s3rb z1XLKC_r`K*G}Kui-E_@h7ulT+msW_m+_ z?s)-$86!{L&=#GDgsQaiDHkXOt3}=-#GjZOq-=xnd`7w8 z_Kns#S@)z!HJQ^hcO#e*x~8exK)BQcWs1bC)C#C=O3L_9EDYx^Noyql zVL~)QW39Mghi?ca4AZTEh2!~H7}f4DOW-LBr>!5ujA+1-Q0-A@>%#6hf#Umz(sd`D=at30$Lr z0Oln*@D^Gb^VZSpaHM4+suM?n|fQ$GTl{HmOTow+&qEYs9A;2o{i z!3xhxfbFKjRr&^jDId+ol;Zj}$$yfW<@>^+#jRUo%R1wEw)ICLYM1ogY<&VSh}pAV zcSuQ@H@V9G0Pk6^ti(2Nt(O9>7GeB50(9bL1BVgOO$^tIviJ;)#31Tqzb}j>OcJ|k zq*|}Wwqpwv5JX@57zngkdDU)Ls{=y<-nX98 zxqot+^N2GUNG|H97hJRA0F$nKj#mN+14$qoao#a%l+c)+O(*SDH1&N*5E)1I_|4dR zs!Ws-%Pl!Szgc~Dz2LWYDbdIyLCALaK=|(MNB);{^8!tf6 z?_KUVUc{PC0=3(!h`)!E4N6St4%T55;LL)Rl`(>90)nE8w*Z1#ubD?oX!CXLFdKk+ z`pez6yqY?ZXa}Xxh#EMXV1P_QoYMdLCGBtTbWR6gXjv7BLk-^ar@QxOT~=rldEa=u zGUK(SVm-l8#!R=<)B-_MKM@b}!$-SbeS6c9^fsEB1JZ;(wahnY5Jg~r6PKVa3dxz$ ztuRZIW1tr$;G@)!c#R=AO4{xtY4qJgF3ejD%g^aqFz>l&Eq)^7$bA~cHVWEA)`zo# zeyRS39)QB{Oc^+22-w#=kQcWoVgOE_fk2o~=B@9G|{qBi`H&%6>0$OFqaOcd_c0G?LBmdUY?(CrDbvK2|N;*PK~36 zMW3hVADd3yICS@{Q`=I>Ubld~WRnYPB<2 zpT%OX>iuW2oTG(00zh*#BXO-{@u`;#1|Za#KyRUaC8lk>nasOdDhiK{&;*&3_(Ub3 zr3E;ZATyw-6`&Ts5~s%Ych5Qjt$V#M;f~0kW9q$9-i?WpoJz}yd}RidXpKLeYry(g zfdX)`-`XhyWM1)JTTf?T1KGl)c}*IkR4ynsuNxXKSV|4rF8=H=Q5RAeb~} zQQxn`Tww4;LB<<)&HT(+x<~=j=bcrck_tZo0g=H}oXj#>ED9*xl>^{lE;_jL16n?Y zQVIV6K!ZWhUt|WXdxhP~bJib@7{r08ZjQd!n=B!w4?91!s1*hk+tHF9u7~J3;OaSA zx*qKS?11Th!r-VRaThgZ70^jAhk?D&TdwoX*sQ$P7<}^pkBnXJ@u%BqB&>=r^3<}# z193^|f3SuAA~kgxeJaQbYKbyZ5npL`wIFQBAQVRsLjoY^lu^#d#CQ+M?G?i4G_QpD z(&0&xdvapwTrXpI`=;wkgfA1GJb5Va{+f>>uNkvAihM@C7Pk2z+MBZs%|V#BtWvMwC5_eItx@Ym^~ zM+#=Hn9_seN(T^7+x@Vw#DX~-2E&%Fz|LERhb4gR3ut+w5FB;U7B1*sf!=iB5Jx&iafM2E-bL1rB*84dwfn*xNRMmYe zkfnfN5Muul<1jg!q#aX;s@gH&ncQ$rCLWq-m0w@6)(B{ka69txnF4J|PeFrW4uCL2 zT@lRsY3pp1Q&rX_+aS#4I|U7ZxuCsow8qZVb-|b75p5Afh2N;=TaRcuwB%DeXLA1PEjJFbFxk4fT3cwC#VhzIg*l1BKj)-UfjuadDe^%QVTkNR0PC5WgqNy0$!W%dR7 zxanJi)gt^?8{d@UldIgp-`;+Y@z@o);$^%~K3}4oT5L=8k5_OQ4U{R~L-;p>&?tY> zVRg_8(=Ihm^381MQDEctOdW?t9M>y(AmL0vnH0>?OSo#Bqk*W?>{j|!L5p7uTa3B;(^BXty#C@ZoJiS9g*ejG%e zh3SZDlz`KE&h9 z;y^zDVdU8pzs%tRVo0a@+P}~4lZ;;ohYjfR|B!^y$&o zGjK|pb)GJ*xI0%QVQBGlz?CS;rpsn~SEaH+I3ANyJ z1|W@H`)jIhfmXpwE)WaF0<}4nk{tHErdtYakB&Er=o;z7G&XsM`9hJ2GN6A%2-9L( z_S?Z1@M>BY10!A2B0>GTJq&n{XO*Y3m1$g0e#hm3cI6w6r2F6#mQoWXhAG1Dr=I}) zQTp5FFYbsHV6s=O0JuZs5l}MW|C5qw1j+clAI(+8uuVeOEH!DIMG&A5dd!57+%tjS zI31v++?h6U%^#M>n4D~kl%zm-PLPgte}aHQ5^L9Isael6D|UnWY7+SPL%1IJuzzPv z7m#QES*Mgr$d(HQzZRd%dD_fBLOWDoDGvIE5gJqe<{S>HGK`EJu0<@3j%g2$8&_q9 zG+gcTX;gCO{rtcr3Ch?7eR1~-Gaor=&FjT@=7ncIGS2%9Z7^uXdb;3Wr2U>E3!qspV>jo~ajp?5!DgpAyNW6b1bfAcCS z2>d@gU-fDd<>TNt0(K8>$FdC-#f0td3AC1+%`>1zJ2k!2QjDxL#fw zZVpZOtb`#N9$|=vf5orWON3W2D+Z1C>N{A;QZUcUC{`8NdseKSrVGzST>K`ZU;mq! 
ze;0uD=>On4S_0SrZ&2@2KFQ1Pw5@l^e*%&ikcvF3e74yjlrH?QIgibc;OvG^4>vGv z^_Q&4E+Le>voBFdT>?`#r5MY3e^ZDb!<>BiuSOw*Z~6eP;ROhE@_^;}qY&Mz!VALt zL|~#r?mSdG{IFFyp_cZ@H~N8K8pwXtzr75Yk+cBHH?0BA?H3(B35IHtLL)PDX+%tZ zP+Ck@rpwBR&FF*^v`QDs2t!vlV{2Q{Se}nyPDqn`n z96_G@fZq}Dplm}~)>AIOSS&vTHnku$(dBSVt1p-vH{;KMg~uNt1%J*H-Z_5bo(HS< zfAfHbsFUUcaQ(fT_#yFDfUsC;N(BW&0Wy;3XP}im>rJa4=9>j;eIj$2A0plwRI^Gh z(t9GklHIm1125pslpAc?EW{<;C-j}Dj$e`o$mrNZFwRLU)(JF%amSm1QI{%HS1_Xz z<^W)m$2BV+CS^X%!Ws*PmIZvb@ZHH`{$bFBJ)9~7H$(0&V4z5)Ia@u#hTpxd-(f=E zblMcGwTAQh--F}i1=)(RIj5mW7!EV!VP+`3)N_<>{Q24O2&SL;p-+;iLptE#X(~he z(>pN)JZL}{1TBU6su>U=;QIQx7X;0Rb3>R=x{aWle{~>VPX0RIJ-b&gA~9v9J@K!z zg^&d$bLm_c5Jh}j?IbB!vg77Y89+Ux1DBovIO9xDi;3bQ;dnu_LFu{Gv$-yEPVLmt zARW+S?mEYNpY7BBoYjXyC zph2U-)#1?Mp#cuntg|8#{IZ_tezKQ!FxY;qz#kz)i6O&!78!?u%HrGXQk-a!%kXb} zK!bW3e!2P~=<)W9JtkOAMXq4J69NFkC*gNLr2#6;1dM~)@1B^kjVZ-AZrX^DyY<8l zzzVDrumkcXYip`%WX5T($nna)%%sSyv(Xiz@)4vzQPdyyH|`Pl@B*XD<@qDt(2riu z&!mQ#-~32++jBIa6AlwaW;0tptAS5XNRKBg5W2AmDz<*$Y{3OqqtxJGt*5 zuaJiSKl2KM-oUIs%UA3glOeZyS;>2+9~g@~i*!~xok0wJr8tj(kbN^6>YOOeU6yoN zk9|y2vIKwwG(yDw>xUn)T%SoIW8;_d=?vpE%JV-?6KUP}mHhbK-=x4$xDuxi^4YEZ zAwjgQP)f=znRoLgKMLk|em}qhnT^HnJcrRwEgw>yZzdEtp;|4Ew11R-UGgDa>v$9v zbuH+bL2DP;6ZDN9jSsfp<0fov3ji^fnI?kv9Et|JIsZ-ueenlSK9?f53}}(tEoXdvi3|#cc@l7Q;+jFxjTb!26tquor^%rmz*T6F0au3R&QK68xRcemx4PRf_XmO* z?y_VAGaNB|c0RyBTpA1f7ywF3K)1CBW(b?hggEFqN5cMueiia4`J{#g{(JiPxm@w1 H*WUjFN$Ic% literal 0 HcmV?d00001 diff --git a/hadoop-hdds/docs/content/feature/HA.md b/hadoop-hdds/docs/content/feature/HA.md new file mode 100644 index 000000000000..116cbb72be4b --- /dev/null +++ b/hadoop-hdds/docs/content/feature/HA.md @@ -0,0 +1,115 @@ +--- +title: "High Availability" +weight: 1 +menu: + main: + parent: Features +summary: HA setup for Ozone to avoid any single point of failure. +--- + + +Ozone has two leader nodes (*Ozone Manager* for key space management and *Storage Container Management* for block space management) and storage nodes (Datanode). Data is replicated between datanodes with the help of RAFT consensus algorithm. + +To avoid any single point of failure the leader nodes also should have a HA setup. + + 1. HA of Ozone Manager is implemented with the help of RAFT (Apache Ratis) + 2. HA of Storage Container Manager is [under implementation]({{< ref "scmha.md">}}) + +## Ozone Manager HA + +A single Ozone Manager uses [RocksDB](https://github.com/facebook/rocksdb/) to persiste metadata (volumes, buckets, keys) locally. HA version of Ozone Manager does exactly the same but all the data is replicated with the help of the RAFT consensus algorithm to follower Ozone Manager instances. + +![OM HA](HA-OM.png) + +Client connects to the Leader Ozone Manager which process the request and schedule the replication with RAFT. When the request is replicated to all the followers the leader can return with the response. + +## Configuration + +HA mode of Ozone Manager can be enabled with the following settings in `ozone-site.xml`: + +```XML + + ozone.om.ratis.enable + true + +``` +One Ozone configuration (`ozone-site.xml`) can support multiple Ozone HA cluster. To select between the available HA clusters a logical name is required for each of the clusters which can be resolved to the IP addresses (and domain names) of the Ozone Managers. + +This logical name is called `serviceId` and can be configured in the `ozone-site.xml` + + ``` + + ozone.om.service.ids + cluster1,cluster2 + +``` + +For each of the defined `serviceId` a logical configuration name should be defined for each of the servers. 
+ +```XML + + ozone.om.nodes.cluster1 + om1,om2,om3 + +``` + +The defined prefixes can be used to define the address of each of the OM services: + +```XML + + ozone.om.address.cluster1.om1 + host1 + + + ozone.om.address.cluster1.om2 + host2 + + + ozone.om.address.cluster1.om3 + host3 + +``` + +The defined `serviceId` can be used instead of a single OM host using [client interfaces]({{< ref "interface/_index.md" >}}) + +For example with `o3fs://` + +```shell +hdfs dfs -ls o3fs://bucket.volume.cluster1/prefix/ +``` + +Or with `ofs://`: + +```shell +hdfs dfs -ls ofs://cluster1/volume/bucket/prefix/ +``` + +## Implementation details + +Raft can guarantee the replication of any request if the request is persisted to the RAFT log on the majority of the nodes. To achive high throghput with Ozone Manager, it returns with the response even if the request is persisted only to the RAFT logs. + +RocksDB instaces are updated by a background thread with batching transactions (so called "double buffer" as when one of the buffers is used to commit the data the other one collects all the new requests for the next commit.) To make all data available for the next request even if the background process is not yet wrote them the key data is cached in the memory. + +![Double buffer](HA-OM-doublebuffer.png + +The details of this approach discussed in a separated [design doc]({{< ref "design/omha.md" >}}) but it's integral part of the OM HA design. + +## References + + * Check [this page]({{< ref "design/omha.md" >}}) for the links to the original design docs + * Ozone distribution contains an example OM HA configuration, under the `compose/ozone-om-ha` directory which can be tested with the help of [docker-compose]({{< ref "start/RunningViaDocker.md" >}}). \ No newline at end of file diff --git a/hadoop-hdds/docs/content/feature/Observability.md b/hadoop-hdds/docs/content/feature/Observability.md new file mode 100644 index 000000000000..2913abd4b125 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/Observability.md @@ -0,0 +1,224 @@ +--- +title: "Observability" +weight: 8 +menu: + main: + parent: Features +summary: Different tools for Ozone to increase Observability +--- + + +Ozone provides multiple tools to get more information about the current state of the cluster. + +## Prometheus + +Ozone has native Prometheus. Each internal metrics (collected by Hadoop metrics framework) published under the `/prom` HTTP context. (For example under http://localhost:9876/prom for SCM). + +The Prometheus endpoint is turned on by default but can be turned off by the `hdds.prometheus.endpoint.enabled` configuration variable. + +In a secure environment the page is guarded with SPNEGO authentication which is not supported by Prometheus. To enable monitoring in a secure environment a specific authentication token cen be configured + +Example `ozone-site.xml`: + +```XML + + hdds.prometheus.endpoint.token + putyourtokenhere + +``` + +Example prometheus configuration: + +```YAML +scrape_configs: + - job_name: ozone + bearer_token: + metrics_path: /prom + static_configs: + - targets: + - "127.0.0.1:9876" +``` + +## Distributed tracing + +Distributed tracing can help to understand performance bottleneck with visualizing end-to-end performance. + +Ozone uses [jaeger](https://jaegertracing.io) tracing library to collect traces which can send tracing data to any compatible backend (Zipkin, ...). 
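For local experiments, a throwaway tracing backend can be started next to the cluster. This is only a sketch and assumes Docker is available; the ports are the Jaeger defaults (6831/udp for the agent, 16686 for the web UI):

```shell
# Start a single-node Jaeger backend (agent, collector and query UI in one container)
docker run -d --name jaeger \
  -p 6831:6831/udp \
  -p 16686:16686 \
  jaegertracing/all-in-one:latest
```

Once the Ozone components are configured to report to this agent (see below), the collected traces can be browsed at http://localhost:16686.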
+ +Tracing is turned off by default, but can be turned on with `hdds.tracing.enabled` from `ozone-site.xml` + +```XML + + hdds.tracing.enabled + true + +``` + +Jager client can be configured with environment variables as documented [here](https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md): + +For example: + +```shell +JAEGER_SAMPLER_PARAM=0.01 +JAEGER_SAMPLER_TYPE=probabilistic +JAEGER_AGENT_HOST=jaeger +``` + +This configuration will record 1% of the requests to limit the performance overhead. For more information about jaeger sampling [check the documentation](https://www.jaegertracing.io/docs/1.18/sampling/#client-sampling-configuration) + +## ozone insight + +Ozone insight is a swiss-army-knife tool to for checking the current state of Ozone cluster. It can show logging, metrics and configuration for a particular component. + +To check the available components use `ozone insight list`: + +```shell +> ozone insight list + +Available insight points: + + scm.node-manager SCM Datanode management related information. + scm.replica-manager SCM closed container replication manager + scm.event-queue Information about the internal async event delivery + scm.protocol.block-location SCM Block location protocol endpoint + scm.protocol.container-location SCM Container location protocol endpoint + scm.protocol.security SCM Block location protocol endpoint + om.key-manager OM Key Manager + om.protocol.client Ozone Manager RPC endpoint + datanode.pipeline More information about one ratis datanode ring. +``` + +### Configuration + +`ozone insight config` can show configuration related to a specific component (supported only for selected components). + +```shell +> ozone insight config scm.replica-manager + +Configuration for `scm.replica-manager` (SCM closed container replication manager) + +>>> hdds.scm.replication.thread.interval + default: 300s + current: 300s + +There is a replication monitor thread running inside SCM which takes care of replicating the containers in the cluster. This property is used to configure the interval in which that thread runs. + + +>>> hdds.scm.replication.event.timeout + default: 30m + current: 30m + +Timeout for the container replication/deletion commands sent to datanodes. After this timeout the command will be retried. + +``` + +### Metrics + +`ozone insight metrics` can show metrics related to a specific component (supported only for selected components). + + +```shell +> ozone insight metrics scm.protocol.block-location +Metrics for `scm.protocol.block-location` (SCM Block location protocol endpoint) + +RPC connections + + Open connections: 0 + Dropped connections: 0 + Received bytes: 1267 + Sent bytes: 2420 + + +RPC queue + + RPC average queue time: 0.0 + RPC call queue length: 0 + + +RPC performance + + RPC processing time average: 0.0 + Number of slow calls: 0 + + +Message type counters + + Number of AllocateScmBlock: ??? + Number of DeleteScmKeyBlocks: ??? + Number of GetScmInfo: ??? + Number of SortDatanodes: ??? +``` + +### Logs + +`ozone insight logs` can connect to the required service and show the DEBUG/TRACE log related to one specific component. 
For example to display RPC message: + +```shell +>ozone insight logs om.protocol.client + +[OM] 2020-07-28 12:31:49,988 [DEBUG|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] OzoneProtocol ServiceList request is received +[OM] 2020-07-28 12:31:50,095 [DEBUG|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] OzoneProtocol CreateVolume request is received +``` + +Using `-v` flag the content of the protobuf message can also be displayed (TRACE level log): + +```shell +ozone insight logs -v om.protocol.client + +[OM] 2020-07-28 12:33:28,463 [TRACE|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] [service=OzoneProtocol] [type=CreateVolume] request is received: +cmdType: CreateVolume +traceID: "" +clientId: "client-A31DF5C6ECF2" +createVolumeRequest { + volumeInfo { + adminName: "hadoop" + ownerName: "hadoop" + volume: "vol1" + quotaInBytes: 1152921504606846976 + volumeAcls { + type: USER + name: "hadoop" + rights: "200" + aclScope: ACCESS + } + volumeAcls { + type: GROUP + name: "users" + rights: "200" + aclScope: ACCESS + } + creationTime: 1595939608460 + objectID: 0 + updateID: 0 + modificationTime: 0 + } +} + +[OM] 2020-07-28 12:33:28,474 [TRACE|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] [service=OzoneProtocol] [type=CreateVolume] request is processed. Response: +cmdType: CreateVolume +traceID: "" +success: false +message: "Volume already exists" +status: VOLUME_ALREADY_EXISTS +``` + + \ No newline at end of file diff --git a/hadoop-hdds/docs/content/shell/_index.md b/hadoop-hdds/docs/content/feature/Recon.md similarity index 52% rename from hadoop-hdds/docs/content/shell/_index.md rename to hadoop-hdds/docs/content/feature/Recon.md index 3cb1a9f61672..7234b809bc7b 100644 --- a/hadoop-hdds/docs/content/shell/_index.md +++ b/hadoop-hdds/docs/content/feature/Recon.md @@ -1,8 +1,10 @@ --- -title: Command Line Interface +title: "Recon" +weight: 7 menu: main: - weight: 3 + parent: Features +summary: Recon is the Web UI and analysis service for Ozone --- +Recon is the Web UI and analytics service for Ozone. It's an optional component, but strongly recommended as it can add additional visibility. -{{}} - Ozone shell is the primary interface to interact with Ozone. - It provides a command shell interface to work against Ozone. -{{}} +Recon collects all the data from an Ozone cluster and **store** them in a SQL database for further analyses. + + 1. Ozone Manager data is downloaded in the background by an async process. A RocksDB snapshots are created on OM side periodically, and the incremental data is copied to Recon and processed. + 2. Datanodes can send Heartbeats not just to SCM but Recon. Recon can be a read-only listener of the Heartbeats and updates the local database based on the received information. \ No newline at end of file diff --git a/hadoop-hdds/docs/content/feature/Topology.md b/hadoop-hdds/docs/content/feature/Topology.md new file mode 100644 index 000000000000..71c289c56d4a --- /dev/null +++ b/hadoop-hdds/docs/content/feature/Topology.md @@ -0,0 +1,108 @@ +--- +title: "Topology awareness" +weight: 2 +menu: + main: + parent: Features +summary: Configuration for rack-awarness for improved read/write +--- + + +Ozone can use topology related information (for example rack placement) to optimize read and write pipelines. 
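Before going through the configuration steps below, note that the topology resolved by SCM for the registered datanodes can be inspected from the command line at any time, which is useful for verifying the setup. A minimal sketch (the `printTopology` subcommand is assumed to be available in the Ozone distribution in use; the exact output format may differ between versions):

```shell
# Print the network location (for example /rack1) assigned to each registered datanode
ozone printTopology
```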
+To get a fully rack-aware cluster, Ozone requires three different configurations.
+
+ 1. The topology information should be configured in Ozone.
+ 2. Topology related information should be used when Ozone chooses 3 different datanodes for a specific pipeline/container. (WRITE)
+ 3. When Ozone reads a key, it should prefer to read from the closest node.
+
+## Topology hierarchy
+
+The topology hierarchy can be configured using the `net.topology.node.switch.mapping.impl` configuration key. This configuration should define an implementation of `org.apache.hadoop.net.CachedDNSToSwitchMapping`. As this is a Hadoop class, the configuration is exactly the same as in Hadoop.
+
+### Static list
+
+A static list can be configured with the help of `TableMapping`:
+
+```XML
+<property>
+   <name>net.topology.node.switch.mapping.impl</name>
+   <value>org.apache.hadoop.net.TableMapping</value>
+</property>
+<property>
+   <name>net.topology.table.file.name</name>
+   <value>/opt/hadoop/compose/ozone-topology/network-config</value>
+</property>
+```
+
+The second configuration option should point to a text file. The file format is a two-column text file, with columns separated by whitespace. The first column is a DNS or IP address and the second column specifies the rack to which the address maps. If no entry corresponding to a host in the cluster is found, then `/default-rack` is assumed.
+
+### Dynamic list
+
+Rack information can be identified with the help of an external script:
+
+```XML
+<property>
+   <name>net.topology.node.switch.mapping.impl</name>
+   <value>org.apache.hadoop.net.ScriptBasedMapping</value>
+</property>
+<property>
+   <name>net.topology.script.file.name</name>
+   <value>/usr/local/bin/rack.sh</value>
+</property>
+```
+
+If implementing an external script, it will be specified with the `net.topology.script.file.name` parameter in the configuration files. Unlike the Java class, the external topology script is not included with the Ozone distribution and is provided by the administrator. Ozone will send multiple IP addresses to ARGV when forking the topology script. The number of IP addresses sent to the topology script is controlled with `net.topology.script.number.args` and defaults to 100. If `net.topology.script.number.args` was changed to 1, a topology script would get forked for each IP submitted.
+
+## Write path
+
+Placement of the closed containers can be configured with the `ozone.scm.container.placement.impl` configuration key. The available container placement policies can be found in the `org.apache.hdds.scm.container.placement` [package](https://github.com/apache/hadoop-ozone/tree/master/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms).
+
+By default `SCMContainerPlacementRandom` is used; for topology-awareness, `SCMContainerPlacementRackAware` can be used:
+
+```XML
+<property>
+   <name>ozone.scm.container.placement.impl</name>
+   <value>org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware</value>
+</property>
+```
+
+This placement policy complies with the algorithm used in HDFS: with the default of 3 replicas, two replicas will be placed on the same rack and the third one on a different rack.
+
+This implementation applies to network topologies like "/rack/node". It is not recommended if the network topology has more layers.
+
+## Read path
+
+Finally, the read path should also be configured to read the data from the closest pipeline.
+ +```XML + + ozone.network.topology.aware.read + true + +``` + +## References + + * Hadoop documentation about `net.topology.node.switch.mapping.impl`: https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/RackAwareness.html + * [Design doc]({{< ref "design/topology.md">}}) \ No newline at end of file diff --git a/hadoop-hdds/docs/content/gdpr/_index.md b/hadoop-hdds/docs/content/feature/_index.md similarity index 80% rename from hadoop-hdds/docs/content/gdpr/_index.md rename to hadoop-hdds/docs/content/feature/_index.md index 017206e9fbcd..2b30d3ddb37c 100644 --- a/hadoop-hdds/docs/content/gdpr/_index.md +++ b/hadoop-hdds/docs/content/feature/_index.md @@ -1,9 +1,8 @@ --- -title: GDPR -name: GDPR -identifier: gdpr +title: Features +name: Features menu: main -weight: 5 +weight: 4 --- - - -Enabling GDPR compliance in Ozone is very straight forward. During bucket -creation, you can specify `--enforcegdpr=true` or `-g=true` and this will -ensure the bucket is GDPR compliant. Thus, any key created under this bucket -will automatically be GDPR compliant. - -GDPR can only be enabled on a new bucket. For existing buckets, you would -have to create a new GDPR compliant bucket and copy data from old bucket into - new bucket to take advantage of GDPR. - -Example to create a GDPR compliant bucket: - -`ozone sh bucket create --enforcegdpr=true /hive/jan` - -`ozone sh bucket create -g=true /hive/jan` - -If you want to create an ordinary bucket then you can skip `--enforcegdpr` -and `-g` flags. \ No newline at end of file diff --git a/hadoop-hdds/docs/content/interface/CSI.md b/hadoop-hdds/docs/content/interface/CSI.md index c7046d09f898..d1971c14b7f2 100644 --- a/hadoop-hdds/docs/content/interface/CSI.md +++ b/hadoop-hdds/docs/content/interface/CSI.md @@ -1,6 +1,9 @@ --- title: CSI Protocol -weight: 3 +weight: 6 +menu: + main: + parent: "Client Interfaces" summary: Ozone supports Container Storage Interface(CSI) protocol. You can use Ozone by mounting an Ozone volume by Ozone CSI. --- @@ -21,10 +24,18 @@ summary: Ozone supports Container Storage Interface(CSI) protocol. You can use O limitations under the License. --> -`Container Storage Interface` (CSI) will enable storage vendors (SP) to develop a plugin once and have it work across a number of container orchestration (CO) systems. +`Container Storage Interface` (CSI) will enable storage vendors (SP) to develop a plugin once and have it work across a number of container orchestration (CO) systems like Kubernetes or Yarn. To get more information about CSI at [SCI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md) +CSI defined a simple GRPC interface with 3 interfaces (Identity, Controller, Node). It defined how the Container Orchestrator can request the creation of a new storage space or the mount of the newly created storage but doesn't define how the storage can be mounted. + +![CSI](CSI.png) + +By default Ozone CSI service uses a S3 fuse driver ([goofys](https://github.com/kahing/goofys)) to mount the created Ozone bucket. Implementation of other mounting options such as a dedicated NFS server or native Fuse driver is work in progress. + + + Ozone CSI is an implementation of CSI, it can make possible of using Ozone as a storage volume for a container. 
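As a rough illustration of what the CSI node service automates, an Ozone bucket can be mounted manually with goofys through the S3 gateway. This is only a sketch with assumed values: 9878 is the default S3 gateway port, `bucket1` is an existing bucket in the `/s3v` volume, and the placeholder credentials only work on an unsecured cluster:

```shell
# Provide the AWS-style credentials expected by goofys (placeholders on an unsecured cluster)
export AWS_ACCESS_KEY_ID=anyid
export AWS_SECRET_ACCESS_KEY=anysecret

# Mount the bucket exposed by the Ozone S3 gateway to a local directory
goofys --endpoint http://localhost:9878 bucket1 /mnt/ozone-bucket1
```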
## Getting started diff --git a/hadoop-hdds/docs/content/interface/CSI.png b/hadoop-hdds/docs/content/interface/CSI.png new file mode 100644 index 0000000000000000000000000000000000000000..38720c3019cfdb186d792bf65aed4190a3988a81 GIT binary patch literal 27210 zcmbrlbyStnyEVE2>6DW05RmRhq$LC-rMp|Y5$TfdE=fVUyGx{`Tco?;uKhdbjB&>O z#<+iccQ{md_ulVb>se3EIp44kiqhyPL?{pl1YK4}LInbW)dW93AR&N%%Uu4TLm*g@ zo*y+`R1DoH9GvXUENx6FTs$01DNNlh%^(o>KNV?~PE0N6!OzZkTCmcwIv9x6jDmw_ z=K&Q=_kVm8R}5^y%P%PtUD@wLKv@=H_HG_(OY! z((&!)ZrJo<-pxI{e$VxGtMA0ZA&HMVZ_S#rhw1Ont~cChyfxJsN5Jd&V%mksnny-X_=#-^;Qt?I+Xl+~{S>&{S0tY<5bo@x5*@YukO*@nvZ2DFMXs9( zouts@e|1*8BZU$5O^&5drnIo>*GMdlR(R+~_E=)>8J+iu+SX-l(Mj5Ng|cN8bt_Ua z!T|w=@2lD_R|Z1u*rp81yB>o$Imfxq->8~p+FtSF@710$zE{_^z2tUXvV_C=yUAYAU)998o*@G1*oA z?7(Zd(TH=#Z+@2EBFVicYaq|7kJ_D$npX|pQOzz9IU@YQ@7uO}xcI|Tm|+jr&2zM! zey?Rae(q)e!O{<-1quz!vJuzDW2AMIT9;8WyjJbs(_{vF{k9AW3(f};4p(-j73)o$ z6}8-d>*-Y)Kh)4ZcCMAchRthHYB zXZ%X~eQp-d+kK{O_Ex$bj+9;O={U<4wQ(2k0*_HakuR4ij!_0#)?cp)j8bDw4^^yF zDo9tTrZmpxlb7Ibw_yE8(inuaV6m56h<8b&Si4@6`6cTP{$`i5{3mHHX-M&NXWVua zIU&2W7|qLMQ{4v7Bm*fJv7lV4+WDfQppfbH6-S1V)&ZxQo7DcE%{gcE-{P;|mvi1E z$41DuhziN6sW>uyNUwR%xSc@C*=<;t+|rcCY2%-!fqfUh1=0Qb)Rrxeh9XKX??H>@08cHFVSSNKEW<4qRr|yC+#V zG1j8Ly^g@ffpN3?>&Rj-F{?tbBs0zH9L+qA7%LWrB4W{e!S@@DK(dBPJISVJqnU|7 zI|la8K#ZJ}GHNgm5-B9SeBSM~%5?l<<8z6%smdNc!!N>^fq>#Q<8TT{I9UlT9^R<1 zbxpT&6RDUjEE&Xete@4K|2xa);cvIH=KLQ^CGFmm)^vAO`O@_yk$8q_!T8{6RZe5l zu4{!*N|FDi?)HBUptunmFQ8_>m$7?SoURt-@JdHBhe9UsWk9zW$@>>{a)>f9ID7%` znR*kEO+Trd)LV>e;>8)MnHsUNQ$HwClB)!Dt|J?~xPNp&6(90NxUAKX?R5U6>T;}B z%!t-_HQ2(MDGv`ThxF24S648iM#7!W#ZTCnz`P);^nt_=1_ni8jWj{>KB*I;Tw%}HZhhm`bfNp;NX2r}|ALVH9m7!$h7qeNSGi;$O>DTQ zl6W;o%}?pv&^j0Gp9FRVv9!M*M(LA2Ya7+0r2liJPUZ?=WXQea9{OmNcU)S6Rg~tH zl-Vy{SWcJ2R2O5D3&T*k7FYnFG@C&ra@FjxR;gS-2t$XjjCn{RK zE-5Gvd?StgA%L3oExbv{?|}UPv_@H6lsAL1IaYN|BRZCxv$QZu@pZ|wny)QFaV2@) z(2FNoDvx+jUl7I09>gh9kdeWC`1q~(aN&K4bd0kMR!|t@PJbMOeCUERaJZlUhcU)g&s5OT7g9|-)_4;CWZ3p8TQO^yt|;UasoV5W zIObn$i7$^NLu3Auoun0n5aw{onHYp17QWTR`6dTrqXVy4iI;gOV?pg@Nj_076wvza z9hLW2*r#e$#Km9 z33Q)aj$)j}pW>*(IMOk?5cInP+g_O99KILEG7M(L#9q!%H_j|*N7>FCk~@V{r~P5p z(U@6fTJps(Xl2YoVsHO4^r{lM@ULKdQf1(OpTUup z2wil*P@$LfWb{@^W=+~zY;OJVn@a|M7XOI2r07@U3%OS>G+`8Ct&v)|W;{w?{|O{m z3AjQdl_{RH`1@BFPR82Hp|mW`LHgaN_l+;g4-ycx8oW#gC-c&Ot{v*xoqN&WswJZn zt=jL(N0ybbtJJgMd`ub3 z?ORQzMfWy;aBy&Hjw+6OpW=&&CJA5uGR|-|%9AaUj&s9laq8`#kg|)(RW|0}_sMX6 zMsJ_FdSY;NqV_+rO*t+`nyBTF)jKPZD;&fl`C>1=X0ScX(%_6!FsGk&|K-wsSQ04} zu{ddx(Z8db_=Y-rDaiyS&B7E;Nyza~Z1mb15#6$8ocYL|z4cc^rlglgRhq+@(cLRG zRLedx3u&`_;dzs+!`k==i(_obzfIseHtD7f08?#G)DSD&s%MkT9^@+)<6B-E{i_q)v)KE@d$Rx zf*~sNI@Tcv=bvw+*LF<*;oI*bwl!|y-Z9q;CfIgF`)gK3bzK#Hp&!pXQCf4{QhjM* zOn)yQ60RL{fh8sO@<@6%mS7oUI<)P=+^2|UJLyY>gGUW^`x~q2Rfm$rJybg!XL@Y1DIXj|*^biUYebrJDt~V}`b|-|dB;Jd&9B{aPbh&=Xh3+oTw*R|+KW!$tIyuEarsl2CidmC%@J55ZkPOqmvwq(q*04F-` zdRLTfhT>yRK7B6H8q?Onz^d4Zb;{H25Yk2hZGe=p|L2wV>mSQ$_9*u;F*LAc{W_@n zt?A2>gQG?*)g^zzsf9?yQ8Tln{yM|3$H+ucr=JcPZ@<$a%FYz~xrC;n1yeVXjA*D8 zSz$^(%>~KFcX5%K;zM3&KOo+tOS4DHEXLGt2_ z;;2ZPqL2?1qYsZTBbD8*&7a*H_#^)ELc7x=C4kO3e@s|S=k9S8{&j+tB!>Zfn!~Yg zhth1sPN+Q7%NtU^*E`c@xw$-agRD3<#Fr-jRy}2J;Qh<~5+9h-(1cO$)awf5xJuom zSJHex43@2V#tQw*s2!{MyU`;(Yb?HAo_;;O1i6VZEe74rmIg&15x8G)&s=R$wkE_4*NM7 zSG(8UVJXJ#n#}DOlB;Z(Hp7i!(QI23kIbI_3GZDM5@k3W0#-t*>8PvDDfjm%U#%*Qi)D+9pz_ShF{f?m1Q*WStk-%FUNPVWz1O2buFY&$o&9W znfROT)Vany*|Kvv`)6PeE;hGPg}1Ydl1;nd;B&UywaeAjajLqXC}xM)y z!`}IHLy|?WRYpbS8}8TtTGRND;hMLT}oS#!JP-i@;6pE z(#L4%=>El&sg+4>4<{m{qQb3B=Dzz~s)|^MYr9zQvlcUF^keP#cw(kZgFSuBvSMUH1%vJe#&l}Rr2ke;^_S;D0sOR>AUx`fV0IXz>4aVhv6 
zeX?)sOXIedQBtC3VTq=a3}4&df8XYL$>+Qi+4>R~UtiIxx>wkgBbCqV>P-|8cb@0v zKRyx+0k z|N9rNGHgkVPR#SW7#;zk#%hMq^ZIBE?5fHAlqyE(3`w;}-tg~a?(gqvsPRJ*byhQc z9%nSb(9?6Z?x#k;UNTKi+bP`E_|CiIreHHSx3`X4gD)Y_+jIw`S}r$pkEQd@Ww&_W zI+=}UgiTreE|!##VDr4Nc!M7S-3-(*uA_^K=|eC`H~voLkEXEql+G4w*IC^iawujB zNXC!}LuB5+&o>$Rruhb6&-+wTN2g{bNh+UxP9c&RAC+1AXO3(<-N!QZDBz-O=Hnzj zPt-9&uwpVYNZ{ahll6UJa+I<~65eS2veZpvR5O|`Qt0jPuK^plI$C}&CIc$B1+Oxi{h>`muPZ$gRMC{rUdt@ZoHP^^NvV zB>yj8UWDT_e@840Mty}%MixC~u?4(UOkLV1q$hXvj+&0{Ys0E0-qTvDJcoi5ag0s#_lfn z7drezfm7v~jwIcAQc+Wv>b2eJCNB&pGHLxV3U2iPzD}7}N-Wrjboz=^$UXK~;rq?q zUBu4qkz`ppxsB4|ELaM5_BddD7J&G{e3o^0GBVD&MDztN z*z{X)oq;8s|9a25tn9I9!$YT-F<5Qbvo%JKvXUHG^b!wGax9Je^z5v;cpn25)tLxE zDl||c4XIB!nKY>ek5w;Yqc37V#>dC!^zw4d*@K*%{QJjJr|HXG5>X~|)y*jSOSweG z?UrcZP#k_w-g$X>8Qpkdw`beVejjvnN_3iPo*F;oir=k*ka&2%4N`?>t2?`Hi|b64 zt4^(1)XMI@VS~fExSr=8ZEIWG@q&OY5Apf#MDgeSSzH#K#?Is}U_DS3#?GxYKp~I? zYwL(sK~b2#Sqs9nn9O+_FkcKJi!gnxNYrlTJ6>AR50Ftp^2+U9O-<)2bwM;ek1RI3 z9ufh+m?l5|Q}_P)9$4=HxJ+l#G|0wbQBjGQ#aP(YU)&um)A~W>+2=Cp5gi*Fi|UJb zwXgBP%z7#h9^BV*x(M-_#<)?mIJGAPv)1R{m7=WQakgB`^Kix=29D4B#vyaB zKe7E_h$<4Yrg*4V1=xvVohCoG<*yeC)AP-r_u>39KoS+7=6ZA=HLDe2rKAiX6ZZUj zj)+MnudPi45q-SCNh{iWwodB@(TUy(0>E-I2j*P}#!Sf2Bv%xt`S^zlZEoMYJ$lcp zgHloZv#lY;dt6*xiNim2&WG0C-a_Dq>FhaBDmFGZFK=tj#(wR3xVt;tU06a?R8@gX z;?OJRSQ0I_xJgfQ6|*mMaB_l+Od6FrmT4j-%$>Ze-uYG6i5hy2p8Ao5oKo2u9lk!r zk1xe>Nq!-|e_#eTS!i6|7YRxBR2T+Mcy!ld7Q|}(Y8z<{+KegW<2|^Gs$woot8l=zm&O`hG z?3YNUbK^`yFu47#FdlrU(99#@ihs{!B95~nWA^Pmtw6yS$5Jb_36F3m~ zk_?4&5}>Mn18-9PX`u|vJ^=Rqcdm*YVm^^o2x>DflX_&4LOM190g}n3tk?M%FQ{l; z)6?;wKJ39AAs`~wdR^PKJ+wdFS)<@Fe$gqTk$@@qazm`s?yUuLc5MIDU_HlDn4n@j z5X~HW3h9r}UP zI0Nig1?s)in6sOkhS$}>P@U-hY{hg#2dK6_dL||X|0Z*TrfYx&!DrKWY`^RIT&jWZ z0mjl;Uthm-3JVLf0ynzj!NI|Cwmsh(iif|_fs~Y#ye^)*)FTw}Zp^Te44|Ds6&LFK zH(T*OUn+_$;kXP=8g;yhOd7Mt zG*}=#_lDt|lSI0LhL47ZW^H>r81ynyQbu*!0p+Jlfq{V+K4W%ikr5Ho+1a((`k4Zs zHS28%A%uj4kVs-)mN}jZ^)glG!k8FLZmZvu4SW0h>DK8B`#8XJp%E^2d9<7kjb4xQ z9YnM2C@E?N1_qDu5i;r_v)bGQr@Lh5lo(>GwPPz$^$P96RT=Ny@yr*9h`UW04Gp}Y zxuE6bj0X|TZaw?v?qV;0VztB1uhPfoux_q0+rXJC&O8Q{7n~1BU1A`}ozP!?)0IL- zM&?B?<+bD7#i9}q4SaiN5Y`c#o=&_!SCuTH>Bo^Of3ZIoqa^yswJM^?!onh-C6o#h z(=NYgOT5eRN*f`Yfj^iVeMvV_^ZflGz%ix4z5UC*RLn*&kas7t=d3G1pf`D5exIH< z=Y8yRPd{!LAoD_tAL^~QU6hfP9lG|Ajv*@qL7E~@r(JI&`@^WOM6XQ%+8TgpIaqGd zsL~zA&;eO`)XnEpgWYzzeY;`z7vzhhWx)wka7f)hEvGLy{t!zB>bvSfElU*HkHnCoa>8jY$cjHjUSw1>W&p>QXsBE7;P6Ln3K8=nnfp_xya; zS@c}+eBbfB*|eLrSvRLg4H_A+D;zAWoQda+2v&E{)k5n!TdMrWa?Q92k^6&sSa2S7 zDS~b+pjB84qt6=T7Nw#J1hsp=>!;iCiqQ3v^TSCG=Iu((JxJlkAOn_b)gsg`jK)&F z`@7m98awk-v)$Xn+uOTDw}l7f&@GT|CXZjlCWB(Vcl~#?YSw16KMI#wJ3&L=C(&tp z_$4lGFX$oy$`#KQ(zp!IcgBS7{^qxyj|&n+NzGR3GLf@U(ik99p4Aw1!J;9+VTANB zXttg96MLU@A$ml!JyWD`n1<`+pC8mtoNdO4I<0lWL`qR7v*>Ps6;7uQuXye=gGRV~ zRpm7!qg;m*2QULN4jo^GS8wP`Pg@P(da^Py1|ZO({sA>uPfrh=e?kIo`{UIoaI!^+ z_fD`}c1tOsL|_pT4s`_~?#-0F$`o`R! 
zk6nuR5PDD3;q_8nT~+hy(5)s&4dh=!0T+w*2Do9mYpPa3YUyYb&{jab3T5;7;ai;7 z%Wyx2(k|>gh-RY$rBwdQmSx2TyJb)kOtQvwHmX`YFBw7GZFKQ>>JBg9zq;EHc~Hk5 z@2_`(e;9*okieiSg|cn`(`j4I_xaHc1Q-QrPw;=rwx%uU3PIDLT4N&15P46r%ZB>j z_xR{qYzd}6OiaQ8ng2bj{`d3;3iSW~tD*O!7F=?aNR$#V8&r}My+I1oT+n#|boYg9 z5&?M$d5)X?D*u@;HoAy(ffjB$Mu9WKpRX|u z0XucVNGIQBEu|W}C`Xvb;;*5>j|AvBxRE+Z^2upt zp7mFS9}TI~=vX7M+`fYZr-B5getTq14EP^I810`j2fKw+4KxV2*-Wp8W;#(d>qO&& z@AE7_El6bZv(7-uN2_y(S3!i3|9gO znl*T*4&m(?6WJ;G|APTM-}!&u$cqY&lO3H;l$GUtv9zu(mtweSkB?6txgAWeaAC2n zuzY6bQ3oBxPmfVGrF|RNyc9S_FtlE8p%gwD(J`9*n)ex(+xp3u{3O-KCI}I{g*6uj>eY-}p7w6PRtp?2@}flab@IKZOz!H>@?@DYb+)?S zLH8ge4>MvFawyGlV`YotHLi8q6k)LaLCdGgu0ecw8vZG?meWCzDyC9#y-i(@t71DK zy{t7B>J9Gxa7kkrpD+r~6--d>{Tep*zOOsKf{|__O!>7C@oXn+e66)eDm*s^Z&bBF zi0zABwLOGpxh*?#m(|!}#nBrC#Wlz>{7rxKD0!)R_&K8VqX{0d5M@=mC7T^!Xr9!Q zCzA219x}Ln$7q(DYC*Cz%m2#V(^XP9bfJ?We`CGERk0ryBCGNrtOdoPQ?B84J&DZb zvgDAv7M>nL=XMo50es^FKSolE>}J)9M3;p>JsR6t$55*fYZ!c$`&3Q)t6&oscc>K} z;La2%FyPA?idDF77~gfrpVXO0wfeFWpgU0ZV{{HhsJ9-^bXk0u)-^tRR$;*L4z~A| z={|~mr3H-(cnSjsc&7RyZ2UM0{+>2e^#!gqaUJze0k@&>kkb|N`FJ~`S}Ik-6G0$GD|#pj%uLjn3yTDA1c}dlMUrH7zR~;#jCtsR**`EPP8531 znvb^$dMpk9sM&ka&0twyvJ}_-}&fK7z|A5xVp74^tfh;y5sfRrWm((0hf8C zS*7hK)5Ppqjtjen_fn0(@T)8KSfM(C> zREweK(rXL(RgxKPQ zPqjxA4|%xc*6`=NBjx3ZW*z=BCCT2rt1oQIV;1}$qlMm6YM{?Hy;mPzHJMbZRqnai z<iWmhv(B(_K8QY9m@L~3EUQt|3O_$S9b%%=qO^=Kv5*P4 zwq$~tIX*R&A(#>cw-8soEV1x?vYa89Z<37;8wtaGp9=*4$>t8TpF0UKhqrWBOXJ#L8%E9|l5GESS^r{ONKY)52;ZRSU!LYWa$~Mo9`*=U z_*Bm)=(WHb)G^@O$r<+kGJzcv5BVTBb13tDH z5yEfn6YPyv{!|j$?ptVZe2PY-7*^bEdr=|aFfLvs$v+c|d|F+-V55SR*ZBmgUOco& z``SgJK@{ePS?uOjgxAhz(^Nr!*J6d16))5BW9sf_#yTUz%-bMCjnsC5V6k)b-2MXo z8Jb&9f*Q}h=b?#$9}b@c3Pl^my`$4gGxnc5p7k~}Q;{3+Dj{eXGK%rOpnN%sW(s+H zJ6vptPfDT-rhICHVd9yRni6!cLC}e=J6|Mkdbe4fHE$xH z$eWLqa_(ZD22tN$QF^(ny_sOmJMv)|3NH2fU})0&HJpUrj;mv>nXZ*eJ70&BugqlI zBfMCPlaAL3&KqZrH>;(bn+dICA<7rHKhf&wZpzZ1ALz^vKJw0~+!p+`jrX5hJ)&!R zDSbL_#JTBoT_JcPDUk~xOW;Dc60tMuY0Ue`yhlqBI* zhyp`KQkeT{wm0J7@IX<1s?)hSG@~tb_!r{7nNN3=R$|#Q&5^9we@C9tW>fh5>QvKq zEl4plq9U9Hl0d?anGskO788m*6G0mbNmjulyO^b zi|X~x$^&2fYtpRG#fIog#UAM2n@&&5M9K+X$~Yjd>fotvQe)q^d}hp+EaICnQaD}@ zY7UClltMSc+3uf!X39n-nEh@wrtPDg=R{bkhXH-R3_Ims8nYP;T6#hE_53mu%?#^^ z)+IEwNTOrBbg-S2rY`5 zTWbGhcx26D7EL>W&c9JhI-QH>o+{e4^ypsEe+s_PAS!&7#>sYaZrVASsZ(>&IoA@M|t>`i)1sc+aKAHk6Z-4iKtVq#fsNN4mZCpKNE6Us4loj-)ANq zb>77`U<3FMPhlDWI5417d^gQSMMZTq)LbXnT5SMG35zr-clXBW3xE#hSrRdSsxNpO zgaEMZEMYH5wRQz~*}86u zb$NY8`@SYnFGu`eSQr^0t(I$*2q@5&0GznyEPQ^Dn5I!>o}K#R1Xc# z%?PvgQQ>t1F`=Lyo89;^} zkC$^Wp`oEr4A|x0Z{&z=&+|Pn%v$~`KnDHklr(aceuoGIz@M;d@k%1O0HwP4H^Vd$ z*L!`qC=HrLD5>yTxA~{YV@1PC_v$J+faOh0XaFWmDI$_JI4C1*01pq(ZaR!xq?j45 z67F+uR#q!#R!1vU|NJ1cuM= zNeIBI)NkHI0_@hPVR3J|7&D5PcL$(^9b^~NzbpWR#@c!U3#Q`Mvtg#Y);T?|C_05S zHsgLYpFaTmxcQqOv-KbQ?yK_;JXfpSVT2@}oi(lq{rwPYNIA#sTA6z)hmK?a^Ta9b z>6G^eO`%yI73azDZ}%(3Mv>k<=NVywKPz1cgCBpXkfbp#m6fGmCC-Z+`%6RT05)y`u znf2K4Y&!61HrSB@3<`|sFG)$G%F3AJ7>Iam1_3_KpixQkd^_O>MY`b-&MeK9_j^zPf7r`T2EscSCNDT8=Gd%TvjmMDG4lXZbyO z=;&nJ|82Y3h!{=hjR7OMdX?VT(Q?Zkz?{Lc^sPHoJ8nurQ6NCPgmf8n8n8J4OvlK` zh(Od=2qOeeVLFA+kqUqpdB6=0mYU+hAa?$Af4sT32i=KT?fTA+6hJSPWl?i+~s>Yj})pg-JyMuuO|M#ik*zE}I{|*OmVj4FN z4QZ-ADS0&WE1@xN3wiNbk00BJ9NjB8!@uItUzuGr-(ee6DrDN`Z%f z@fo1_uS6a%RDE$Sa$X}JE{TwN%Vy5EZ~3y5eI-AHequ>J z=$^RnF4b`CCE|;W&f`U7t7O!^F5-5WXs93R8SBJ}A-E~>qcqdRbqsSYF2jmH+IR|l zk7;^RN-EESxe&4U?HZ=AxOnsEXdJAlRHYCFf)NrV6-5L9neg(OS*`??g5qM;d@1yA z-@Z9GI~$*@b$L2(fR(25+LI|OD+9bg*!S^nuTJdta!de(GZ>NG(vkoI_fb`^taQS7&G;_mKzAnNIWUBHw-S!=EYskdJ} z`k4RI^U?}@q7xiws*ndq>`dhCw2~-0;Ieddb=ircUCT#qW`^naexOFByKTmB1@LG}!3^c7WEL2252l1U9Y&_@DPb4Zg07DYA3y 
zWQr25tda;CAZK6yOuUYgBmeT1Z~DiC!gr{r13!&~G`6qOpdGX{tS;M@Owi{nj`I-o zgXr?dmQ1D>5shM2cRh2MOaL$eI4fUpJSY)s>v5ObfP|E*@&H(NzI2QcPdZ>)(MSan zzznUJ!S~j{fMR83rLnmg`{m2GBT3AcmrX4#s^32{1O^2;e*WBN!!uo`!3wbLbBqlJ z1}>ZV$x>Gvfb!WaH>+U=S5{U6FfP;e@Q-2bg8RjkbmlCSBnC+5K9}u4G$~;05Lj7R zS3Gt}0jMKyWyJ_C_DoDD=^y=@DT5rWw55X-v3Ynn_7$D@BFE!kp8dNh6_ z^z%Clw_2$MX$5yLKHgVZ%_gQs60pQq#V@lrIz1y$zSEOAZg0^`YI42y-}8A$nOqLB zQc7W4DUcht`5pS+lTX@3)cb}FP+IAFWj|dHwS^xJRy(r8@t9!Oc6R!~{}TKe0#HCPvVE>npO#`{v5G&yX=UzmE93*V5^PHu zlah>#44}YR`1nPl9g-3F<$7&tNz9+V#l>}j@^#^nR%FPNetWx{ojI%D;%eLMc3jEI z36NP>SlHL9MQFeWX#@n)Qc?(j8v-18G+Q*=zWw$qK*IzoiUBJR@TMRrbq*2(L|j}P znti|=AvMhfu<344q>(!T{&#z|Xn%R=d)oI3^07iY5qxZU#(D=Z`G6eI{9y#IBy{>h zx7lSK;NecL*y5pxDfNy@--+1hm5;f6^Ubpy)~B!G?6o+zKZMgNmu3i2utKK`ZgI2bus57r~RU!XNOh9os;cMOQL`C$r)9dz2W~X+o z^wIJl^5Ej1*XHoioMKz zsBNkT+~k+wVC8o2+w{sgvs!k?4GG|fS20No074uEl^qN$DPv}T8tjz-mHu=(_X|{^ zfB%N7I-cbE!tvM%(4fal#BC+SM{lJ-4Knc@W-6lTYJOf`iB@gVbWEKWC1@+yjr#|Y z-{X!FmB+t&bNiJEGM2Nq&Pe<2bo;x6r$Xe;4EvP4miOVElf~$A!f=gGD#=Zcl-1X6 z-4P;^-#KrWyQOq=?3VrKf7BDAvk`WVAgPya=wxJmmM)iHj{O?4c=rxQL&rXfac%X)hJU=)Co@M1XC=lX| zA4|lpkCyoX^B=&>1b|3TW<7RN%^e&ab3qrS)94TYIHm9-lX$PFZ@( zS2~sg(C4B)Ta|e}l}UZ)F;-%?XyxN;XP@dxy0>H(x=F)0ygHHF4xNas?ndM6PWL$; zVHxcS+yr}j1cICm`h`dNED0B!)q4e(MRj(Fo20M%-nr4{Hu8etN^Ln({oK&(vM1i* z`*<<1u!xC?X};3x0nJmGA~(=P@~WgJ4YV4d*`cPTl{|h}Em5WhT$?;7J?_`bZpdh8 zFAM-0snhDNY)|C-ue2Bv8W{`Y5Fz0}mJl#pR z=kR@ShZ1+7=Y$dhfI+7c7S05M1A{v4)2pkUDBhLEibqk0H3;CY-Z4mml5DZk${#!9 zcU)H0o(Eui2?>eGich?cd^K7X>Y%%R=jkZ`=mJpWv!KlL`COI$#1G>Ez%{O`9RDC~g12XzVburW_dn*%ZD9nX(3Adf(4v=#4jg*I5Gv>2hYOR6q6ggXxmuX{|pXS zxU}I-eHzJGkHjBQ=<1($mR1?3a+JT9oX@DmzoM_%Oh~Nq3)|h=%5#5cIx>1EG!V#jfSpGmiEg#l0Mry18w`QQMRB?|Xtvek97=u&dtOlR0obSHS*b?z4Uj}d zLd$~w)17&}iEQ!YOujVc&UoesFqw>kLZSEV8ML_soU;>1aY=8qx`9zs`JL&Ygeb_! zbYQRmVgv&=f)b4?GN6wbSa)Ev`u!TDKXK4Rv8T%S_Vp<@Iv9eJ>EWCZ>IsBLJvlj{ zU}iony6>=A;D8E>a^Ho3Mr9N5sV0Dk23#2&o70vIoqS3s=m8@sW7y5*>2LtEvI*$q z^_$a8p|e41AkInyuV%!EKBWMl8Z_{-Iyxk3-#=1vaC`&05rZF^DWLxV^=~_ZwLJ$k z$-gUg7a0tIydkonfCieY`5bb5KOcbm5%Sm&NJo=^_n^CbdSc_?yj$-M_SlT#4Gjx3 z{PWW?gU>M#NUcuxt2$IlRcLRqP#m)@=~=bZnhd9YU$nRUT;DA{-Mlfn+q{sM+^jCs zbdKy5X>8-oB-XX}TJ?Dobd*iIW;ojX&;A23YctpR?C!B(*4sHHoRbIV-vPDw^ViCT zWK6EY1((l4N!d!8$9J9Pt%noSc*|rruTDU;oS|Ooe+OGZBjJnvu2#ljx76rYfT<4X z4G<5_rndT5kP4o3g=hAYz2a;RgHA|-%4i0EyxXczGH6`ni^_>?6{h)I{$c_91r-5BaY}sYdbKjv71`vJl%m4hYlV7By_3cXt`+X1M zV{$Q8vWpL*mC)YT#cbYhE7_HyBx}dGl^B^rH2+dASws&e^O}k4vYp~J^o1s&IRYF; z(Q5bKY=@Sy5+J&We=<#nsx=^rb@B^cYp%2UptF5E2JFpgFOCx5m>=l*pt2~1YbILS zU_gk~fO3M*qB98M3$pv;PfgHhl(&zM2jGHV>$eMmbZNZW?gOMpNnk?6#>Vz9FKRvb z{QPh}VYAYz<>w0;xJzyJpe`{G=Hi zfk`78P^PEz#xc+_9l&?I*1r1H1~9}tHWWZS_@~Yq+Gqm@x-cHrvj-gpXd37MgFKt< zwOR69re5*um|6h%{-Yu+FGBZ$m}~w)(Qd|~_tWjJ{p}wl$|o|Gg*t(tmK&$Pm`uBQ z$E}Pb>;BQ%y+15nDEc02|6HQn6KFj5NZ5N+(+?w_I%( zhUR9#c)x)FIcLkEvHQV~feWH`sJmHA<>`!=tkyF!Gb>;=06eO5WCT@TU%%M&KU@t= zV>+7mN&{95TAQ3ggCHhF7J|E-3EI9ms_Io-d29hpd)8dXjd{8JmO**u2Hn`6D=!8- z43WIvx8C3FS~I6BWC*QQ&!IHTsxQwZNp|y(k49*gi!=pG*nIUcHLy-EF17$)AMbVs z*Eul}Q=IK9uzFisTH1i7A?SM0xF_(0JTx@q?&Wn3IsIMLJtj@v{f%YEsTa>GyB2N^9bOicwIMA=PF)-86Y!&TSDCZ*p)uuU^ z6CgJ8)d8R|Y!L@K|F;N1r^zWePcp)&cA?Q}8-*@j*1nR2+X|Q5_l6Rp?|ll-q+ULm zPI>;T(a{(HW^yhrE?Kz~0|OyocCB{VQ-K~YP@O>C5Kzy*s(jHfF^hrn0qAdmpi&Xc z;{-r<;SC9HoXGekbqb;GKw}tH8(CIcxVM`0^nSC5vFo#+>9%-NV=6&`(|pVJGm4)4 z=atFdku2}uL+RT=DD3qb5D(j1@1IfX`Ns3ke}1t9)*7 zi?+qJ5;RdR`?Kf@(-%Zl8I&bJY9pU1kO(pkmqI>3wVKlIoLI6SucJu?OH_-aMRv3_ zH3@(O!HFp5jVL&i9UubD2V=~gFat1}B5p0Acab5aX$tn4x7m9?8q?8TN{whG>pb{-!rV8Db zq}^;kJrqem5%>GUuNlrFVX{bEu212%^b~dD3V!)~D#<|bphi;6vIrv>GOi`tz!0s~ 
zS?Vcoiavv7Fx-4>2sydiMYbXPlz=cJ{92VJSk&m~&0mJKjvyelpw*ZQ3$z8O_`|W^ zbIUfKJs=v$NO%><1AL{Z5b>zSoy?gY|B?DFjUm`z^kuD9>0cJZkg*VP3v*dD@80a@ zDi0R$n^u<1kvuptT32^~xnH(D$V{rxp!2=n$2Sazfq84zi}e*>m;lqu3;)J{*i>Hr z=wHwy59{jrInS9*U%8@cON_Zu$H>bK;i&5K53@6&h2hP=isKL)ILUHhv#RWN>gZM! z^QBM$z$T8H;txN0q@|=+;_DRzbLU}2fKG&x(oBP#+!5<&W;@Y&XD-I{&)G}CCkBDw zrn;pg@;3{VpYa>Dt;;3kBAA+q3EItTN^?K2Z(gnXL%<=J6@iQ~-n($i^jU_kEN@17 zO6b!{;&uy%9pS8wF4qnwa_<2Zw+57dx!GNE#dN^rjq(VBZE}qW_?;LS)nRhBc&z?p zHL5J;otLQwK9>4|v~#o_=BDv|u`6`NLKHBlIn^-%QPimMhOC;1>mRTlYIwOvhGh<+ zH+AoX#TVPlWAR?PS{Bomi7S4{4>S+_CCtPBb5e&h@%bn~<D-?8ny+dX3{wwF+($AsZ=cP&9Y;OP=1{3mp{CL>x}z9Weo-e=zfu&jBs+HuY4I) z5MW0>#UcRWO`xAk1`lZ{f(E7#>pu@@z}--~I=*F){P-~eDr-4#ZNUwMK8xYb26QW2 z3M@}p@aJs||Mk{V(5XWy(r+iMQ1uj<3__4jEg)>*cQW2cQb51+|Mg(W|M95Cf7bJV zarOUG?z3UnDDx7W5Kv;Tcf&IvDN`&Qx(+8WfahcWv&>f3YQR~Y0x7kuVJK|h%uFK4 z(kB2Nm4y=F0QOOez-LB6MveoP83zh7z(PMI7GMGuM`*_wbZl*Oqz3g-h2X!QKyj=B zEe4lDd>~Z8^uIUs-;4Y|u2Lxh-k@y8b7Ib`YOXY)+pKokDcytl+>J#4Eh8~FE9~lC zbOD#YodAc*ETW~S&$lE3D{3xYBog*?skHREJ~9P^5#)_l&5MXWz}yKg$AIwy)R+*J za4;{f#QQw2hmg;=`*^jVLn$dRvMTf8-?yE>JAi^oFo{)v+&tT7hl^P-0s7#};P@Ib<&L##2bAa4madF66-wCiA*gz$`18_G`-}@|YAD+hd37k9ux^FRG zZPccL;%2*fWpY==xE)9M};#O_kc$C&4lUs;zW9zx*Ws*Q`&XM zQ`!Ih%W5G`C8bbQB!!I3kfWipSCY}NO3En8ib5nRqq0d>Mn=dk-K2y#$q3!DjwH$G zSkL?FdH#5w|9-F6^T*w*m(Dq^b6ubBXS~Pf244gM?nuta9}ios^n3YN$$r?qg|v_+Ma}L8yh)&$>9`D$r;HFvv56(z*zyxh_Hep5=`TABBn8* zCfukSVBu2?WF&}@Ye`BTS68E)9z{1xHa@NU!2{mLSCeuXJO&@mdjM_UWqdG8moG9# zNj&(0h|X#J24K1%vpAubZE!mVC9Ak;o-fAo=A2VX4bc*$V<-dTIUw${xID7cCL#Es zpcU}hyvOX}`|M=??A#nFNGE0VWaC+le*5;FJFI>1f+Rz{>*IbF`(xI-ouU- z;FUuW$W?p?8{An^vq*k4`LH8I)!|vO{SbS(a|rzxc@uzvlGPnCAq>m_zuS9mY#$)D zxF{cBMkK(C)+rStP<*XTz8I1|W*CT6GU&UYi>z8&R3a7^7eRN1ki!(dt^xGxFgqZC zPS>tLclOve-2d4Qa}yI2f|-Q{=*+Jg3evpN_KOZc4jd4-4!{=ZkaKIDGdh=!`<96t zG#u9Y@neL(R%&W$#`n`~hFV&|`dS$XY9n;iP%gvILjOI|%8<*C_L-uY(9qX7kFg|v zY;83HyW9z0H0KW_&;|&AD`v(!QxPmi77`Qiu4Ktw_}o|Mm>L0JSA#>5SZV~lE#m!oVLzSf&=1(2Him2JlU+f73g0VPOq?Ft7s} z;O;g&(W!o2tqf2-S$OouP<-g$x_NWBHOnv~8E{V)+O1>|=!S^u;JbH?jk1i5pbBo9 zsmMp8>sAnE2-bNqY9^iLU|)7H6u&WMmAzK%6pW-Y2U2w_)O+ zI)|H=NWKL>_-&7VJZTjtC#@+C7^Hzl0EeCz<97?mPzB@1Mu=zwt(|V@8F|*MNhZk? 
zF91}7>CMf$3v(h;QhIF5mn+5TnB zsus%uKa+3eS-UnRLZ4oLPuKw#5$!p73~%JCr@wcQKgW#~lb8Q&ufoo|$1o`=NlaPU zd}pfRwQDa|tz4;xYix-=QFbMpygqAdYepf=kmS34!oI^k!pW~_Sg|nO#n=m&_%`_( z955pY_&07Gng6m!a4F_wq20aCQkS&OJZt~XlFKP@FvOUZTuM^(oXvYIbtd4Y=b!vc zSYs+ZVOlh(ZkUEAFCE2YC;R}!LMRzDh*gvS(cWXk((cj$U)nB>!{YX>{+{{VEIlhX zlMDw82I)uv^j07#&R$<_&z=>9_!{p-ESA1}Nw3*HmxN7V7g5X4i0WG`lkbyymWedZmy~~P-YsE4h?k4@8*XK)(=&E zDxsqeGo(^)wk`tl6jfV*aGUY`8A?j;%n_u)Hk?`~hDC#f)c{ZCcB_>uO}VaCGa#dO zjvIgDpNr9%ObcuBE*=7z)Yeg8O9M-hNy7XK>|W?f9FXDYfO<`uRAyo;=Rt@XK}K)n z?3{0LCi2v-eB9u3#a*anAJb;OcNk!xSL)6C3G`U!RzLuKvY>T7R75g0LeYD^1Rs^U ze*sC~0Zgp+yBwVT-KKeJTqhydFI>zLl9X(=kLxHpfu`F4I)8^B-KE-Ft;w5>fv5rO zpQ=Z|ty>uw-|6q*$sDcvdth_t0K{G^5e1@GS(h83k8fiD?l@PDIGC&fGe{@-5EqI9T2i!uh&f6uZamgn3qgoBl@%KF%AJ4 z^+mQ#0yHFbqZb!WklBsYUl@T&cLPM3HUKYAiplvQbEhjCaNi!0>B54)+`e@OEm@PD zM?fHlL<33NON2g|4vH>x=6R>5k9q>z2WNgFvI5{uF3g?wjd8TYizkT~=zHl2=HL}sh#jH0JPyF|T=ti5?neDbE4Lwb%96E(TBHQDM-W1#R6Yn zZe4o`CqPR}OCM-+>XRqwxDGFseFC<7M`1mYoCj#_)_kWzIX55 z-+wxm@?(s~Wvs|Y#C5{g;d5RR`pj_FXG${Ytw2sMJ^;EI%LqrX&9FTK@wBBWYQ@B* zKi?kcM1;6bRw)PyCY)S!l-vOK&xa{_1m z&JLaU_sa!6t5FZ^fpg}x_oMHG(O~dy`*nb_<_XXJK!Dsrf^E1xW%&$sb)QOZP@o2G zBS-6&iXUP>1cN)Y|2Ns&a)|2I@S>c??ZAgKu3S#fQYE5Xs-uw z@vWkwK`6!wO?A(YJr0Ay*;-Hgm zt@81~G;sOq)iktAq~Zp+95XjB?{ZQYa|DJq_lbYiKKWJwANz(*dzQhV4?RRGeeN-7 zbMVdvb!X?j$ed4}Jn0)7JD#xL>*wU;1Fklf@31k7h>IWd&Rb;P>yPs@>&FUSCwyH% z$@v-jLIL<@;~euN$j(zr%azl~dFo7<%0ZjKrQ)mOdPb3dHdQBh$vdyrt>$K(TrcB( zjx&C#gWEUdUD#`2bFd((NXvxpUpA>r_HD-;{CQtpPZkvuqW7&a*<0eP&#_WZ zoho@zEINFYsj^Dd)OJak$=-p3K6xUf=q?xT8EIWkQNXK2 z$%`QaDW^X~FIn$|7lu@qYA~9%U=0D``~107a!Ly9$86X+AsXz5$!PPZPoEI?yu3JZ zNtKlXPoF+5f%zX5YE`yz*C1DRadC0y=-}{hP=5YiI~hJ!Hny*b7?V1>si|on{#J*| zC-A3z?nrx&S^n?Y6II?S5|lM64Tuq1-_LZEj2mzT$~pC1$U9XfFc7zI_b}PQi0<0u zkK9o+-I8*M#qLz)3Sx00%x8nS4U5&K+j>nj8V`Jr?wCK@j(RZ$9*gbT1w+2mPh|*6 zp4PnTL9o%Qrc?}p#$yMIyL)=gPi=@N9q%3&T<_Fd84AqQUlFz~TDqq z40W4Ty*C>f849D?@kMTRtHI;Z(V=Fmk5cE!jKFJ+rP_hTBh zP*6}P`jgks1+JKC2qp*ezdRd(;&}#o% zRT8IOvL;Ri5bW&i+ji{W^ISKLT8IMt^aD(9RG_89n5FWoPmIB9U*D|aU1+>yeLH) zR#u6O&w!7u&{KjNRl_@byyqOzBc^@HI2i9WeFClSG75$Axxh9|G4epJMa56dyrhW@ zn-6N>rJ(Tf1)ErSu*u2E@e71xXYXcAkA)1yUtbLogH!Q)fJ&i)KJyfu3gs)~J)D|) zKReIos;9J&%0`DK6Z}H*E)@4%%lh&zwN3VTyz)lOmH(X2tq3SAlySRmVIeRsH2(+# zsK10HS7vfDGg?#rA5IPq2Mi6@ffhcScZ$l*!!xDZ`4j444a&k{Z6U^Sk89Wd1#rXv zqc}hR075Wk4E>eE!{OoALddXCO?jXFpsB6B8fT#E*zwB)4LsfD9&5>wZn=AOWaL+* zLT<|E&d$?le;qvHv;#3i2naEYh={bcx2NDtqorI%$n4wKkKWkM-u?#C0MccPb<8Su zc0GN4X37oYtT!0iIH!G<7G|>h#bo$!_YLFTiDC@`Zq_Ayb5ToueZTp6Pk$wqO?-Ue zj~?Cb#T-h10dKB)T5C~PC5UVpS=pNU`W5gYdTx{0y))SF+qbhEg8RFz{EkyR`ubO{ zUJd>DPp41Q%N_e+#SDSQG|Q3|=3?wD;Bayp-=oqojI{&YYt}fo6H@TO2JVeNrnfF= z1r}&(RRe! zK7)jkHi<{FsDbjXOLZQ;XSt&eF}w&hvX+}$QO$kbt5Hl_$Z;|#C?YEA#ZKuf-%b4; zckkX!A7`Y#1SHa>{Gp{qZ}aBO+f-F8sZ<~WB9MYeijrGhE?>G7K0E79p}cKqxQ{V% zW5hLYifFc{aEwv3F7+=nrW_=aTTuz?mMY3l8V~y!~@(@jzKn{o4m3=W6`e?#IOJ0_PmfYvHa=rLGp-FOG8%i3&;L zE71NJkmCp@YJEW^fCs=pjs58heafcPr87V+-T)E95`+df`tbALl6CF#^S|6pKxK=! 
z(P&>GHh=~vza}*Wa6orEHRY=I{n7DpE+r+U`KeqV&4UM*frh+;dzm>JC7|HU3bgqZ zXhucPN?uD(3gkE}&5Cv1NZ!_G&afZ}yIJh302-R%KRGqosGz9$s=j{s#)npfiq(Y+ z&pt@E7du&Ei-lENy56$ZJh=H+6Owk#Tiky2VAM;&lOqTxCnr*7i)f~2E=TP%MUh9e zCbih%Ypf#jii-nNQ@20M&ZZzx@4iv=ViYPKG|*dDTUXaRlYbBeFxD<&*avOlMXrIz zsuVYVEPHr^)6{jr$FzU=_dexek-Y5mbjgS5?JX@6IalML4{x36&bfbojiHed63oV9 z!*huj;4|d3zj`v)W|4&MS&N9#9 zfkyAQ(Bf^(3tDV1nP1b=Zwq2!?4o7<*|!osH?FBtS<#?NJyay|-Jerajs`EDJ$v7G zdCY$NuC0zvlU!FP`#~CwmRDHF+N@}ufXNDd@mN5!r9)TT^muO=#)FMS z9#%{|J5{zGD+2VdA6uB8508lnL8Gls4G#}boOb`RF`bW(&+7DPKQyR;(~r?hX}79T zsck2aUIfx@zYT3YoYtC@le5~`*cfE*nsu5ta&sjb-o2ZcsaoR4ipeS9 z`0?Y*tVhwwb5oFDH-jhkO-SgK_$0V)-Axoa;a$7lS(QabMs^yUwYR_B-EAIpxa_>$ z;+T_<0n2?0Y?IKlv<$=6m@A_+9TS$qX)8=4>w=R>hm4-Y&`1;ZwVZzrnN9iBr<2Wg zGW6{OgM&7PCMFswEq&-FC1quA;o8XDbPc(6MlH7q5;ghbEPA%3T{4_Aqk6GVZ^UoL z2coXQZvIUp;jz`o$5h81XUFIOO@e?&e`$}k;X30GlA~Mq+O7#2DJxss<(N_T%$XG| z4+#oNe?YDho0^)M;Wt(i=Du3w+anOCJrnaqc|sEHjWfr33PsQMva|EGKBe&$?m*hh zT<0*`Z^(liNbwznd+uxE2|VDh+Ybl{C4bO2F{!gZ5l@hts;Yt7(!~Y6e&18n+o4j) zw`mh%1n&yT$QThWwcEF#hy(LKufnbdd+Dz`RJ_H-#pUbcaA5_Ivj5tq%(Q>Dk7e2l z4TsxPr~zF^6k>-LM=aZ6mfHK@KgC9;nf{Ady9X@=yxw+XFN`d$l zbp1lJ-QC^Y?n2%+(2>=@duQO&`@(TkenG)k>^TB1kAv9gUdFN3+gB%p&SSU%fhjDeKjjFZ=-e zO^Y0OTow6Wy?)I^V6LyPulI$~XMTQO7r}(7(5}eL6P-5Ui|Oqv7Fpa#7B$6w7~fB9 zc`=V4=be7D>fz(ZLc4c!qR2rgtp=ABu>~CtRua5IXCif}PYvV~8)ioG62axM5hUP> zbHt7WL?$M{OQiIh%8KI8M%32U_JQ~Duc%NV`?9A`caUqcP)BAlUoa6U7Ssa=nCWyn z7(_)v?b0Pl{GEHlh9Ep4ILBC&uKu8UcqpLXOs>pKuX9Dx&KJ_@%o}bK$b9Yb{_O&IS@?#`GmCnkKr6G;#Sm2Pa!i(P^g!XOYko zTSv%kaFw5_9HvxiRRx#oJU6K&!UJmg{CNesS@b-fjFB|bR7B#lQTz~|!XDP#-F+u2 zYCU?+udwvMtaa${;lt7UwX==cHT7ow){+;rFijf4YHO)?1XTsTyma$7&eBA*k*+$B zhgMkkLHGs-uOg~(#8!kJWYN^KI(wECqZJCVHQ28ibAWWKzJLGDajgd3AZ-B+3ET=n zFLf{!Ko&z^#8@XRCbkOHd?2E|z=m_3rn;kJwz>+#3+Z=R^a@Q@EDMrK_aUM2eg_8! zm^5tie6Uc7zKG5ZZCbO{5;WhlHbSS7m6$8%e;p-!0|O70%?-VO#DXS~@Ws8mbLWmE z*vxN(gM1OvNWREZDzuKawwsuqg|}^EMd)!;P${i(U%Bz63JRq0&C`BVUG86oooy;; zf3l`_o0o8d3p;>Zf(NslfGpq7Ovl=KD^cCrx)vYd(8ufxe^PR0<|-5zObY6HdK559 zm#4p7bwhcAVQ}N?GzJx1surQ#%39Tl`;w#eN%okH8e02vRiuIVtPjdd-L8s3hE{@ z0){0i_n%fWa&oU~Ygr)*HRy7uEqiytF*{ zfY%aj@hN334n?2KJ)}{wbuBFJf+YMGzj_!8SuhxSH8Ev9dBWON?1X3jw^y6ZCOJ9B zWi7jc{m>)-ofk{NXLj{@RFs#u3G5~wN?J1mdg*GjG%*2QQ&3<~jQI$}WA*#@93Tx3 zX=rG~Fu<#F3kq@(L7}0VSFXt6n?y(R!d27!`E&igbrcqqytd04Gkkf!?}Ub0J34YA zK7hHcMDltA?q>YY1Q^bMpgufaNFvmAb;CP1oyE7Ns2dopr9NqFigPS!Jt|SbN|27! zU%!441Z;oKWlUUztKz9L$I+gOvd)0J^nKFO{y~Z5<(uKpqe8fL?AWnOlY_@AGyXo; zBSTmZMP7PWSJ{^Z)Keq>CQ#3Ky4@E`)8enuO=vL>9|-aezIAB0NI zqLQkrAU4D0y?7B262eYp#iT*S%+PE2H)~X*84iz#xCv%ttD+(wh68BOYXB%?A6nAt zZULn$;^Z}aP#W)ph5^7Xxj^tac>DRUTt3whGyce^?CdmyZmtiMAocihP9i;;cR9xf z5=tSevB{`SeE_}dt-!##Q`dtm+`qD6L!tmUO4B?m^}qoz+a$CA#1LUNr!niFnV+4YZ<%9aSw{K@_^)5VkO#-%8Ng4T8WcBcvv_87Bu&bb9N-Zs zSngOgKOjM$#!#>YJBm1!VJIeR75WzP`1lVU*4Dl#ecKkDb=lOTn3#UFf^Y)X0##4E z*!1q5Kh6P>n3TkX-EveQp#(8KJ&pE41E2^&VEd`~G&bG}4do>Ia3V<`3vHUHMha{J za$A`c^iopOd(3x*&`SwO0OZ~1>vULKG)OM|WwJkLn%dNy16F?j E1r1& + + +Ozone shell is the primary interface to interact with Ozone from the command line. Behind the scenes it uses the [Java API]({{< ref "interface/JavaApi.md">}}). + + There are some functionality which couldn't be accessed without using `ozone sh` commands. For example: + + 1. Creating volumes with quota + 2. Managing internal ACLs + 3. Creating buckets with encryption key + +All of these are one-time, administration tasks. Applications can use Ozone without this CLI using other interface like Hadoop Compatible File System (o3fs or ofs) or S3 interface. + + +Ozone shell help can be invoked at _object_ level or at _action_ level. 
+ +For example: + +```bash +ozone sh volume --help +``` + +will show all possible actions for volumes. + +Or it can be invoked to explain a specific action like: + +```bash +ozone sh volume create --help +``` + +which will print the command line options of the `create` command for volumes. + +## General Command Format + +Ozone shell commands take the following form: + +> _ozone sh object action url_ + +**ozone** script is used to invoke all Ozone sub-commands. The ozone shell is +invoked via ```sh``` command. + +Object can be volume, bucket or key. Actions are various verbs like +create, list, delete etc. + +Depending on the action, Ozone URL can point to a volume, bucket or key in the following format: + +_\[schema\]\[server:port\]/volume/bucket/key_ + + +Where, + +1. **Schema** - This should be `o3` which is the native RPC protocol to access + Ozone API. The usage of the schema is optional. + +2. **Server:Port** - This is the address of the Ozone Manager. If the port is +omitted the default port from ozone-site.xml will be used. + +Please see volume commands, bucket commands, and key commands section for more +detail. + +## Volume operations + +Volume is the top level element of the hierarchy, managed only by administrators. Optionally, quota and the owner user can be specified. + +Example commands: + +```shell +$ ozone sh volume create /vol1 +``` + +```shell +$ ozone sh volume info /vol1 +{ + "metadata" : { }, + "name" : "vol1", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-28T12:31:50.112Z", + "modificationTime" : "2020-07-28T12:31:50.112Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +} +``` + +```shell +$ ozone sh volume list / +{ + "metadata" : { }, + "name" : "s3v", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-27T11:32:22.314Z", + "modificationTime" : "2020-07-27T11:32:22.314Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +} +.... +``` +## Bucket operations + +Bucket is the second level of the object hierarchy, and is similar to AWS S3 buckets. Users can create buckets in volumes, if they have the necessary permissions. + +Command examples: + +```shell +$ ozone sh bucket create /vol1/bucket1 +```shell + +```shell +$ ozone sh bucket info /vol1/bucket1 +{ + "metadata" : { }, + "volumeName" : "vol1", + "name" : "bucket1", + "storageType" : "DISK", + "versioning" : false, + "creationTime" : "2020-07-28T13:14:45.091Z", + "modificationTime" : "2020-07-28T13:14:45.091Z", + "encryptionKeyName" : null, + "sourceVolume" : null, + "sourceBucket" : null +} +``` + +[Transparent Data Encryption]({{< ref "security/SecuringTDE.md" >}}) can be enabled at the bucket level. + +## Key operations + +Key is the object which can store the data. 
+ +```shell +$ ozone sh key put /vol1/bucket1/README.md README.md +``` + + + + + +```shell +$ ozone sh key info /vol1/bucket1/README.md +{ + "volumeName" : "vol1", + "bucketName" : "bucket1", + "name" : "README.md", + "dataSize" : 3841, + "creationTime" : "2020-07-28T13:17:20.749Z", + "modificationTime" : "2020-07-28T13:17:21.979Z", + "replicationType" : "RATIS", + "replicationFactor" : 1, + "ozoneKeyLocations" : [ { + "containerID" : 1, + "localID" : 104591670688743424, + "length" : 3841, + "offset" : 0 + } ], + "metadata" : { }, + "fileEncryptionInfo" : null +} +``` + +```shell +$ ozone sh key get /vol1/bucket1/README.md /tmp/ +``` diff --git a/hadoop-hdds/docs/content/interface/JavaApi.md b/hadoop-hdds/docs/content/interface/JavaApi.md index bb18068f4000..2a97922d7415 100644 --- a/hadoop-hdds/docs/content/interface/JavaApi.md +++ b/hadoop-hdds/docs/content/interface/JavaApi.md @@ -1,7 +1,10 @@ --- title: "Java API" date: "2017-09-14" -weight: 1 +weight: 5 +menu: + main: + parent: "Client Interfaces" summary: Ozone has a set of Native RPC based APIs. This is the lowest level API's on which all other protocols are built. This is the most performant and feature-full of all Ozone protocols. --- The Hadoop compatible file system interface allows storage backends like Ozone -to be easily integrated into Hadoop eco-system. Ozone file system is an -Hadoop compatible file system. Currently, Ozone supports two scheme: o3fs and ofs. -The biggest difference between the o3fs and ofs,is that o3fs supports operations -only at a single bucket, while ofs supports operations across all volumes and buckets. -you can Refer to "Differences from existing o3FS "in ofs.md for details of the differences. +to be easily integrated into Hadoop eco-system. Ozone file system is an +Hadoop compatible file system. + + ## Setting up the o3fs @@ -43,7 +52,7 @@ Once this is created, please make sure that bucket exists via the _list volume_ Please add the following entry to the core-site.xml. -{{< highlight xml >}} +```XML fs.AbstractFileSystem.o3fs.impl org.apache.hadoop.fs.ozone.OzFs @@ -52,7 +61,7 @@ Please add the following entry to the core-site.xml. fs.defaultFS o3fs://bucket.volume -{{< /highlight >}} +``` This will make this bucket to be the default Hadoop compatible file system and register the o3fs file system type. @@ -116,55 +125,3 @@ hdfs dfs -ls o3fs://bucket.volume.om-host.example.com:6789/key Note: Only port number from the config is used in this case, whereas the host name in the config `ozone.om.address` is ignored. -## Setting up the ofs -This is just a general introduction. For more detailed usage, you can refer to ofs.md. - -Please add the following entry to the core-site.xml. - -{{< highlight xml >}} - - fs.ofs.impl - org.apache.hadoop.fs.ozone.RootedOzoneFileSystem - - - fs.defaultFS - ofs://om-host.example.com/ - -{{< /highlight >}} - -This will make all the volumes and buckets to be the default Hadoop compatible file system and register the ofs file system type. - -You also need to add the ozone-filesystem-hadoop3.jar file to the classpath: - -{{< highlight bash >}} -export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/hadoop-ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH -{{< /highlight >}} - -(Note: with Hadoop 2.x, use the `hadoop-ozone-filesystem-hadoop2-*.jar`) - -Once the default Filesystem has been setup, users can run commands like ls, put, mkdir, etc. -For example: - -{{< highlight bash >}} -hdfs dfs -ls / -{{< /highlight >}} - -Note that ofs works on all buckets and volumes. 
Users can create buckets and volumes using mkdir, such as create volume named volume1 and bucket named bucket1: - -{{< highlight bash >}} -hdfs dfs -mkdir /volume1 -hdfs dfs -mkdir /volume1/bucket1 -{{< /highlight >}} - - -Or use the put command to write a file to the bucket. - -{{< highlight bash >}} -hdfs dfs -put /etc/hosts /volume1/bucket1/test -{{< /highlight >}} - -For more usage, see: https://issues.apache.org/jira/secure/attachment/12987636/Design%20ofs%20v1.pdf - -## Special note - -Trash is disabled even if `fs.trash.interval` is set on purpose. (HDDS-3982) diff --git a/hadoop-hdds/docs/content/interface/OzoneFS.zh.md b/hadoop-hdds/docs/content/interface/O3fs.zh.md similarity index 97% rename from hadoop-hdds/docs/content/interface/OzoneFS.zh.md rename to hadoop-hdds/docs/content/interface/O3fs.zh.md index 996991962c75..0b2a06f32181 100644 --- a/hadoop-hdds/docs/content/interface/OzoneFS.zh.md +++ b/hadoop-hdds/docs/content/interface/O3fs.zh.md @@ -21,6 +21,12 @@ summary: Hadoop 文件系统兼容使得任何使用类 HDFS 接口的应用无 limitations under the License. --> +
+ +注意:本页面翻译的信息可能滞后,最新的信息请参看英文版的相关页面。 + +
+ Hadoop 的文件系统接口兼容可以让任意像 Ozone 这样的存储后端轻松地整合进 Hadoop 生态系统,Ozone 文件系统就是一个兼容 Hadoop 的文件系统。 目前ozone支持两种协议: o3fs和ofs。两者最大的区别是o3fs只支持在单个bucket上操作,而ofs则支持跨所有volume和bucket的操作。关于两者在操作 上的具体区别可以参考ofs.md中的"Differences from existing o3fs"。 diff --git a/hadoop-hdds/docs/content/interface/Ofs.md b/hadoop-hdds/docs/content/interface/Ofs.md new file mode 100644 index 000000000000..fcc1467a7102 --- /dev/null +++ b/hadoop-hdds/docs/content/interface/Ofs.md @@ -0,0 +1,227 @@ +--- +title: Ofs (Hadoop compatible) +date: 2017-09-14 +weight: 1 +menu: + main: + parent: "Client Interfaces" +summary: Hadoop Compatible file system allows any application that expects an HDFS like interface to work against Ozone with zero changes. Frameworks like Apache Spark, YARN and Hive work against Ozone without needing any change. **Global level view.** +--- + + +The Hadoop compatible file system interface allows storage backends like Ozone +to be easily integrated into Hadoop eco-system. Ozone file system is an +Hadoop compatible file system. + + + + +## The Basics + +Examples of valid OFS paths: + +``` +ofs://om1/ +ofs://om3:9862/ +ofs://omservice/ +ofs://omservice/volume1/ +ofs://omservice/volume1/bucket1/ +ofs://omservice/volume1/bucket1/dir1 +ofs://omservice/volume1/bucket1/dir1/key1 + +ofs://omservice/tmp/ +ofs://omservice/tmp/key1 +``` + +Volumes and mount(s) are located at the root level of an OFS Filesystem. +Buckets are listed naturally under volumes. +Keys and directories are under each buckets. + +Note that for mounts, only temp mount `/tmp` is supported at the moment. + +## Configuration + + +Please add the following entry to the core-site.xml. + +{{< highlight xml >}} + + fs.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + + + fs.defaultFS + ofs://om-host.example.com/ + +{{< /highlight >}} + +This will make all the volumes and buckets to be the default Hadoop compatible file system and register the ofs file system type. + +You also need to add the ozone-filesystem-hadoop3.jar file to the classpath: + +{{< highlight bash >}} +export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/hadoop-ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH +{{< /highlight >}} + +(Note: with Hadoop 2.x, use the `hadoop-ozone-filesystem-hadoop2-*.jar`) + +Once the default Filesystem has been setup, users can run commands like ls, put, mkdir, etc. +For example: + +{{< highlight bash >}} +hdfs dfs -ls / +{{< /highlight >}} + +Note that ofs works on all buckets and volumes. Users can create buckets and volumes using mkdir, such as create volume named volume1 and bucket named bucket1: + +{{< highlight bash >}} +hdfs dfs -mkdir /volume1 +hdfs dfs -mkdir /volume1/bucket1 +{{< /highlight >}} + + +Or use the put command to write a file to the bucket. + +{{< highlight bash >}} +hdfs dfs -put /etc/hosts /volume1/bucket1/test +{{< /highlight >}} + +For more usage, see: https://issues.apache.org/jira/secure/attachment/12987636/Design%20ofs%20v1.pdf + +## Special note + +Trash is disabled even if `fs.trash.interval` is set on purpose. (HDDS-3982) + +## Differences from [o3fs]({{< ref "interface/O3fs.md" >}}) + +### Creating files + +OFS doesn't allow creating keys(files) directly under root or volumes. +Users will receive an error message when they try to do that: + +``` +$ ozone fs -touch /volume1/key1 +touch: Cannot create file under root or volume. +``` + +### Simplify fs.defaultFS + +With OFS, fs.defaultFS (in core-site.xml) no longer needs to have a specific +volume and bucket in its path like o3fs did. 
+Simply put the OM host or service ID (in case of HA): + +``` + +fs.defaultFS +ofs://omservice + +``` + +The client would then be able to access every volume and bucket on the cluster +without specifying the hostname or service ID. + +``` +$ ozone fs -mkdir -p /volume1/bucket1 +``` + +### Volume and bucket management directly from FileSystem shell + +Admins can create and delete volumes and buckets easily with Hadoop FS shell. +Volumes and buckets are treated similar to directories so they will be created +if they don't exist with `-p`: + +``` +$ ozone fs -mkdir -p ofs://omservice/volume1/bucket1/dir1/ +``` + +Note that the supported volume and bucket name character set rule still applies. +For instance, bucket and volume names don't take underscore(`_`): + +``` +$ ozone fs -mkdir -p /volume_1 +mkdir: Bucket or Volume name has an unsupported character : _ +``` + +## Mounts + +In order to be compatible with legacy Hadoop applications that use /tmp/, +we have a special temp mount located at the root of the FS. +This feature may be expanded in the feature to support custom mount paths. + +Important: To use it, first, an **admin** needs to create the volume tmp +(the volume name is hardcoded for now) and set its ACL to world ALL access. +Namely: + +``` +$ ozone sh volume create tmp +$ ozone sh volume setacl tmp -al world::a +``` + +These commands only needs to be done **once per cluster**. + +Then, **each user** needs to mkdir first to initialize their own temp bucket +once. + +``` +$ ozone fs -mkdir /tmp +2020-06-04 00:00:00,050 [main] INFO rpc.RpcClient: Creating Bucket: tmp/0238 ... +``` + +After that they can write to it just like they would do to a regular +directory. e.g.: + +``` +$ ozone fs -touch /tmp/key1 +``` + +## Delete with trash enabled + +When keys are deleted with trash enabled, they are moved to a trash directory +under each bucket, because keys aren't allowed to be moved(renamed) between +buckets in Ozone. + +``` +$ ozone fs -rm /volume1/bucket1/key1 +2020-06-04 00:00:00,100 [main] INFO fs.TrashPolicyDefault: Moved: 'ofs://id1/volume1/bucket1/key1' to trash at: ofs://id1/volume1/bucket1/.Trash/hadoop/Current/volume1/bucket1/key1 +``` + +This is very similar to how the HDFS encryption zone handles trash location. + +## Recursive listing + +OFS supports recursive volume, bucket and key listing. + +i.e. `ozone fs -ls -R ofs://omservice/`` will recursively list all volumes, +buckets and keys the user has LIST permission to if ACL is enabled. +If ACL is disabled, the command would just list literally everything on that +cluster. + +This feature wouldn't degrade server performance as the loop is on the client. +Think it as a client is issuing multiple requests to the server to get all the +information. + +## Special note + +Trash is disabled even if `fs.trash.interval` is set on purpose. (HDDS-3982) diff --git a/hadoop-hdds/docs/content/interface/S3.md b/hadoop-hdds/docs/content/interface/S3.md index 2324fcba049a..3404cb8233ec 100644 --- a/hadoop-hdds/docs/content/interface/S3.md +++ b/hadoop-hdds/docs/content/interface/S3.md @@ -1,6 +1,9 @@ --- title: S3 Protocol weight: 3 +menu: + main: + parent: "Client Interfaces" summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact, You can use S3 clients and S3 SDK based applications without any modifications with Ozone. 
--- @@ -110,6 +113,24 @@ export AWS_SECRET_ACCESS_KEY=c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e aws s3api --endpoint http://localhost:9878 create-bucket --bucket bucket1 ``` +## Expose any volume + +Ozone has one more element in the name-space hierarchy compared to S3: the volumes. By default, all the buckets of the `/s3v` volume can be accessed with S3 interface but only the (Ozone) buckets of the `/s3v` volumes are exposed. + +To make any other buckets available with the S3 interface a "symbolic linked" bucket can be created: + +```bash +ozone sh create volume /s3v +ozone sh create volume /vol1 + +ozone sh create bucket /vol1/bucket1 +ozone sh bucket link /vol1/bucket1 /s3v/common-bucket +``` + +This example expose the `/vol1/bucket1` Ozone bucket as an S3 compatible `common-bucket` via the S3 interface. + +(Note: the implementation details of the bucket-linking feature can be found in the [design doc]({{< ref "design/volume-management.md">}})) + ## Clients ### AWS Cli diff --git a/hadoop-hdds/docs/content/interface/_index.md b/hadoop-hdds/docs/content/interface/_index.md index 254864732fb8..40ca5e7b249b 100644 --- a/hadoop-hdds/docs/content/interface/_index.md +++ b/hadoop-hdds/docs/content/interface/_index.md @@ -1,8 +1,8 @@ --- -title: "Programming Interfaces" +title: "Client Interfaces" menu: main: - weight: 4 + weight: 5 --- - -Ozone shell supports the following bucket commands. - - * [create](#create) - * [delete](#delete) - * [info](#info) - * [list](#list) - -### Create - -The `bucket create` command allows users to create a bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -g, \-\-enforcegdpr | Optional, if set to true it creates a GDPR compliant bucket, if not specified or set to false, it creates an ordinary bucket. -| -k, \-\-bucketKey | Optional, if a bucket encryption key name from the configured KMS server is specified, the files in the bucket will be transparently encrypted. Instruction on KMS configuration can be found from Hadoop KMS document. -| Uri | The name of the bucket in **/volume/bucket** format. - - -{{< highlight bash >}} -ozone sh bucket create /hive/jan -{{< /highlight >}} - -The above command will create a bucket called _jan_ in the _hive_ volume. -Since no scheme was specified this command defaults to O3 (RPC) protocol. - -### Delete - -The `bucket delete` command allows users to delete a bucket. If the -bucket is not empty then this command will fail. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket - -{{< highlight bash >}} -ozone sh bucket delete /hive/jan -{{< /highlight >}} - -The above command will delete _jan_ bucket if it is empty. - -### Info - -The `bucket info` commands returns the information about the bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket. - -{{< highlight bash >}} -ozone sh bucket info /hive/jan -{{< /highlight >}} - -The above command will print out the information about _jan_ bucket. - -### List - -The `bucket list` command allows users to list the buckets in a volume. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | Maximum number of results to return. 
Default: 100 -| -p, \-\-prefix | Optional, Only buckets that match this prefix will be returned. -| -s, \-\-start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh bucket list /hive -{{< /highlight >}} - -This command will list all buckets on the volume _hive_. diff --git a/hadoop-hdds/docs/content/shell/BucketCommands.zh.md b/hadoop-hdds/docs/content/shell/BucketCommands.zh.md deleted file mode 100644 index 9afd28079c20..000000000000 --- a/hadoop-hdds/docs/content/shell/BucketCommands.zh.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: 桶命令 -summary: 用桶命令管理桶的生命周期 -weight: 3 ---- - - -Ozone shell 提供以下桶命令: - - * [创建](#创建) - * [删除](#删除) - * [查看](#查看) - * [列举](#列举) - -### 创建 - -用户使用 `bucket create` 命令来创建桶。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| -g, \-\-enforcegdpr | 可选,如果设置为 true 则创建符合 GDPR 规范的桶,设置为 false 或不指定则创建普通的桶| -| -k, \-\-bucketKey | 可选,如果指定了 KMS 服务器中的桶加密密钥名,该桶中的文件都会被自动加密,KMS 的配置说明可以参考 Hadoop KMS 文档。 -| Uri | 桶名,格式为 **/volume/bucket** | - - -{{< highlight bash >}} -ozone sh bucket create /hive/jan -{{< /highlight >}} - -上述命令会在 _hive_ 卷中创建一个名为 _jan_ 的桶,因为没有指定 scheme,默认使用 O3(RPC)协议。 - -### 删除 - -用户使用 `bucket delete` 命令来删除桶,如果桶不为空,此命令将失败。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 桶名 | - -{{< highlight bash >}} -ozone sh bucket delete /hive/jan -{{< /highlight >}} - -如果 _jan_ 桶不为空,上述命令会将其删除。 - -### 查看 - -`bucket info` 命令返回桶的信息。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 桶名 | - -{{< highlight bash >}} -ozone sh bucket info /hive/jan -{{< /highlight >}} - -上述命令会打印出 _jan_ 桶的有关信息。 - -### 列举 - -用户通过 `bucket list` 命令列举一个卷下的所有桶。 - -***参数:*** - -| 参数 | 说明 | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | 返回结果的最大数量,默认为 100 -| -p, \-\-prefix | 可选,只有匹配指定前缀的桶会被返回 -| -s, \-\-start | 从指定键开始列举 -| Uri | 卷名 - -{{< highlight bash >}} -ozone sh bucket list /hive -{{< /highlight >}} - -此命令会列出 _hive_ 卷中的所有桶。 diff --git a/hadoop-hdds/docs/content/shell/Format.md b/hadoop-hdds/docs/content/shell/Format.md deleted file mode 100644 index d6c9d2f51802..000000000000 --- a/hadoop-hdds/docs/content/shell/Format.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Shell Overview -summary: Explains the command syntax used by shell command. -weight: 1 ---- - - -Ozone shell help can be invoked at _object_ level or at _action_ level. -For example: - -{{< highlight bash >}} -ozone sh volume --help -{{< /highlight >}} - -This will show all possible actions for volumes. - -or it can be invoked to explain a specific action like -{{< highlight bash >}} -ozone sh volume create --help -{{< /highlight >}} -This command will give you command line options of the create command. - -

- - -### General Command Format - -The Ozone shell commands take the following format. - -> _ozone sh object action url_ - -**ozone** script is used to invoke all Ozone sub-commands. The ozone shell is -invoked via ```sh``` command. - -The object can be a volume, bucket or a key. The action is various verbs like -create, list, delete etc. - - -Ozone URL can point to a volume, bucket or keys in the following format: - -_\[schema\]\[server:port\]/volume/bucket/key_ - - -Where, - -1. **Schema** - This should be `o3` which is the native RPC protocol to access - Ozone API. The usage of the schema is optional. - -2. **Server:Port** - This is the address of the Ozone Manager. If the port is -omitted the default port from ozone-site.xml will be used. - -Depending on the call, the volume/bucket/key names will be part of the URL. -Please see volume commands, bucket commands, and key commands section for more -detail. diff --git a/hadoop-hdds/docs/content/shell/Format.zh.md b/hadoop-hdds/docs/content/shell/Format.zh.md deleted file mode 100644 index edfcbdc24a49..000000000000 --- a/hadoop-hdds/docs/content/shell/Format.zh.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Shell 概述 -summary: shell 命令的语法介绍。 -weight: 1 ---- - - -Ozone shell 的帮助命令既可以在 _对象_ 级别调用,也可以在 _操作_ 级别调用。 -比如: - -{{< highlight bash >}} -ozone sh volume --help -{{< /highlight >}} - -此命令会列出所有对卷的可能操作。 - -你也可以通过它查看特定操作的帮助,比如: - -{{< highlight bash >}} -ozone sh volume create --help -{{< /highlight >}} - -这条命令会给出 create 命令的命令行选项。 - -

- - -### 通用命令格式 - -Ozone shell 命令都遵照以下格式: - -> _ozone sh object action url_ - -**ozone** 脚本用来调用所有 Ozone 子命令,ozone shell 通过 ```sh``` 子命令调用。 - -对象可以是卷、桶或键,操作一般是各种动词,比如 create、list、delete 等等。 - - -Ozone URL 可以指向卷、桶或键,格式如下: - -_\[schema\]\[server:port\]/volume/bucket/key_ - - -其中, - -1. **Schema** - 可选,默认为 `o3`,表示使用原生 RPC 协议来访问 Ozone API。 - -2. **Server:Port** - OM 的地址,如果省略了端口, 则使用 ozone-site.xml 中的默认端口。 - -根据具体的命令不同,卷名、桶名和键名将用来构成 URL,卷、桶和键命令的文档有更多具体的说明。 diff --git a/hadoop-hdds/docs/content/shell/KeyCommands.md b/hadoop-hdds/docs/content/shell/KeyCommands.md deleted file mode 100644 index 11186c422184..000000000000 --- a/hadoop-hdds/docs/content/shell/KeyCommands.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Key Commands -summary: Key commands help you to manage the life cycle of - Keys / Objects. -weight: 4 ---- - - - -Ozone shell supports the following key commands. - - * [get](#get) - * [put](#put) - * [delete](#delete) - * [info](#info) - * [list](#list) - * [rename](#rename) - * [cat](#cat) - * [copy](#cp) - - -### Get - -The `key get` command downloads a key from Ozone cluster to local file system. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to download the key to. - - -{{< highlight bash >}} -ozone sh key get /hive/jan/sales.orc sales.orc -{{< /highlight >}} -Downloads the file sales.orc from the _/hive/jan_ bucket and writes to the -local file sales.orc. - -### Put - -The `key put` command uploads a file from the local file system to the specified bucket. - -***Params:*** - - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to upload. -| -r, \-\-replication | Optional, Number of copies, ONE or THREE are the options. Picks up the default from cluster configuration. -| -t, \-\-type | Optional, replication type of the new key. RATIS and STAND_ALONE are the options. Picks up the default from cluster configuration. - -{{< highlight bash >}} -ozone sh key put /hive/jan/corrected-sales.orc sales.orc -{{< /highlight >}} -The above command will put the sales.orc as a new key into _/hive/jan/corrected-sales.orc_. - -### Delete - -The `key delete` command removes the key from the bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key delete /hive/jan/corrected-sales.orc -{{< /highlight >}} - -The above command deletes the key _/hive/jan/corrected-sales.orc_. - - -### Info - -The `key info` commands returns the information about the key. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key info /hive/jan/sales.orc -{{< /highlight >}} - -The above command will print out the information about _/hive/jan/sales.orc_ -key. - -### List - -The `key list` command allows user to list all keys in a bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | Maximum number of results to return. Default: 100 -| -p, \-\-prefix | Optional, Only keys that match this prefix will be returned. 
-| -s, \-\-start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh key list /hive/jan -{{< /highlight >}} - -This command will list all keys in the bucket _/hive/jan_. - -### Rename - -The `key rename` command changes the name of an existing key in the specified bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket in **/volume/bucket** format. -| FromKey | The existing key to be renamed -| ToKey | The new desired name of the key - -{{< highlight bash >}} -ozone sh key rename /hive/jan sales.orc new_name.orc -{{< /highlight >}} -The above command will rename _sales.orc_ to _new\_name.orc_ in the bucket _/hive/jan_. - -### Cat - -The `key cat` command displays the contents of a specific Ozone key to standard output. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. - - -{{< highlight bash >}} -ozone sh key cat /hive/jan/hello.txt -{{< /highlight >}} -Displays the contents of the key hello.txt from the _/hive/jan_ bucket to standard output. - -### Cp - -The `key cp` command copies a key to another one in the specified bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket in **/volume/bucket** format. -| FromKey | The existing key to be copied -| ToKey | The name of the new key -| -r, \-\-replication | Optional, Number of copies, ONE or THREE are the options. Picks up the default from cluster configuration. -| -t, \-\-type | Optional, replication type of the new key. RATIS and STAND_ALONE are the options. Picks up the default from cluster configuration. - -{{< highlight bash >}} -ozone sh key cp /hive/jan sales.orc new_one.orc -{{< /highlight >}} -The above command will copy _sales.orc_ to _new\_one.orc_ in the bucket _/hive/jan_. 
\ No newline at end of file diff --git a/hadoop-hdds/docs/content/shell/KeyCommands.zh.md b/hadoop-hdds/docs/content/shell/KeyCommands.zh.md deleted file mode 100644 index 2a36e7324f31..000000000000 --- a/hadoop-hdds/docs/content/shell/KeyCommands.zh.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: 键命令 -summary: 用键命令管理键/对象的生命周期 -weight: 4 ---- - - - -Ozone shell 提供以下键命令: - - * [下载](#下载) - * [上传](#上传) - * [删除](#删除) - * [查看](#查看) - * [列举](#列举) - * [重命名](#重命名) - * [Cat](#cat) - * [Cp](#cp) - - -### 下载 - -`key get` 命令从 Ozone 集群下载一个键到本地文件系统。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名,格式为 **/volume/bucket/key** -| FileName | 下载到本地后的文件名 - - -{{< highlight bash >}} -ozone sh key get /hive/jan/sales.orc sales.orc -{{< /highlight >}} - -从 _/hive/jan_ 桶中下载 sales.orc 文件,写入到本地名为 sales.orc 的文件。 - -### 上传 - -`key put` 命令从本地文件系统上传一个文件到指定的桶。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名,格式为 **/volume/bucket/key** -| FileName | 待上传的本地文件 -| -r, \-\-replication | 可选,上传后的副本数,合法值为 ONE 或者 THREE,如果不设置,将采用集群配置中的默认值。 -| -t, \-\-type | 可选,副本类型,合法值为 RATIS 或 STAND_ALONE,如果不设置,将采用集群配置中的默认值。 - -{{< highlight bash >}} -ozone sh key put /hive/jan/corrected-sales.orc sales.orc -{{< /highlight >}} - -上述命令将 sales.orc 文件作为新键上传到 _/hive/jan/corrected-sales.orc_ 。 - -### 删除 - -`key delete` 命令用来从桶中删除指定键。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名 - -{{< highlight bash >}} -ozone sh key delete /hive/jan/corrected-sales.orc -{{< /highlight >}} - -上述命令会将 _/hive/jan/corrected-sales.orc_ 这个键删除。 - - -### 查看 - -`key info` 命令返回指定键的信息。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名 - -{{< highlight bash >}} -ozone sh key info /hive/jan/sales.orc -{{< /highlight >}} - -上述命令会打印出 _/hive/jan/sales.orc_ 键的相关信息。 - -### 列举 - -用户通过 `key list` 命令列出一个桶中的所有键。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | 返回结果的最大数量,默认值为 100 -| -p, \-\-prefix | 可选,只有匹配指定前缀的键会被返回 -| -s, \-\-start | 从指定键开始列举 -| Uri | 桶名 - -{{< highlight bash >}} -ozone sh key list /hive/jan -{{< /highlight >}} - -此命令会列出 _/hive/jan_ 桶中的所有键。 - -### 重命名 - -`key rename` 命令用来修改指定桶中的已有键的键名。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 桶名,格式为 **/volume/bucket** -| FromKey | 旧的键名 -| ToKey | 新的键名 - -{{< highlight bash >}} -ozone sh key rename /hive/jan sales.orc new_name.orc -{{< /highlight >}} - -上述命令会将 _/hive/jan_ 桶中的 _sales.orc_ 重命名为 _new\_name.orc_ 。 - -### Cat - -`key cat` 命令用来把指定的键的内容输出到终端。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名,格式为 **/volume/bucket/key** - - -{{< highlight bash >}} -ozone sh key cat /hive/jan/hello.txt -{{< /highlight >}} -上述命令会将 _/hive/jan_ 桶中的 hello.txt 的内容输出到标准输出中来。 - -### Cp - -`key cp` 命令用来在同一个bucket下,从一个key复制出另一个key。 - -***Params:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 桶名 格式为**/volume/bucket**。 -| FromKey | 现有的键名 -| ToKey | 新的键名 -| -r, \-\-replication | 可选,上传后的副本数,合法值为 ONE 或者 THREE,如果不设置,将采用集群配置中的默认值。 -| -t, \-\-type | 可选,副本类型,合法值为 RATIS 或 STAND_ALONE,如果不设置,将采用集群配置中的默认值。 - -{{< highlight bash >}} -ozone sh key cp /hive/jan sales.orc new_one.orc -{{< /highlight >}} -上述命令会将 
_/hive/jan_ 桶中的 _sales.orc_ 复制到 _new\_one.orc_ 。 \ No newline at end of file diff --git a/hadoop-hdds/docs/content/shell/VolumeCommands.md b/hadoop-hdds/docs/content/shell/VolumeCommands.md deleted file mode 100644 index fe459f313352..000000000000 --- a/hadoop-hdds/docs/content/shell/VolumeCommands.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Volume Commands -weight: 2 -summary: Volume commands help you to manage the life cycle of a volume. ---- - - -Volume commands generally need administrator privileges. The ozone shell supports the following volume commands. - - * [create](#create) - * [delete](#delete) - * [info](#info) - * [list](#list) - * [update](#update) - -### Create - -The `volume create` command allows an administrator to create a volume and -assign it to a user. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | Optional, This argument that specifies the maximum size this volume can use in the Ozone cluster. | -| -u, \-\-user | Required, The name of the user who owns this volume. This user can create, buckets and keys on this volume. | -| Uri | The name of the volume. | - -{{< highlight bash >}} -ozone sh volume create --quota=1TB --user=bilbo /hive -{{< /highlight >}} - -The above command will create a volume called _hive_ on the ozone cluster. This -volume has a quota of 1TB, and the owner is _bilbo_. - -### Delete - -The `volume delete` command allows an administrator to delete a volume. If the -volume is not empty then this command will fail. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the volume. - -{{< highlight bash >}} -ozone sh volume delete /hive -{{< /highlight >}} - -The above command will delete the volume hive, if the volume has no buckets -inside it. - -### Info - -The `volume info` commands returns the information about the volume including -quota and owner information. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the volume. - -{{< highlight bash >}} -ozone sh volume info /hive -{{< /highlight >}} - -The above command will print out the information about hive volume. - -### List - -The `volume list` command will list the volumes accessible by a user. - -{{< highlight bash >}} -ozone sh volume list --user hadoop -{{< /highlight >}} - -When ACL is enabled, the above command will print out volumes that the user -hadoop has LIST permission to. When ACL is disabled, the above command will -print out all the volumes owned by the user hadoop. - -### Update - -The volume update command allows changing of owner and quota on a given volume. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | Optional, This argument that specifies the maximum size this volume can use in the Ozone cluster. | -| -u, \-\-user | Optional, The name of the user who owns this volume. This user can create, buckets and keys on this volume. | -| Uri | The name of the volume. | - -{{< highlight bash >}} -ozone sh volume update --quota=10TB /hive -{{< /highlight >}} - -The above command updates the volume quota to 10TB. 
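The volume command reference above shows each subcommand in isolation; chained together, a typical administrative session could look like the following sketch (the volume name, owner and quota values are the same illustrative ones used in that reference, not required settings):

```bash
# Create a volume with a 1 TB quota owned by user 'bilbo' (requires admin privileges)
ozone sh volume create --quota=1TB --user=bilbo /hive

# Inspect the owner and quota that were just assigned
ozone sh volume info /hive

# Raise the quota later without changing the owner
ozone sh volume update --quota=10TB /hive

# List the volumes a particular user can access (or owns, when ACLs are disabled)
ozone sh volume list --user hadoop

# Delete the volume once it no longer contains any buckets
ozone sh volume delete /hive
```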
diff --git a/hadoop-hdds/docs/content/shell/VolumeCommands.zh.md b/hadoop-hdds/docs/content/shell/VolumeCommands.zh.md deleted file mode 100644 index 190e0994e74c..000000000000 --- a/hadoop-hdds/docs/content/shell/VolumeCommands.zh.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: 卷命令 -weight: 2 -summary: 用卷命令管理卷的生命周期 ---- - - -卷命令通常需要管理员权限,ozone shell 支持以下卷命令: - - * [创建](#创建) - * [删除](#删除) - * [查看](#查看) - * [列举](#列举) - * [更新](#更新) - -### 创建 - -管理员可以通过 `volume create` 命令创建一个卷并分配给一个用户。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | 可选,指明该卷在 Ozone 集群所能使用的最大空间,即限额。 | -| -u, \-\-user | 必需,指明该卷的所有者,此用户可以在该卷中创建桶和键。 | -| Uri | 卷名 | - -{{< highlight bash >}} -ozone sh volume create --quota=1TB --user=bilbo /hive -{{< /highlight >}} - -上述命令会在 ozone 集群中创建名为 _hive_ 的卷,卷的限额为 1TB,所有者为 _bilbo_ 。 - -### 删除 - -管理员可以通过 `volume delete` 命令删除一个卷,如果卷不为空,此命令将失败。 - -***参数*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 卷名 | - -{{< highlight bash >}} -ozone sh volume delete /hive -{{< /highlight >}} - -如果 hive 卷中不包含任何桶,上述命令将删除 hive 卷。 - -### 查看 - -通过 `volume info` 命令可以获取卷的限额和所有者信息。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 卷名 | - -{{< highlight bash >}} -ozone sh volume info /hive -{{< /highlight >}} - -上述命令会打印出 hive 卷的相关信息。 - -### 列举 - -`volume list` 命令用来列举一个用户可以访问的所有卷。 - -{{< highlight bash >}} -ozone sh volume list --user hadoop -{{< /highlight >}} - -若 ACL 已启用,上述命令会打印出 hadoop 用户有 LIST 权限的所有卷。 -若 ACL 被禁用,上述命令会打印出 hadoop 用户拥有的所有卷。 - -### 更新 - -`volume update` 命令用来修改卷的所有者和限额。 - -***参数*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | 可选,重新指定该卷在 Ozone 集群中的限额。 | -| -u, \-\-user | 可选,重新指定该卷的所有者 | -| Uri | 卷名 | - -{{< highlight bash >}} -ozone sh volume update --quota=10TB /hive -{{< /highlight >}} - -上述命令将 hive 卷的限额更新为 10TB。 diff --git a/hadoop-hdds/docs/content/shell/_index.zh.md b/hadoop-hdds/docs/content/shell/_index.zh.md deleted file mode 100644 index 0f6220b5f0e6..000000000000 --- a/hadoop-hdds/docs/content/shell/_index.zh.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: 命令行接口 -menu: - main: - weight: 3 ---- - - - -{{}} - Ozone shell 是用户与 Ozone 进行交互的主要接口,它提供了操作 Ozone 的命令行接口。 -{{}} diff --git a/hadoop-hdds/docs/content/start/FromSource.md b/hadoop-hdds/docs/content/start/FromSource.md index 9ce0cc4b6a8f..80f47fb78f0b 100644 --- a/hadoop-hdds/docs/content/start/FromSource.md +++ b/hadoop-hdds/docs/content/start/FromSource.md @@ -22,18 +22,21 @@ weight: 30 {{< requirements >}} * Java 1.8 * Maven - * Protoc (2.5) {{< /requirements >}} - +planning to build sources yourself, you can safely skip this page. + +
If you are a Hadoop ninja, and wise in the ways of Apache, you already know
that a real Apache release is a source release.
-If you want to build from sources, Please untar the source tarball and run
-the ozone build command. This instruction assumes that you have all the
+If you want to build from sources, please untar the source tarball (or clone the latest code
+from the [git repository](https://github.com/apache/hadoop-ozone)) and run the ozone build command. This instruction assumes that you have all the
 dependencies to build Hadoop on your build machine. If you need instructions
 on how to build Hadoop, please look at the Apache Hadoop Website.
@@ -41,28 +44,27 @@ on how to build Hadoop, please look at the Apache Hadoop Website.
 mvn clean package -DskipTests=true
 ```
-This will build an ozone-\.tar.gz in your `hadoop-ozone/dist/target` directory.
+This will build an `ozone-\` directory in your `hadoop-ozone/dist/target` directory.
 You can copy this tarball and use this instead of binary artifacts that are
 provided along with the official release.
-## How to test the build
-
-You can run the acceptance tests in the hadoop-ozone directory to make sure
-that your build is functional. To launch the acceptance tests, please follow
- the instructions in the **README.md** in the `smoketest` directory.
+To create a tar file distribution, use the `-Pdist` profile:
 ```bash
-cd smoketest
-./test.sh
+mvn clean package -DskipTests=true -Pdist
 ```
- You can also execute only a minimal subset of the tests:
+## How to run Ozone from build
+
+When you have the new distribution, you can start a local cluster [with docker-compose]({{< ref "start/RunningViaDocker.md">}}).
 ```bash
-cd smoketest
-./test.sh --env ozone basic
+cd hadoop-ozone/dist/target/ozone-X.X.X...
+cd compose/ozone
+docker-compose up -d
 ```
-Acceptance tests will start a small ozone cluster and verify that ozone shell and ozone file
- system is fully functional.
+## How to test the build
+
+The `compose` subfolder contains multiple types of example setups (secure, non-secure, HA, YARN). They can be tested with the help of [robotframework](http://robotframework.org/) by executing `test.sh` in any of the directories.
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/start/FromSource.zh.md b/hadoop-hdds/docs/content/start/FromSource.zh.md
index a1b9f372e5e8..ab740af73828 100644
--- a/hadoop-hdds/docs/content/start/FromSource.zh.md
+++ b/hadoop-hdds/docs/content/start/FromSource.zh.md
@@ -19,10 +19,15 @@ weight: 30
 limitations under the License.
-->
+ +注意:本页面翻译的信息可能滞后,最新的信息请参看英文版的相关页面。 + +
+ {{< requirements >}} * Java 1.8 * Maven - * Protoc (2.5) {{< /requirements >}}
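The build instructions above can be exercised as a single build-and-verify pass. The sketch below assumes the `-Pdist` build places the distribution under `hadoop-ozone/dist/target` (the exact `ozone-*` directory name depends on the version being built) and uses the `compose/ozone` environment shipped with it:

```bash
# Build Ozone from source, skipping unit tests, and create the distribution layout
mvn clean package -DskipTests=true -Pdist

# Enter the freshly built distribution; the directory name varies with the version
cd hadoop-ozone/dist/target/ozone-*/

# Start a local cluster from the bundled docker-compose definition
cd compose/ozone
docker-compose up -d

# Alternatively, run the robotframework-based acceptance tests for this
# environment; test.sh typically manages the cluster lifecycle on its own.
# ./test.sh
```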
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index a592119cdb55..f1350a9ebf7d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -59,10 +59,14 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask; +import org.apache.hadoop.ozone.recon.tasks.TableCountTask; import org.apache.hadoop.test.LambdaTestUtils; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao; +import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize; +import org.jooq.Configuration; +import org.jooq.DSLContext; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -74,6 +78,7 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm; +import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.mockito.BDDMockito.given; @@ -97,6 +102,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private UtilizationEndpoint utilizationEndpoint; private ReconOMMetadataManager reconOMMetadataManager; private FileSizeCountTask fileSizeCountTask; + private TableCountTask tableCountTask; private ReconStorageContainerManagerFacade reconScm; private boolean isSetupDone = false; private String pipelineId; @@ -107,6 +113,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private DatanodeDetailsProto datanodeDetailsProto; private Pipeline pipeline; private FileCountBySizeDao fileCountBySizeDao; + private DSLContext dslContext; private final String host1 = "host1.datanode"; private final String host2 = "host2.datanode"; private final String ip1 = "1.1.1.1"; @@ -166,17 +173,23 @@ private void initializeInjector() throws IOException { nodeEndpoint = reconTestInjector.getInstance(NodeEndpoint.class); pipelineEndpoint = reconTestInjector.getInstance(PipelineEndpoint.class); - clusterStateEndpoint = - reconTestInjector.getInstance(ClusterStateEndpoint.class); fileCountBySizeDao = getDao(FileCountBySizeDao.class); + GlobalStatsDao globalStatsDao = getDao(GlobalStatsDao.class); UtilizationSchemaDefinition utilizationSchemaDefinition = getSchemaDefinition(UtilizationSchemaDefinition.class); + Configuration sqlConfiguration = + reconTestInjector.getInstance(Configuration.class); utilizationEndpoint = new UtilizationEndpoint( fileCountBySizeDao, utilizationSchemaDefinition); fileSizeCountTask = new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition); + tableCountTask = new TableCountTask( + globalStatsDao, sqlConfiguration, reconOMMetadataManager); reconScm = (ReconStorageContainerManagerFacade) reconTestInjector.getInstance(OzoneStorageContainerManager.class); + clusterStateEndpoint = + new ClusterStateEndpoint(reconScm, globalStatsDao); + 
dslContext = getDslContext(); } @Before @@ -305,6 +318,9 @@ public void setUp() throws Exception { // key = key_three writeDataToOm(reconOMMetadataManager, "key_three"); + + // Truncate global stats table before running each test + dslContext.truncate(GLOBAL_STATS); } private void testDatanodeResponse(DatanodeMetadata datanodeMetadata) @@ -415,9 +431,9 @@ public void testGetClusterState() throws Exception { (ClusterStateResponse) response.getEntity(); Assert.assertEquals(1, clusterStateResponse.getPipelines()); - Assert.assertEquals(2, clusterStateResponse.getVolumes()); - Assert.assertEquals(2, clusterStateResponse.getBuckets()); - Assert.assertEquals(3, clusterStateResponse.getKeys()); + Assert.assertEquals(0, clusterStateResponse.getVolumes()); + Assert.assertEquals(0, clusterStateResponse.getBuckets()); + Assert.assertEquals(0, clusterStateResponse.getKeys()); Assert.assertEquals(2, clusterStateResponse.getTotalDatanodes()); Assert.assertEquals(2, clusterStateResponse.getHealthyDatanodes()); @@ -427,6 +443,16 @@ public void testGetClusterState() throws Exception { (ClusterStateResponse) response1.getEntity(); return (clusterStateResponse1.getContainers() == 1); }); + + // check volume, bucket and key count after running table count task + Pair result = + tableCountTask.reprocess(reconOMMetadataManager); + assertTrue(result.getRight()); + response = clusterStateEndpoint.getClusterState(); + clusterStateResponse = (ClusterStateResponse) response.getEntity(); + Assert.assertEquals(2, clusterStateResponse.getVolumes()); + Assert.assertEquals(2, clusterStateResponse.getBuckets()); + Assert.assertEquals(3, clusterStateResponse.getKeys()); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java index d1cb1e96ae97..92c797be7fc2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,10 +35,12 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.codec.OMDBDefinition; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -54,6 +56,8 @@ public class TestOMDBUpdatesHandler { @Rule public TemporaryFolder folder = new TemporaryFolder(); + private OMDBDefinition omdbDefinition = new OMDBDefinition(); + private OzoneConfiguration createNewTestPath() throws IOException { OzoneConfiguration configuration = new OzoneConfiguration(); File newFolder = folder.newFolder(); @@ -149,6 +153,7 @@ public void testDelete() throws Exception { // Write 1 volume, 1 key into source and target OM DBs. 
String volumeKey = metaMgr.getVolumeKey("sampleVol"); + String nonExistVolumeKey = metaMgr.getVolumeKey("nonExistingVolume"); OmVolumeArgs args = OmVolumeArgs.newBuilder() .setVolume("sampleVol") @@ -165,6 +170,9 @@ public void testDelete() throws Exception { // Delete the volume and key from target DB. metaMgr.getKeyTable().delete("/sampleVol/bucketOne/key_one"); metaMgr.getVolumeTable().delete(volumeKey); + // Delete a non-existing volume and key + metaMgr.getKeyTable().delete("/sampleVol/bucketOne/key_two"); + metaMgr.getVolumeTable().delete(metaMgr.getVolumeKey("nonExistingVolume")); RDBStore rdbStore = (RDBStore) metaMgr.getStore(); RocksDB rocksDB = rdbStore.getDb(); @@ -191,7 +199,7 @@ public void testDelete() throws Exception { } List events = omdbUpdatesHandler.getEvents(); - assertTrue(events.size() == 2); + assertEquals(4, events.size()); OMDBUpdateEvent keyEvent = events.get(0); assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, keyEvent.getAction()); @@ -201,7 +209,35 @@ public void testDelete() throws Exception { OMDBUpdateEvent volEvent = events.get(1); assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, volEvent.getAction()); assertEquals(volumeKey, volEvent.getKey()); - assertNull(volEvent.getValue()); + assertNotNull(volEvent.getValue()); + OmVolumeArgs volumeInfo = (OmVolumeArgs) volEvent.getValue(); + assertEquals("sampleVol", volumeInfo.getVolume()); + + // Assert the values of non existent keys are set to null. + OMDBUpdateEvent nonExistKey = events.get(2); + assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, + nonExistKey.getAction()); + assertEquals("/sampleVol/bucketOne/key_two", nonExistKey.getKey()); + assertNull(nonExistKey.getValue()); + + OMDBUpdateEvent nonExistVolume = events.get(3); + assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, + nonExistVolume.getAction()); + assertEquals(nonExistVolumeKey, nonExistVolume.getKey()); + assertNull(nonExistVolume.getValue()); + } + + @Test + public void testGetKeyType() throws IOException { + OzoneConfiguration configuration = createNewTestPath(); + OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); + OMDBUpdatesHandler omdbUpdatesHandler = + new OMDBUpdatesHandler(metaMgr); + + assertEquals(String.class, omdbDefinition.getKeyType( + metaMgr.getKeyTable().getName()).get()); + assertEquals(OzoneTokenIdentifier.class, omdbDefinition.getKeyType( + metaMgr.getDelegationTokenTable().getName()).get()); } @Test @@ -211,12 +247,12 @@ public void testGetValueType() throws IOException { OMDBUpdatesHandler omdbUpdatesHandler = new OMDBUpdatesHandler(metaMgr); - assertEquals(OmKeyInfo.class, omdbUpdatesHandler.getValueType( - metaMgr.getKeyTable().getName())); - assertEquals(OmVolumeArgs.class, omdbUpdatesHandler.getValueType( - metaMgr.getVolumeTable().getName())); - assertEquals(OmBucketInfo.class, omdbUpdatesHandler.getValueType( - metaMgr.getBucketTable().getName())); + assertEquals(OmKeyInfo.class, omdbDefinition.getValueType( + metaMgr.getKeyTable().getName()).get()); + assertEquals(OmVolumeArgs.class, omdbDefinition.getValueType( + metaMgr.getVolumeTable().getName()).get()); + assertEquals(OmBucketInfo.class, omdbDefinition.getValueType( + metaMgr.getBucketTable().getName()).get()); } private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java new file mode 100644 index 
000000000000..94d76731e2fb --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.TypedTable; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; +import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; +import org.jooq.DSLContext; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.util.ArrayList; + +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE; +import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; +import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE; +import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Unit test for Object Count Task. 
+ */ +public class TestTableCountTask extends AbstractReconSqlDBTest { + + private GlobalStatsDao globalStatsDao; + private TableCountTask tableCountTask; + private DSLContext dslContext; + private boolean isSetupDone = false; + + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + private void initializeInjector() throws IOException { + ReconOMMetadataManager omMetadataManager = getTestReconOmMetadataManager( + initializeNewOmMetadataManager(temporaryFolder.newFolder()), + temporaryFolder.newFolder()); + globalStatsDao = getDao(GlobalStatsDao.class); + tableCountTask = new TableCountTask(globalStatsDao, getConfiguration(), + omMetadataManager); + dslContext = getDslContext(); + } + + @Before + public void setUp() throws IOException { + // The following setup runs only once + if (!isSetupDone) { + initializeInjector(); + isSetupDone = true; + } + // Truncate table before running each test + dslContext.truncate(GLOBAL_STATS); + } + + @Test + public void testReprocess() { + OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class); + // Mock 5 rows in each table and test the count + for (String tableName: tableCountTask.getTaskTables()) { + TypedTable table = mock(TypedTable.class); + TypedTable.TypedTableIterator mockIter = mock(TypedTable + .TypedTableIterator.class); + when(table.iterator()).thenReturn(mockIter); + when(omMetadataManager.getTable(tableName)).thenReturn(table); + when(mockIter.hasNext()) + .thenReturn(true) + .thenReturn(true) + .thenReturn(true) + .thenReturn(true) + .thenReturn(true) + .thenReturn(false); + } + + Pair result = tableCountTask.reprocess(omMetadataManager); + assertTrue(result.getRight()); + + assertEquals(5L, getCountForTable(KEY_TABLE)); + assertEquals(5L, getCountForTable(VOLUME_TABLE)); + assertEquals(5L, getCountForTable(BUCKET_TABLE)); + assertEquals(5L, getCountForTable(OPEN_KEY_TABLE)); + assertEquals(5L, getCountForTable(DELETED_TABLE)); + } + + @Test + public void testProcess() { + ArrayList events = new ArrayList<>(); + // Create 5 put, 1 delete and 1 update event for each table + for (String tableName: tableCountTask.getTaskTables()) { + for (int i=0; i<5; i++) { + events.add(getOMUpdateEvent("item" + i, null, tableName, PUT)); + } + // for delete event, if value is set to null, the counter will not be + // decremented. This is because the value will be null if item does not + // exist in the database and there is no need to delete. + events.add(getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, + DELETE)); + events.add(getOMUpdateEvent("item1", null, tableName, UPDATE)); + } + OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events); + tableCountTask.process(omUpdateEventBatch); + + // Verify 4 items in each table. 
(5 puts - 1 delete + 0 update) + assertEquals(4L, getCountForTable(KEY_TABLE)); + assertEquals(4L, getCountForTable(VOLUME_TABLE)); + assertEquals(4L, getCountForTable(BUCKET_TABLE)); + assertEquals(4L, getCountForTable(OPEN_KEY_TABLE)); + assertEquals(4L, getCountForTable(DELETED_TABLE)); + + // add a new key and simulate delete on non-existing item (value: null) + ArrayList newEvents = new ArrayList<>(); + for (String tableName: tableCountTask.getTaskTables()) { + newEvents.add(getOMUpdateEvent("item5", null, tableName, PUT)); + // This delete event should be a noop since value is null + newEvents.add(getOMUpdateEvent("item0", null, tableName, DELETE)); + } + + omUpdateEventBatch = new OMUpdateEventBatch(newEvents); + tableCountTask.process(omUpdateEventBatch); + + // Verify 5 items in each table. (1 new put + 0 delete) + assertEquals(5L, getCountForTable(KEY_TABLE)); + assertEquals(5L, getCountForTable(VOLUME_TABLE)); + assertEquals(5L, getCountForTable(BUCKET_TABLE)); + assertEquals(5L, getCountForTable(OPEN_KEY_TABLE)); + assertEquals(5L, getCountForTable(DELETED_TABLE)); + } + + private OMDBUpdateEvent getOMUpdateEvent(String name, Object value, + String table, + OMDBUpdateEvent.OMDBUpdateAction action) { + return new OMUpdateEventBuilder() + .setAction(action) + .setKey(name) + .setValue(value) + .setTable(table) + .build(); + } + + private long getCountForTable(String tableName) { + String key = TableCountTask.getRowKeyFromTable(tableName); + return globalStatsDao.findById(key).getValue(); + } +} From 6447bb6db573e563c5c1200c0a5bfd5e88b101e4 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 12 Aug 2020 19:27:10 +0200 Subject: [PATCH 126/165] HDDS-4108. ozone debug ldb scan without arguments results in core dump (#1317) --- .../main/java/org/apache/hadoop/ozone/debug/DBScanner.java | 3 ++- .../main/java/org/apache/hadoop/ozone/debug/RDBParser.java | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java index 42645409d213..8eea23f02d54 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java @@ -53,7 +53,8 @@ public class DBScanner implements Callable, SubcommandWithParent { @CommandLine.Option(names = {"--column_family"}, - description = "Table name") + required = true, + description = "Table name") private String tableName; @CommandLine.Option(names = {"--with-keys"}, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java index 4866d081d402..e18baaab1838 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java @@ -41,8 +41,9 @@ public class RDBParser implements Callable, SubcommandWithParent { private CommandSpec spec; @CommandLine.Option(names = {"--db"}, - description = "Database File Path") - private String dbPath; + required = true, + description = "Database File Path") + private String dbPath; public String getDbPath() { return dbPath; From 4df826a498bdad2110698de3bdf8697b8b06b747 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 12 Aug 2020 
19:55:33 +0200 Subject: [PATCH 127/165] HDDS-4099. No Log4j 2 configuration file found error appears in CLI (#1318) --- hadoop-ozone/tools/pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 267e8ef58199..661d5422303d 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -66,6 +66,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-recon + + + org.springframework + spring-jdbc + + org.apache.hadoop From 2c9fb22a5dc7d76b0ff5f1dcd92860820919a1a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Fri, 14 Aug 2020 15:57:11 +0200 Subject: [PATCH 128/165] HDDS-4114. Bump log4j2 version (#1325) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 49ca8575caf9..f4b64145992c 100644 --- a/pom.xml +++ b/pom.xml @@ -146,7 +146,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.7.25 1.2.17 - 2.11.0 + 2.13.3 3.4.2 0.7.0 From 9afe66213d7a1e6f4dc30411b85612c5c84529f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Tue, 18 Aug 2020 13:20:50 +0200 Subject: [PATCH 129/165] HDDS-4127. Components with web interface should depend on hdds-docs. (#1335) --- hadoop-hdds/container-service/pom.xml | 7 +++++++ hadoop-hdds/server-scm/pom.xml | 1 + hadoop-ozone/ozone-manager/pom.xml | 1 + hadoop-ozone/s3gateway/pom.xml | 5 +++++ 4 files changed, 14 insertions(+) diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index 392cc44c72a1..091e3bd10a72 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -70,6 +70,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.yaml snakeyaml + + + org.apache.hadoop + hadoop-hdds-docs + provided + + com.github.spotbugs spotbugs diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index dcbc42a17d52..5f2d6bdea8ac 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -60,6 +60,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-docs + provided io.dropwizard.metrics diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 056498a9a050..31ffb58eecd7 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -43,6 +43,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-docs + provided diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 80bd34fa270d..337fedcf2be3 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -163,6 +163,11 @@ org.apache.hadoop hadoop-ozone-client + + org.apache.hadoop + hadoop-hdds-docs + provided + junit junit From 95cb357848d5fb64707a5296b31251ba4436e367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Mon, 24 Aug 2020 15:14:41 +0200 Subject: [PATCH 130/165] HDDS-4094. 
Support byte-level write in Freon HadoopFsGenerator (#1310) --- .../hadoop/ozone/freon/ContentGenerator.java | 31 ++++++- .../hadoop/ozone/freon/HadoopFsGenerator.java | 12 ++- .../ozone/freon/TestContentGenerator.java | 82 +++++++++++++++++++ 3 files changed, 119 insertions(+), 6 deletions(-) create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java index c6ec60e59a25..542634c4884b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java @@ -20,6 +20,7 @@ import java.io.OutputStream; import java.nio.charset.StandardCharsets; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.RandomStringUtils; /** @@ -38,15 +39,25 @@ public class ContentGenerator { */ private int bufferSize; + /** + * Number of bytes to write in one call. + *

+ * Should be no larger than the bufferSize. + */ + private final int copyBufferSize; + private final byte[] buffer; ContentGenerator(long keySize, int bufferSize) { + this(keySize, bufferSize, bufferSize); + } + + ContentGenerator(long keySize, int bufferSize, int copyBufferSize) { this.keySize = keySize; this.bufferSize = bufferSize; - + this.copyBufferSize = copyBufferSize; buffer = RandomStringUtils.randomAscii(bufferSize) .getBytes(StandardCharsets.UTF_8); - } /** @@ -56,7 +67,21 @@ public void write(OutputStream outputStream) throws IOException { for (long nrRemaining = keySize; nrRemaining > 0; nrRemaining -= bufferSize) { int curSize = (int) Math.min(bufferSize, nrRemaining); - outputStream.write(buffer, 0, curSize); + if (copyBufferSize == 1) { + for (int i = 0; i < curSize; i++) { + outputStream.write(buffer[i]); + } + } else { + for (int i = 0; i < curSize; i += copyBufferSize) { + outputStream.write(buffer, i, + Math.min(copyBufferSize, curSize - i)); + } + } } } + + @VisibleForTesting + byte[] getBuffer() { + return buffer; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java index 548f829fba44..925ba7dc2e96 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java @@ -57,10 +57,15 @@ public class HadoopFsGenerator extends BaseFreonGenerator private int fileSize; @Option(names = {"--buffer"}, - description = "Size of buffer used to generated the key content.", - defaultValue = "4096") + description = "Size of buffer used store the generated key content", + defaultValue = "10240") private int bufferSize; + @Option(names = {"--copy-buffer"}, + description = "Size of bytes written to the output in one operation", + defaultValue = "4096") + private int copyBufferSize; + private ContentGenerator contentGenerator; private Timer timer; @@ -76,7 +81,8 @@ public Void call() throws Exception { fileSystem = FileSystem.get(URI.create(rootPath), configuration); - contentGenerator = new ContentGenerator(fileSize, bufferSize); + contentGenerator = + new ContentGenerator(fileSize, bufferSize, copyBufferSize); timer = getMetrics().timer("file-create"); diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java new file mode 100644 index 000000000000..d61be3a42dcf --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java @@ -0,0 +1,82 @@ +package org.apache.hadoop.ozone.freon; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +import org.junit.Assert; +import org.junit.Test; + +/** + * Tests for the ContentGenerator class of Freon. + */ +public class TestContentGenerator { + + @Test + public void writeWrite() throws IOException { + ContentGenerator generator = new ContentGenerator(1024, 1024); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + generator.write(output); + Assert.assertArrayEquals(generator.getBuffer(), output.toByteArray()); + } + + @Test + public void writeWithSmallerBuffers() throws IOException { + ContentGenerator generator = new ContentGenerator(10000, 1024, 3); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + generator.write(baos); + + Assert.assertEquals(10000, baos.toByteArray().length); + } + + @Test + public void writeWithByteLevelWrite() throws IOException { + ContentGenerator generator = new ContentGenerator(1024, 1024, 1); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + generator.write(output); + Assert.assertArrayEquals(generator.getBuffer(), output.toByteArray()); + } + + @Test + public void writeWithSmallBuffer() throws IOException { + ContentGenerator generator = new ContentGenerator(1024, 1024, 10); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + generator.write(output); + Assert.assertArrayEquals(generator.getBuffer(), output.toByteArray()); + } + + @Test + public void writeWithDistinctSizes() throws IOException { + ContentGenerator generator = new ContentGenerator(20, 8, 3); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + generator.write(output); + + byte[] expected = new byte[20]; + byte[] buffer = generator.getBuffer(); + System.arraycopy(buffer, 0, expected, 0, buffer.length); + System.arraycopy(buffer, 0, expected, 8, buffer.length); + System.arraycopy(buffer, 0, expected, 16, 4); + Assert.assertArrayEquals(expected, output.toByteArray()); + } +} From 0dff843f43214c9ccf0acbaca0775979f8a30ff2 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 25 Aug 2020 13:24:01 +0200 Subject: [PATCH 131/165] HDDS-4139. 
Update version number in upgrade tests (#1347) --- hadoop-ozone/dist/src/main/compose/upgrade/README.md | 2 +- hadoop-ozone/dist/src/main/compose/upgrade/test.sh | 2 +- hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md | 2 +- .../compose/upgrade/versions/{ozone-0.6.0.sh => ozone-1.0.0.sh} | 0 hadoop-ozone/dist/src/shell/upgrade/{0.6.0.sh => 1.0.0.sh} | 2 +- .../src/shell/upgrade/{0.6.0 => 1.0.0}/01-migrate-scm-db.sh | 0 6 files changed, 4 insertions(+), 4 deletions(-) rename hadoop-ozone/dist/src/main/compose/upgrade/versions/{ozone-0.6.0.sh => ozone-1.0.0.sh} (100%) rename hadoop-ozone/dist/src/shell/upgrade/{0.6.0.sh => 1.0.0.sh} (94%) rename hadoop-ozone/dist/src/shell/upgrade/{0.6.0 => 1.0.0}/01-migrate-scm-db.sh (100%) diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/README.md b/hadoop-ozone/dist/src/main/compose/upgrade/README.md index 2a832f4b9e29..5d844e006b50 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/README.md +++ b/hadoop-ozone/dist/src/main/compose/upgrade/README.md @@ -26,4 +26,4 @@ current version is run with the `ozone-runner` image using locally built source code. Currently the test script only supports a single version upgrade (eg. -from 0.5.0 to 0.6.0). +from 0.5.0 to 1.0.0). diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh index 0babd17676d5..0c51325b7c2d 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh @@ -20,7 +20,7 @@ export COMPOSE_DIR : "${OZONE_REPLICATION_FACTOR:=3}" : "${OZONE_UPGRADE_FROM:="0.5.0"}" -: "${OZONE_UPGRADE_TO:="0.6.0"}" +: "${OZONE_UPGRADE_TO:="1.0.0"}" : "${OZONE_VOLUME:="${COMPOSE_DIR}/data"}" export OZONE_VOLUME diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md b/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md index c662c2f286be..24cd113469a6 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md +++ b/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md @@ -12,4 +12,4 @@ limitations under the License. See accompanying LICENSE file. --> -The scripts in this directory define version-specific behavior required for [`testlib.sh`](../../testlib.sh). For example the `ozone admin` command was renamed from `ozone scmcli` in 0.6.0. +The scripts in this directory define version-specific behavior required for [`testlib.sh`](../../testlib.sh). For example the `ozone admin` command was renamed from `ozone scmcli` in 1.0.0. 
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.6.0.sh b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh similarity index 100% rename from hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.6.0.sh rename to hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh diff --git a/hadoop-ozone/dist/src/shell/upgrade/0.6.0.sh b/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh similarity index 94% rename from hadoop-ozone/dist/src/shell/upgrade/0.6.0.sh rename to hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh index 58c78dbdfb64..65739787ee67 100755 --- a/hadoop-ozone/dist/src/shell/upgrade/0.6.0.sh +++ b/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh @@ -20,4 +20,4 @@ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" : "${SCM_DIR:="${OZONE_VOLUME}/scm"}" : "${OZONE_RUNNER_VERSION:="20200625-1"}" -docker run --rm -v "${SCM_DIR}":/scm -v "${SCRIPT_DIR}/0.6.0":/upgrade -w /scm/metadata apache/ozone-runner:"${OZONE_RUNNER_VERSION}" /upgrade/01-migrate-scm-db.sh +docker run --rm -v "${SCM_DIR}":/scm -v "${SCRIPT_DIR}/1.0.0":/upgrade -w /scm/metadata apache/ozone-runner:"${OZONE_RUNNER_VERSION}" /upgrade/01-migrate-scm-db.sh diff --git a/hadoop-ozone/dist/src/shell/upgrade/0.6.0/01-migrate-scm-db.sh b/hadoop-ozone/dist/src/shell/upgrade/1.0.0/01-migrate-scm-db.sh similarity index 100% rename from hadoop-ozone/dist/src/shell/upgrade/0.6.0/01-migrate-scm-db.sh rename to hadoop-ozone/dist/src/shell/upgrade/1.0.0/01-migrate-scm-db.sh From 89559f91e34cbc05fdff1d587b79b239c052c9c0 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Tue, 25 Aug 2020 20:30:41 +0800 Subject: [PATCH 132/165] HDDS-4144. Update version info in hadoop client dependency readme (#1348) (cherry picked from commit 28d372ca903b4741131bace09e0339e9161257bb) --- hadoop-hdds/hadoop-dependency-client/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/hadoop-hdds/hadoop-dependency-client/README.md b/hadoop-hdds/hadoop-dependency-client/README.md index 0ca9a1c7dbcd..a3ec3680da60 100644 --- a/hadoop-hdds/hadoop-dependency-client/README.md +++ b/hadoop-hdds/hadoop-dependency-client/README.md @@ -30,31 +30,31 @@ mvn dependency:tree [INFO] Scanning for projects... 
[INFO] [INFO] -------< org.apache.hadoop:hadoop-hdds-hadoop-dependency-client >------- -[INFO] Building Apache Hadoop HDDS Hadoop Client dependencies 0.6.0-SNAPSHOT +[INFO] Building Apache Hadoop HDDS Hadoop Client dependencies 1.0.0 [INFO] --------------------------------[ jar ]--------------------------------- -[INFO] +[INFO] [INFO] --- maven-dependency-plugin:3.0.2:tree (default-cli) @ hadoop-hdds-hadoop-dependency-client --- -[INFO] org.apache.hadoop:hadoop-hdds-hadoop-dependency-client:jar:0.6.0-SNAPSHOT -[INFO] +- org.apache.hadoop:hadoop-annotations:jar:3.2.0:compile +[INFO] org.apache.hadoop:hadoop-hdds-hadoop-dependency-client:jar:1.0.0 +[INFO] +- org.apache.hadoop:hadoop-annotations:jar:3.2.1:compile [INFO] | \- jdk.tools:jdk.tools:jar:1.8:system -[INFO] +- org.apache.hadoop:hadoop-common:jar:3.2.0:compile +[INFO] +- org.apache.hadoop:hadoop-common:jar:3.2.1:compile [INFO] | +- org.apache.httpcomponents:httpclient:jar:4.5.2:compile [INFO] | | \- org.apache.httpcomponents:httpcore:jar:4.4.4:compile [INFO] | +- org.apache.commons:commons-configuration2:jar:2.1.1:compile [INFO] | +- com.google.re2j:re2j:jar:1.1:compile [INFO] | +- com.google.protobuf:protobuf-java:jar:2.5.0:compile -[INFO] | +- org.apache.hadoop:hadoop-auth:jar:3.2.0:compile +[INFO] | +- org.apache.hadoop:hadoop-auth:jar:3.2.1:compile [INFO] | +- com.google.code.findbugs:jsr305:jar:3.0.0:compile [INFO] | +- org.apache.htrace:htrace-core4:jar:4.1.0-incubating:compile [INFO] | +- org.codehaus.woodstox:stax2-api:jar:3.1.4:compile [INFO] | \- com.fasterxml.woodstox:woodstox-core:jar:5.0.3:compile -[INFO] +- org.apache.hadoop:hadoop-hdfs:jar:3.2.0:compile +[INFO] +- org.apache.hadoop:hadoop-hdfs:jar:3.2.1:compile [INFO] \- junit:junit:jar:4.11:test [INFO] \- org.hamcrest:hamcrest-core:jar:1.3:test [INFO] ------------------------------------------------------------------------ [INFO] BUILD SUCCESS [INFO] ------------------------------------------------------------------------ -[INFO] Total time: 1.144 s -[INFO] Finished at: 2020-04-01T11:21:46+02:00 +[INFO] Total time: 1.464 s +[INFO] Finished at: 2020-08-25T19:40:29+08:00 [INFO] ------------------------------------------------------------------------ ``` From d433e29a67efa1ebfc02e60cc51770115c588ccd Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 25 Aug 2020 14:34:55 +0200 Subject: [PATCH 133/165] HDDS-4074. 
[OFS] Implement AbstractFileSystem for RootedOzoneFileSystem (#1330) --- .../compose/ozone-mr/hadoop27/docker-config | 1 + .../compose/ozone-mr/hadoop31/docker-config | 1 + .../compose/ozone-mr/hadoop32/docker-config | 1 + .../main/compose/ozonesecure-mr/docker-config | 1 + .../apache/hadoop/fs/ozone/RootedOzFs.java | 49 +++++++++++++++++++ .../apache/hadoop/fs/ozone/RootedOzFs.java | 44 +++++++++++++++++ .../apache/hadoop/fs/ozone/RootedOzFs.java | 44 +++++++++++++++++ 7 files changed, 141 insertions(+) create mode 100644 hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java create mode 100644 hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java create mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config index 8180a10f878c..5c1348422f43 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config @@ -15,6 +15,7 @@ # limitations under the License. CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs +CORE-SITE.xml_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop2-@project.version@.jar no_proxy=om,scm,s3g,recon,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config index af8e7f800405..e80f0284a696 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config @@ -15,6 +15,7 @@ # limitations under the License. CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs +CORE-SITE.xml_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar no_proxy=om,scm,s3g,recon,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config index af8e7f800405..e80f0284a696 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config @@ -15,6 +15,7 @@ # limitations under the License. 
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs +CORE-SITE.xml_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar no_proxy=om,scm,s3g,recon,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config index 0adafdfd60ea..7f1050db8b90 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config @@ -67,6 +67,7 @@ HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs +CORE-SITE.XML_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs CORE-SITE.XML_fs.defaultFS=o3fs://bucket1.volume1/ MAPRED-SITE.XML_mapreduce.framework.name=yarn diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java new file mode 100644 index 000000000000..4cd04da9c867 --- /dev/null +++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.ozone.OzoneConsts; + +/** + * Ozone implementation of AbstractFileSystem. 
+ * This impl delegates to the RootedOzoneFileSystem + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RootedOzFs extends DelegateToFileSystem { + + public RootedOzFs(URI theUri, Configuration conf) + throws IOException, URISyntaxException { + super(theUri, new RootedOzoneFileSystem(), conf, + OzoneConsts.OZONE_OFS_URI_SCHEME, false); + } + + @Override + public int getUriDefaultPort() { + return -1; + } +} diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java new file mode 100644 index 000000000000..076287eaac14 --- /dev/null +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.ozone.OzoneConsts; + +/** + * Ozone implementation of AbstractFileSystem. + * This impl delegates to the RootedOzoneFileSystem + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RootedOzFs extends DelegateToFileSystem { + + public RootedOzFs(URI theUri, Configuration conf) + throws IOException, URISyntaxException { + super(theUri, new RootedOzoneFileSystem(), conf, + OzoneConsts.OZONE_OFS_URI_SCHEME, false); + } +} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java new file mode 100644 index 000000000000..076287eaac14 --- /dev/null +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.ozone.OzoneConsts; + +/** + * Ozone implementation of AbstractFileSystem. + * This impl delegates to the RootedOzoneFileSystem + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RootedOzFs extends DelegateToFileSystem { + + public RootedOzFs(URI theUri, Configuration conf) + throws IOException, URISyntaxException { + super(theUri, new RootedOzoneFileSystem(), conf, + OzoneConsts.OZONE_OFS_URI_SCHEME, false); + } +} From c68537af1942870a280690f3212f8a86c243d212 Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Tue, 25 Aug 2020 20:52:12 +0800 Subject: [PATCH 134/165] HDDS-4112. Improve SCM webui page performance (#1323) --- .../src/main/resources/webapps/scm/scm-overview.html | 4 ++-- hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js | 4 ---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index ebb117077d92..efed59d53e8b 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -38,7 +38,7 @@
Status
Node Manager: Safe mode status - {{$ctrl.scmmetrics.InSafeMode}} + {{$ctrl.overview.jmx.InSafeMode}} @@ -47,7 +47,7 @@
Safemode rules statuses
- + diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js index 2942a561dea3..b5acc19fa386 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js @@ -30,10 +30,6 @@ .then(function (result) { ctrl.nodemanagermetrics = result.data.beans[0]; }); - $http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=StorageContainerManagerInfo,component=ServerRuntime") - .then(function (result) { - ctrl.scmmetrics = result.data.beans[0]; - }); var statusSortOrder = { "HEALTHY": "a", From ff0474783a08b70bb10098297f0f9caf6872c3b4 Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Tue, 25 Aug 2020 20:58:37 +0800 Subject: [PATCH 135/165] HDDS-3654. Let backgroundCreator create pipeline for the support replication factors alternately (#984) --- .../pipeline/BackgroundPipelineCreator.java | 33 ++++++++++++------- .../hdds/scm/node/TestDeadNodeHandler.java | 5 ++- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java index f7f1d52f9ef3..591acbc3b154 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.pipeline; +import org.apache.commons.collections.iterators.LoopingIterator; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -26,6 +27,8 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -109,13 +112,15 @@ private void createPipelines() { ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE_DEFAULT); + List list = + new ArrayList<>(); for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor .values()) { if (skipCreation(factor, type, autoCreateFactorOne)) { // Skip this iteration for creating pipeline continue; } - + list.add(factor); if (!pipelineManager.getSafeModeStatus()) { try { pipelineManager.scrubPipeline(type, factor); @@ -123,21 +128,27 @@ private void createPipelines() { LOG.error("Error while scrubbing pipelines {}", e); } } + } - while (true) { - try { - if (scheduler.isClosed()) { - break; - } - pipelineManager.createPipeline(type, factor); - } catch (IOException ioe) { - break; - } catch (Throwable t) { - LOG.error("Error while creating pipelines", t); + LoopingIterator it = new LoopingIterator(list); + while (it.hasNext()) { + HddsProtos.ReplicationFactor factor = + (HddsProtos.ReplicationFactor) it.next(); + + try { + if (scheduler.isClosed()) { break; } + pipelineManager.createPipeline(type, factor); + } catch (IOException ioe) { + it.remove(); + } catch (Throwable t) { + LOG.error("Error while creating pipelines", t); + it.remove(); } } + isPipelineCreatorRunning.set(false); + LOG.debug("BackgroundPipelineCreator createPipelines finished."); } } diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index 6a6d3284465b..f05be767e717 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.node; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import java.io.File; @@ -162,7 +163,9 @@ public void testOnMessage() throws Exception { LambdaTestUtils.await(120000, 1000, () -> { pipelineManager.triggerPipelineCreation(); - return pipelineManager.getPipelines(RATIS, THREE).size() == 3; + System.out.println(pipelineManager.getPipelines(RATIS, THREE).size()); + System.out.println(pipelineManager.getPipelines(RATIS, ONE).size()); + return pipelineManager.getPipelines(RATIS, THREE).size() > 3; }); TestUtils.openAllRatisPipelines(pipelineManager); From 03269f624eabd7edaf61de04ba603a056a487c39 Mon Sep 17 00:00:00 2001 From: maobaolong <307499405@qq.com> Date: Tue, 25 Aug 2020 21:30:04 +0800 Subject: [PATCH 136/165] HDDS-4111. Keep the CSI.zh.md consistent with CSI.md (#1320) --- hadoop-hdds/docs/content/interface/CSI.zh.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/docs/content/interface/CSI.zh.md b/hadoop-hdds/docs/content/interface/CSI.zh.md index 92df695360a0..23fea66ee9d6 100644 --- a/hadoop-hdds/docs/content/interface/CSI.zh.md +++ b/hadoop-hdds/docs/content/interface/CSI.zh.md @@ -21,10 +21,21 @@ summary: Ozone 支持 容器存储接口 (CSI) 协议。你可以通过 Ozone CS limitations under the License. --> -容器存储接口 `Container Storage Interface` (CSI) 使存储供应商(SP)能够一次性开发一个插件,并让它跨多个容器编排工作。 +容器存储接口 `Container Storage Interface` (CSI) 使存储供应商(SP)能够一次性开发一个插件,并让它跨多个容器编排工作, +就像 Kubernetes 或者 YARN。 获取更多 CSI 的信息,可以参考[SCI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md) +CSI 定义了一个简单的,包含3个接口(Identity, Controller, Node)的 GRPC 接口,它定义了容器编排器如何请求创建新的存储空间或挂载新创建的存储, +但没有定义如何挂载存储。 + +![CSI](CSI.png) + +默认情况下,Ozone CSI 服务使用 S3 FUSE 驱动程序([goofys](https://github.com/kahing/goofys))挂载 Ozone 桶。 +其他挂载方式(如专用 NFS 服务或本机FUSE驱动程序)的实现正在进行中。 + + + Ozone CSI 是 CSI 的一种实现,它可以将 Ozone 用作容器的存储卷。 ## 入门 From ad4bf01a0ce97555d1ca94bc739cc4e20b382b8a Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Wed, 26 Aug 2020 09:21:51 +0100 Subject: [PATCH 137/165] HDDS-4062. Non rack aware pipelines should not be created if multiple racks are alive. 
(#1291) --- .../scm/pipeline/PipelinePlacementPolicy.java | 35 ++++++++ .../hdds/scm/container/MockNodeManager.java | 13 +++ .../pipeline/TestPipelinePlacementPolicy.java | 81 +++++++++++++++++++ 3 files changed, 129 insertions(+) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java index 524b5ec8b216..84efdc2e1403 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java @@ -56,6 +56,11 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy { private final int heavyNodeCriteria; private static final int REQUIRED_RACKS = 2; + public static final String MULTIPLE_RACK_PIPELINE_MSG = + "The cluster has multiple racks, but all nodes with available " + + "pipeline capacity are on a single rack. There are insufficient " + + "cross rack nodes available to create a pipeline"; + /** * Constructs a pipeline placement with considering network topology, * load balancing and rack awareness. @@ -120,6 +125,7 @@ List filterViableNodes( // get nodes in HEALTHY state List healthyNodes = nodeManager.getNodes(HddsProtos.NodeState.HEALTHY); + boolean multipleRacks = multipleRacksAvailable(healthyNodes); if (excludedNodes != null) { healthyNodes.removeAll(excludedNodes); } @@ -163,9 +169,38 @@ List filterViableNodes( throw new SCMException(msg, SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); } + + if (!checkAllNodesAreEqual(nodeManager.getClusterNetworkTopologyMap())) { + boolean multipleRacksAfterFilter = multipleRacksAvailable(healthyList); + if (multipleRacks && !multipleRacksAfterFilter) { + LOG.debug(MULTIPLE_RACK_PIPELINE_MSG); + throw new SCMException(MULTIPLE_RACK_PIPELINE_MSG, + SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); + } + } return healthyList; } + /** + * Given a list of Datanodes, return false if the entire list is only on a + * single rack, or the list is empty. If there is more than 1 rack, return + * true. + * @param dns List of datanodes to check + * @return True if there are multiple racks, false otherwise + */ + private boolean multipleRacksAvailable(List dns) { + if (dns.size() <= 1) { + return false; + } + String initialRack = dns.get(0).getNetworkLocation(); + for (DatanodeDetails dn : dns) { + if (!dn.getNetworkLocation().equals(initialRack)) { + return true; + } + } + return false; + } + /** * Pipeline placement choose datanodes to join the pipeline. 
* diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 5b635a7bee94..4b8b37dee273 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -390,6 +390,19 @@ public void clearCommandQueue(UUID dnId) { } } + public void setNodeState(DatanodeDetails dn, HddsProtos.NodeState state) { + healthyNodes.remove(dn); + staleNodes.remove(dn); + deadNodes.remove(dn); + if (state == HEALTHY) { + healthyNodes.add(dn); + } else if (state == STALE) { + staleNodes.add(dn); + } else { + deadNodes.add(dn); + } + } + /** * Closes this stream and releases any system resources associated with it. If * the stream is already closed then invoking this method has no effect. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java index 8d6a28cc2a0d..1274608c39c2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java @@ -44,7 +44,9 @@ import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -439,6 +441,85 @@ public void testValidatePlacementPolicySingleRackInCluster() { assertEquals(0, status.misReplicationCount()); } + @Test + public void test3NodesInSameRackReturnedWhenOnlyOneHealthyRackIsPresent() + throws Exception { + List dns = setupSkewedRacks(); + + int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); + // Set the only node on rack1 stale. This makes the cluster effectively a + // single rack. + nodeManager.setNodeState(dns.get(0), HddsProtos.NodeState.STALE); + + // As there is only 1 rack alive, the 3 DNs on /rack2 should be returned + List pickedDns = placementPolicy.chooseDatanodes( + new ArrayList<>(), new ArrayList<>(), nodesRequired, 0); + + assertEquals(3, pickedDns.size()); + assertTrue(pickedDns.contains(dns.get(1))); + assertTrue(pickedDns.contains(dns.get(2))); + assertTrue(pickedDns.contains(dns.get(3))); + } + + @Rule + public ExpectedException thrownExp = ExpectedException.none(); + + @Test + public void testExceptionIsThrownWhenRackAwarePipelineCanNotBeCreated() + throws Exception { + thrownExp.expect(SCMException.class); + thrownExp.expectMessage(PipelinePlacementPolicy.MULTIPLE_RACK_PIPELINE_MSG); + + List dns = setupSkewedRacks(); + + // Set the first node to its pipeline limit. This means there are only + // 3 hosts on a single rack available for new pipelines + insertHeavyNodesIntoNodeManager(dns, 1); + int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); + + placementPolicy.chooseDatanodes( + new ArrayList<>(), new ArrayList<>(), nodesRequired, 0); + } + + @Test + public void testExceptionThrownRackAwarePipelineCanNotBeCreatedExcludedNode() + throws Exception { + thrownExp.expect(SCMException.class); + thrownExp.expectMessage(PipelinePlacementPolicy.MULTIPLE_RACK_PIPELINE_MSG); + + List dns = setupSkewedRacks(); + + // Set the first node to its pipeline limit. 
This means there are only + // 3 hosts on a single rack available for new pipelines + insertHeavyNodesIntoNodeManager(dns, 1); + int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); + + List excluded = new ArrayList<>(); + excluded.add(dns.get(0)); + placementPolicy.chooseDatanodes( + excluded, new ArrayList<>(), nodesRequired, 0); + } + + private List setupSkewedRacks() { + cluster = initTopology(); + + List dns = new ArrayList<>(); + dns.add(MockDatanodeDetails + .createDatanodeDetails("host1", "/rack1")); + dns.add(MockDatanodeDetails + .createDatanodeDetails("host2", "/rack2")); + dns.add(MockDatanodeDetails + .createDatanodeDetails("host3", "/rack2")); + dns.add(MockDatanodeDetails + .createDatanodeDetails("host4", "/rack2")); + + nodeManager = new MockNodeManager(cluster, dns, + false, PIPELINE_PLACEMENT_MAX_NODES_COUNT); + placementPolicy = new PipelinePlacementPolicy( + nodeManager, stateManager, conf); + return dns; + } + private boolean checkDuplicateNodesUUID(List nodes) { HashSet uuids = nodes.stream(). map(DatanodeDetails::getUuid). From 861bb9e230b0b41a94c346cd250e035eb060105a Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Wed, 26 Aug 2020 15:18:17 -0700 Subject: [PATCH 138/165] HDDS-4068. Client should not retry same OM on network connection failure (#1324) --- .../ozone/om/ha/OMFailoverProxyProvider.java | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java index 3b2692de7ba9..1abe5abfdb1b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java @@ -37,6 +37,8 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.FailoverProxyProvider; import org.apache.hadoop.io.retry.RetryInvocationHandler; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; @@ -172,9 +174,17 @@ private OzoneManagerProtocolPB createOMProxy(InetSocketAddress omAddress) LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); RPC.setProtocolEngine(hadoopConf, OzoneManagerProtocolPB.class, ProtobufRpcEngine.class); - return RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress, ugi, - hadoopConf, NetUtils.getDefaultSocketFactory(hadoopConf), - (int) OmUtils.getOMClientRpcTimeOut(conf)); + + // FailoverOnNetworkException ensures that the IPC layer does not attempt + // retries on the same OM in case of connection exception. This retry + // policy essentially results in TRY_ONCE_THEN_FAIL. + RetryPolicy connectionRetryPolicy = RetryPolicies + .failoverOnNetworkException(0); + + return RPC.getProtocolProxy(OzoneManagerProtocolPB.class, omVersion, + omAddress, ugi, hadoopConf, NetUtils.getDefaultSocketFactory( + hadoopConf), (int) OmUtils.getOMClientRpcTimeOut(conf), + connectionRetryPolicy).getProxy(); } From 2295cc2dfc4b40b040f10fdc41173b7622fd0922 Mon Sep 17 00:00:00 2001 From: Sadanand Shenoy Date: Thu, 27 Aug 2020 14:03:40 +0530 Subject: [PATCH 139/165] HDDS-3972. Add option to limit number of items displaying through ldb tool. 
(#1206) --- .../apache/hadoop/ozone/om/TestOmLDBCli.java | 120 +++++++++ .../apache/hadoop/ozone/om/TestOmSQLCli.java | 235 ------------------ .../apache/hadoop/ozone/debug/DBScanner.java | 62 +++-- .../apache/hadoop/ozone/debug/RDBParser.java | 4 + 4 files changed, 172 insertions(+), 249 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java new file mode 100644 index 000000000000..450eebb3a449 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om; + + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.debug.DBScanner; +import org.apache.hadoop.ozone.debug.RDBParser; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.Assert; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.util.List; +import java.util.ArrayList; + + +/** + * This class tests the Debug LDB CLI that reads from an om.db file. + */ +public class TestOmLDBCli { + private OzoneConfiguration conf; + + private RDBParser rdbParser; + private DBScanner dbScanner; + private DBStore dbStore = null; + private static List keyNames; + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + @Before + public void setup() throws Exception { + conf = new OzoneConfiguration(); + rdbParser = new RDBParser(); + dbScanner = new DBScanner(); + keyNames = new ArrayList<>(); + } + + @After + public void shutdown() throws Exception { + if (dbStore!=null){ + dbStore.close(); + } + } + + @Test + public void testOMDB() throws Exception { + File newFolder = folder.newFolder(); + if(!newFolder.exists()) { + Assert.assertTrue(newFolder.mkdirs()); + } + // Dummy om.db with only keyTable + dbStore = DBStoreBuilder.newBuilder(conf) + .setName("om.db") + .setPath(newFolder.toPath()) + .addTable("keyTable") + .build(); + // insert 5 keys + for (int i = 0; i<5; i++) { + OmKeyInfo value = TestOMRequestUtils.createOmKeyInfo("sampleVol", + "sampleBuck", "key" + (i+1), HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE); + String key = "key"+ (i); + Table keyTable = dbStore.getTable("keyTable"); + keyTable.put(key.getBytes(), value.getProtobuf().toByteArray()); + } + rdbParser.setDbPath(dbStore.getDbLocation().getAbsolutePath()); + dbScanner.setParent(rdbParser); + Assert.assertEquals(5, getKeyNames(dbScanner).size()); + Assert.assertTrue(getKeyNames(dbScanner).contains("key1")); + Assert.assertTrue(getKeyNames(dbScanner).contains("key5")); + Assert.assertFalse(getKeyNames(dbScanner).contains("key6")); + DBScanner.setLimit(1); + Assert.assertEquals(1, getKeyNames(dbScanner).size()); + DBScanner.setLimit(-1); + try { + getKeyNames(dbScanner); + Assert.fail("IllegalArgumentException is expected"); + }catch (IllegalArgumentException e){ + //ignore + } + } + + private static List getKeyNames(DBScanner dbScanner) + throws Exception { + keyNames.clear(); + dbScanner.setTableName("keyTable"); + dbScanner.call(); + Assert.assertFalse(dbScanner.getScannedObjects().isEmpty()); + for (Object o : dbScanner.getScannedObjects()){ + OmKeyInfo keyInfo = (OmKeyInfo)o; + keyNames.add(keyInfo.getKeyName()); + } + return keyNames; + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java deleted file mode 100644 index b1ce4ba81cda..000000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java +++ /dev/null @@ -1,235 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.UUID; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.scm.cli.SQLCLI; -import org.apache.hadoop.test.GenericTestUtils; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; -import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -/** - * This class tests the CLI that transforms om.db into SQLite DB files. - */ -public class TestOmSQLCli { - - /** - * Set a timeout for each test. - */ - @Rule - public Timeout timeout = new Timeout(300000); - private MiniOzoneCluster cluster = null; - - private OzoneConfiguration conf; - private SQLCLI cli; - - private String userName = "userTest"; - private String adminName = "adminTest"; - private String volumeName0 = "volumeTest0"; - private String volumeName1 = "volumeTest1"; - private String bucketName0 = "bucketTest0"; - private String bucketName1 = "bucketTest1"; - private String bucketName2 = "bucketTest2"; - private String keyName0 = "key0"; - private String keyName1 = "key1"; - private String keyName2 = "key2"; - private String keyName3 = "key3"; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @Before - public void setup() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - OzoneBucket bucket0 = - TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName0); - OzoneBucket bucket1 = - TestDataUtil.createVolumeAndBucket(cluster, volumeName1, bucketName1); - OzoneBucket bucket2 = - TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName2); - - TestDataUtil.createKey(bucket0, keyName0, ""); - TestDataUtil.createKey(bucket1, keyName1, ""); - TestDataUtil.createKey(bucket2, keyName2, ""); - TestDataUtil.createKey(bucket2, keyName3, ""); - - cluster.getOzoneManager().stop(); - cluster.getStorageContainerManager().stop(); - cli = new SQLCLI(conf); - } - - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - // After HDDS-357, we have to fix SQLCli. - // TODO: fix SQLCli - @Ignore - @Test - public void testOmDB() throws Exception { - String dbOutPath = GenericTestUtils.getTempPath( - UUID.randomUUID() + "/out_sql.db"); - - String dbRootPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS); - String dbPath = dbRootPath + "/" + OM_DB_NAME; - String[] args = {"-p", dbPath, "-o", dbOutPath}; - - cli.run(args); - - Connection conn = connectDB(dbOutPath); - String sql = "SELECT * FROM volumeList"; - ResultSet rs = executeQuery(conn, sql); - List expectedValues = - new ArrayList<>(Arrays.asList(volumeName0, volumeName1)); - while (rs.next()) { - String userNameRs = rs.getString("userName"); - String volumeNameRs = rs.getString("volumeName"); - assertEquals(userName, userNameRs.substring(1)); - assertTrue(expectedValues.remove(volumeNameRs)); - } - assertEquals(0, expectedValues.size()); - - sql = "SELECT * FROM volumeInfo"; - rs = executeQuery(conn, sql); - expectedValues = - new ArrayList<>(Arrays.asList(volumeName0, volumeName1)); - while (rs.next()) { - String adName = rs.getString("adminName"); - String ownerName = rs.getString("ownerName"); - String volumeName = rs.getString("volumeName"); - assertEquals(adminName, adName); - assertEquals(userName, ownerName); - assertTrue(expectedValues.remove(volumeName)); - } - assertEquals(0, expectedValues.size()); - - sql = "SELECT * FROM aclInfo"; - rs = executeQuery(conn, sql); - expectedValues = - new ArrayList<>(Arrays.asList(volumeName0, volumeName1)); - while (rs.next()) { - String adName = rs.getString("adminName"); - String ownerName = rs.getString("ownerName"); - String volumeName = rs.getString("volumeName"); - String type = rs.getString("type"); - String uName = rs.getString("userName"); - String rights = rs.getString("rights"); - assertEquals(adminName, adName); - assertEquals(userName, ownerName); - assertEquals("USER", type); - assertEquals(userName, uName); - assertEquals("READ_WRITE", rights); - assertTrue(expectedValues.remove(volumeName)); - } - assertEquals(0, expectedValues.size()); - - sql = "SELECT * FROM bucketInfo"; - rs = executeQuery(conn, sql); - HashMap expectedMap = new HashMap<>(); - expectedMap.put(bucketName0, volumeName0); - expectedMap.put(bucketName2, volumeName0); - expectedMap.put(bucketName1, volumeName1); - while (rs.next()) { - String volumeName = rs.getString("volumeName"); - String bucketName = rs.getString("bucketName"); - boolean versionEnabled = rs.getBoolean("versionEnabled"); - String storegeType = rs.getString("storageType"); - assertEquals(volumeName, 
expectedMap.remove(bucketName)); - assertFalse(versionEnabled); - assertEquals("DISK", storegeType); - } - assertEquals(0, expectedMap.size()); - - sql = "SELECT * FROM keyInfo"; - rs = executeQuery(conn, sql); - HashMap> expectedMap2 = new HashMap<>(); - // no data written, data size will be 0 - expectedMap2.put(keyName0, - Arrays.asList(volumeName0, bucketName0, "0")); - expectedMap2.put(keyName1, - Arrays.asList(volumeName1, bucketName1, "0")); - expectedMap2.put(keyName2, - Arrays.asList(volumeName0, bucketName2, "0")); - expectedMap2.put(keyName3, - Arrays.asList(volumeName0, bucketName2, "0")); - while (rs.next()) { - String volumeName = rs.getString("volumeName"); - String bucketName = rs.getString("bucketName"); - String keyName = rs.getString("keyName"); - int dataSize = rs.getInt("dataSize"); - List vals = expectedMap2.remove(keyName); - assertNotNull(vals); - assertEquals(vals.get(0), volumeName); - assertEquals(vals.get(1), bucketName); - assertEquals(vals.get(2), Integer.toString(dataSize)); - } - assertEquals(0, expectedMap2.size()); - - conn.close(); - Files.delete(Paths.get(dbOutPath)); - } - - private ResultSet executeQuery(Connection conn, String sql) - throws SQLException { - Statement stmt = conn.createStatement(); - return stmt.executeQuery(sql); - } - - private Connection connectDB(String dbPath) throws Exception { - Class.forName("org.sqlite.JDBC"); - String connectPath = - String.format("jdbc:sqlite:%s", dbPath); - return DriverManager.getConnection(connectPath); - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java index 8eea23f02d54..b1139df9595e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java @@ -61,25 +61,24 @@ public class DBScanner implements Callable, SubcommandWithParent { description = "List Key -> Value instead of just Value.", defaultValue = "false", showDefaultValue = CommandLine.Help.Visibility.ALWAYS) - private boolean withKey; + private static boolean withKey; + + @CommandLine.Option(names = {"--length", "-l"}, + description = "Maximum number of items to list") + private static int limit = 100; @CommandLine.ParentCommand private RDBParser parent; private HashMap columnFamilyMap; - private static void displayTable(RocksDB rocksDB, - DBColumnFamilyDefinition dbColumnFamilyDefinition, - List list, boolean withKey) throws IOException { - ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( - dbColumnFamilyDefinition.getTableName() - .getBytes(StandardCharsets.UTF_8), list); - if (columnFamilyHandle == null) { - throw new IllegalArgumentException("columnFamilyHandle is null"); - } - RocksIterator iterator = rocksDB.newIterator(columnFamilyHandle); + private List scannedObjects; + + private static List displayTable(RocksIterator iterator, + DBColumnFamilyDefinition dbColumnFamilyDefinition) throws IOException { + List outputs = new ArrayList<>(); iterator.seekToFirst(); - while (iterator.isValid()){ + while (iterator.isValid() && limit > 0){ StringBuilder result = new StringBuilder(); if (withKey) { Object key = dbColumnFamilyDefinition.getKeyCodec() @@ -90,11 +89,34 @@ private static void displayTable(RocksDB rocksDB, } Object o = dbColumnFamilyDefinition.getValueCodec() .fromPersistedFormat(iterator.value()); + outputs.add(o); Gson gson = new GsonBuilder().setPrettyPrinting().create(); 
result.append(gson.toJson(o)); System.out.println(result.toString()); + limit--; iterator.next(); } + return outputs; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public RDBParser getParent() { + return parent; + } + + public void setParent(RDBParser parent) { + this.parent = parent; + } + + public static void setLimit(int limit) { + DBScanner.limit = limit; + } + + public List getScannedObjects() { + return scannedObjects; } private static ColumnFamilyHandle getColumnFamilyHandle( @@ -150,6 +172,10 @@ public Void call() throws Exception { private void printAppropriateTable( List columnFamilyHandleList, RocksDB rocksDB, String dbPath) throws IOException { + if (limit < 1) { + throw new IllegalArgumentException( + "List length should be a positive number"); + } dbPath = removeTrailingSlashIfNeeded(dbPath); this.constructColumnFamilyMap(DBDefinitionFactory. getDefinition(new File(dbPath).getName())); @@ -159,8 +185,15 @@ private void printAppropriateTable( } else { DBColumnFamilyDefinition columnFamilyDefinition = this.columnFamilyMap.get(tableName); - displayTable(rocksDB, columnFamilyDefinition, columnFamilyHandleList, - withKey); + ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( + columnFamilyDefinition.getTableName() + .getBytes(StandardCharsets.UTF_8), + columnFamilyHandleList); + if (columnFamilyHandle == null) { + throw new IllegalArgumentException("columnFamilyHandle is null"); + } + RocksIterator iterator = rocksDB.newIterator(columnFamilyHandle); + scannedObjects = displayTable(iterator, columnFamilyDefinition); } } else { System.out.println("Incorrect db Path"); @@ -179,3 +212,4 @@ public Class getParentType() { return RDBParser.class; } } + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java index e18baaab1838..f133386ab13f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java @@ -49,6 +49,10 @@ public String getDbPath() { return dbPath; } + public void setDbPath(String dbPath) { + this.dbPath = dbPath; + } + @Override public Class getParentType() { return OzoneDebug.class; From 1ba4854752933d3dee163fd303e5e1ee291a3b23 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 27 Aug 2020 10:43:39 +0200 Subject: [PATCH 140/165] HDDS-4056. 
Convert OzoneAdmin to pluggable model (#1285) --- .../apache/hadoop/hdds/cli/package-info.java | 4 +- hadoop-hdds/tools/pom.xml | 8 ++ .../apache/hadoop/hdds/cli/OzoneAdmin.java | 67 ++++++++++ .../package-info.java} | 19 +-- .../scm/cli/ReplicationManagerCommands.java | 23 ++-- .../ReplicationManagerStartSubcommand.java | 21 +-- .../ReplicationManagerStatusSubcommand.java | 32 ++--- .../cli/ReplicationManagerStopSubcommand.java | 25 ++-- .../hdds/scm/cli/SafeModeCheckSubcommand.java | 40 +++--- .../hadoop/hdds/scm/cli/SafeModeCommands.java | 27 ++-- .../hdds/scm/cli/SafeModeExitSubcommand.java | 22 +--- .../hdds/scm/cli/SafeModeWaitSubcommand.java | 13 +- .../apache/hadoop/hdds/scm/cli/ScmOption.java | 72 +++++++++++ .../hadoop/hdds/scm/cli/ScmSubcommand.java | 43 ++++++ .../hdds/scm/cli/TopologySubcommand.java | 65 +++++----- .../scm/cli/container/CloseSubcommand.java | 20 +-- .../scm/cli/container/ContainerCommands.java | 21 +-- .../scm/cli/container/CreateSubcommand.java | 26 ++-- .../scm/cli/container/DeleteSubcommand.java | 20 +-- .../scm/cli/container/InfoSubcommand.java | 40 +++--- .../scm/cli/container/ListSubcommand.java | 32 ++--- .../scm/cli/datanode/DatanodeCommands.java | 21 +-- .../scm/cli/datanode/ListInfoSubcommand.java | 48 +++---- .../pipeline/ActivatePipelineSubcommand.java | 19 +-- .../cli/pipeline/ClosePipelineSubcommand.java | 19 +-- .../pipeline/CreatePipelineSubcommand.java | 38 +++--- .../DeactivatePipelineSubcommand.java | 19 +-- .../cli/pipeline/ListPipelinesSubcommand.java | 40 +++--- .../scm/cli/pipeline/PipelineCommands.java | 22 ++-- .../src/main/smoketest/admincli/admin.robot | 32 +++++ .../main/smoketest/admincli/container.robot | 68 ++++++++++ .../main/smoketest/admincli/datanode.robot | 19 ++- .../main/smoketest/admincli/pipeline.robot | 49 +++++-- .../admincli/replicationmanager.robot | 53 ++++++++ .../main/smoketest/admincli/safemode.robot | 45 +++++++ hadoop-ozone/dist/src/shell/ozone/ozone | 2 +- .../ozone/shell/TestOzoneDatanodeShell.java | 2 +- hadoop-ozone/tools/pom.xml | 2 - .../apache/hadoop/ozone/admin/OzoneAdmin.java | 122 ------------------ .../apache/hadoop/ozone/admin/om/OMAdmin.java | 2 +- ...stGenerateOzoneRequiredConfigurations.java | 5 +- pom.xml | 8 +- 42 files changed, 731 insertions(+), 544 deletions(-) create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java rename hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/{scm/cli/container/WithScmClient.java => cli/package-info.java} (71%) create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java create mode 100644 hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot create mode 100644 hadoop-ozone/dist/src/main/smoketest/admincli/container.robot create mode 100644 hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot create mode 100644 hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java index 8dcc1d1a3c91..aabad6f14464 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java @@ -1,4 +1,4 @@ -/** +/* 
* Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,4 +19,4 @@ /** * Generic helper class to make instantiate picocli based cli tools. */ -package org.apache.hadoop.hdds.cli; \ No newline at end of file +package org.apache.hadoop.hdds.cli; diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index f362a0bfea61..fcc553fb4306 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -66,6 +66,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-cli commons-cli + + log4j + log4j + + + org.kohsuke.metainf-services + metainf-services + org.xerial sqlite-jdbc diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java new file mode 100644 index 000000000000..aca8a4cf8d79 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.util.NativeCodeLoader; + +import org.apache.log4j.ConsoleAppender; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; +import picocli.CommandLine; + +/** + * Ozone Admin Command line tool. + */ +@CommandLine.Command(name = "ozone admin", + hidden = true, + description = "Developer tools for Ozone Admin operations", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class OzoneAdmin extends GenericCli { + + private OzoneConfiguration ozoneConf; + + public OzoneAdmin() { + super(OzoneAdmin.class); + } + + public OzoneConfiguration getOzoneConf() { + if (ozoneConf == null) { + ozoneConf = createOzoneConfiguration(); + } + return ozoneConf; + } + + /** + * Main for the Ozone Admin shell Command handling. + * + * @param argv - System Args Strings[] + */ + public static void main(String[] argv) { + LogManager.resetConfiguration(); + Logger.getRootLogger().setLevel(Level.INFO); + Logger.getRootLogger() + .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); + Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); + + new OzoneAdmin().run(argv); + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/WithScmClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/package-info.java similarity index 71% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/WithScmClient.java rename to hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/package-info.java index 9852d50fc8c9..82fbd722932e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/WithScmClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/package-info.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,24 +6,17 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
+ * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdds.scm.cli.container; - -import org.apache.hadoop.hdds.scm.client.ScmClient; /** - * Command which provides a SCM client based on the current config. + * Command-line tools for HDDS. */ -public interface WithScmClient { - - ScmClient createScmClient(); - -} +package org.apache.hadoop.hdds.cli; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java index fcb9ad6b2f6a..cd5aba3a82e2 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,12 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Spec; /** @@ -41,21 +42,21 @@ ReplicationManagerStopSubcommand.class, ReplicationManagerStatusSubcommand.class }) -public class ReplicationManagerCommands implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class ReplicationManagerCommands implements Callable, + SubcommandWithParent { @Spec private CommandSpec spec; - @ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + + @Override + public Class getParentType() { + return OzoneAdmin.class; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java index 1adec6b0c4b4..ff82b82ec87a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,32 +22,25 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; -import java.util.concurrent.Callable; +import java.io.IOException; /** - * This is the handler that process safe mode check command. + * Handler to start replication manager. 
*/ @Command( name = "start", description = "Start ReplicationManager", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ReplicationManagerStartSubcommand implements Callable { +public class ReplicationManagerStartSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ReplicationManagerStartSubcommand.class); - @ParentCommand - private ReplicationManagerCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.startReplicationManager(); - LOG.info("Starting ReplicationManager..."); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.startReplicationManager(); + LOG.info("Starting ReplicationManager..."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java index 2ebf28c80741..c6800befd8cd 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,39 +22,31 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; -import java.util.concurrent.Callable; +import java.io.IOException; /** - * This is the handler that process safe mode check command. + * Handler to query status of replication manager. 
*/ @Command( name = "status", description = "Check if ReplicationManager is running or not", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ReplicationManagerStatusSubcommand implements Callable { +public class ReplicationManagerStatusSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ReplicationManagerStatusSubcommand.class); - @ParentCommand - private ReplicationManagerCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - boolean execReturn = scmClient.getReplicationManagerStatus(); - - // Output data list - if(execReturn){ - LOG.info("ReplicationManager is Running."); - } else { - LOG.info("ReplicationManager is Not Running."); - } - return null; + public void execute(ScmClient scmClient) throws IOException { + boolean execReturn = scmClient.getReplicationManagerStatus(); + + // Output data list + if(execReturn){ + LOG.info("ReplicationManager is Running."); + } else { + LOG.info("ReplicationManager is Not Running."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java index 7cafd01b12d6..7d3063a7636c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,34 +22,27 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; -import java.util.concurrent.Callable; +import java.io.IOException; /** - * This is the handler that process safe mode check command. + * Handler to stop replication manager. 
*/ @Command( name = "stop", description = "Stop ReplicationManager", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ReplicationManagerStopSubcommand implements Callable { +public class ReplicationManagerStopSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ReplicationManagerStopSubcommand.class); - @ParentCommand - private ReplicationManagerCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.stopReplicationManager(); - LOG.info("Stopping ReplicationManager..."); - LOG.info("Requested SCM to stop ReplicationManager, " + - "it might take sometime for the ReplicationManager to stop."); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.stopReplicationManager(); + LOG.info("Stopping ReplicationManager..."); + LOG.info("Requested SCM to stop ReplicationManager, " + + "it might take sometime for the ReplicationManager to stop."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java index b2cfea3daaaa..ba359af1c59b 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdds.scm.cli; +import java.io.IOException; import java.util.Map; -import java.util.concurrent.Callable; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.HddsVersionProvider; @@ -28,7 +28,6 @@ import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process safe mode check command. 
@@ -38,39 +37,32 @@ description = "Check if SCM is in safe mode", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class SafeModeCheckSubcommand implements Callable { +public class SafeModeCheckSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(SafeModeCheckSubcommand.class); - @ParentCommand - private SafeModeCommands parent; - @CommandLine.Option(names = {"--verbose"}, description = "Show detailed status of rules.") private boolean verbose; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - boolean execReturn = scmClient.inSafeMode(); + public void execute(ScmClient scmClient) throws IOException { + boolean execReturn = scmClient.inSafeMode(); - // Output data list - if(execReturn){ - LOG.info("SCM is in safe mode."); - if (verbose) { - for (Map.Entry> entry : - scmClient.getSafeModeRuleStatuses().entrySet()) { - Pair value = entry.getValue(); - LOG.info("validated:{}, {}, {}", - value.getLeft(), entry.getKey(), value.getRight()); - } + // Output data list + if(execReturn){ + LOG.info("SCM is in safe mode."); + if (verbose) { + for (Map.Entry> entry : + scmClient.getSafeModeRuleStatuses().entrySet()) { + Pair value = entry.getValue(); + LOG.info("validated:{}, {}, {}", + value.getLeft(), entry.getKey(), value.getRight()); } - } else { - LOG.info("SCM is out of safe mode."); } - return null; + } else { + LOG.info("SCM is out of safe mode."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java index 017e1ba3c2c7..6ba7cf295470 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,12 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Spec; /** @@ -43,24 +42,20 @@ SafeModeExitSubcommand.class, SafeModeWaitSubcommand.class }) -public class SafeModeCommands implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeCommands.class); +@MetaInfServices(SubcommandWithParent.class) +public class SafeModeCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; - @ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + + @Override + public Class getParentType() { + return OzoneAdmin.class; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java index 9f1db45bb4e2..12490c5c2c51 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.scm.cli; -import java.util.concurrent.Callable; +import java.io.IOException; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; @@ -25,7 +25,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process safe mode exit command. 
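Note on the @MetaInfServices registration used above: ReplicationManagerCommands and SafeModeCommands no longer carry an @ParentCommand field; each registers itself as a SubcommandWithParent service and names its parent CLI through getParentType(). The consuming side of that registration is not shown in these hunks (it presumably lives in GenericCli/OzoneAdmin), so what follows is only a plausible sketch of the discovery step, assuming the standard java.util.ServiceLoader mechanism that metainf-services feeds. The SubcommandWithParent shape is copied from its usage here; the SubcommandDiscovery class and its method are invented for illustration.

import java.util.ServiceLoader;

import picocli.CommandLine;

/** Assumed shape of the interface these commands implement (defined elsewhere in the change). */
interface SubcommandWithParent {
  Class<?> getParentType();
}

/** Illustrative sketch only, not patch code: attach service-registered subcommands to a parent CLI. */
final class SubcommandDiscovery {

  private SubcommandDiscovery() {
  }

  static void addSubcommands(CommandLine parentCli, Class<?> parentType) {
    // @MetaInfServices generates META-INF/services entries, which ServiceLoader reads back here.
    for (SubcommandWithParent candidate
        : ServiceLoader.load(SubcommandWithParent.class)) {
      if (parentType.equals(candidate.getParentType())) {
        CommandLine sub = new CommandLine(candidate);
        // picocli takes the subcommand name from the @Command annotation on the candidate object.
        parentCli.addSubcommand(sub.getCommandName(), sub);
      }
    }
  }
}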
@@ -35,23 +34,16 @@ description = "Force SCM out of safe mode", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class SafeModeExitSubcommand implements Callable { +public class SafeModeExitSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(SafeModeExitSubcommand.class); - @ParentCommand - private SafeModeCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - boolean execReturn = scmClient.forceExitSafeMode(); - if(execReturn){ - LOG.info("SCM exit safe mode successfully."); - } - return null; + public void execute(ScmClient scmClient) throws IOException { + boolean execReturn = scmClient.forceExitSafeMode(); + if(execReturn){ + LOG.info("SCM exit safe mode successfully."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java index 7668a47d24d2..e3fb5c1e718e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,7 @@ import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; +import picocli.CommandLine.Mixin; /** * This is the handler that process safe mode wait command. @@ -45,21 +45,20 @@ public class SafeModeWaitSubcommand implements Callable { @Option(description = "Define timeout (in second) to wait until (exit code 1) " + "or until safemode is ended (exit code 0).", defaultValue = "30", - required = false, names = { - "-t", "--timeout"}) + names = { "-t", "--timeout"}) private long timeoutSeconds; private long startTestTime; - @ParentCommand - private SafeModeCommands parent; + @Mixin + private ScmOption scmOption; @Override public Void call() throws Exception { startTestTime = System.currentTimeMillis(); while (getRemainingTimeInSec() > 0) { - try (ScmClient scmClient = parent.getParent().createScmClient()) { + try (ScmClient scmClient = scmOption.createScmClient()) { while (getRemainingTimeInSec() > 0) { boolean isSafeModeActive = scmClient.inSafeMode(); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java new file mode 100644 index 000000000000..5b8b81436c78 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.cli.GenericParentCommand; +import org.apache.hadoop.hdds.conf.MutableConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import picocli.CommandLine; + +import java.io.IOException; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; +import static picocli.CommandLine.Spec.Target.MIXEE; + +/** + * Defines command-line option for SCM address. + */ +public class ScmOption { + + @CommandLine.Spec(MIXEE) + private CommandLine.Model.CommandSpec spec; + + @CommandLine.Option(names = {"--scm"}, + description = "The destination scm (host:port)") + private String scm; + + public ScmClient createScmClient() { + try { + GenericParentCommand parent = (GenericParentCommand) + spec.root().userObject(); + OzoneConfiguration conf = parent.createOzoneConfiguration(); + checkAndSetSCMAddressArg(conf); + + return new ContainerOperationClient(conf); + } catch (IOException ex) { + throw new IllegalArgumentException("Can't create SCM client", ex); + } + } + + private void checkAndSetSCMAddressArg(MutableConfigurationSource conf) { + if (StringUtils.isNotEmpty(scm)) { + conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm); + } + if (!HddsUtils.getHostNameFromConfigKeys(conf, + ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) { + + throw new IllegalArgumentException( + ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + + " should be set in ozone-site.xml or with the --scm option"); + } + } + +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java new file mode 100644 index 000000000000..6dc09c2cbecd --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli; + +import org.apache.hadoop.hdds.scm.client.ScmClient; +import picocli.CommandLine; + +import java.io.IOException; +import java.util.concurrent.Callable; + +/** + * Base class for admin commands that connect via SCM client. + */ +public abstract class ScmSubcommand implements Callable { + + @CommandLine.Mixin + private ScmOption scmOption; + + protected abstract void execute(ScmClient client) throws IOException; + + @Override + public final Void call() throws Exception { + try (ScmClient scmClient = scmOption.createScmClient()) { + execute(scmClient); + return null; + } + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java index 214da34561b5..c1aebaeec22f 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,18 +18,19 @@ package org.apache.hadoop.hdds.scm.cli; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.TreeSet; -import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; import org.apache.hadoop.hdds.scm.client.ScmClient; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; @@ -37,9 +38,9 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; + +import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Handler of printTopology command. 
@@ -49,22 +50,18 @@ description = "Print a tree of the network topology as reported by SCM", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class TopologySubcommand implements Callable { - - @Spec - private CommandSpec spec; - - @CommandLine.ParentCommand - private WithScmClient parent; +@MetaInfServices(SubcommandWithParent.class) +public class TopologySubcommand extends ScmSubcommand + implements SubcommandWithParent { - private static List stateArray = new ArrayList<>(); + private static final List STATES = new ArrayList<>(); static { - stateArray.add(HEALTHY); - stateArray.add(STALE); - stateArray.add(DEAD); - stateArray.add(DECOMMISSIONING); - stateArray.add(DECOMMISSIONED); + STATES.add(HEALTHY); + STATES.add(STALE); + STATES.add(DEAD); + STATES.add(DECOMMISSIONING); + STATES.add(DECOMMISSIONED); } @CommandLine.Option(names = {"-o", "--order"}, @@ -76,22 +73,24 @@ public class TopologySubcommand implements Callable { private boolean fullInfo; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.createScmClient()) { - for (HddsProtos.NodeState state : stateArray) { - List nodes = scmClient.queryNode(state, - HddsProtos.QueryScope.CLUSTER, ""); - if (nodes != null && nodes.size() > 0) { - // show node state - System.out.println("State = " + state.toString()); - if (order) { - printOrderedByLocation(nodes); - } else { - printNodesWithLocation(nodes); - } + public Class getParentType() { + return OzoneAdmin.class; + } + + @Override + protected void execute(ScmClient scmClient) throws IOException { + for (HddsProtos.NodeState state : STATES) { + List nodes = scmClient.queryNode(state, + HddsProtos.QueryScope.CLUSTER, ""); + if (nodes != null && !nodes.isEmpty()) { + // show node state + System.out.println("State = " + state.toString()); + if (order) { + printOrderedByLocation(nodes); + } else { + printNodesWithLocation(nodes); } } - return null; } } @@ -124,7 +123,7 @@ private String formatPortOutput(List ports) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < ports.size(); i++) { HddsProtos.Port port = ports.get(i); - sb.append(port.getName() + "=" + port.getValue()); + sb.append(port.getName()).append("=").append(port.getValue()); if (i < ports.size() - 1) { sb.append(","); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java index cd81d32b8a82..53cbd2f63da3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,15 @@ */ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; +import java.io.IOException; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import static org.apache.hadoop.hdds.scm.cli.container.ContainerCommands.checkContainerExists; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; /** * The handler of close container command. 
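The remaining subcommand conversions repeat the pattern TopologySubcommand establishes above, so one worked example may be useful here. The class below is hypothetical and not part of this patch: it sketches how a new "ozone admin" subcommand would be written on top of the ScmSubcommand base class and registered through @MetaInfServices(SubcommandWithParent.class). Its name, description, and the safe-mode probe it performs are invented; the execute(ScmClient) hook, the inherited --scm handling, and getParentType() mirror the patch, and scmClient.inSafeMode() is a call already used elsewhere in this change.

package org.apache.hadoop.hdds.scm.cli;   // hypothetical location

import java.io.IOException;

import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.cli.OzoneAdmin;
import org.apache.hadoop.hdds.cli.SubcommandWithParent;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.kohsuke.MetaInfServices;
import picocli.CommandLine.Command;

/**
 * Hypothetical example, not part of this patch: a new admin subcommand
 * built on the ScmSubcommand/ScmOption scaffolding introduced above.
 */
@Command(name = "safemode-probe",
    description = "Example: print whether SCM is currently in safe mode",
    mixinStandardHelpOptions = true,
    versionProvider = HddsVersionProvider.class)
@MetaInfServices(SubcommandWithParent.class)
public class SafeModeProbeSubcommand extends ScmSubcommand
    implements SubcommandWithParent {

  @Override
  protected void execute(ScmClient scmClient) throws IOException {
    // ScmSubcommand.call() opens and closes the ScmClient around this method,
    // honouring the --scm option contributed by the ScmOption mixin.
    System.out.println("SCM in safe mode: " + scmClient.inSafeMode());
  }

  @Override
  public Class<?> getParentType() {
    // Tells the service-loader based wiring which CLI this command belongs to.
    return OzoneAdmin.class;
  }
}

Pure grouping commands (ContainerCommands, DatanodeCommands, PipelineCommands) keep the Callable<Void> shape and only add SubcommandWithParent, as the hunks that follow show.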
@@ -35,21 +35,15 @@ description = "close container", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class CloseSubcommand implements Callable { - - @ParentCommand - private ContainerCommands parent; +public class CloseSubcommand extends ScmSubcommand { @Parameters(description = "Id of the container to close") private long containerId; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - checkContainerExists(scmClient, containerId); - scmClient.closeContainer(containerId); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + checkContainerExists(scmClient, containerId); + scmClient.closeContainer(containerId); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index cf665b008f72..de1015d141e7 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,12 +22,14 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Spec; /** @@ -45,24 +47,23 @@ CreateSubcommand.class, CloseSubcommand.class }) -public class ContainerCommands implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class ContainerCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; - @ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + @Override + public Class getParentType() { + return OzoneAdmin.class; + } + public static void checkContainerExists(ScmClient scmClient, long containerId) throws IOException { ContainerInfo container = scmClient.getContainer(containerId); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java index eb79e50506e2..9eedbf858958 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; +import java.io.IOException; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; @@ -28,7 +29,6 @@ import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process container creation command. @@ -38,27 +38,19 @@ description = "Create container", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class CreateSubcommand implements Callable { +public class CreateSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(CreateSubcommand.class); - @ParentCommand - private ContainerCommands parent; - @Option(description = "Owner of the new container", defaultValue = "OZONE", - required = false, names = { - "-o", "--owner"}) - + names = { "-o", "--owner"}) private String owner; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - ContainerWithPipeline container = scmClient.createContainer(owner); - LOG.info("Container {} is created.", - container.getContainerInfo().getContainerID()); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + ContainerWithPipeline container = scmClient.createContainer(owner); + LOG.info("Container {} is created.", + container.getContainerInfo().getContainerID()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java index a438fe906131..62d1b8ab2ae3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,16 +18,16 @@ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; +import java.io.IOException; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import static org.apache.hadoop.hdds.scm.cli.container.ContainerCommands.checkContainerExists; import picocli.CommandLine.Command; import picocli.CommandLine.Option; import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process delete container command. 
@@ -37,7 +37,7 @@ description = "Delete container", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class DeleteSubcommand implements Callable { +public class DeleteSubcommand extends ScmSubcommand { @Parameters(description = "Id of the container to close") private long containerId; @@ -46,15 +46,9 @@ public class DeleteSubcommand implements Callable { "--force"}, description = "forcibly delete the container") private boolean force; - @ParentCommand - private ContainerCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - checkContainerExists(scmClient, containerId); - scmClient.deleteContainer(containerId, force); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + checkContainerExists(scmClient, containerId); + scmClient.deleteContainer(containerId, force); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java index 31e2a45dfc58..5defc2456a42 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; +import java.io.IOException; import java.util.stream.Collectors; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; @@ -31,7 +32,6 @@ import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process container info command. @@ -41,36 +41,30 @@ description = "Show information about a specific container", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class InfoSubcommand implements Callable { +public class InfoSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(InfoSubcommand.class); - @ParentCommand - private ContainerCommands parent; - @Parameters(description = "Decimal id of the container.") private long containerID; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - final ContainerWithPipeline container = scmClient. - getContainerWithPipeline(containerID); - Preconditions.checkNotNull(container, "Container cannot be null"); + public void execute(ScmClient scmClient) throws IOException { + final ContainerWithPipeline container = scmClient. + getContainerWithPipeline(containerID); + Preconditions.checkNotNull(container, "Container cannot be null"); - // Print container report info. - LOG.info("Container id: {}", containerID); - LOG.info("Pipeline id: {}", container.getPipeline().getId().getId()); - LOG.info("Container State: {}", container.getContainerInfo().getState()); + // Print container report info. 
+ LOG.info("Container id: {}", containerID); + LOG.info("Pipeline id: {}", container.getPipeline().getId().getId()); + LOG.info("Container State: {}", container.getContainerInfo().getState()); - // Print pipeline of an existing container. - String machinesStr = container.getPipeline().getNodes().stream().map( - InfoSubcommand::buildDatanodeDetails) - .collect(Collectors.joining(",\n")); - LOG.info("Datanodes: [{}]", machinesStr); - return null; - } + // Print pipeline of an existing container. + String machinesStr = container.getPipeline().getNodes().stream().map( + InfoSubcommand::buildDatanodeDetails) + .collect(Collectors.joining(",\n")); + LOG.info("Datanodes: [{}]", machinesStr); } private static String buildDatanodeDetails(DatanodeDetails details) { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index 3ffc118b57be..e9b0b7dc9a50 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +19,9 @@ import java.io.IOException; import java.util.List; -import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -36,7 +36,6 @@ import picocli.CommandLine.Command; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process container list command. 
@@ -46,22 +45,19 @@ description = "List containers", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ListSubcommand implements Callable { +public class ListSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ListSubcommand.class); - @ParentCommand - private ContainerCommands parent; - @Option(names = {"-s", "--start"}, - description = "Container id to start the iteration", required = false) - private long startId = 0; + description = "Container id to start the iteration") + private long startId; @Option(names = {"-c", "--count"}, description = "Maximum number of containers to list", defaultValue = "20", showDefaultValue = Visibility.ALWAYS) - private int count = 20; + private int count; private static final ObjectWriter WRITER; @@ -83,17 +79,13 @@ private void outputContainerInfo(ContainerInfo containerInfo) } @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - List containerList = - scmClient.listContainer(startId, count); + public void execute(ScmClient scmClient) throws IOException { + List containerList = + scmClient.listContainer(startId, count); - // Output data list - for (ContainerInfo container : containerList) { - outputContainerInfo(container); - } - return null; + // Output data list + for (ContainerInfo container : containerList) { + outputContainerInfo(container); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java index b7ba59c77604..7e77c60f6e1e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,10 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -38,21 +40,20 @@ subcommands = { ListInfoSubcommand.class }) -public class DatanodeCommands implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class DatanodeCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; - @CommandLine.ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + + @Override + public Class getParentType() { + return OzoneAdmin.class; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java index e4060b3dadaf..80c5ecaae820 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,13 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import picocli.CommandLine; import java.io.IOException; import java.util.List; -import java.util.concurrent.Callable; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -39,44 +39,36 @@ description = "List info of datanodes", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ListInfoSubcommand implements Callable { - - @CommandLine.ParentCommand - private DatanodeCommands parent; +public class ListInfoSubcommand extends ScmSubcommand { @CommandLine.Option(names = {"--ip"}, description = "Show info by ip address.", - defaultValue = "", - required = false) + defaultValue = "") private String ipaddress; @CommandLine.Option(names = {"--id"}, description = "Show info by datanode UUID.", - defaultValue = "", - required = false) + defaultValue = "") private String uuid; private List pipelines; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - pipelines = scmClient.listPipelines(); - if (Strings.isNullOrEmpty(ipaddress) && Strings.isNullOrEmpty(uuid)) { - getAllNodes(scmClient).stream().forEach(p -> printDatanodeInfo(p)); - } else { - Stream allNodes = getAllNodes(scmClient).stream(); - if (!Strings.isNullOrEmpty(ipaddress)) { - allNodes = allNodes.filter(p -> p.getIpAddress() - .compareToIgnoreCase(ipaddress) == 0); - } - if (!Strings.isNullOrEmpty(uuid)) { - allNodes = allNodes.filter(p -> 
p.getUuid().toString().equals(uuid)); - } - allNodes.forEach(p -> printDatanodeInfo(p)); + public void execute(ScmClient scmClient) throws IOException { + pipelines = scmClient.listPipelines(); + if (Strings.isNullOrEmpty(ipaddress) && Strings.isNullOrEmpty(uuid)) { + getAllNodes(scmClient).forEach(this::printDatanodeInfo); + } else { + Stream allNodes = getAllNodes(scmClient).stream(); + if (!Strings.isNullOrEmpty(ipaddress)) { + allNodes = allNodes.filter(p -> p.getIpAddress() + .compareToIgnoreCase(ipaddress) == 0); + } + if (!Strings.isNullOrEmpty(uuid)) { + allNodes = allNodes.filter(p -> p.getUuid().toString().equals(uuid)); } - return null; + allNodes.forEach(this::printDatanodeInfo); } } @@ -101,7 +93,7 @@ private void printDatanodeInfo(DatanodeDetails datanode) { " or the node is not in Healthy state."); } else { relatedPipelineNum = relatedPipelines.size(); - relatedPipelines.stream().forEach( + relatedPipelines.forEach( p -> pipelineListInfo.append(p.getId().getId().toString()) .append("/").append(p.getFactor().toString()).append("/") .append(p.getType().toString()).append("/") @@ -118,4 +110,4 @@ private void printDatanodeInfo(DatanodeDetails datanode) { + "/" + datanode.getHostName() + "/" + relatedPipelineNum + " pipelines) \n" + "Related pipelines: \n" + pipelineListInfo); } -} \ No newline at end of file +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java index ec4b1b789e8c..a61655dc66b0 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,11 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; /** * Handler of activate pipeline command. 
@@ -33,20 +34,14 @@ description = "Activates the given Pipeline", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ActivatePipelineSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; +public class ActivatePipelineSubcommand extends ScmSubcommand { @CommandLine.Parameters(description = "ID of the pipeline to activate") private String pipelineId; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.activatePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.activatePipeline( + HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java index 89a280e805c0..78b83e56db07 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,11 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; /** * Handler of close pipeline command. @@ -33,20 +34,14 @@ description = "Close pipeline", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ClosePipelineSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; +public class ClosePipelineSubcommand extends ScmSubcommand { @CommandLine.Parameters(description = "ID of the pipeline to close") private String pipelineId; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.closePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.closePipeline( + HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java index e0bdddb7797e..c784be88b376 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,12 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; /** * Handler of createPipeline command. @@ -34,44 +35,37 @@ description = "create pipeline", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class CreatePipelineSubcommand implements Callable { - @CommandLine.ParentCommand - private PipelineCommands parent; +public class CreatePipelineSubcommand extends ScmSubcommand { @CommandLine.Option( names = {"-t", "--replicationType"}, description = "Replication type (STAND_ALONE, RATIS)", defaultValue = "STAND_ALONE" ) - private HddsProtos.ReplicationType type - = HddsProtos.ReplicationType.STAND_ALONE; + private HddsProtos.ReplicationType type; @CommandLine.Option( names = {"-f", "--replicationFactor"}, description = "Replication factor (ONE, THREE)", defaultValue = "ONE" ) - private HddsProtos.ReplicationFactor factor - = HddsProtos.ReplicationFactor.ONE; + private HddsProtos.ReplicationFactor factor; @Override - public Void call() throws Exception { + public void execute(ScmClient scmClient) throws IOException { if (type == HddsProtos.ReplicationType.CHAINED) { throw new IllegalArgumentException(type.name() + " is not supported yet."); } - try (ScmClient scmClient = parent.getParent().createScmClient()) { - Pipeline pipeline = scmClient.createReplicationPipeline( - type, - factor, - HddsProtos.NodePool.getDefaultInstance()); + Pipeline pipeline = scmClient.createReplicationPipeline( + type, + factor, + HddsProtos.NodePool.getDefaultInstance()); - if (pipeline != null) { - System.out.println(pipeline.getId().toString() + - " is created. Factor: " + pipeline.getFactor() + - ", Type: " + pipeline.getType()); - } - return null; + if (pipeline != null) { + System.out.println(pipeline.getId().toString() + + " is created. Factor: " + pipeline.getFactor() + + ", Type: " + pipeline.getType()); } } -} \ No newline at end of file +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java index 4f4f741a3647..70df4d91fae9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,11 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; /** * Handler of deactivate pipeline command. 
@@ -33,20 +34,14 @@ description = "Deactivates the given Pipeline", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class DeactivatePipelineSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; +public class DeactivatePipelineSubcommand extends ScmSubcommand { @CommandLine.Parameters(description = "ID of the pipeline to deactivate") private String pipelineId; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.deactivatePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.deactivatePipeline( + HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java index 729daeae56bc..58ae26e500e1 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,12 @@ import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; import java.util.stream.Stream; /** @@ -35,38 +36,29 @@ description = "List all active pipelines", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ListPipelinesSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; +public class ListPipelinesSubcommand extends ScmSubcommand { @CommandLine.Option(names = {"-ffc", "--filterByFactor"}, description = "Filter listed pipelines by Factor(ONE/one)", - defaultValue = "", - required = false) + defaultValue = "") private String factor; @CommandLine.Option(names = {"-fst", "--filterByState"}, description = "Filter listed pipelines by State(OPEN/CLOSE)", - defaultValue = "", - required = false) + defaultValue = "") private String state; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - Stream stream = scmClient.listPipelines().stream(); - if (!Strings.isNullOrEmpty(factor)) { - stream = stream.filter( - p -> p.getFactor().toString().compareToIgnoreCase(factor) == 0); - } - if (!Strings.isNullOrEmpty(state)) { - stream = stream.filter(p -> p.getPipelineState().toString() - .compareToIgnoreCase(state) == 0); - } - stream.forEach(System.out::println); - return null; + public void execute(ScmClient scmClient) throws IOException { + Stream stream = scmClient.listPipelines().stream(); + if (!Strings.isNullOrEmpty(factor)) { + stream = stream.filter( + p -> p.getFactor().toString().compareToIgnoreCase(factor) == 0); + } + if (!Strings.isNullOrEmpty(state)) { + stream = stream.filter(p -> p.getPipelineState().toString() + .compareToIgnoreCase(state) 
== 0); } + stream.forEach(System.out::println); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java index d5c0234d01f4..ba7371e6214a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,12 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Spec; /** @@ -43,21 +44,20 @@ CreatePipelineSubcommand.class, ClosePipelineSubcommand.class }) -public class PipelineCommands implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class PipelineCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; - @ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + + @Override + public Class getParentType() { + return OzoneAdmin.class; + } } diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot new file mode 100644 index 000000000000..a28888b23f4b --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot @@ -0,0 +1,32 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test ozone admin command +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes + +*** Test Cases *** +Incomplete command + ${output} = Execute And Ignore Error ozone admin + Should contain ${output} Incomplete command + Should contain ${output} container + Should contain ${output} datanode + Should contain ${output} om + Should contain ${output} pipeline + Should contain ${output} replicationmanager + Should contain ${output} safemode + Should contain ${output} printTopology diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot new file mode 100644 index 000000000000..0560880de79d --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot @@ -0,0 +1,68 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test ozone admin container command +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes +Suite Setup Create test data + +*** Variables *** +${CONTAINER} + +*** Keywords *** +Create test data + Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Execute ozone freon ockg -n1 -t1 -p container + +*** Test Cases *** +Create container + ${output} = Execute ozone admin container create + Should contain ${output} is created + ${container} = Execute echo "${output}" | grep 'is created' | cut -f2 -d' ' + Set Suite Variable ${CONTAINER} ${container} + +List containers + ${output} = Execute ozone admin container list + Should contain ${output} OPEN + +List containers with explicit host + ${output} = Execute ozone admin container list --scm scm + Should contain ${output} OPEN + +Container info + ${output} = Execute ozone admin container info "${CONTAINER}" + Should contain ${output} Container id: ${CONTAINER} + Should contain ${output} Datanodes + +Close container + Execute ozone admin container close "${CONTAINER}" + ${output} = Execute ozone admin container info "${CONTAINER}" + Should contain ${output} CLOS + +Incomplete command + ${output} = Execute And Ignore Error ozone admin container + Should contain ${output} Incomplete command + Should contain ${output} list + Should contain ${output} info + Should contain ${output} delete + Should contain ${output} create + Should contain ${output} close + +List containers on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose container list --scm unknown-host + Should contain ${output} Invalid host name + diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot index cb16bc8bc86a..b34f3af6255a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot +++ 
b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot @@ -14,17 +14,22 @@ # limitations under the License. *** Settings *** -Documentation Smoketest ozone cluster startup -Library OperatingSystem +Documentation Test ozone admin datanode command Library BuiltIn Resource ../commonlib.robot Test Timeout 5 minutes -*** Variables *** - - *** Test Cases *** -Run list datanodes +List datanodes ${output} = Execute ozone admin datanode list Should contain ${output} Datanode: - Should contain ${output} Related pipelines: \ No newline at end of file + Should contain ${output} Related pipelines: + +Incomplete command + ${output} = Execute And Ignore Error ozone admin datanode + Should contain ${output} Incomplete command + Should contain ${output} list + +List datanodes on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose datanode list --scm unknown-host + Should contain ${output} Invalid host name diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot index b514ae7b07ad..3a97f8394977 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot @@ -14,21 +14,52 @@ # limitations under the License. *** Settings *** -Documentation Smoketest ozone cluster startup -Library OperatingSystem +Documentation Test ozone admin pipeline command Library BuiltIn Resource ../commonlib.robot Test Timeout 5 minutes *** Variables *** - +${PIPELINE} *** Test Cases *** -Run list pipeline +Create pipeline + ${output} = Execute ozone admin pipeline create + Should contain ${output} is created. Factor: ONE, Type: STAND_ALONE + ${pipeline} = Execute echo "${output}" | grep 'is created' | cut -f1 -d' ' | cut -f2 -d'=' + Set Suite Variable ${PIPELINE} ${pipeline} + +List pipelines ${output} = Execute ozone admin pipeline list - Should contain ${output} Type: - Should contain ${output} Factor:ONE, State: + Should contain ${output} Factor:ONE -Run create pipeline - ${output} = Execute ozone admin pipeline create - Should contain ${output} is created. 
Factor: ONE, Type: STAND_ALONE \ No newline at end of file +List pipelines with explicit host + ${output} = Execute ozone admin pipeline list --scm scm + Should contain ${output} Factor:ONE + +Deactivate pipeline + Execute ozone admin pipeline deactivate "${PIPELINE}" + ${output} = Execute ozone admin pipeline list | grep "${PIPELINE}" + Should contain ${output} DORMANT + +Activate pipeline + Execute ozone admin pipeline activate "${PIPELINE}" + ${output} = Execute ozone admin pipeline list | grep "${PIPELINE}" + Should contain ${output} OPEN + +Close pipeline + Execute ozone admin pipeline close "${PIPELINE}" + ${output} = Execute ozone admin pipeline list | grep "${PIPELINE}" + Should contain ${output} CLOSED + +Incomplete command + ${output} = Execute And Ignore Error ozone admin pipeline + Should contain ${output} Incomplete command + Should contain ${output} close + Should contain ${output} create + Should contain ${output} deactivate + Should contain ${output} list + +List pipelines on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose pipeline list --scm unknown-host + Should contain ${output} Invalid host name diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot new file mode 100644 index 000000000000..cef294f1e8d7 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test ozone admin replicationmanager command +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes + +*** Test Cases *** +Check replicationmanager + ${output} = Execute ozone admin replicationmanager status + Should contain ${output} ReplicationManager + Should contain ${output} Running + +Check replicationmanager with explicit host + ${output} = Execute ozone admin replicationmanager status --scm scm + Should contain ${output} ReplicationManager + Should contain ${output} Running + +Start replicationmanager + ${output} = Execute ozone admin replicationmanager start + Should contain ${output} Starting ReplicationManager + Wait Until Keyword Succeeds 30sec 5sec Execute ozone admin replicationmanager status | grep -q 'is Running' + +Stop replicationmanager + ${output} = Execute ozone admin replicationmanager stop + Should contain ${output} Stopping ReplicationManager + Wait Until Keyword Succeeds 30sec 5sec Execute ozone admin replicationmanager status | grep -q 'is Not Running' + +Incomplete command + ${output} = Execute And Ignore Error ozone admin replicationmanager + Should contain ${output} Incomplete command + Should contain ${output} start + Should contain ${output} stop + Should contain ${output} status + +Check replicationmanager on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose replicationmanager status --scm unknown-host + Should contain ${output} Invalid host name + diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot new file mode 100644 index 000000000000..114d846e0e07 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test ozone admin safemode command +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes + +*** Test Cases *** +Check safemode + ${output} = Execute ozone admin safemode status + Should contain ${output} SCM is out of safe mode + +Check safemode with explicit host + ${output} = Execute ozone admin safemode status --scm scm + Should contain ${output} SCM is out of safe mode + +Wait for safemode exit + ${output} = Execute ozone admin safemode wait -t 2 + Should contain ${output} SCM is out of safe mode + +Incomplete command + ${output} = Execute And Ignore Error ozone admin safemode + Should contain ${output} Incomplete command + Should contain ${output} status + Should contain ${output} exit + Should contain ${output} wait + +Check safemode on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose safemode status --scm unknown-host + Should contain ${output} Invalid host name + diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index e957f7f39b43..c536484e9b56 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -214,7 +214,7 @@ function ozonecmd_case OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" ;; admin) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.admin.OzoneAdmin + HADOOP_CLASSNAME=org.apache.hadoop.hdds.cli.OzoneAdmin OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" ;; debug) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java index c2ed02ee4914..d52dd33f570f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java @@ -130,7 +130,7 @@ public void testDatanodeCommand() { @Test public void testDatanodeInvalidParamCommand() { LOG.info("Running testDatanodeIncompleteCommand"); - String expectedError = "Unknown option: -invalidParam"; + String expectedError = "Unknown option: '-invalidParam'"; //executing 'ozone datanode -invalidParam' String[] args = new String[]{"-invalidParam"}; diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 661d5422303d..cc97e3bb4f36 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -118,8 +118,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.kohsuke.metainf-services metainf-services - 1.1 - true com.github.spotbugs diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java deleted file mode 100644 index 7f748aac3e3e..000000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.admin; - -import java.io.IOException; - -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.MutableConfigurationSource; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; -import org.apache.hadoop.hdds.scm.cli.ReplicationManagerCommands; -import org.apache.hadoop.hdds.scm.cli.SafeModeCommands; -import org.apache.hadoop.hdds.scm.cli.TopologySubcommand; -import org.apache.hadoop.hdds.scm.cli.container.ContainerCommands; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; -import org.apache.hadoop.hdds.scm.cli.datanode.DatanodeCommands; -import org.apache.hadoop.hdds.scm.cli.pipeline.PipelineCommands; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.util.NativeCodeLoader; - -import org.apache.commons.lang3.StringUtils; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import picocli.CommandLine; -import picocli.CommandLine.Option; - -/** - * Ozone Admin Command line tool. - */ -@CommandLine.Command(name = "ozone admin", - hidden = true, - description = "Developer tools for Ozone Admin operations", - versionProvider = HddsVersionProvider.class, - subcommands = { - SafeModeCommands.class, - ContainerCommands.class, - PipelineCommands.class, - DatanodeCommands.class, - TopologySubcommand.class, - ReplicationManagerCommands.class - }, - mixinStandardHelpOptions = true) -public class OzoneAdmin extends GenericCli implements WithScmClient { - - private OzoneConfiguration ozoneConf; - - @Option(names = {"--scm"}, description = "The destination scm (host:port)") - private String scm = ""; - - public OzoneAdmin() { - super(OzoneAdmin.class); - } - - public OzoneConfiguration getOzoneConf() { - if (ozoneConf == null) { - ozoneConf = createOzoneConfiguration(); - } - return ozoneConf; - } - - /** - * Main for the Ozone Admin shell Command handling. 
- * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { - LogManager.resetConfiguration(); - Logger.getRootLogger().setLevel(Level.INFO); - Logger.getRootLogger() - .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); - Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); - - new OzoneAdmin().run(argv); - } - - public ScmClient createScmClient() { - try { - OzoneConfiguration conf = createOzoneConfiguration(); - checkAndSetSCMAddressArg(conf); - - return new ContainerOperationClient(conf); - } catch (IOException ex) { - throw new IllegalArgumentException("Can't create SCM client", ex); - } - } - - private void checkAndSetSCMAddressArg(MutableConfigurationSource conf) { - if (StringUtils.isNotEmpty(scm)) { - conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm); - } - if (!HddsUtils.getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) { - - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY - + " should be set in ozone-site.xml or with the --scm option"); - } - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java index ba5fe8154aac..f9321ab5cf2f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java @@ -19,12 +19,12 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.admin.OzoneAdmin; import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index 9279d7f226d3..e520190e4c95 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -143,7 +143,8 @@ public List handleExecutionException(ExecutionException ex, cmd.parseWithHandlers(new CommandLine.RunLast(), exceptionHandler, args); }catch(Exception ex){ - Assert.assertTrue(ex.getMessage().contains(msg)); + Assert.assertTrue("Expected " + msg + ", but got: " + ex.getMessage(), + ex.getMessage().contains(msg)); } } @@ -225,7 +226,7 @@ public void genconfFailureByInvalidPath() throws Exception { public void genconfPathNotSpecified() throws Exception { File tempPath = getRandomTempDir(); String[] args = new String[]{}; - executeWithException(args, "Missing required parameter: "); + executeWithException(args, "Missing required parameter: ''"); } /** diff --git a/pom.xml b/pom.xml index f4b64145992c..b2984e9f2dbb 100644 --- a/pom.xml +++ b/pom.xml @@ -260,7 +260,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs info.picocli picocli - 
3.9.6 + 4.4.0 jdiff @@ -1332,6 +1332,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hsqldb ${hsqldb.version} + + org.kohsuke.metainf-services + metainf-services + 1.1 + true + io.dropwizard.metrics metrics-core From fc0bcdfdee0e9948c8db94192b04e4e9269aed31 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 27 Aug 2020 10:56:56 +0200 Subject: [PATCH 141/165] HDDS-4152. Archive container logs for kubernetes check (#1355) --- hadoop-ozone/dev-support/checks/kubernetes.sh | 2 +- .../src/main/k8s/examples/getting-started/test.sh | 2 ++ .../dist/src/main/k8s/examples/minikube/test.sh | 2 ++ .../dist/src/main/k8s/examples/ozone-dev/test.sh | 2 ++ hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh | 2 ++ hadoop-ozone/dist/src/main/k8s/examples/test-all.sh | 11 ++++++++--- hadoop-ozone/dist/src/main/k8s/examples/testlib.sh | 7 +++++++ 7 files changed, 24 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/dev-support/checks/kubernetes.sh b/hadoop-ozone/dev-support/checks/kubernetes.sh index a23aa839dad3..7f68da1884ca 100755 --- a/hadoop-ozone/dev-support/checks/kubernetes.sh +++ b/hadoop-ozone/dev-support/checks/kubernetes.sh @@ -31,6 +31,6 @@ mkdir -p "$REPORT_DIR" cd "$DIST_DIR/kubernetes/examples" || exit 1 ./test-all.sh RES=$? -cp result/* "$REPORT_DIR/" +cp -r result/* "$REPORT_DIR/" cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" exit $RES diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh index dabe394226bb..7d6bdfb981e0 100755 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh @@ -32,6 +32,8 @@ execute_robot_test scm-0 smoketest/basic/basic.robot combine_reports +get_logs + stop_k8s_env revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh index dabe394226bb..7d6bdfb981e0 100755 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh @@ -32,6 +32,8 @@ execute_robot_test scm-0 smoketest/basic/basic.robot combine_reports +get_logs + stop_k8s_env revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh index dabe394226bb..7d6bdfb981e0 100755 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh @@ -32,6 +32,8 @@ execute_robot_test scm-0 smoketest/basic/basic.robot combine_reports +get_logs + stop_k8s_env revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh index dabe394226bb..7d6bdfb981e0 100755 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh @@ -32,6 +32,8 @@ execute_robot_test scm-0 smoketest/basic/basic.robot combine_reports +get_logs + stop_k8s_env revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh index 1d763ffdddeb..ae810c9f679c 100755 --- a/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh +++ b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh @@ -31,13 +31,18 @@ RESULT=0 IFS=$'\n' # shellcheck disable=SC2044 for test in $(find 
"$SCRIPT_DIR" -name test.sh | grep "${OZONE_TEST_SELECTOR:-""}" |sort); do + TEST_DIR="$(dirname $test)" + TEST_NAME="$(basename "$TEST_DIR")" + echo "" - echo "#### Executing tests of $(dirname "$test") #####" + echo "#### Executing tests of ${TEST_DIR} #####" echo "" - TEST_DIR="$(dirname $test)" cd "$TEST_DIR" || continue ./test.sh - cp "$TEST_DIR"/result/output.xml "$ALL_RESULT_DIR"/"$(basename "$TEST_DIR")".xml + + cp "$TEST_DIR"/result/output.xml "$ALL_RESULT_DIR"/"${TEST_NAME}".xml + mkdir -p "$ALL_RESULT_DIR"/"${TEST_NAME}" + mv "$TEST_DIR"/logs/*log "$ALL_RESULT_DIR"/"${TEST_NAME}"/ done rebot -N "smoketests" -d "$ALL_RESULT_DIR/" "$ALL_RESULT_DIR/*.xml" diff --git a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh index d33194d09609..5dff2260ff05 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh +++ b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh @@ -77,6 +77,13 @@ start_k8s_env() { wait_for_startup } +get_logs() { + mkdir -p logs + for pod in $(kubectl get pods -o custom-columns=NAME:.metadata.name | tail -n +2); do + kubectl logs "${pod}" > "logs/pod-${pod}.log" + done +} + stop_k8s_env() { if [ ! "$KEEP_RUNNING" ]; then kubectl delete -f . From ecb6c253eef141bd9544c88fc6a6a6fe630ea87a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Thu, 27 Aug 2020 11:02:55 +0200 Subject: [PATCH 142/165] HDDS-4140. Auto-close /pending pull requests after 21 days of inactivity (#1344) --- .github/close-pending.sh | 41 ++++++++++++++++++++++++++++ .github/closing-message.txt | 7 +++++ .github/comment-commands/close.sh | 10 ++----- .github/comment-commands/pending.sh | 1 + .github/workflows/close-pending.yaml | 32 ++++++++++++++++++++++ 5 files changed, 83 insertions(+), 8 deletions(-) create mode 100755 .github/close-pending.sh create mode 100644 .github/closing-message.txt create mode 100644 .github/workflows/close-pending.yaml diff --git a/.github/close-pending.sh b/.github/close-pending.sh new file mode 100755 index 000000000000..ae05001d479e --- /dev/null +++ b/.github/close-pending.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +MESSAGE=$(cat $SCRIPT_DIR/closing-message.txt) + +while IFS= read -r number && + IFS= read -r title; do + echo "Closing PR ($number): $title" + curl -s -o /dev/null \ + -X POST \ + --data "$(jq --arg body "$MESSAGE" -n '{body: $body}')" \ + --header "authorization: Bearer $GITHUB_TOKEN" \ + --header 'content-type: application/json' \ + "https://api.github.com/repos/apache/hadoop-ozone/issues/$number/comments" + + curl -s -o /dev/null \ + -X PATCH \ + --data '{"state": "close"}' \ + --header "authorization: Bearer $GITHUB_TOKEN" \ + --header 'content-type: application/json' \ + "https://api.github.com/repos/apache/hadoop-ozone/pulls/$number" +done < <(curl -H "Content-Type: application/json" \ + --header "authorization: Bearer $GITHUB_TOKEN" \ + "https://api.github.com/search/issues?q=repo:apache/hadoop-ozone+type:pr+updated:<$(date -d "-21 days" +%Y-%m-%d)+label:pending+is:open" \ + | jq -r '.items[] | (.number,.title)') diff --git a/.github/closing-message.txt b/.github/closing-message.txt new file mode 100644 index 000000000000..261eac275e0e --- /dev/null +++ b/.github/closing-message.txt @@ -0,0 +1,7 @@ +Thank you very much for the patch. I am closing this PR __temporarily__ as there was no activity recently and it is waiting for response from its author. + +It doesn't mean that this PR is not important or ignored: feel free to reopen the PR at any time. + +It only means that attention of committers is not required. We prefer to keep the review queue clean. This ensures PRs in need of review are more visible, which results in faster feedback for all PRs. + +If you need ANY help to finish this PR, please [contact the community](https://github.com/apache/hadoop-ozone#contact) on the mailing list or the slack channel." diff --git a/.github/comment-commands/close.sh b/.github/comment-commands/close.sh index 4624bd869c3f..cb57b7192138 100755 --- a/.github/comment-commands/close.sh +++ b/.github/comment-commands/close.sh @@ -16,14 +16,8 @@ #doc: Close pending pull request temporary # shellcheck disable=SC2124 -MESSAGE="Thank you very much for the patch. I am closing this PR __temporarily__ as there was no -activity recently and it is waiting for response from its author. - -It doesn't mean that this PR is not important or ignored: feel free to reopen the PR at any time. - -It only means that attention of committers is not required. We prefer to keep the review queue clean. This ensures PRs in need of review are more visible, which results in faster feedback for all PRs. - -If you need ANY help to finish this PR, please [contact the community](https://github.com/apache/hadoop-ozone#contact) on the mailing list or the slack channel." +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +MESSAGE=$(cat $SCRIPT_DIR/../closing-message.txt) set +x #GITHUB_TOKEN curl -s -o /dev/null \ diff --git a/.github/comment-commands/pending.sh b/.github/comment-commands/pending.sh index 08947f636b8b..840ed82889d2 100755 --- a/.github/comment-commands/pending.sh +++ b/.github/comment-commands/pending.sh @@ -20,6 +20,7 @@ MESSAGE="Marking this issue as un-mergeable as requested. Please use \`/ready\` comment when it's resolved. +Please note that the PR will be closed after 21 days of inactivity from now. (But can be re-opened anytime later...) 
> $@" URL="$(jq -r '.issue.pull_request.url' "$GITHUB_EVENT_PATH")/reviews" diff --git a/.github/workflows/close-pending.yaml b/.github/workflows/close-pending.yaml new file mode 100644 index 000000000000..e335701e4cf2 --- /dev/null +++ b/.github/workflows/close-pending.yaml @@ -0,0 +1,32 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name: close-prs + +on: + schedule: + - cron: '0 0 * * *' + +jobs: + close-pending: + name: close-pending + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@master + - name: Execute close-pending script + if: github.repository == 'apache/hadoop-ozone' + run: ./.github/close-pending.sh + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From be111fc5ce17fbce31a69437da2370bf3ad74458 Mon Sep 17 00:00:00 2001 From: frischHWC <47358141+frischHWC@users.noreply.github.com> Date: Thu, 27 Aug 2020 11:06:29 +0200 Subject: [PATCH 143/165] HDDS-2411. add a datanode chunk validator fo datanode chunk generator (#1312) --- .../ozone/freon/DatanodeChunkValidator.java | 244 ++++++++++++++++++ .../org/apache/hadoop/ozone/freon/Freon.java | 1 + 2 files changed, 245 insertions(+) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java new file mode 100644 index 000000000000..7300fa5441eb --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.freon; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Callable; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.ozone.OzoneSecurityUtil; +import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.ozone.common.ChecksumData; + +import com.codahale.metrics.Timer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +/** + * Data validator of chunks to use pure datanode XCeiver interface. + */ +@Command(name = "dcv", + aliases = "datanode-chunk-validator", + description = "Validate generated Chunks are the same ", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true, + showDefaultValues = true) +public class DatanodeChunkValidator extends BaseFreonGenerator + implements Callable { + + private static final Logger LOG = + LoggerFactory.getLogger(DatanodeChunkValidator.class); + + @Option(names = {"-l", "--pipeline"}, + description = "Pipeline to use. 
By default the first RATIS/THREE " + + "pipeline will be used.", + defaultValue = "") + private String pipelineId; + + @Option(names = {"-s", "--size"}, + description = "Size of the generated chunks (in bytes)", + defaultValue = "1024") + private int chunkSize; + + private XceiverClientSpi xceiverClientSpi; + + private Timer timer; + + private ChecksumData checksumReference; + + private Checksum checksum; + + + @Override + public Void call() throws Exception { + + init(); + + OzoneConfiguration ozoneConf = createOzoneConfiguration(); + if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) { + throw new IllegalArgumentException( + "Datanode chunk validator is not supported in secure environment" + ); + } + + try (StorageContainerLocationProtocol scmLocationClient = + createStorageContainerLocationClient(ozoneConf)) { + List pipelines = scmLocationClient.listPipelines(); + Pipeline pipeline; + if (pipelineId != null && pipelineId.length() > 0) { + pipeline = pipelines.stream() + .filter(p -> p.getId().toString().equals(pipelineId)) + .findFirst() + .orElseThrow(() -> new IllegalArgumentException( + "Pipeline ID is defined, but there is no such pipeline: " + + pipelineId)); + + } else { + pipeline = pipelines.stream() + .filter(p -> p.getFactor() == HddsProtos.ReplicationFactor.THREE) + .findFirst() + .orElseThrow(() -> new IllegalArgumentException( + "Pipeline ID is NOT defined, and no pipeline " + + "has been found with factor=THREE")); + LOG.info("Using pipeline {}", pipeline.getId()); + } + + try (XceiverClientManager xceiverClientManager = + new XceiverClientManager(ozoneConf)) { + xceiverClientSpi = xceiverClientManager.acquireClient(pipeline); + + readReference(); + + timer = getMetrics().timer("chunk-validate"); + + runTests(this::validateChunk); + } + + } finally { + if (xceiverClientSpi != null) { + xceiverClientSpi.close(); + } + } + return null; + } + + /** + * Read a reference chunk using same name than one from the + * {@link org.apache.hadoop.ozone.freon.DatanodeChunkGenerator}. 
+ * @throws IOException + */ + private void readReference() throws IOException { + ContainerProtos.DatanodeBlockID blockId = + ContainerProtos.DatanodeBlockID.newBuilder() + .setContainerID(1L) + .setLocalID(0 % 20) + .setBlockCommitSequenceId(0) + .build(); + + // As a reference, the first one generated (at step 0) is taken + ContainerProtos.ChunkInfo chunkInfo = ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(getPrefix() + "_testdata_chunk_" + 0) + .setOffset((0 / 20) * chunkSize) + .setLen(chunkSize) + .setChecksumData( + ContainerProtos.ChecksumData.newBuilder() + .setBytesPerChecksum(4) + .setType(ContainerProtos.ChecksumType.CRC32) + .build()) + .build(); + + ContainerProtos.ReadChunkRequestProto.Builder readChunkRequest = + ContainerProtos.ReadChunkRequestProto + .newBuilder() + .setBlockID(blockId) + .setChunkData(chunkInfo); + + String id = xceiverClientSpi.getPipeline().getFirstNode().getUuidString(); + + ContainerProtos.ContainerCommandRequestProto.Builder builder = + ContainerProtos.ContainerCommandRequestProto + .newBuilder() + .setCmdType(ContainerProtos.Type.ReadChunk) + .setContainerID(blockId.getContainerID()) + .setDatanodeUuid(id) + .setReadChunk(readChunkRequest); + + ContainerProtos.ContainerCommandRequestProto request = builder.build(); + ContainerProtos.ContainerCommandResponseProto response = + xceiverClientSpi.sendCommand(request); + + checksum = new Checksum(ContainerProtos.ChecksumType.CRC32, chunkSize); + checksumReference = checksum.computeChecksum( + response.getReadChunk().getData().toByteArray() + ); + + } + + + private void validateChunk(long stepNo) throws Exception { + ContainerProtos.DatanodeBlockID blockId = + ContainerProtos.DatanodeBlockID.newBuilder() + .setContainerID(1L) + .setLocalID(stepNo % 20) + .setBlockCommitSequenceId(stepNo) + .build(); + + ContainerProtos.ChunkInfo chunkInfo = ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(getPrefix() + "_testdata_chunk_" + stepNo) + .setChecksumData( + ContainerProtos.ChecksumData.newBuilder() + .setBytesPerChecksum(4) + .setType(ContainerProtos.ChecksumType.CRC32) + .build()) + .setOffset((stepNo / 20) * chunkSize) + .setLen(chunkSize) + .build(); + + ContainerProtos.ReadChunkRequestProto.Builder readChunkRequest = + ContainerProtos.ReadChunkRequestProto + .newBuilder() + .setBlockID(blockId) + .setChunkData(chunkInfo); + + String id = xceiverClientSpi.getPipeline().getFirstNode().getUuidString(); + + ContainerProtos.ContainerCommandRequestProto.Builder builder = + ContainerProtos.ContainerCommandRequestProto + .newBuilder() + .setCmdType(ContainerProtos.Type.ReadChunk) + .setContainerID(blockId.getContainerID()) + .setDatanodeUuid(id) + .setReadChunk(readChunkRequest); + + ContainerProtos.ContainerCommandRequestProto request = builder.build(); + + timer.time(() -> { + try { + ContainerProtos.ContainerCommandResponseProto response = + xceiverClientSpi.sendCommand(request); + + ChecksumData checksumOfChunk = + checksum.computeChecksum( + response.getReadChunk().getData().toByteArray() + ); + + if (!checksumReference.equals(checksumOfChunk)) { + throw new IllegalStateException( + "Reference (=first) message checksum doesn't match " + + "with checksum of chunk " + + response.getReadChunk() + .getChunkData().getChunkName()); + } + } catch (IOException e) { + LOG.warn("Could not read chunk due to IOException: ", e); + } + }); + + } + + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index 20c9a1821554..1b03540019bc 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -48,6 +48,7 @@ SameKeyReader.class, S3KeyGenerator.class, DatanodeChunkGenerator.class, + DatanodeChunkValidator.class, DatanodeBlockPutter.class, FollowerAppendLogEntryGenerator.class, ChunkManagerDiskWrite.class, From 64c45002f2ffa8330c64724d8584b22be67ccf0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Thu, 27 Aug 2020 13:32:52 +0200 Subject: [PATCH 144/165] HDDS-4153. Increase default timeout in kubernetes tests (#1357) --- hadoop-ozone/dist/src/main/k8s/examples/testlib.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh index 5dff2260ff05..2442cb9a70fe 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh +++ b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh @@ -17,14 +17,14 @@ retry() { n=0 - until [ $n -ge 30 ] + until [ $n -ge 100 ] do "$@" && break n=$[$n+1] echo "$n '$@' is failed..." sleep ${RETRY_SLEEP:-3} done - if [ $n -eq 30 ]; then + if [ $n -eq 100 ]; then return 255 fi } From 47928f75d94cbdac5c3b2eead5d0978ddc7640c2 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 27 Aug 2020 13:33:55 +0200 Subject: [PATCH 145/165] HDDS-4149. Implement OzoneFileStatus#toString (#1356) --- .../ozone/om/helpers/OzoneFileStatus.java | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java index 2ff69c3a5ab2..ca861d3a4b42 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java @@ -130,4 +130,22 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(getTrimmedName()); } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()); + sb.append("{"); + if (keyInfo == null) { + sb.append(""); + } else { + sb.append(getTrimmedName()); + if (isDirectory) { + sb.append(" (dir)"); + } + } + sb.append("}"); + return sb.toString(); + } + } From 98a5405ac37b7f977c4449956f82bb6fab90847c Mon Sep 17 00:00:00 2001 From: Siyao Meng <50227127+smengcl@users.noreply.github.com> Date: Thu, 27 Aug 2020 14:25:05 -0700 Subject: [PATCH 146/165] HDDS-4109. Tests in TestOzoneFileSystem should use the existing MiniOzoneCluster (#1316) --- .../hadoop/fs/ozone/TestOzoneFileSystem.java | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index cdfe0cfdade3..4e728f7475ef 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -76,6 +76,9 @@ /** * Ozone file system tests that are not covered by contract tests. 
+ * + * Note: When adding new test(s), please append it in testFileSystem() to + * avoid test run time regression. */ @RunWith(Parameterized.class) public class TestOzoneFileSystem { @@ -107,7 +110,6 @@ public TestOzoneFileSystem(boolean setDefaultFs) { private int rootItemCount; private Trash trash; - @Test(timeout = 300_000) public void testCreateFileShouldCheckExistenceOfDirWithSameName() throws Exception { /* @@ -120,7 +122,6 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() * * Op 3. create file -> /d1/d2/d3 (d3 as a file inside /d1/d2) */ - setupOzoneFileSystem(); Path parent = new Path("/d1/d2/d3/d4/"); Path file1 = new Path(parent, "key1"); @@ -154,6 +155,9 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() } catch (FileAlreadyExistsException fae) { // ignore as its expected } + + // Cleanup + fs.delete(new Path("/d1/"), true); } /** @@ -161,14 +165,11 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() * directories. Has roughly the semantics of Unix @{code mkdir -p}. * {@link FileSystem#mkdirs(Path)} */ - @Test(timeout = 300_000) public void testMakeDirsWithAnExistingDirectoryPath() throws Exception { /* * Op 1. create file -> /d1/d2/d3/d4/k1 (d3 is a sub-dir inside /d1/d2) * Op 2. create dir -> /d1/d2 */ - setupOzoneFileSystem(); - Path parent = new Path("/d1/d2/d3/d4/"); Path file1 = new Path(parent, "key1"); try (FSDataOutputStream outputStream = fs.create(file1, false)) { @@ -178,11 +179,11 @@ public void testMakeDirsWithAnExistingDirectoryPath() throws Exception { Path subdir = new Path("/d1/d2/"); boolean status = fs.mkdirs(subdir); assertTrue("Shouldn't send error if dir exists", status); + // Cleanup + fs.delete(new Path("/d1"), true); } - @Test public void testCreateWithInvalidPaths() throws Exception { - setupOzoneFileSystem(); Path parent = new Path("../../../../../d1/d2/"); Path file1 = new Path(parent, "key1"); checkInvalidPath(file1); @@ -212,6 +213,11 @@ public void testFileSystem() throws Exception { testOzoneFsServiceLoader(); o3fs = (OzoneFileSystem) fs; + testCreateFileShouldCheckExistenceOfDirWithSameName(); + testMakeDirsWithAnExistingDirectoryPath(); + testCreateWithInvalidPaths(); + testListStatusWithIntermediateDir(); + testRenameToTrashDisabled(); testGetTrashRoots(); @@ -459,9 +465,7 @@ private void testListStatus() throws Exception { 3, fileStatuses.length); } - @Test public void testListStatusWithIntermediateDir() throws Exception { - setupOzoneFileSystem(); String keyName = "object-dir/object-name"; OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) From 5ea4994dc33ded270d0ec44856a4587ed672699f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Fri, 28 Aug 2020 01:48:37 +0200 Subject: [PATCH 147/165] HDDS-4145. 
Bump version to 1.1.0-SNAPSHOT on master (#1349) --- hadoop-hdds/client/pom.xml | 4 ++-- hadoop-hdds/common/pom.xml | 4 ++-- hadoop-hdds/config/pom.xml | 4 ++-- hadoop-hdds/container-service/pom.xml | 4 ++-- hadoop-hdds/docs/pom.xml | 4 ++-- hadoop-hdds/framework/pom.xml | 4 ++-- hadoop-hdds/hadoop-dependency-client/pom.xml | 4 ++-- hadoop-hdds/hadoop-dependency-server/pom.xml | 4 ++-- hadoop-hdds/hadoop-dependency-test/pom.xml | 4 ++-- hadoop-hdds/interface-admin/pom.xml | 4 ++-- hadoop-hdds/interface-client/pom.xml | 4 ++-- hadoop-hdds/interface-server/pom.xml | 4 ++-- hadoop-hdds/pom.xml | 4 ++-- hadoop-hdds/server-scm/pom.xml | 4 ++-- hadoop-hdds/test-utils/pom.xml | 4 ++-- hadoop-hdds/tools/pom.xml | 4 ++-- hadoop-ozone/client/pom.xml | 4 ++-- hadoop-ozone/common/pom.xml | 4 ++-- hadoop-ozone/csi/pom.xml | 4 ++-- hadoop-ozone/datanode/pom.xml | 4 ++-- hadoop-ozone/dist/pom.xml | 4 ++-- hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml | 4 ++-- hadoop-ozone/fault-injection-test/network-tests/pom.xml | 2 +- hadoop-ozone/fault-injection-test/pom.xml | 4 ++-- hadoop-ozone/insight/pom.xml | 4 ++-- hadoop-ozone/integration-test/pom.xml | 4 ++-- hadoop-ozone/interface-client/pom.xml | 4 ++-- hadoop-ozone/ozone-manager/pom.xml | 4 ++-- hadoop-ozone/ozonefs-common/pom.xml | 4 ++-- hadoop-ozone/ozonefs-hadoop2/pom.xml | 4 ++-- hadoop-ozone/ozonefs-hadoop3/pom.xml | 4 ++-- hadoop-ozone/ozonefs-shaded/pom.xml | 4 ++-- hadoop-ozone/ozonefs/pom.xml | 4 ++-- hadoop-ozone/pom.xml | 4 ++-- hadoop-ozone/recon-codegen/pom.xml | 2 +- hadoop-ozone/recon/pom.xml | 2 +- hadoop-ozone/s3gateway/pom.xml | 4 ++-- hadoop-ozone/tools/pom.xml | 4 ++-- pom.xml | 4 ++-- 39 files changed, 75 insertions(+), 75 deletions(-) diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index de6f8bf171bf..e7a8ebb73c90 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-client - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Client Library Apache Hadoop HDDS Client jar diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 1f0b3e61f73a..5784c68ec74e 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-common - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Common Apache Hadoop HDDS Common jar diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 105e8ac8f096..98ac44df7899 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-config - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Config Tools Apache Hadoop HDDS Config jar diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index 091e3bd10a72..b71f8e3471e7 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-container-service - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Container Service Apache Hadoop HDDS Container Service 
jar diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index dd4e5b74927b..404b6c2a253b 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-docs - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop HDDS/Ozone Documentation Apache Hadoop HDDS/Ozone Documentation jar diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 4b541b00d0df..91eb43c83465 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-server-framework - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Server Framework Apache Hadoop HDDS Server Framework diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 9a735df3e0ac..e20c478ab18c 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-hadoop-dependency-client - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Hadoop client dependencies Apache Hadoop HDDS Hadoop Client dependencies diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 1ddcfd33c83c..06f0f87da478 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-hadoop-dependency-server - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Hadoop server dependencies Apache Hadoop HDDS Hadoop Server dependencies diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index aae0f750a6d7..0dcbcc4fcaf0 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-hadoop-dependency-test - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Hadoop test dependencies Apache Hadoop HDDS Hadoop Test dependencies diff --git a/hadoop-hdds/interface-admin/pom.xml b/hadoop-hdds/interface-admin/pom.xml index 04965aa8cf9f..1666c6aa1f8b 100644 --- a/hadoop-hdds/interface-admin/pom.xml +++ b/hadoop-hdds/interface-admin/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-interface-admin - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Admin interface Apache Hadoop HDDS Admin Interface diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index fab8544e881e..31a396081288 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-interface-client - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Client interface Apache Hadoop HDDS Client 
Interface diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml index de6f98085bdb..82f19a40bd89 100644 --- a/hadoop-hdds/interface-server/pom.xml +++ b/hadoop-hdds/interface-server/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-interface-server - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Server interface Apache Hadoop HDDS Server Interface diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index e30d89ac07b4..26ca19a8cd4e 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-main-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Project Apache Hadoop HDDS pom diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 5f2d6bdea8ac..c007ef1ec214 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-server-scm - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Storage Container Manager Server Apache Hadoop HDDS SCM Server jar diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index 831a22b1275c..ceed47ff4513 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-test-utils - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Test Utils Apache Hadoop HDDS Test Utils jar diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index fcc553fb4306..dfda5a668fdd 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-tools - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Tools Apache Hadoop HDDS Tools jar diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 64b0aca7006c..c512a9005dd6 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-client - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Client Apache Hadoop Ozone Client jar diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 754351b46315..78eb2e71fa4b 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-common - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Common Apache Hadoop Ozone Common jar diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index 9fd212626e78..f678280d5888 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -20,10 +20,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-csi - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone CSI service Apache Hadoop Ozone CSI service jar diff --git 
a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 75eaa8ccadd9..1c8d81e52fcc 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-datanode Apache Hadoop Ozone Datanode jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 343bc9b0ccf9..a9d877170bff 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-dist Apache Hadoop Ozone Distribution jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index 941a9cf354c4..5523150b8585 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -20,9 +20,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-ozone-fault-injection-test org.apache.hadoop - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Mini Ozone Chaos Tests Apache Hadoop Ozone Mini Ozone Chaos Tests diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml index 49c509853cdf..7fe3790b08fc 100644 --- a/hadoop-ozone/fault-injection-test/network-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/network-tests/pom.xml @@ -20,7 +20,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-fault-injection-test - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-network-tests Apache Hadoop Ozone Network Tests diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index 99b396784a1b..bf933a3b355f 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -20,10 +20,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-fault-injection-test - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Fault Injection Tests Apache Hadoop Ozone Fault Injection Tests pom diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index f40175099922..090aecad341c 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-insight - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Insight Tool Apache Hadoop Ozone Insight Tool jar diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 622a4077da0c..ebfe1c0057c8 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-integration-test - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Integration Tests Apache Hadoop Ozone Integration Tests jar diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index 9a5c5abc7d05..dc6de8c51331 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone 
- 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-interface-client - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Client interface Apache Hadoop Ozone Client Interface jar diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 31ffb58eecd7..d01569ae0987 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-ozone-manager - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Manager Server Apache Hadoop Ozone Manager Server jar diff --git a/hadoop-ozone/ozonefs-common/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml index d636b1f2302d..968262fa6cb4 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem-common Apache Hadoop Ozone FileSystem Common jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index 2de88531b8e0..60bda723681f 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem-hadoop2 Apache Hadoop Ozone FS Hadoop 2.x compatibility jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml index 8bce6fd82017..bad9c41f5737 100644 --- a/hadoop-ozone/ozonefs-hadoop3/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem-hadoop3 Apache Hadoop Ozone FS Hadoop 3.x compatibility jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index da8ffc8ea824..2747229d0a23 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem-shaded Apache Hadoop Ozone FileSystem Shaded jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index b758b42e80ee..849b3972f7cf 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem Apache Hadoop Ozone FileSystem jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 73bce82e846c..6f4b2b2e82ea 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -16,10 +16,10 @@ org.apache.hadoop hadoop-main-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Project Apache Hadoop Ozone pom diff --git a/hadoop-ozone/recon-codegen/pom.xml b/hadoop-ozone/recon-codegen/pom.xml index 9b8780bbff37..917d7a72f9bf 100644 --- a/hadoop-ozone/recon-codegen/pom.xml +++ b/hadoop-ozone/recon-codegen/pom.xml @@ -18,7 +18,7 @@ hadoop-ozone org.apache.hadoop - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT 4.0.0 hadoop-ozone-reconcodegen diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index 6d94cc5b2919..40d0d911ecbf 100644 --- 
a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -18,7 +18,7 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Recon 4.0.0 diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 337fedcf2be3..4a62fc72dceb 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-s3gateway Apache Hadoop Ozone S3 Gateway jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index cc97e3bb4f36..0fbc7f1a477c 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-tools - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Tools Apache Hadoop Ozone Tools jar diff --git a/pom.xml b/pom.xml index b2984e9f2dbb..eca71e9e1333 100644 --- a/pom.xml +++ b/pom.xml @@ -18,7 +18,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 4.0.0 org.apache.hadoop hadoop-main-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Main Apache Hadoop Ozone Main pom @@ -73,7 +73,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${ozone.version} - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Denali ${hdds.version} ${ozone.version} From d9d56f99b3f1d2eca436e36fe31ac4f302b35ec6 Mon Sep 17 00:00:00 2001 From: maobaolong Date: Fri, 28 Aug 2020 07:49:34 +0800 Subject: [PATCH 148/165] HDDS-4146. Show the ScmId and ClusterId in the scm web ui. (#1350) --- .../apache/hadoop/hdds/scm/server/SCMMXBean.java | 4 ++++ .../hdds/scm/server/StorageContainerManager.java | 8 ++++++++ .../main/resources/webapps/scm/scm-overview.html | 14 ++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java index f0a497ad375d..f10a544f8943 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java @@ -66,4 +66,8 @@ public interface SCMMXBean extends ServiceRuntimeInfo { Map getContainerStateCount(); Map getRuleStatusMetrics(); + + String getScmId(); + + String getClusterId(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 352e34abea96..3cf12e75d235 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -1141,4 +1141,12 @@ public Map getRuleStatusMetrics() { public PipelineChoosePolicy getPipelineChoosePolicy() { return this.pipelineChoosePolicy; } + + public String getScmId() { + return getScmStorageConfig().getScmId(); + } + + public String getClusterId() { + return getScmStorageConfig().getClusterID(); + } } diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index efed59d53e8b..4e900bbe3785 100644 --- 
a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -14,6 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. --> +

+<h2>SCM Information</h2>
+<table>
+  <tbody>
+  <tr>
+    <td>Scm Id:</td>
+    <td>{{$ctrl.overview.jmx.ScmId}}</td>
+  </tr>
+  <tr>
+    <td>Cluster Id:</td>
+    <td>{{$ctrl.overview.jmx.ClusterId}}</td>
+  </tr>
+  </tbody>
+</table>
+
 <h2>Node counts</h2>
 <tr><td>{{typestat.key}}</td><td>{{typestat.value}}</td></tr>

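The two MXBean getters added above are what make ScmId and ClusterId visible to the overview page: the Hadoop JMX servlet serialises every registered MXBean attribute to JSON, and the AngularJS template binds them as $ctrl.overview.jmx.ScmId and $ctrl.overview.jmx.ClusterId. A minimal in-process sketch of reading the same attributes is shown below; the ObjectName string is an assumption for illustration, not something fixed by this patch.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public final class ScmInfoJmxReader {
      private ScmInfoJmxReader() {
      }

      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Assumed registration name; SCM registers its SCMMXBean through
        // Hadoop's MBeans.register(...), so the real ObjectName may differ.
        ObjectName scmInfo = new ObjectName(
            "Hadoop:service=StorageContainerManager,"
                + "name=StorageContainerManagerInfo");

        // The new getters appear as plain String attributes, which is exactly
        // what the /jmx servlet exposes to scm-overview.html.
        String scmId = (String) mbs.getAttribute(scmInfo, "ScmId");
        String clusterId = (String) mbs.getAttribute(scmInfo, "ClusterId");
        System.out.println("ScmId=" + scmId + " ClusterId=" + clusterId);
      }
    }

Over HTTP the same values can be fetched from the SCM web UI's /jmx servlet, which is how the page above populates its table without needing any extra endpoint.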
From fe00ba93c4c6a2eabfc12945203c559ba50f4318 Mon Sep 17 00:00:00 2001 From: maobaolong Date: Fri, 28 Aug 2020 14:39:44 +0800 Subject: [PATCH 149/165] HDDS-4137. Turn on the verbose mode of safe mode check on testlib (#1343) --- hadoop-ozone/dist/src/main/compose/testlib.sh | 4 ++-- .../dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh | 1 + .../dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index b000c913c1f7..228572fe2c73 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -63,7 +63,7 @@ find_tests(){ ## @description wait until safemode exit (or 180 seconds) wait_for_safemode_exit(){ # version-dependent - : ${OZONE_ADMIN_COMMAND:=admin} + : ${OZONE_SAFEMODE_STATUS_COMMAND:=ozone admin safemode status --verbose} #Reset the timer SECONDS=0 @@ -72,7 +72,7 @@ wait_for_safemode_exit(){ while [[ $SECONDS -lt 180 ]]; do #This line checks the safemode status in scm - local command="ozone ${OZONE_ADMIN_COMMAND} safemode status" + local command="${OZONE_SAFEMODE_STATUS_COMMAND}" if [[ "${SECURITY_ENABLED}" == 'true' ]]; then status=$(docker-compose exec -T scm bash -c "kinit -k HTTP/scm@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab && $command" || true) else diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh index 68f6c75764df..667ce959026a 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh @@ -16,3 +16,4 @@ # limitations under the License. export OZONE_ADMIN_COMMAND=scmcli +export OZONE_SAFEMODE_STATUS_COMMAND='ozone scmcli safemode status' diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh index bab1dba91baf..3ff23e0441bc 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh @@ -16,3 +16,4 @@ # limitations under the License. export OZONE_ADMIN_COMMAND=admin +export OZONE_SAFEMODE_STATUS_COMMAND='ozone admin safemode status --verbose' From cc6be920a81fc97b9da91370de8805ef2eb46255 Mon Sep 17 00:00:00 2001 From: Siyao Meng <50227127+smengcl@users.noreply.github.com> Date: Fri, 28 Aug 2020 09:15:15 -0700 Subject: [PATCH 150/165] HDDS-4147. 
Add OFS to FileSystem META-INF (#1352) --- .../dist/src/main/compose/ozone-om-ha-s3/docker-config | 1 - .../dist/src/main/compose/ozone-om-ha/docker-config | 1 - hadoop-ozone/dist/src/main/compose/ozone/docker-config | 2 -- .../src/main/compose/ozonesecure-om-ha/docker-config | 1 - .../dist/src/main/compose/ozonesecure/docker-config | 2 -- .../META-INF/services/org.apache.hadoop.fs.FileSystem | 1 + .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java | 9 ++------- .../fs/ozone/contract/rooted/RootedOzoneContract.java | 7 +------ .../org/apache/hadoop/ozone/shell/TestOzoneShellHA.java | 3 +-- .../META-INF/services/org.apache.hadoop.fs.FileSystem | 1 + .../META-INF/services/org.apache.hadoop.fs.FileSystem | 1 + .../META-INF/services/org.apache.hadoop.fs.FileSystem | 1 + .../META-INF/services/org.apache.hadoop.fs.FileSystem | 1 + .../META-INF/services/org.apache.hadoop.fs.FileSystem | 1 + 14 files changed, 10 insertions(+), 22 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha-s3/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha-s3/docker-config index b835147ee261..4d47bf0b6f19 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha-s3/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha-s3/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem CORE-SITE.XML_fs.defaultFS=o3fs://bucket.volume.id1 OZONE-SITE.XML_ozone.om.service.ids=id1 OZONE-SITE.XML_ozone.om.nodes.id1=om1,om2,om3 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config index 4eb1c8a5b6b7..008c684aeb99 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem CORE-SITE.XML_fs.defaultFS=o3fs://bucket1.volume1.omservice OZONE-SITE.XML_ozone.om.service.ids=omservice OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config index 4d53acbad793..f100e823517a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem -CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.container.size=1GB diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config index e245b7e01998..c332448a9508 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem CORE-SITE.XML_fs.defaultFS=o3fs://bucket.volume.id1 OZONE-SITE.XML_ozone.om.service.ids=id1 OZONE-SITE.XML_ozone.om.internal.service.id=id1 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config index abb30453e9c7..30e1816ab8c1 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem -CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem OZONE-SITE.XML_ozone.om.volume.listall.allowed=false OZONE-SITE.XML_ozone.om.address=om diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java index cc316d0e3fc5..b812b4530d33 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java @@ -143,9 +143,7 @@ public static void init() throws Exception { conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); // Set the number of keys to be processed during batch operate. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); - // Note: FileSystem#loadFileSystems won't load OFS class due to META-INF - // hence this workaround. - conf.set("fs.ofs.impl", "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem"); + // fs.ofs.impl would be loaded from META-INF, no need to manually set it fs = FileSystem.get(conf); trash = new Trash(conf); ofs = (RootedOzoneFileSystem) fs; @@ -163,10 +161,7 @@ public static void teardown() { @Test public void testOzoneFsServiceLoader() throws IOException { OzoneConfiguration confTestLoader = new OzoneConfiguration(); - // Note: FileSystem#loadFileSystems won't load OFS class due to META-INF - // hence this workaround. 
- confTestLoader.set("fs.ofs.impl", - "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem"); + // fs.ofs.impl should be loaded from META-INF, no need to explicitly set it Assert.assertEquals(FileSystem.getFileSystemClass( OzoneConsts.OZONE_OFS_URI_SCHEME, confTestLoader), RootedOzoneFileSystem.class); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java index c9f063b37874..9eb43a0f7937 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java @@ -99,12 +99,7 @@ public FileSystem getTestFileSystem() throws IOException { OzoneConsts.OZONE_OFS_URI_SCHEME, cluster.getOzoneManager().getRpcPort()); getConf().set("fs.defaultFS", uri); - - // Note: FileSystem#loadFileSystems doesn't load OFS class because - // META-INF points to org.apache.hadoop.fs.ozone.OzoneFileSystem - getConf().set("fs.ofs.impl", - "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem"); - + // fs.ofs.impl should be loaded from META-INF, no need to explicitly set it copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY); copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); return FileSystem.get(getConf()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 45258db96287..513049d3a441 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -456,8 +456,7 @@ private OzoneConfiguration getClientConfForOFS( String hostPrefix, OzoneConfiguration configuration) { OzoneConfiguration clientConf = new OzoneConfiguration(configuration); - clientConf.set("fs.ofs.impl", - "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem"); + // fs.ofs.impl should be loaded from META-INF, no need to explicitly set it clientConf.set(FS_DEFAULT_NAME_KEY, hostPrefix); clientConf.setInt(FS_TRASH_INTERVAL_KEY, 60); return clientConf; diff --git a/hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. 
org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem From c72680d781c738e54330d22ad80b9a4367f74ebb Mon Sep 17 00:00:00 2001 From: maobaolong Date: Sat, 29 Aug 2020 03:31:09 +0800 Subject: [PATCH 151/165] HDDS-4151. 
Skip the inputstream while offset larger than zero in s3g (#1354) --- .../hadoop/ozone/client/io/OzoneInputStream.java | 5 +++++ .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 14 +++++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java index a69740f07952..14b28665bdb1 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java @@ -57,6 +57,11 @@ public int available() throws IOException { return inputStream.available(); } + @Override + public long skip(long n) throws IOException { + return inputStream.skip(n); + } + public InputStream getInputStream() { return inputStream; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 5502173dc790..f6655602d7d0 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -38,6 +38,7 @@ import javax.ws.rs.core.Response.ResponseBuilder; import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.StreamingOutput; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.time.Instant; @@ -277,7 +278,8 @@ public Response get( try (S3WrapperInputStream s3WrapperInputStream = new S3WrapperInputStream( key.getInputStream())) { - IOUtils.copyLarge(s3WrapperInputStream, dest, startOffset, + s3WrapperInputStream.seek(startOffset); + IOUtils.copyLarge(s3WrapperInputStream, dest, 0, copyLength, new byte[bufferSize]); } }; @@ -557,8 +559,14 @@ private Response createMultipartKey(String bucket, String key, long length, if (range != null) { RangeHeader rangeHeader = RangeHeaderParserUtil.parseRangeHeader(range, 0); - IOUtils.copyLarge(sourceObject, ozoneOutputStream, - rangeHeader.getStartOffset(), + final long skipped = + sourceObject.skip(rangeHeader.getStartOffset()); + if (skipped != rangeHeader.getStartOffset()) { + throw new EOFException( + "Bytes to skip: " + + rangeHeader.getStartOffset() + " actual: " + skipped); + } + IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0, rangeHeader.getEndOffset() - rangeHeader.getStartOffset()); } else { IOUtils.copy(sourceObject, ozoneOutputStream); From 4a63c53a82224f8c5a529b7924689047a6c9bb15 Mon Sep 17 00:00:00 2001 From: micah zhao Date: Sat, 29 Aug 2020 04:54:24 +0800 Subject: [PATCH 152/165] HDDS-3903. OzoneRpcClient support batch rename keys. 
(#1150) --- .../org/apache/hadoop/ozone/OzoneConsts.java | 2 + .../hadoop/ozone/client/OzoneBucket.java | 10 + .../ozone/client/protocol/ClientProtocol.java | 12 +- .../hadoop/ozone/client/rpc/RpcClient.java | 13 + .../java/org/apache/hadoop/ozone/OmUtils.java | 1 + .../apache/hadoop/ozone/audit/OMAction.java | 1 + .../ozone/om/exceptions/OMException.java | 5 +- .../hadoop/ozone/om/helpers/OmRenameKeys.java | 59 ++++ .../om/protocol/OzoneManagerProtocol.java | 9 + ...ManagerProtocolClientSideTranslatorPB.java | 32 +++ .../rpc/TestOzoneRpcClientAbstract.java | 106 ++++++- .../src/main/proto/OmClientProtocol.proto | 26 ++ .../apache/hadoop/ozone/om/OzoneManager.java | 12 +- .../ratis/utils/OzoneManagerRatisUtils.java | 3 + .../om/request/key/OMKeysRenameRequest.java | 271 ++++++++++++++++++ .../om/response/key/OMKeysRenameResponse.java | 80 ++++++ .../request/key/TestOMKeysRenameRequest.java | 160 +++++++++++ .../key/TestOMKeysRenameResponse.java | 131 +++++++++ 18 files changed, 919 insertions(+), 14 deletions(-) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRenameKeys.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 4b380948abd9..9854d40494be 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -293,6 +293,8 @@ private OzoneConsts() { public static final String MAX_PARTS = "maxParts"; public static final String S3_BUCKET = "s3Bucket"; public static final String S3_GETSECRET_USER = "S3GetSecretUser"; + public static final String RENAMED_KEYS_MAP = "renamedKeysMap"; + public static final String UNRENAMED_KEYS_MAP = "unRenamedKeysMap"; public static final String MULTIPART_UPLOAD_PART_NUMBER = "partNumber"; public static final String MULTIPART_UPLOAD_PART_NAME = "partName"; public static final String BUCKET_ENCRYPTION_KEY = "bucketEncryptionKey"; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 79712bbfddb2..d71e03c9b881 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -472,6 +472,16 @@ public void renameKey(String fromKeyName, String toKeyName) proxy.renameKey(volumeName, name, fromKeyName, toKeyName); } + /** + * Rename the key by keyMap, The key is fromKeyName and value is toKeyName. + * @param keyMap The key is original key name nad value is new key name. + * @throws IOException + */ + public void renameKeys(Map keyMap) + throws IOException { + proxy.renameKeys(volumeName, name, keyMap); + } + /** * Initiate multipart upload for a specified key. 
* @param keyName diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 9c662efbf000..1b8d93ac7258 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -314,7 +314,17 @@ void deleteKeys(String volumeName, String bucketName, * @throws IOException */ void renameKey(String volumeName, String bucketName, String fromKeyName, - String toKeyName) throws IOException; + String toKeyName) throws IOException; + + /** + * Renames existing keys within a bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyMap The key is original key name nad value is new key name. + * @throws IOException + */ + void renameKeys(String volumeName, String bucketName, + Map keyMap) throws IOException; /** * Returns list of Keys in {Volume/Bucket} that matches the keyPrefix, diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 46df61a4d5e8..d72d930e54f3 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -85,6 +85,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmPartInfo; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; @@ -760,6 +761,18 @@ public void renameKey(String volumeName, String bucketName, ozoneManagerClient.renameKey(keyArgs, toKeyName); } + @Override + public void renameKeys(String volumeName, String bucketName, + Map keyMap) throws IOException { + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + HddsClientUtils.checkNotNull(keyMap); + OmRenameKeys omRenameKeys = + new OmRenameKeys(volumeName, bucketName, keyMap, null); + ozoneManagerClient.renameKeys(omRenameKeys); + } + + @Override public List listKeys(String volumeName, String bucketName, String keyPrefix, String prevKey, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index bb9aec4748f2..93e0e7f7dec0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -246,6 +246,7 @@ public static boolean isReadOnly( case DeleteBucket: case CreateKey: case RenameKey: + case RenameKeys: case DeleteKey: case DeleteKeys: case CommitKey: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index 6b34e8180026..3480063d1323 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -32,6 +32,7 @@ public enum OMAction implements AuditAction { DELETE_BUCKET, DELETE_KEY, RENAME_KEY, + 
RENAME_KEYS, SET_OWNER, SET_QUOTA, UPDATE_VOLUME, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index 54b5458af08b..e08dccb6a5a1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -227,6 +227,9 @@ public enum ResultCodes { DETECTED_LOOP_IN_BUCKET_LINKS, - NOT_SUPPORTED_OPERATION + NOT_SUPPORTED_OPERATION, + + PARTIAL_RENAME + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRenameKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRenameKeys.java new file mode 100644 index 000000000000..d550817b6a25 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRenameKeys.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +import java.util.HashMap; +import java.util.Map; + +/** + * This class is used for rename keys. 
+ */ +public class OmRenameKeys { + + private String volume; + private String bucket; + private Map fromAndToKey = new HashMap<>(); + private Map fromKeyAndToKeyInfo = new HashMap<>(); + + public OmRenameKeys(String volume, String bucket, + Map fromAndToKey, + Map fromKeyAndToKeyInfo) { + this.volume = volume; + this.bucket = bucket; + this.fromAndToKey = fromAndToKey; + this.fromKeyAndToKeyInfo = fromKeyAndToKeyInfo; + } + + public String getVolume() { + return volume; + } + + public String getBucket() { + return bucket; + } + + public Map getFromAndToKey() { + return fromAndToKey; + } + + public Map getFromKeyAndToKeyInfo() { + return fromKeyAndToKeyInfo; + } + +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 9ae107b071ed..267ac89be03f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -39,6 +39,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -217,6 +218,14 @@ OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, */ void renameKey(OmKeyArgs args, String toKeyName) throws IOException; + /** + * Rename existing keys within a bucket. + * @param omRenameKeys Includes volume, bucket, and fromKey toKey name map + * and fromKey name toKey info Map. + * @throws IOException + */ + void renameKeys(OmRenameKeys omRenameKeys) throws IOException; + /** * Deletes an existing key. 
* diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 6afb28887dd7..506d84cbca19 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -21,6 +21,7 @@ import java.time.Instant; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import org.apache.hadoop.hdds.annotation.InterfaceAudience; @@ -44,6 +45,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -121,7 +123,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse; @@ -675,6 +680,33 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { return OmKeyInfo.getFromProtobuf(resp.getKeyInfo()); } + @Override + public void renameKeys(OmRenameKeys omRenameKeys) throws IOException { + + List renameKeyList = new ArrayList<>(); + for (Map.Entry< String, String> entry : + omRenameKeys.getFromAndToKey().entrySet()) { + RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() + .setFromKeyName(entry.getKey()) + .setToKeyName(entry.getValue()); + renameKeyList.add(renameKey.build()); + } + + RenameKeysArgs.Builder renameKeyArgs = RenameKeysArgs.newBuilder() + .setVolumeName(omRenameKeys.getVolume()) + .setBucketName(omRenameKeys.getBucket()) + .addAllRenameKeysMap(renameKeyList); + + RenameKeysRequest.Builder reqKeys = RenameKeysRequest.newBuilder() + .setRenameKeysArgs(renameKeyArgs.build()); + + OMRequest omRequest = createOMRequest(Type.RenameKeys) + .setRenameKeysRequest(reqKeys.build()) + .build(); + + handleError(submitRequest(omRequest)); + } + @Override public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 32b6bca6a5dd..45d07b097a38 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -110,6 +110,8 @@ import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; @@ -121,6 +123,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import org.junit.Test; /** @@ -1202,7 +1205,7 @@ public void testDeleteKey() Assert.assertEquals(keyName, key.getName()); bucket.deleteKey(keyName); - OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND, + OzoneTestUtils.expectOmException(KEY_NOT_FOUND, () -> bucket.getKey(keyName)); } @@ -1217,13 +1220,7 @@ public void testRenameKey() OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - OzoneOutputStream out = bucket.createKey(fromKeyName, - value.getBytes().length, STAND_ALONE, - ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - OzoneKey key = bucket.getKey(fromKeyName); - Assert.assertEquals(fromKeyName, key.getName()); + createTestKey(bucket, fromKeyName, value); // Rename to empty string should fail. 
OMException oe = null; @@ -1244,12 +1241,79 @@ public void testRenameKey() } catch (OMException e) { oe = e; } - Assert.assertEquals(ResultCodes.KEY_NOT_FOUND, oe.getResult()); + Assert.assertEquals(KEY_NOT_FOUND, oe.getResult()); - key = bucket.getKey(toKeyName); + OzoneKey key = bucket.getKey(toKeyName); Assert.assertEquals(toKeyName, key.getName()); } + @Test + public void testKeysRename() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName1 = "dir/file1"; + String keyName2 = "dir/file2"; + + String newKeyName1 = "dir/key1"; + String newKeyName2 = "dir/key2"; + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + createTestKey(bucket, keyName1, value); + createTestKey(bucket, keyName2, value); + + Map keyMap = new HashMap(); + keyMap.put(keyName1, newKeyName1); + keyMap.put(keyName2, newKeyName2); + bucket.renameKeys(keyMap); + + // new key should exist + Assert.assertEquals(newKeyName1, bucket.getKey(newKeyName1).getName()); + Assert.assertEquals(newKeyName2, bucket.getKey(newKeyName2).getName()); + + // old key should not exist + assertKeyRenamedEx(bucket, keyName1); + assertKeyRenamedEx(bucket, keyName2); + } + + @Test + public void testKeysRenameFail() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName1 = "dir/file1"; + String keyName2 = "dir/file2"; + + String newKeyName1 = "dir/key1"; + String newKeyName2 = "dir/key2"; + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + // Create only keyName1 to test the partial failure of renameKeys. + createTestKey(bucket, keyName1, value); + + Map keyMap = new HashMap(); + keyMap.put(keyName1, newKeyName1); + keyMap.put(keyName2, newKeyName2); + + try { + bucket.renameKeys(keyMap); + } catch (OMException ex) { + Assert.assertEquals(PARTIAL_RENAME, ex.getResult()); + } + + // newKeyName1 should exist + Assert.assertEquals(newKeyName1, bucket.getKey(newKeyName1).getName()); + // newKeyName2 should not exist + assertKeyRenamedEx(bucket, keyName2); + } + @Test public void testListVolume() throws IOException { String volBase = "vol-" + RandomStringUtils.randomNumeric(3); @@ -2685,6 +2749,28 @@ private void completeMultipartUpload(OzoneBucket bucket, String keyName, Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash()); } + private void createTestKey(OzoneBucket bucket, String keyName, + String keyValue) throws IOException { + OzoneOutputStream out = bucket.createKey(keyName, + keyValue.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); + out.write(keyValue.getBytes()); + out.close(); + OzoneKey key = bucket.getKey(keyName); + Assert.assertEquals(keyName, key.getName()); + } + + private void assertKeyRenamedEx(OzoneBucket bucket, String keyName) + throws Exception { + OMException oe = null; + try { + bucket.getKey(keyName); + } catch (OMException e) { + oe = e; + } + Assert.assertEquals(KEY_NOT_FOUND, oe.getResult()); + } + /** * Tests GDPR encryption/decryption. * 1. Create GDPR Enabled bucket. 
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 1b2075e17d9b..c6e2949122c1 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -60,6 +60,7 @@ enum Type { CommitKey = 36; AllocateBlock = 37; DeleteKeys = 38; + RenameKeys = 39; InitiateMultiPartUpload = 45; CommitMultiPartUpload = 46; @@ -126,6 +127,7 @@ message OMRequest { optional CommitKeyRequest commitKeyRequest = 36; optional AllocateBlockRequest allocateBlockRequest = 37; optional DeleteKeysRequest deleteKeysRequest = 38; + optional RenameKeysRequest renameKeysRequest = 39; optional MultipartInfoInitiateRequest initiateMultiPartUploadRequest = 45; optional MultipartCommitUploadPartRequest commitMultiPartUploadRequest = 46; @@ -198,6 +200,7 @@ message OMResponse { optional CommitKeyResponse commitKeyResponse = 36; optional AllocateBlockResponse allocateBlockResponse = 37; optional DeleteKeysResponse deleteKeysResponse = 38; + optional RenameKeysResponse renameKeysResponse = 39; optional MultipartInfoInitiateResponse initiateMultiPartUploadResponse = 45; optional MultipartCommitUploadPartResponse commitMultiPartUploadResponse = 46; @@ -308,6 +311,9 @@ enum Status { DETECTED_LOOP_IN_BUCKET_LINKS = 63; NOT_SUPPORTED_OPERATION = 64; + + PARTIAL_RENAME = 65; + } /** @@ -839,6 +845,26 @@ message LookupKeyResponse { optional uint64 openVersion = 4; } +message RenameKeysRequest { + required RenameKeysArgs renameKeysArgs = 1; +} + +message RenameKeysArgs { + required string volumeName = 1; + required string bucketName = 2; + repeated RenameKeysMap renameKeysMap = 3; +} + +message RenameKeysMap { + required string fromKeyName = 1; + required string toKeyName = 2; +} + +message RenameKeysResponse{ + repeated RenameKeysMap unRenamedKeys = 1; + optional bool status = 2; +} + message RenameKeyRequest{ required KeyArgs keyArgs = 1; required string toKeyName = 2; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 9c22f2df1eb9..e43524aee2a7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -48,7 +48,6 @@ import java.util.concurrent.TimeUnit; import com.google.common.base.Optional; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -125,6 +124,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -182,7 +182,7 @@ import com.google.protobuf.BlockingService; import com.google.protobuf.ProtocolMessageEnum; import org.apache.commons.lang3.StringUtils; - +import org.apache.commons.lang3.tuple.Pair; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; @@ -2244,6 +2244,14 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { } } + + @Override + public void renameKeys(OmRenameKeys omRenameKeys) + throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented. As write requests use a new approach"); + } + @Override public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { Preconditions.checkNotNull(args); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index ddb6841ae31e..681c0da87e6d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest; import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest; import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest; +import org.apache.hadoop.ozone.om.request.key.OMKeysRenameRequest; import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest; @@ -129,6 +130,8 @@ public static OMClientRequest createClientRequest(OMRequest omRequest) { return new OMKeysDeleteRequest(omRequest); case RenameKey: return new OMKeyRenameRequest(omRequest); + case RenameKeys: + return new OMKeysRenameRequest(omRequest); case CreateDirectory: return new OMDirectoryCreateRequest(omRequest); case CreateFile: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java new file mode 100644 index 000000000000..dbcde6d4ce14 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java @@ -0,0 +1,271 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeysRenameResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_RENAME; +import static org.apache.hadoop.ozone.OzoneConsts.RENAMED_KEYS_MAP; +import static org.apache.hadoop.ozone.OzoneConsts.UNRENAMED_KEYS_MAP; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles rename keys request. 
+ */ +public class OMKeysRenameRequest extends OMKeyRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeysRenameRequest.class); + + public OMKeysRenameRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + + RenameKeysRequest renameKeysRequest = getOmRequest().getRenameKeysRequest(); + RenameKeysArgs renameKeysArgs = renameKeysRequest.getRenameKeysArgs(); + String volumeName = renameKeysArgs.getVolumeName(); + String bucketName = renameKeysArgs.getBucketName(); + OMClientResponse omClientResponse = null; + + List unRenamedKeys = new ArrayList<>(); + + // fromKeyName -> toKeyName + Map renamedKeys = new HashMap<>(); + + Map fromKeyAndToKeyInfo = new HashMap<>(); + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumKeyRenames(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + IOException exception = null; + OmKeyInfo fromKeyValue = null; + Result result = null; + Map auditMap = new LinkedHashMap<>(); + String fromKeyName = null; + String toKeyName = null; + boolean acquiredLock = false; + boolean renameStatus = true; + + try { + ResolvedBucket bucket = ozoneManager.resolveBucketLink( + Pair.of(volumeName, bucketName)); + bucket.audit(auditMap); + volumeName = bucket.realVolume(); + bucketName = bucket.realBucket(); + acquiredLock = + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + + for (RenameKeysMap renameKey : renameKeysArgs.getRenameKeysMapList()) { + + fromKeyName = renameKey.getFromKeyName(); + toKeyName = renameKey.getToKeyName(); + RenameKeysMap.Builder unRenameKey = RenameKeysMap.newBuilder(); + + if (toKeyName.length() == 0 || fromKeyName.length() == 0) { + renameStatus = false; + unRenamedKeys.add( + unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName) + .build()); + LOG.error("Key name is empty fromKeyName {} toKeyName {}", + fromKeyName, toKeyName); + continue; + } + + try { + // check Acls to see if user has access to perform delete operation + // on old key and create operation on new key + checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName, + IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); + checkKeyAcls(ozoneManager, volumeName, bucketName, toKeyName, + IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); + } catch (Exception ex) { + renameStatus = false; + unRenamedKeys.add( + unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName) + .build()); + LOG.error("Acl check failed for fromKeyName {} toKeyName {}", + fromKeyName, toKeyName, ex); + continue; + } + + // Check if toKey exists + String fromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, + fromKeyName); + String toKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName); + OmKeyInfo toKeyValue = omMetadataManager.getKeyTable().get(toKey); + + if (toKeyValue != null) { + + renameStatus = false; + unRenamedKeys.add( + unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName) + .build()); + LOG.error("Received a request name of new key {} already exists", + toKeyName); + } + + // fromKeyName should exist + fromKeyValue = omMetadataManager.getKeyTable().get(fromKey); + if 
(fromKeyValue == null) { + renameStatus = false; + unRenamedKeys.add( + unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName) + .build()); + LOG.error("Received a request to rename a Key does not exist {}", + fromKey); + continue; + } + + fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + fromKeyValue.setKeyName(toKeyName); + + //Set modification time + fromKeyValue.setModificationTime(Time.now()); + + // Add to cache. + // fromKey should be deleted, toKey should be added with newly updated + // omKeyInfo. + Table keyTable = omMetadataManager.getKeyTable(); + keyTable.addCacheEntry(new CacheKey<>(fromKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + keyTable.addCacheEntry(new CacheKey<>(toKey), + new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex)); + renamedKeys.put(fromKeyName, toKeyName); + fromKeyAndToKeyInfo.put(fromKeyName, fromKeyValue); + } + + OmRenameKeys newOmRenameKeys = + new OmRenameKeys(volumeName, bucketName, null, fromKeyAndToKeyInfo); + omClientResponse = new OMKeysRenameResponse(omResponse + .setRenameKeysResponse(RenameKeysResponse.newBuilder() + .setStatus(renameStatus) + .addAllUnRenamedKeys(unRenamedKeys)) + .setStatus(renameStatus ? OK : PARTIAL_RENAME) + .setSuccess(renameStatus).build(), + newOmRenameKeys); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + createErrorOMResponse(omResponse, ex); + + omResponse.setRenameKeysResponse(RenameKeysResponse.newBuilder() + .setStatus(renameStatus).addAllUnRenamedKeys(unRenamedKeys).build()); + omClientResponse = new OMKeysRenameResponse(omResponse.build()); + + } finally { + addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, + omDoubleBufferHelper); + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } + } + + auditMap = buildAuditMap(auditMap, renamedKeys, unRenamedKeys); + auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEYS, auditMap, + exception, getOmRequest().getUserInfo())); + + switch (result) { + case SUCCESS: + LOG.debug("Rename Keys is successfully completed for auditMap:{}.", + auditMap); + break; + case FAILURE: + ozoneManager.getMetrics().incNumKeyRenameFails(); + LOG.error("Rename keys failed for auditMap:{}.", auditMap); + break; + default: + LOG.error("Unrecognized Result for OMKeysRenameRequest: {}", + renameKeysRequest); + } + + return omClientResponse; + } + + /** + * Build audit map for RenameKeys request. 
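+ * Successfully renamed pairs are recorded under RENAMED_KEYS_MAP and the
+ * failed pairs under UNRENAMED_KEYS_MAP, so both sets show up in the audit
+ * log entry.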
+ * + * @param auditMap + * @param renamedKeys + * @param unRenameKeys + * @return + */ + private Map buildAuditMap(Map auditMap, + Map renamedKeys, + List unRenameKeys) { + Map unRenameKeysMap = new HashMap<>(); + for (RenameKeysMap renameKeysMap : unRenameKeys) { + unRenameKeysMap.put(renameKeysMap.getFromKeyName(), + renameKeysMap.getToKeyName()); + } + auditMap.put(RENAMED_KEYS_MAP, renamedKeys.toString()); + auditMap.put(UNRENAMED_KEYS_MAP, unRenameKeysMap.toString()); + return auditMap; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java new file mode 100644 index 000000000000..a9ff7ada1bd7 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Response for RenameKeys request. + */ +@CleanupTableInfo(cleanupTables = {KEY_TABLE}) +public class OMKeysRenameResponse extends OMClientResponse { + + private OmRenameKeys omRenameKeys; + + public OMKeysRenameResponse(@Nonnull OMResponse omResponse, + OmRenameKeys omRenameKeys) { + super(omResponse); + this.omRenameKeys = omRenameKeys; + } + + + /** + * For when the request is not successful or it is a replay transaction. + * For a successful request, the other constructor should be used. + */ + public OMKeysRenameResponse(@Nonnull OMResponse omResponse) { + super(omResponse); + checkStatusNotOK(); + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String volumeName = omRenameKeys.getVolume(); + String bucketName = omRenameKeys.getBucket(); + + for (Map.Entry< String, OmKeyInfo> entry : + omRenameKeys.getFromKeyAndToKeyInfo().entrySet()) { + String fromKeyName = entry.getKey(); + OmKeyInfo newKeyInfo = entry.getValue(); + String toKeyName = newKeyInfo.getKeyName(); + + omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, + omMetadataManager + .getOzoneKey(volumeName, bucketName, fromKeyName)); + omMetadataManager.getKeyTable().putWithBatch(batchOperation, + omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName), + newKeyInfo); + } + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java new file mode 100644 index 000000000000..947590660a84 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +/** + * Tests RenameKey request. + */ +public class TestOMKeysRenameRequest extends TestOMKeyRequest { + + private int count = 10; + private String parentDir = "/test"; + + @Test + public void testKeysRenameRequest() throws Exception { + + OMRequest modifiedOmRequest = createRenameKeyRequest(false); + + OMKeysRenameRequest omKeysRenameRequest = + new OMKeysRenameRequest(modifiedOmRequest); + + OMClientResponse omKeysRenameResponse = + omKeysRenameRequest.validateAndUpdateCache(ozoneManager, 100L, + ozoneManagerDoubleBufferHelper); + + Assert.assertTrue(omKeysRenameResponse.getOMResponse().getSuccess()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omKeysRenameResponse.getOMResponse().getStatus()); + + for (int i = 0; i < count; i++) { + // Original key should be deleted, toKey should exist. + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get( + omMetadataManager.getOzoneKey(volumeName, bucketName, + parentDir.concat("/key" + i))); + Assert.assertNull(omKeyInfo); + + omKeyInfo = + omMetadataManager.getKeyTable().get(omMetadataManager.getOzoneKey( + volumeName, bucketName, parentDir.concat("/newKey" + i))); + Assert.assertNotNull(omKeyInfo); + } + + } + + @Test + public void testKeysRenameRequestFail() throws Exception { + OMRequest modifiedOmRequest = createRenameKeyRequest(true); + + OMKeysRenameRequest omKeysRenameRequest = + new OMKeysRenameRequest(modifiedOmRequest); + + OMClientResponse omKeysRenameResponse = + omKeysRenameRequest.validateAndUpdateCache(ozoneManager, 100L, + ozoneManagerDoubleBufferHelper); + + Assert.assertFalse(omKeysRenameResponse.getOMResponse().getSuccess()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.PARTIAL_RENAME, + omKeysRenameResponse.getOMResponse().getStatus()); + + // The keys(key0 to key9)can be renamed success. + for (int i = 0; i < count; i++) { + // Original key should be deleted, toKey should exist. + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get( + omMetadataManager.getOzoneKey(volumeName, bucketName, + parentDir.concat("/key" + i))); + Assert.assertNull(omKeyInfo); + + omKeyInfo = + omMetadataManager.getKeyTable().get(omMetadataManager.getOzoneKey( + volumeName, bucketName, parentDir.concat("/newKey" + i))); + Assert.assertNotNull(omKeyInfo); + } + + // The key not rename should be in unRenamedKeys. 
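+ // The extra pair added when isIllegal is true uses fromKeyName "testKey",
+ // which was never written to the key table, so that single rename fails and
+ // the pair is reported back in unRenamedKeys.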
+ RenameKeysMap unRenamedKeys = omKeysRenameResponse.getOMResponse() + .getRenameKeysResponse().getUnRenamedKeys(0); + Assert.assertEquals("testKey", unRenamedKeys.getFromKeyName()); + } + + /** + * Create OMRequest which encapsulates RenameKeyRequest. + * + * @return OMRequest + */ + private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { + + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + List renameKeyList = new ArrayList<>(); + + for (int i = 0; i < count; i++) { + String key = parentDir.concat("/key" + i); + String toKey = parentDir.concat("/newKey" + i); + TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, + parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, omMetadataManager); + + RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() + .setFromKeyName(key) + .setToKeyName(toKey); + renameKeyList.add(renameKey.build()); + } + + + // Generating illegal data causes Rename Keys to fail. + if (isIllegal) { + RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() + .setFromKeyName("testKey") + .setToKeyName("toKey"); + renameKeyList.add(renameKey.build()); + } + + RenameKeysArgs.Builder renameKeyArgs = RenameKeysArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .addAllRenameKeysMap(renameKeyList); + + RenameKeysRequest.Builder renameKeysReq = RenameKeysRequest.newBuilder() + .setRenameKeysArgs(renameKeyArgs.build()); + + return OMRequest.newBuilder() + .setClientId(UUID.randomUUID().toString()) + .setRenameKeysRequest(renameKeysReq.build()) + .setCmdType(OzoneManagerProtocolProtos.Type.RenameKeys).build(); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java new file mode 100644 index 000000000000..a9db1b839758 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; + +/** + * Tests OMKeyRenameResponse. + */ +public class TestOMKeysRenameResponse extends TestOMKeyResponse { + private OmRenameKeys omRenameKeys; + private int count = 10; + private String parentDir = "/test"; + + @Test + public void testKeysRenameResponse() throws Exception { + + createPreRequisities(); + + OMResponse omResponse = OMResponse.newBuilder() + .setRenameKeysResponse(RenameKeysResponse.getDefaultInstance()) + .setStatus(Status.OK).setCmdType(Type.RenameKeys).build(); + + OMKeysRenameResponse omKeysRenameResponse = new OMKeysRenameResponse( + omResponse, omRenameKeys); + + omKeysRenameResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + for (int i = 0; i < count; i++) { + String key = parentDir.concat("/key" + i); + String toKey = parentDir.concat("/newKey" + i); + key = omMetadataManager.getOzoneKey(volumeName, bucketName, key); + toKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKey); + Assert.assertFalse(omMetadataManager.getKeyTable().isExist(key)); + Assert.assertTrue(omMetadataManager.getKeyTable().isExist(toKey)); + } + } + + @Test + public void testKeysRenameResponseFail() throws Exception { + + createPreRequisities(); + + OMResponse omResponse = OMResponse.newBuilder().setRenameKeysResponse( + RenameKeysResponse.getDefaultInstance()) + .setStatus(Status.KEY_NOT_FOUND) + .setCmdType(Type.RenameKeys) + .build(); + + OMKeysRenameResponse omKeyRenameResponse = new OMKeysRenameResponse( + omResponse, omRenameKeys); + + omKeyRenameResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + for (int i = 0; i < count; i++) { + String key = parentDir.concat("/key" + i); + String toKey = parentDir.concat("/newKey" + i); + key = omMetadataManager.getOzoneKey(volumeName, bucketName, key); + toKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKey); + // As omResponse has error, it is a no-op. So, no changes should happen. 
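+ // The batch is only applied for an OK response, so with KEY_NOT_FOUND the
+ // original keys are still present and the new keys were never written.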
+ Assert.assertTrue(omMetadataManager.getKeyTable().isExist(key)); + Assert.assertFalse(omMetadataManager.getKeyTable().isExist(toKey)); + } + + } + + private void createPreRequisities() throws Exception { + + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + Map formAndToKeyInfo = new HashMap<>(); + + for (int i = 0; i < count; i++) { + String key = parentDir.concat("/key" + i); + String toKey = parentDir.concat("/newKey" + i); + TestOMRequestUtils.addKeyToTable(false, volumeName, + bucketName, parentDir.concat("/key" + i), 0L, RATIS, THREE, + omMetadataManager); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get( + omMetadataManager.getOzoneKey(volumeName, bucketName, key)); + omKeyInfo.setKeyName(toKey); + formAndToKeyInfo.put(key, omKeyInfo); + } + omRenameKeys = + new OmRenameKeys(volumeName, bucketName, null, formAndToKeyInfo); + + } +} From 3ca3278e6179cbf93c3ac19ffc7ec4ca644ebeb9 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 31 Aug 2020 08:41:20 +0200 Subject: [PATCH 153/165] HDDS-4077. Incomplete OzoneFileSystem statistics (#1329) --- .../fs/ozone/TestOzoneFileInterfaces.java | 15 ++-- .../hadoop/fs/ozone/BasicOzoneFileSystem.java | 71 +++++++++++++++++++ .../fs/ozone/BasicRootedOzoneFileSystem.java | 71 +++++++++++++++++++ 3 files changed, 153 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index 06d1bd366e35..2b8803edc41e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -357,15 +357,18 @@ public void testListStatus() throws IOException { String dirPath = RandomStringUtils.randomAlphanumeric(5); Path path = createPath("/" + dirPath); paths.add(path); + + long mkdirs = statistics.getLong( + StorageStatistics.CommonStatisticNames.OP_MKDIRS); assertTrue("Makedirs returned with false for the path " + path, fs.mkdirs(path)); + assertCounter(++mkdirs, StorageStatistics.CommonStatisticNames.OP_MKDIRS); long listObjects = statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()); long omListStatus = omMetrics.getNumListStatus(); FileStatus[] statusList = fs.listStatus(createPath("/")); assertEquals(1, statusList.length); - assertEquals(++listObjects, - statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue()); + assertCounter(++listObjects, Statistic.OBJECTS_LIST.getSymbol()); assertEquals(++omListStatus, omMetrics.getNumListStatus()); assertEquals(fs.getFileStatus(path), statusList[0]); @@ -374,11 +377,11 @@ public void testListStatus() throws IOException { paths.add(path); assertTrue("Makedirs returned with false for the path " + path, fs.mkdirs(path)); + assertCounter(++mkdirs, StorageStatistics.CommonStatisticNames.OP_MKDIRS); statusList = fs.listStatus(createPath("/")); assertEquals(2, statusList.length); - assertEquals(++listObjects, - statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue()); + assertCounter(++listObjects, Statistic.OBJECTS_LIST.getSymbol()); assertEquals(++omListStatus, omMetrics.getNumListStatus()); for (Path p : paths) { assertTrue(Arrays.asList(statusList).contains(fs.getFileStatus(p))); @@ -528,4 +531,8 @@ private 
FileStatus getDirectoryStat(Path path) throws IOException { return status; } + + private void assertCounter(long value, String key) { + assertEquals(value, statistics.getLong(key).longValue()); + } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index 49f12f0ff095..e4acabc21443 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -26,13 +26,16 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -692,6 +695,7 @@ private boolean mkdir(Path path) throws IOException { @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { + incrementCounter(Statistic.INVOCATION_MKDIRS); LOG.trace("mkdir() path:{} ", f); String key = pathToKey(f); if (isEmpty(key)) { @@ -735,6 +739,73 @@ public short getDefaultReplication() { return adapter.getDefaultReplication(); } + @Override + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, + Path dst) throws IOException { + incrementCounter(Statistic.INVOCATION_COPY_FROM_LOCAL_FILE); + super.copyFromLocalFile(delSrc, overwrite, srcs, dst); + } + + @Override + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, + Path dst) throws IOException { + incrementCounter(Statistic.INVOCATION_COPY_FROM_LOCAL_FILE); + super.copyFromLocalFile(delSrc, overwrite, src, dst); + } + + @Override + public boolean exists(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_EXISTS); + return super.exists(f); + } + + @Override + public FileChecksum getFileChecksum(Path f, long length) throws IOException { + incrementCounter(Statistic.INVOCATION_GET_FILE_CHECKSUM); + return super.getFileChecksum(f, length); + } + + @Override + public FileStatus[] globStatus(Path pathPattern) throws IOException { + incrementCounter(Statistic.INVOCATION_GLOB_STATUS); + return super.globStatus(pathPattern); + } + + @Override + public FileStatus[] globStatus(Path pathPattern, PathFilter filter) + throws IOException { + incrementCounter(Statistic.INVOCATION_GLOB_STATUS); + return super.globStatus(pathPattern, filter); + } + + @Override + @SuppressWarnings("deprecation") + public boolean isDirectory(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_IS_DIRECTORY); + return super.isDirectory(f); + } + + @Override + @SuppressWarnings("deprecation") + public boolean isFile(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_IS_FILE); + return super.isFile(f); + } + + @Override + public RemoteIterator listFiles(Path f, boolean recursive) + throws IOException { + 
incrementCounter(Statistic.INVOCATION_LIST_FILES); + return super.listFiles(f, recursive); + } + + @Override + public RemoteIterator listLocatedStatus(Path f) + throws IOException { + incrementCounter(Statistic.INVOCATION_LIST_LOCATED_STATUS); + return super.listLocatedStatus(f); + } + /** * Turn a path (relative or otherwise) into an Ozone key. * diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index 59aec470447a..015621c2b52a 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -24,12 +24,15 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -715,6 +718,7 @@ private boolean mkdir(Path path) throws IOException { @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { + incrementCounter(Statistic.INVOCATION_MKDIRS); LOG.trace("mkdir() path:{} ", f); String key = pathToKey(f); if (isEmpty(key)) { @@ -764,6 +768,73 @@ public short getDefaultReplication() { return adapter.getDefaultReplication(); } + @Override + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, + Path dst) throws IOException { + incrementCounter(Statistic.INVOCATION_COPY_FROM_LOCAL_FILE); + super.copyFromLocalFile(delSrc, overwrite, srcs, dst); + } + + @Override + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, + Path dst) throws IOException { + incrementCounter(Statistic.INVOCATION_COPY_FROM_LOCAL_FILE); + super.copyFromLocalFile(delSrc, overwrite, src, dst); + } + + @Override + public boolean exists(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_EXISTS); + return super.exists(f); + } + + @Override + public FileChecksum getFileChecksum(Path f, long length) throws IOException { + incrementCounter(Statistic.INVOCATION_GET_FILE_CHECKSUM); + return super.getFileChecksum(f, length); + } + + @Override + public FileStatus[] globStatus(Path pathPattern) throws IOException { + incrementCounter(Statistic.INVOCATION_GLOB_STATUS); + return super.globStatus(pathPattern); + } + + @Override + public FileStatus[] globStatus(Path pathPattern, PathFilter filter) + throws IOException { + incrementCounter(Statistic.INVOCATION_GLOB_STATUS); + return super.globStatus(pathPattern, filter); + } + + @Override + @SuppressWarnings("deprecation") + public boolean isDirectory(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_IS_DIRECTORY); + return super.isDirectory(f); + } + + @Override + @SuppressWarnings("deprecation") + public boolean isFile(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_IS_FILE); + return 
super.isFile(f); + } + + @Override + public RemoteIterator listFiles(Path f, boolean recursive) + throws IOException { + incrementCounter(Statistic.INVOCATION_LIST_FILES); + return super.listFiles(f, recursive); + } + + @Override + public RemoteIterator listLocatedStatus(Path f) + throws IOException { + incrementCounter(Statistic.INVOCATION_LIST_LOCATED_STATUS); + return super.listLocatedStatus(f); + } + /** * Turn a path (relative or otherwise) into an Ozone key. * From 0be49a29d87a30f48feb8496035314ef5e56602e Mon Sep 17 00:00:00 2001 From: Sadanand Shenoy Date: Mon, 31 Aug 2020 14:06:06 +0530 Subject: [PATCH 154/165] HDDS-3867. Extend the chunkinfo tool to display information from all nodes in the pipeline. (#1154) --- .../hadoop/hdds/scm/XceiverClientGrpc.java | 30 ++++ .../hadoop/hdds/scm/XceiverClientRatis.java | 7 + .../hadoop/hdds/scm/XceiverClientSpi.java | 11 ++ .../scm/storage/ContainerProtocolCalls.java | 34 ++++ .../main/smoketest/debug/ozone-debug.robot | 4 +- .../hadoop/ozone/debug/ChunkKeyHandler.java | 149 ++++++++++-------- .../ozone/debug/ContainerChunkInfo.java | 21 +-- 7 files changed, 175 insertions(+), 81 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 4adfa8521b07..c2743c4e4113 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -247,6 +247,36 @@ public ContainerCommandResponseProto sendCommand( } } + @Override + public Map + sendCommandOnAllNodes( + ContainerCommandRequestProto request) throws IOException { + HashMap + responseProtoHashMap = new HashMap<>(); + List datanodeList = pipeline.getNodes(); + HashMap> + futureHashMap = new HashMap<>(); + for (DatanodeDetails dn : datanodeList) { + try { + futureHashMap.put(dn, sendCommandAsync(request, dn).getResponse()); + } catch (InterruptedException e) { + LOG.error("Command execution was interrupted."); + } + } + try{ + for (Map.Entry > + entry : futureHashMap.entrySet()){ + responseProtoHashMap.put(entry.getKey(), entry.getValue().get()); + } + } catch (InterruptedException e) { + LOG.error("Command execution was interrupted."); + } catch (ExecutionException e) { + LOG.error("Failed to execute command " + request, e); + } + return responseProtoHashMap; + } + @Override public ContainerCommandResponseProto sendCommand( ContainerCommandRequestProto request, List validators) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index 1c7779b4aed6..23fca738a16d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -22,6 +22,7 @@ import java.security.cert.X509Certificate; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.OptionalLong; import java.util.UUID; @@ -352,4 +353,10 @@ public XceiverClientReply sendCommandAsync( return asyncReply; } + @Override + public Map + sendCommandOnAllNodes(ContainerCommandRequestProto request) { + throw new UnsupportedOperationException( + "Operation Not supported for ratis client"); + } } diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java index 328777799bd1..1c7d1f6408d8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java @@ -21,10 +21,12 @@ import java.io.Closeable; import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -186,4 +188,13 @@ public abstract XceiverClientReply watchForCommit(long index) * @return min commit index replicated to all servers. */ public abstract long getReplicatedMinCommitIndex(); + + /** + * Sends command to all nodes in the pipeline. + * @return a map containing datanode as the key and + * the command response from that datanode + */ + public abstract Map + sendCommandOnAllNodes(ContainerCommandRequestProto request) + throws IOException, InterruptedException; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index 6b0d8f8cda2a..11acf82ff32f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.storage; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.common.helpers @@ -73,7 +74,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.ExecutionException; /** @@ -578,4 +581,35 @@ public static List getValidatorList() { validators.add(validator); return validators; } + + public static HashMap + getBlockFromAllNodes( + XceiverClientSpi xceiverClient, + DatanodeBlockID datanodeBlockID) throws IOException, + InterruptedException { + GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto + .newBuilder() + .setBlockID(datanodeBlockID); + HashMap datanodeToResponseMap + = new HashMap<>(); + String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); + ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto + .newBuilder() + .setCmdType(Type.GetBlock) + .setContainerID(datanodeBlockID.getContainerID()) + .setDatanodeUuid(id) + .setGetBlock(readBlockRequest); + String encodedToken = getEncodedBlockToken(getService(datanodeBlockID)); + if (encodedToken != null) { + builder.setEncodedToken(encodedToken); + } + ContainerCommandRequestProto request = builder.build(); + Map responses = + xceiverClient.sendCommandOnAllNodes(request); + for(Map.Entry entry: + responses.entrySet()){ + datanodeToResponseMap.put(entry.getKey(), entry.getValue().getGetBlock()); + } + 
return datanodeToResponseMap; + } } diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot index 39e561af6cf0..f7e3274fcedd 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot @@ -29,8 +29,8 @@ Write key *** Test Cases *** Test ozone debug - ${result} = Execute ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.[]' + ${result} = Execute ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.KeyLocations[0][0].Locations' Should contain ${result} files - ${result} = Execute ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.[].files[0]' + ${result} = Execute ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.KeyLocations[0][0].Locations.files[0]' File Should Exist ${result} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index c245490881be..4f69da78b905 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -22,13 +22,22 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - +import java.util.Map; +import java.util.HashMap; +import java.util.HashSet; +import com.google.gson.GsonBuilder; +import com.google.gson.Gson; +import com.google.gson.JsonObject; +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; import org.apache.hadoop.ozone.OzoneConsts; @@ -44,12 +53,6 @@ import org.apache.hadoop.ozone.shell.keys.KeyHandler; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; - -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonElement; -import com.google.gson.JsonObject; -import org.apache.ratis.protocol.ClientId; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -70,7 +73,6 @@ public class ChunkKeyHandler extends KeyHandler implements private ContainerOperationClient containerOperationClient; private XceiverClientManager xceiverClientManager; private XceiverClientSpi xceiverClient; - private final ClientId clientId = ClientId.randomId(); private OzoneManagerProtocol ozoneManagerClient; private String getChunkLocationPath(String containerLocation) { @@ -79,22 +81,22 @@ private String getChunkLocationPath(String containerLocation) { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException, OzoneClientException{ containerOperationClient = new - ContainerOperationClient(createOzoneConfiguration()); + 
ContainerOperationClient(createOzoneConfiguration()); xceiverClientManager = containerOperationClient - .getXceiverClientManager(); + .getXceiverClientManager(); ozoneManagerClient = client.getObjectStore().getClientProxy() .getOzoneManagerClient(); address.ensureKeyAddress(); - JsonObject jsonObj = new JsonObject(); JsonElement element; + JsonObject result = new JsonObject(); String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); String keyName = address.getKeyName(); List tempchunks = null; List chunkDetailsList = new ArrayList(); - List chunkPaths = new ArrayList(); + HashSet chunkPaths = new HashSet<>(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -102,19 +104,31 @@ protected void execute(OzoneClient client, OzoneAddress address) .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); - List locationInfos = keyInfo - .getLatestVersionLocations().getBlocksLatestVersionOnly(); // querying the keyLocations.The OM is queried to get containerID and // localID pertaining to a given key + List locationInfos = keyInfo + .getLatestVersionLocations().getBlocksLatestVersionOnly(); + // for zero-sized key + if(locationInfos.isEmpty()){ + System.out.println("No Key Locations Found"); + return; + } ChunkLayOutVersion chunkLayOutVersion = ChunkLayOutVersion .getConfiguredVersion(getConf()); + JsonArray responseArrayList = new JsonArray(); for (OmKeyLocationInfo keyLocation:locationInfos) { ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo(); ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo(); long containerId = keyLocation.getContainerID(); + chunkPaths.clear(); Token token = keyLocation.getToken(); + Pipeline pipeline = keyLocation.getPipeline(); + if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) { + pipeline = Pipeline.newBuilder(pipeline) + .setType(HddsProtos.ReplicationType.STAND_ALONE).build(); + } xceiverClient = xceiverClientManager - .acquireClient(keyLocation.getPipeline()); + .acquireClientForReadData(pipeline); // Datanode is queried to get chunk information.Thus querying the // OM,SCM and datanode helps us get chunk location information if (token != null) { @@ -122,55 +136,65 @@ protected void execute(OzoneClient client, OzoneAddress address) } ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID() .getDatanodeBlockIDProtobuf(); - ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls - .getBlock(xceiverClient, datanodeBlockID); - tempchunks = response.getBlockData().getChunksList(); - ContainerProtos.ContainerDataProto containerData = - containerOperationClient.readContainer( - keyLocation.getContainerID(), - keyLocation.getPipeline()); - for (ContainerProtos.ChunkInfo chunkInfo:tempchunks) { - ChunkDetails chunkDetails = new ChunkDetails(); - chunkDetails.setChunkName(chunkInfo.getChunkName()); - chunkDetails.setChunkOffset(chunkInfo.getOffset()); - chunkDetailsList.add(chunkDetails); - chunkPaths.add(chunkLayOutVersion.getChunkFile(new File( - getChunkLocationPath(containerData.getContainerPath())), - keyLocation.getBlockID(), - ChunkInfo.getFromProtoBuf(chunkInfo)).toString()); + // doing a getBlock on all nodes + HashMap + responses = null; + try { + responses = ContainerProtocolCalls + .getBlockFromAllNodes(xceiverClient, datanodeBlockID); + } catch (InterruptedException e) { + LOG.error("Execution interrupted due to " + e); } - containerChunkInfoVerbose - 
.setContainerPath(containerData.getContainerPath()); - containerChunkInfoVerbose - .setDataNodeList(keyLocation.getPipeline().getNodes()); - containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline()); - containerChunkInfoVerbose.setChunkInfos(chunkDetailsList); - containerChunkInfo.setFiles(chunkPaths); - List chunkDataNodeDetails = new - ArrayList(); - for (DatanodeDetails datanodeDetails:keyLocation - .getPipeline().getNodes()) { - chunkDataNodeDetails.add( - new ChunkDataNodeDetails(datanodeDetails.getIpAddress(), - datanodeDetails.getHostName())); - } - containerChunkInfo.setChunkDataNodeDetails(chunkDataNodeDetails); - containerChunkInfo.setPipelineID( - keyLocation.getPipeline().getId().getId()); - Gson gson = new GsonBuilder().create(); - if (isVerbose()) { - element = gson.toJsonTree(containerChunkInfoVerbose); - jsonObj.add("container Id :" + containerId + " " - + "blockId :" + keyLocation.getLocalID() + "", element); - } else { - element = gson.toJsonTree(containerChunkInfo); - jsonObj.add("container Id :" + containerId + " " - + "blockId :" + keyLocation.getLocalID() + "", element); + JsonArray responseFromAllNodes = new JsonArray(); + for (Map.Entry + entry: responses.entrySet()) { + JsonObject jsonObj = new JsonObject(); + if(entry.getValue() == null){ + LOG.error("Cant execute getBlock on this node"); + continue; + } + tempchunks = entry.getValue().getBlockData().getChunksList(); + ContainerProtos.ContainerDataProto containerData = + containerOperationClient.readContainer( + keyLocation.getContainerID(), + keyLocation.getPipeline()); + for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) { + String fileName = chunkLayOutVersion.getChunkFile(new File( + getChunkLocationPath(containerData.getContainerPath())), + keyLocation.getBlockID(), + ChunkInfo.getFromProtoBuf(chunkInfo)).toString(); + chunkPaths.add(fileName); + ChunkDetails chunkDetails = new ChunkDetails(); + chunkDetails.setChunkName(fileName); + chunkDetails.setChunkOffset(chunkInfo.getOffset()); + chunkDetailsList.add(chunkDetails); + } + containerChunkInfoVerbose + .setContainerPath(containerData.getContainerPath()); + containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline()); + containerChunkInfoVerbose.setChunkInfos(chunkDetailsList); + containerChunkInfo.setFiles(chunkPaths); + containerChunkInfo.setPipelineID( + keyLocation.getPipeline().getId().getId()); + Gson gson = new GsonBuilder().create(); + if (isVerbose()) { + element = gson.toJsonTree(containerChunkInfoVerbose); + } else { + element = gson.toJsonTree(containerChunkInfo); + } + jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName()); + jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress()); + jsonObj.addProperty("Container-ID", containerId); + jsonObj.addProperty("Block-ID", keyLocation.getLocalID()); + jsonObj.add("Locations", element); + responseFromAllNodes.add(jsonObj); + xceiverClientManager.releaseClientForReadData(xceiverClient, false); } + responseArrayList.add(responseFromAllNodes); } - xceiverClientManager.releaseClient(xceiverClient, false); - Gson gson = new GsonBuilder().setPrettyPrinting().create(); - String prettyJson = gson.toJson(jsonObj); + result.add("KeyLocations", responseArrayList); + Gson gson2 = new GsonBuilder().setPrettyPrinting().create(); + String prettyJson = gson2.toJson(result); System.out.println(prettyJson); } @@ -178,4 +202,5 @@ protected void execute(OzoneClient client, OzoneAddress address) public Class getParentType() { return OzoneDebug.class; } + } diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java index 0e969c7dcf80..cf57d95397bb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java @@ -19,9 +19,10 @@ package org.apache.hadoop.ozone.debug; import com.fasterxml.jackson.annotation.JsonInclude; + +import java.util.HashSet; import java.util.List; import java.util.UUID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; /** @@ -30,19 +31,12 @@ @JsonInclude(JsonInclude.Include.NON_NULL) public class ContainerChunkInfo { private String containerPath; - private List dataNodeList; private List chunkInfos; - private List files; - private List chunkDataNodeDetails; + private HashSet files; private UUID pipelineID; private Pipeline pipeline; - public void setChunkDataNodeDetails(List - chunkDataNodeDetails) { - this.chunkDataNodeDetails = chunkDataNodeDetails; - } - - public void setFiles(List files) { + public void setFiles(HashSet files) { this.files = files; } @@ -66,9 +60,6 @@ public void setChunkInfos(List chunkInfos) { this.chunkInfos = chunkInfos; } - public void setDataNodeList(List dataNodeList) { - this.dataNodeList = dataNodeList; - } @Override public String toString() { @@ -76,8 +67,6 @@ public String toString() { + "containerPath='" + containerPath + '\'' - + ", dataNodeList=" - + dataNodeList + ", chunkInfos=" + chunkInfos + ", pipeline=" @@ -85,8 +74,6 @@ public String toString() { + '}' + "files=" + files - + "chunkdatanodeDetails=" - + chunkDataNodeDetails + "PipelineID=" + pipelineID; } From 897e9d5784548a1d26d33d172fd0178ef4103229 Mon Sep 17 00:00:00 2001 From: Ethan Rose <33912936+errose28@users.noreply.github.com> Date: Mon, 31 Aug 2020 17:57:43 -0400 Subject: [PATCH 155/165] HDDS-4121. Implement OmMetadataMangerImpl#getExpiredOpenKeys. (#1351) --- .../apache/hadoop/ozone/om/KeyManager.java | 11 +-- .../hadoop/ozone/om/KeyManagerImpl.java | 5 +- .../hadoop/ozone/om/OMMetadataManager.java | 10 +-- .../ozone/om/OmMetadataManagerImpl.java | 35 ++++++++- .../ozone/om/OpenKeyCleanupService.java | 42 ++-------- .../ozone/om/TestOmMetadataManager.java | 77 +++++++++++++++++++ .../ozone/om/request/TestOMRequestUtils.java | 33 +++++++- 7 files changed, 159 insertions(+), 54 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index dbcecc8baa40..658f503a1a70 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -182,14 +182,15 @@ List listTrash(String volumeName, String bucketName, List getPendingDeletionKeys(int count) throws IOException; /** - * Returns a list of all still open key info. Which contains the info about - * the key name and all its associated block IDs. A pending open key has - * prefix #open# in OM DB. + * Returns the names of up to {@code count} open keys that are older than + * the configured expiration age. * - * @return a list of {@link BlockGroup} representing keys and blocks. + * @param count The maximum number of expired open keys to return. 
+ * @return a list of {@link String} representing the names of expired + * open keys. * @throws IOException */ - List getExpiredOpenKeys() throws IOException; + List getExpiredOpenKeys(int count) throws IOException; /** * Deletes a expired open key by its name. Called when a hanging key has been diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index f42f5700480d..c32e80724d9f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -940,9 +940,8 @@ public List getPendingDeletionKeys(final int count) } @Override - public List getExpiredOpenKeys() throws IOException { - return metadataManager.getExpiredOpenKeys(); - + public List getExpiredOpenKeys(int count) throws IOException { + return metadataManager.getExpiredOpenKeys(count); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 439f5465087c..c687a4b22907 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -244,14 +244,14 @@ List listVolumes(String userName, String prefix, List getPendingDeletionKeys(int count) throws IOException; /** - * Returns a list of all still open key info. Which contains the info about - * the key name and all its associated block IDs. A pending open key has - * prefix #open# in OM DB. + * Returns the names of up to {@code count} open keys that are older than + * the configured expiration age. * - * @return a list of {@link BlockGroup} representing keys and blocks. + * @param count The maximum number of open keys to return. + * @return a list of {@link String} representing names of open expired keys. * @throws IOException */ - List getExpiredOpenKeys() throws IOException; + List getExpiredOpenKeys(int count) throws IOException; /** * Returns the user Table. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 36d219bd9af6..aff8a14e2710 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -19,6 +19,9 @@ import java.io.File; import java.io.IOException; import java.nio.file.Paths; +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -992,10 +995,34 @@ public List getPendingDeletionKeys(final int keyCount) } @Override - public List getExpiredOpenKeys() throws IOException { - List keyBlocksList = Lists.newArrayList(); - // TODO: Fix the getExpiredOpenKeys, Not part of this patch. - return keyBlocksList; + public List getExpiredOpenKeys(int count) throws IOException { + // Only check for expired keys in the open key table, not its cache. + // If a key expires while it is in the cache, it will be cleaned + // up after the cache is flushed. 
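+ // A key counts as expired once (now - creationTime) >= expirationDuration;
+ // iteration stops as soon as 'count' expired keys have been collected.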
+ final Duration expirationDuration = + Duration.of(openKeyExpireThresholdMS, ChronoUnit.MILLIS); + List expiredKeys = Lists.newArrayList(); + + try (TableIterator> + keyValueTableIterator = getOpenKeyTable().iterator()) { + + while (keyValueTableIterator.hasNext() && expiredKeys.size() < count) { + KeyValue openKeyValue = keyValueTableIterator.next(); + String openKey = openKeyValue.getKey(); + OmKeyInfo openKeyInfo = openKeyValue.getValue(); + + Duration openKeyAge = + Duration.between( + Instant.ofEpochMilli(openKeyInfo.getCreationTime()), + Instant.now()); + + if (openKeyAge.compareTo(expirationDuration) >= 0) { + expiredKeys.add(openKey); + } + } + } + + return expiredKeys; } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java index 79bc39f49846..6a5045a8f630 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java @@ -18,10 +18,7 @@ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; @@ -30,7 +27,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.List; import java.util.concurrent.TimeUnit; /** @@ -76,39 +72,13 @@ public int getPriority() { @Override public BackgroundTaskResult call() throws Exception { + // This method is currently never used. It will be implemented in + // HDDS-4122, and integrated into the rest of the code base in HDDS-4123. try { - List keyBlocksList = keyManager.getExpiredOpenKeys(); - if (keyBlocksList.size() > 0) { - int toDeleteSize = keyBlocksList.size(); - LOG.debug("Found {} to-delete open keys in OM", toDeleteSize); - List results = - scmClient.deleteKeyBlocks(keyBlocksList); - int deletedSize = 0; - for (DeleteBlockGroupResult result : results) { - if (result.isSuccess()) { - try { - keyManager.deleteExpiredOpenKey(result.getObjectKey()); - if (LOG.isDebugEnabled()) { - LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); - } - deletedSize += 1; - } catch (IOException e) { - LOG.warn("Failed to delete hanging-open key {}", - result.getObjectKey(), e); - } - } else { - LOG.warn("Deleting open Key {} failed because some of the blocks" - + " were failed to delete, failed blocks: {}", - result.getObjectKey(), - StringUtils.join(",", result.getFailedBlocks())); - } - } - LOG.info("Found {} expired open key entries, successfully " + - "cleaned up {} entries", toDeleteSize, deletedSize); - return results::size; - } else { - LOG.debug("No hanging open key found in OM"); - } + // The new API for deleting expired open keys in OM HA will differ + // significantly from the old implementation. + // The old implementation has been removed so the code compiles. 
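+ // With a count of 0 the lookup returns an empty list, so this stays a no-op
+ // until the new deletion flow from HDDS-4122/HDDS-4123 is wired in.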
+ keyManager.getExpiredOpenKeys(0); } catch (IOException e) { LOG.error("Unable to get hanging open keys, retry in" + " next interval", e); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index 6226c5bbc9f1..7c2d258e9a00 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -33,9 +33,15 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.TreeSet; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; @@ -521,6 +527,77 @@ public void testListKeysWithFewDeleteEntriesInCache() throws Exception { } + @Test + public void testGetExpiredOpenKeys() throws Exception { + final String bucketName = "bucket"; + final String volumeName = "volume"; + final int numExpiredOpenKeys = 4; + final int numUnexpiredOpenKeys = 1; + final long clientID = 1000L; + // To create expired keys, they will be assigned a creation time twice as + // old as the minimum expiration time. + final long minExpiredTimeSeconds = ozoneConfiguration.getInt( + OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, + OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT); + final long expiredAgeMillis = + Instant.now().minus(minExpiredTimeSeconds * 2, + ChronoUnit.SECONDS).toEpochMilli(); + + // Add expired keys to open key table. + // The method under test does not check for expired open keys in the + // cache, since they will be picked up once the cache is flushed. + Set expiredKeys = new HashSet<>(); + for (int i = 0; i < numExpiredOpenKeys; i++) { + OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, 0L, expiredAgeMillis); + + TestOMRequestUtils.addKeyToTable(true, false, + keyInfo, clientID, 0L, omMetadataManager); + + String groupID = omMetadataManager.getOpenKey(volumeName, bucketName, + keyInfo.getKeyName(), clientID); + expiredKeys.add(groupID); + } + + // Add unexpired keys to open key table. + for (int i = 0; i < numUnexpiredOpenKeys; i++) { + OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + bucketName, "unexpired" + i, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); + + TestOMRequestUtils.addKeyToTable(true, false, + keyInfo, clientID, 0L, omMetadataManager); + } + + // Test retrieving fewer expired keys than actually exist. + List someExpiredKeys = + omMetadataManager.getExpiredOpenKeys(numExpiredOpenKeys - 1); + + Assert.assertEquals(numExpiredOpenKeys - 1, someExpiredKeys.size()); + for (String key: someExpiredKeys) { + Assert.assertTrue(expiredKeys.contains(key)); + } + + // Test attempting to retrieving more expired keys than actually exist. 
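+    // Requesting more than are present should return every expired key,
+    // and only expired keys.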
+ List allExpiredKeys = + omMetadataManager.getExpiredOpenKeys(numExpiredOpenKeys + 1); + + Assert.assertEquals(numExpiredOpenKeys, allExpiredKeys.size()); + for (String key: allExpiredKeys) { + Assert.assertTrue(expiredKeys.contains(key)); + } + + // Test retrieving exact amount of expired keys that exist. + allExpiredKeys = + omMetadataManager.getExpiredOpenKeys(numExpiredOpenKeys); + + Assert.assertEquals(numExpiredOpenKeys, allExpiredKeys.size()); + for (String key: allExpiredKeys) { + Assert.assertTrue(expiredKeys.contains(key)); + } + } + private void addKeysToOM(String volumeName, String bucketName, String keyName, int i) throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java index dadeacb1943c..25268ff64194 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java @@ -136,6 +136,26 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor, trxnLogIndex); + addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, + omMetadataManager); + + } + + /** + * Add key entry to KeyTable. if openKeyTable flag is true, add's entries + * to openKeyTable, else add's it to keyTable. + * @throws Exception + */ + public static void addKeyToTable(boolean openKeyTable, boolean addToCache, + OmKeyInfo omKeyInfo, long clientID, + long trxnLogIndex, + OMMetadataManager omMetadataManager) + throws Exception { + + String volumeName = omKeyInfo.getVolumeName(); + String bucketName = omKeyInfo.getBucketName(); + String keyName = omKeyInfo.getKeyName(); + if (openKeyTable) { String ozoneKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, clientID); @@ -213,13 +233,24 @@ public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, String keyName, HddsProtos.ReplicationType replicationType, HddsProtos.ReplicationFactor replicationFactor, long objectID) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, + replicationFactor, objectID, Time.now()); + } + + /** + * Create OmKeyInfo. + */ + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long creationTime) { return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(Time.now()) + .setCreationTime(creationTime) .setModificationTime(Time.now()) .setDataSize(1000L) .setReplicationType(replicationType) From 3cb257194726ac5d0b0a219d21845c2e6a8e84df Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 1 Sep 2020 16:39:51 +0200 Subject: [PATCH 156/165] HDDS-4167. 
Acceptance test logs missing if fails during cluster startup (#1366) --- .../dist/src/main/compose/ozone-mr/test.sh | 22 +++++------- .../dist/src/main/compose/test-all.sh | 21 +++-------- hadoop-ozone/dist/src/main/compose/testlib.sh | 36 +++++++++++++++++++ 3 files changed, 49 insertions(+), 30 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh index 6146dab871e7..3a18d4df2860 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh @@ -1,3 +1,4 @@ +#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -15,29 +16,22 @@ # limitations under the License. SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) ALL_RESULT_DIR="$SCRIPT_DIR/result" +mkdir -p "$ALL_RESULT_DIR" +rm "$ALL_RESULT_DIR/*" || true source "$SCRIPT_DIR/../testlib.sh" tests=$(find_tests) +cd "$SCRIPT_DIR" RESULT=0 # shellcheck disable=SC2044 for t in ${tests}; do d="$(dirname "${t}")" - echo "Executing test in ${d}" - #required to read the .env file from the right location - cd "${d}" || continue - ./test.sh - ret=$? - if [[ $ret -ne 0 ]]; then - RESULT=1 - echo "ERROR: Test execution of ${d} is FAILED!!!!" + if ! run_test_script "${d}"; then + RESULT=1 fi - cd "$SCRIPT_DIR" - RESULT_DIR="${d}/result" - TEST_DIR_NAME=$(basename ${d}) - rebot -N $TEST_DIR_NAME -o "$ALL_RESULT_DIR"/$TEST_DIR_NAME.xml "$RESULT_DIR"/"*.xml" - cp "$RESULT_DIR"/docker-*.log "$ALL_RESULT_DIR"/ - cp "$RESULT_DIR"/*.out* "$ALL_RESULT_DIR"/ || true + + copy_results "${d}" "${ALL_RESULT_DIR}" done diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index 1fdc0ffcbb83..45a3c52d52f1 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -34,29 +34,18 @@ if [ "$OZONE_WITH_COVERAGE" ]; then fi tests=$(find_tests) +cd "$SCRIPT_DIR" RESULT=0 # shellcheck disable=SC2044 for t in ${tests}; do d="$(dirname "${t}")" - echo "Executing test in ${d}" - #required to read the .env file from the right location - cd "${d}" || continue - set +e - ./test.sh - ret=$? - set -e - if [[ $ret -ne 0 ]]; then - RESULT=1 - echo "ERROR: Test execution of ${d} is FAILED!!!!" + if ! run_test_script "${d}"; then + RESULT=1 fi - cd "$SCRIPT_DIR" - RESULT_DIR="${d}/result" - TEST_DIR_NAME=$(basename ${d}) - rebot --nostatusrc -N $TEST_DIR_NAME -o "$ALL_RESULT_DIR"/$TEST_DIR_NAME.xml "$RESULT_DIR"/"*.xml" - cp "$RESULT_DIR"/docker-*.log "$ALL_RESULT_DIR"/ - cp "$RESULT_DIR"/*.out* "$ALL_RESULT_DIR"/ || true + + copy_results "${d}" "${ALL_RESULT_DIR}" done rebot --nostatusrc -N acceptance -d "$ALL_RESULT_DIR" "$ALL_RESULT_DIR"/*.xml diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 228572fe2c73..db449b90ad9c 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -247,3 +247,39 @@ generate_report(){ exit 1 fi } + +## @description Copy results of a single test environment to the "all tests" dir. 
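+## @param test_dir       directory of the single test environment
+## @param all_result_dir directory that collects results from all tests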
+copy_results() { + local test_dir="$1" + local all_result_dir="$2" + + local result_dir="${test_dir}/result" + local test_dir_name=$(basename ${test_dir}) + if [[ -n "$(find "${result_dir}" -name "*.xml")" ]]; then + rebot --nostatusrc -N "${test_dir_name}" -o "${all_result_dir}/${test_dir_name}.xml" "${result_dir}/*.xml" + fi + + cp "${result_dir}"/docker-*.log "${all_result_dir}"/ + if [[ -n "$(find "${result_dir}" -name "*.out")" ]]; then + cp "${result_dir}"/*.out* "${all_result_dir}"/ + fi +} + +run_test_script() { + local d="$1" + + echo "Executing test in ${d}" + + #required to read the .env file from the right location + cd "${d}" || return + + ret=0 + if ! ./test.sh; then + ret=1 + echo "ERROR: Test execution of ${d} is FAILED!!!!" + fi + + cd - > /dev/null + + return ${ret} +} From f892094183a3b092cb46668c95c61bd4c74860c5 Mon Sep 17 00:00:00 2001 From: runzhiwang <51938049+runzhiwang@users.noreply.github.com> Date: Wed, 2 Sep 2020 10:15:01 +0800 Subject: [PATCH 157/165] HDDS-4176. Fix failed UT: test2WayCommitForTimeoutException (#1370) --- .../apache/hadoop/ozone/container/ContainerTestHelper.java | 5 +++++ .../apache/hadoop/ozone/client/rpc/TestWatchForCommit.java | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 45fe38b937ae..b8ebaecf282d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -583,6 +583,11 @@ public static String getFixedLengthString(String string, int length) { private static RaftServerImpl getRaftServerImpl(HddsDatanodeService dn, Pipeline pipeline) throws Exception { + if (!pipeline.getNodes().contains(dn.getDatanodeDetails())) { + throw new IllegalArgumentException("Pipeline:" + pipeline.getId() + + " not exist in datanode:" + dn.getDatanodeDetails().getUuid()); + } + XceiverServerSpi server = dn.getDatanodeStateMachine(). 
getContainer().getWriteChannel(); RaftServerProxy proxy = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index df08713ddd8a..c918b9b80da9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.*; @@ -297,9 +298,11 @@ public void test2WayCommitForTimeoutException() throws Exception { xceiverClient.getPipeline())); reply.getResponse().get(); Assert.assertEquals(3, ratisClient.getCommitInfoMap().size()); + List nodesInPipeline = pipeline.getNodes(); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { // shutdown the ratis follower - if (ContainerTestHelper.isRatisFollower(dn, pipeline)) { + if (nodesInPipeline.contains(dn.getDatanodeDetails()) + && ContainerTestHelper.isRatisFollower(dn, pipeline)) { cluster.shutdownHddsDatanode(dn.getDatanodeDetails()); break; } From 24aa0dfe5c0072638fa4546efc61df88718ba5c4 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Wed, 2 Sep 2020 07:10:54 +0100 Subject: [PATCH 158/165] HDDS-4131. Container report should update container key count and bytes used if they differ in SCM (#1339) --- .../AbstractContainerReportHandler.java | 47 ++++- .../scm/container/ReplicationManager.java | 34 ---- .../container/TestContainerReportHandler.java | 164 +++++++++++++++++- .../commandhandler/TestBlockDeletion.java | 5 +- 4 files changed, 204 insertions(+), 46 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java index 29f0083c0b67..bc83b8da6b17 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java @@ -20,6 +20,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -28,6 +29,9 @@ import org.slf4j.Logger; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; import java.util.UUID; import java.util.function.Supplier; @@ -76,7 +80,7 @@ protected void processContainerReplica(final DatanodeDetails datanodeDetails, // Synchronized block should be replaced by container lock, // once we have introduced lock inside ContainerInfo. 
synchronized (containerManager.getContainer(containerId)) { - updateContainerStats(containerId, replicaProto); + updateContainerStats(datanodeDetails, containerId, replicaProto); updateContainerState(datanodeDetails, containerId, replicaProto); updateContainerReplica(datanodeDetails, containerId, replicaProto); } @@ -90,7 +94,8 @@ protected void processContainerReplica(final DatanodeDetails datanodeDetails, * @param replicaProto Container Replica information * @throws ContainerNotFoundException If the container is not present */ - private void updateContainerStats(final ContainerID containerId, + private void updateContainerStats(final DatanodeDetails datanodeDetails, + final ContainerID containerId, final ContainerReplicaProto replicaProto) throws ContainerNotFoundException { @@ -103,14 +108,44 @@ private void updateContainerStats(final ContainerID containerId, containerInfo.updateSequenceId( replicaProto.getBlockCommitSequenceId()); } + List otherReplicas = + getOtherReplicas(containerId, datanodeDetails); + long usedBytes = replicaProto.getUsed(); + long keyCount = replicaProto.getKeyCount(); + for (ContainerReplica r : otherReplicas) { + // Open containers are generally growing in key count and size, the + // overall size should be the min of all reported replicas. + if (containerInfo.getState().equals(HddsProtos.LifeCycleState.OPEN)) { + usedBytes = Math.min(usedBytes, r.getBytesUsed()); + keyCount = Math.min(keyCount, r.getKeyCount()); + } else { + // Containers which are not open can only shrink in size, so use the + // largest values reported. + usedBytes = Math.max(usedBytes, r.getBytesUsed()); + keyCount = Math.max(keyCount, r.getKeyCount()); + } + } - if (containerInfo.getUsedBytes() < replicaProto.getUsed()) { - containerInfo.setUsedBytes(replicaProto.getUsed()); + if (containerInfo.getUsedBytes() != usedBytes) { + containerInfo.setUsedBytes(usedBytes); + } + if (containerInfo.getNumberOfKeys() != keyCount) { + containerInfo.setNumberOfKeys(keyCount); } - if (containerInfo.getNumberOfKeys() < replicaProto.getKeyCount()) { - containerInfo.setNumberOfKeys(replicaProto.getKeyCount()); + } + } + + private List getOtherReplicas(ContainerID containerId, + DatanodeDetails exclude) throws ContainerNotFoundException { + List filteredReplicas = new ArrayList<>(); + Set replicas + = containerManager.getContainerReplicas(containerId); + for (ContainerReplica r : replicas) { + if (!r.getDatanodeDetails().equals(exclude)) { + filteredReplicas.add(r); } } + return filteredReplicas; } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java index 9914f8950e00..3a9ad1bc8acb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java @@ -312,14 +312,6 @@ private void processContainer(ContainerID id) { action -> replicas.stream() .noneMatch(r -> r.getDatanodeDetails().equals(action.datanode))); - /* - * If the container is in CLOSED state, check and update it's key count - * and bytes used statistics if needed. - */ - if (state == LifeCycleState.CLOSED) { - checkAndUpdateContainerInfo(container, replicas); - } - /* * We don't have to take any action if the container is healthy. 
* @@ -773,32 +765,6 @@ private void handleUnstableContainer(final ContainerInfo container, } - /** - * Check and update Container key count and used bytes based on it's replica's - * data. - */ - private void checkAndUpdateContainerInfo(final ContainerInfo container, - final Set replicas) { - // check container key count and bytes used - long maxUsedBytes = 0; - long maxKeyCount = 0; - ContainerReplica[] rps = replicas.toArray(new ContainerReplica[0]); - for (int i = 0; i < rps.length; i++) { - maxUsedBytes = Math.max(maxUsedBytes, rps[i].getBytesUsed()); - maxKeyCount = Math.max(maxKeyCount, rps[i].getKeyCount()); - } - if (maxKeyCount < container.getNumberOfKeys()) { - LOG.debug("Container {} key count changed from {} to {}", - container.containerID(), container.getNumberOfKeys(), maxKeyCount); - container.setNumberOfKeys(maxKeyCount); - } - if (maxUsedBytes < container.getUsedBytes()) { - LOG.debug("Container {} used bytes changed from {} to {}", - container.containerID(), container.getUsedBytes(), maxUsedBytes); - container.setUsedBytes(maxUsedBytes); - } - } - /** * Sends close container command for the given container to the given * datanode. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index c7ec835e55b9..9f308fa9738e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -39,11 +39,13 @@ import org.mockito.Mockito; import java.io.IOException; +import java.util.HashSet; import java.util.Iterator; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; +import static junit.framework.TestCase.assertEquals; import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas; import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; @@ -483,9 +485,167 @@ public void testQuasiClosedToClosed() Assert.assertEquals(LifeCycleState.CLOSED, containerOne.getState()); } + @Test + public void openContainerKeyAndBytesUsedUpdatedToMinimumOfAllReplicas() + throws SCMException { + final ContainerReportHandler reportHandler = new ContainerReportHandler( + nodeManager, containerManager); + final Iterator nodeIterator = nodeManager.getNodes( + NodeState.HEALTHY).iterator(); + + final DatanodeDetails datanodeOne = nodeIterator.next(); + final DatanodeDetails datanodeTwo = nodeIterator.next(); + final DatanodeDetails datanodeThree = nodeIterator.next(); + + final ContainerReplicaProto.State replicaState + = ContainerReplicaProto.State.OPEN; + final ContainerInfo containerOne = getContainer(LifeCycleState.OPEN); + + final Set containerIDSet = new HashSet<>(); + containerIDSet.add(containerOne.containerID()); + + containerStateManager.loadContainer(containerOne); + // Container loaded, no replicas reported from DNs. Expect zeros for + // usage values. 
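+    // For OPEN containers the report handler keeps the minimum of the
+    // values reported across replicas, since an open replica only grows;
+    // the assertions below exercise that behaviour.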
+ assertEquals(0L, containerOne.getUsedBytes()); + assertEquals(0L, containerOne.getNumberOfKeys()); + + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 50L, 60L), publisher); + + // Single replica reported - ensure values are updated + assertEquals(50L, containerOne.getUsedBytes()); + assertEquals(60L, containerOne.getNumberOfKeys()); + + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeTwo, 50L, 60L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 50L, 60L), publisher); + + // All 3 DNs are reporting the same values. Counts should be as expected. + assertEquals(50L, containerOne.getUsedBytes()); + assertEquals(60L, containerOne.getNumberOfKeys()); + + // Now each DN reports a different lesser value. Counts should be the min + // reported. + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 1L, 10L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeTwo, 2L, 11L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 3L, 12L), publisher); + + // All 3 DNs are reporting different values. The actual value should be the + // minimum. + assertEquals(1L, containerOne.getUsedBytes()); + assertEquals(10L, containerOne.getNumberOfKeys()); + + // Have the lowest value report a higher value and ensure the new value + // is the minimum + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 3L, 12L), publisher); + + assertEquals(2L, containerOne.getUsedBytes()); + assertEquals(11L, containerOne.getNumberOfKeys()); + } + + @Test + public void notOpenContainerKeyAndBytesUsedUpdatedToMaximumOfAllReplicas() + throws SCMException { + final ContainerReportHandler reportHandler = new ContainerReportHandler( + nodeManager, containerManager); + final Iterator nodeIterator = nodeManager.getNodes( + NodeState.HEALTHY).iterator(); + + final DatanodeDetails datanodeOne = nodeIterator.next(); + final DatanodeDetails datanodeTwo = nodeIterator.next(); + final DatanodeDetails datanodeThree = nodeIterator.next(); + + final ContainerReplicaProto.State replicaState + = ContainerReplicaProto.State.CLOSED; + final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSED); + + final Set containerIDSet = new HashSet<>(); + containerIDSet.add(containerOne.containerID()); + + containerStateManager.loadContainer(containerOne); + // Container loaded, no replicas reported from DNs. Expect zeros for + // usage values. + assertEquals(0L, containerOne.getUsedBytes()); + assertEquals(0L, containerOne.getNumberOfKeys()); + + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 50L, 60L), publisher); + + // Single replica reported - ensure values are updated + assertEquals(50L, containerOne.getUsedBytes()); + assertEquals(60L, containerOne.getNumberOfKeys()); + + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeTwo, 50L, 60L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 50L, 60L), publisher); + + // All 3 DNs are reporting the same values. 
Counts should be as expected. + assertEquals(50L, containerOne.getUsedBytes()); + assertEquals(60L, containerOne.getNumberOfKeys()); + + // Now each DN reports a different lesser value. Counts should be the max + // reported. + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 1L, 10L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeTwo, 2L, 11L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 3L, 12L), publisher); + + // All 3 DNs are reporting different values. The actual value should be the + // maximum. + assertEquals(3L, containerOne.getUsedBytes()); + assertEquals(12L, containerOne.getNumberOfKeys()); + + // Have the highest value report a lower value and ensure the new value + // is the new maximumu + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 1L, 10L), publisher); + + assertEquals(2L, containerOne.getUsedBytes()); + assertEquals(11L, containerOne.getNumberOfKeys()); + } + + private ContainerReportFromDatanode getContainerReportFromDatanode( + ContainerID containerId, ContainerReplicaProto.State state, + DatanodeDetails dn, long bytesUsed, long keyCount) { + ContainerReportsProto containerReport = getContainerReportsProto( + containerId, state, dn.getUuidString(), bytesUsed, keyCount); + + return new ContainerReportFromDatanode(dn, containerReport); + } + private static ContainerReportsProto getContainerReportsProto( final ContainerID containerId, final ContainerReplicaProto.State state, final String originNodeId) { + return getContainerReportsProto(containerId, state, originNodeId, + 2000000000L, 100000000L); + } + + private static ContainerReportsProto getContainerReportsProto( + final ContainerID containerId, final ContainerReplicaProto.State state, + final String originNodeId, final long usedBytes, final long keyCount) { final ContainerReportsProto.Builder crBuilder = ContainerReportsProto.newBuilder(); final ContainerReplicaProto replicaProto = @@ -495,8 +655,8 @@ private static ContainerReportsProto getContainerReportsProto( .setOriginNodeId(originNodeId) .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2") .setSize(5368709120L) - .setUsed(2000000000L) - .setKeyCount(100000000L) + .setUsed(usedBytes) + .setKeyCount(keyCount) .setReadCount(100000000L) .setWriteCount(100000000L) .setReadBytes(2000000000L) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index e451c07f56c3..aeb5bc7fbd5f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -270,11 +270,8 @@ public void testContainerStatistics() throws Exception { }); om.deleteKey(keyArgs); - // Want for blocks to be deleted + // Wait for blocks to be deleted and container reports to be processed Thread.sleep(5000); - scm.getReplicationManager().processContainersNow(); - // Wait for container statistics change - Thread.sleep(1000); containerInfos = 
scm.getContainerManager().getContainers(); containerInfos.stream().forEach(container -> { Assert.assertEquals(0, container.getUsedBytes()); From 612c1cefcbe5f0bcf45bd003bdf1ad766766f802 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 2 Sep 2020 16:05:15 +0200 Subject: [PATCH 159/165] HDDS-4165. GitHub Actions cache does not work outside of workspace (#1364) --- .github/workflows/post-commit.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index 4a4af1a91af3..20d2a2a5d1f5 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -164,6 +164,8 @@ jobs: - misc fail-fast: false steps: + - name: Checkout project + uses: actions/checkout@v2 - name: Cache for maven dependencies uses: actions/cache@v2 with: @@ -290,6 +292,8 @@ jobs: kubernetes: runs-on: ubuntu-18.04 steps: + - name: Checkout project + uses: actions/checkout@v2 - name: Cache for maven dependencies uses: actions/cache@v2 with: @@ -351,4 +355,4 @@ jobs: #Never cache local artifacts rm -rf ~/.m2/repository/org/apache/hadoop/hdds rm -rf ~/.m2/repository/org/apache/hadoop/ozone - if: always() \ No newline at end of file + if: always() From 5b06fec2146e61fe10da19d72d5257f4aa53c0ed Mon Sep 17 00:00:00 2001 From: Huang-Mu Zheng Date: Thu, 3 Sep 2020 00:42:31 +0800 Subject: [PATCH 160/165] HDDS-3804. Recon start fails with SQL exception with MySQL DB. (#1377) --- .../ozone/recon/schema/ContainerSchemaDefinition.java | 4 ++-- .../ozone/recon/schema/ReconTaskSchemaDefinition.java | 2 +- .../ozone/recon/schema/StatsSchemaDefinition.java | 2 +- .../recon/schema/UtilizationSchemaDefinition.java | 10 +++++----- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 5696ab3e01bc..1be715dc7d22 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -81,8 +81,8 @@ public void initializeSchema() throws SQLException { */ private void createContainerHistoryTable() { dslContext.createTableIfNotExists(CONTAINER_HISTORY_TABLE_NAME) - .column(CONTAINER_ID, SQLDataType.BIGINT) - .column("datanode_host", SQLDataType.VARCHAR(1024)) + .column(CONTAINER_ID, SQLDataType.BIGINT.nullable(false)) + .column("datanode_host", SQLDataType.VARCHAR(766).nullable(false)) .column("first_report_timestamp", SQLDataType.BIGINT) .column("last_report_timestamp", SQLDataType.BIGINT) .constraint(DSL.constraint("pk_container_id_datanode_host") diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java index 45fc1ba0d73b..72e27024feea 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java @@ -61,7 +61,7 @@ public void initializeSchema() throws SQLException { */ private void createReconTaskStatusTable(Connection conn) { DSL.using(conn).createTableIfNotExists(RECON_TASK_STATUS_TABLE_NAME) - 
.column("task_name", SQLDataType.VARCHAR(1024)) + .column("task_name", SQLDataType.VARCHAR(768).nullable(false)) .column("last_updated_timestamp", SQLDataType.BIGINT) .column("last_updated_seq_number", SQLDataType.BIGINT) .constraint(DSL.constraint("pk_task_name") diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java index 55f3b93c6ba4..394c9de8df59 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java @@ -59,7 +59,7 @@ public void initializeSchema() throws SQLException { */ private void createGlobalStatsTable() { dslContext.createTableIfNotExists(GLOBAL_STATS_TABLE_NAME) - .column("key", SQLDataType.VARCHAR(255)) + .column("key", SQLDataType.VARCHAR(255).nullable(false)) .column("value", SQLDataType.BIGINT) .column("last_updated_timestamp", SQLDataType.TIMESTAMP) .constraint(DSL.constraint("pk_key") diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java index b2b2881b6c7a..193ee758fecc 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java @@ -72,8 +72,8 @@ public void initializeSchema() throws SQLException { private void createClusterGrowthTable() { dslContext.createTableIfNotExists(CLUSTER_GROWTH_DAILY_TABLE_NAME) - .column("timestamp", SQLDataType.TIMESTAMP) - .column("datanode_id", SQLDataType.INTEGER) + .column("timestamp", SQLDataType.TIMESTAMP.nullable(false)) + .column("datanode_id", SQLDataType.INTEGER.nullable(false)) .column("datanode_host", SQLDataType.VARCHAR(1024)) .column("rack_id", SQLDataType.VARCHAR(1024)) .column("available_size", SQLDataType.BIGINT) @@ -87,9 +87,9 @@ private void createClusterGrowthTable() { private void createFileSizeCountTable() { dslContext.createTableIfNotExists(FILE_COUNT_BY_SIZE_TABLE_NAME) - .column("volume", SQLDataType.VARCHAR(64)) - .column("bucket", SQLDataType.VARCHAR(64)) - .column("file_size", SQLDataType.BIGINT) + .column("volume", SQLDataType.VARCHAR(64).nullable(false)) + .column("bucket", SQLDataType.VARCHAR(64).nullable(false)) + .column("file_size", SQLDataType.BIGINT.nullable(false)) .column("count", SQLDataType.BIGINT) .constraint(DSL.constraint("pk_volume_bucket_file_size") .primaryKey("volume", "bucket", "file_size")) From eb2a78b4613132f00ab5f2f733a694fcf6c09ca7 Mon Sep 17 00:00:00 2001 From: Neo Yang Date: Thu, 3 Sep 2020 11:30:17 +0800 Subject: [PATCH 161/165] HDDS-4169. Fix some minor errors in StorageContainerManager.md (#1367) --- hadoop-hdds/docs/content/concept/StorageContainerManager.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/hadoop-hdds/docs/content/concept/StorageContainerManager.md b/hadoop-hdds/docs/content/concept/StorageContainerManager.md index 68953ced24d3..c0a7c0bd9ee8 100644 --- a/hadoop-hdds/docs/content/concept/StorageContainerManager.md +++ b/hadoop-hdds/docs/content/concept/StorageContainerManager.md @@ -46,7 +46,7 @@ replicas. 
If there is a loss of data node or a disk, SCM detects it and instructs data nodes make copies of the missing blocks to ensure high availability. - 3. **SCM's Ceritificate authority** is in + 3. **SCM's Certificate Authority** is in charge of issuing identity certificates for each and every service in the cluster. This certificate infrastructure makes it easy to enable mTLS at network layer and the block @@ -90,9 +90,6 @@ The following data is persisted in Storage Container Manager side in a specific ## Notable configuration - -## Notable configuration - key | default | description | ----|-------------|-------- ozone.scm.container.size | 5GB | Default container size used by Ozone From 5ebb9a34c97c2d301db496cf4a1da1720ddb35b4 Mon Sep 17 00:00:00 2001 From: lamber-ken <2217232293@qq.com> Date: Thu, 3 Sep 2020 11:32:17 +0800 Subject: [PATCH 162/165] HDDS-4200. Fix missing right bracket in HA doc (#1380) Co-authored-by: lamberken --- hadoop-hdds/docs/content/feature/HA.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/docs/content/feature/HA.md b/hadoop-hdds/docs/content/feature/HA.md index 116cbb72be4b..3e683f94f378 100644 --- a/hadoop-hdds/docs/content/feature/HA.md +++ b/hadoop-hdds/docs/content/feature/HA.md @@ -105,7 +105,7 @@ Raft can guarantee the replication of any request if the request is persisted to RocksDB instaces are updated by a background thread with batching transactions (so called "double buffer" as when one of the buffers is used to commit the data the other one collects all the new requests for the next commit.) To make all data available for the next request even if the background process is not yet wrote them the key data is cached in the memory. -![Double buffer](HA-OM-doublebuffer.png +![Double buffer](HA-OM-doublebuffer.png) The details of this approach discussed in a separated [design doc]({{< ref "design/omha.md" >}}) but it's integral part of the OM HA design. From e582d8afcf4d949f0cb79d7157b1113e83079475 Mon Sep 17 00:00:00 2001 From: llemec Date: Thu, 3 Sep 2020 05:42:51 +0200 Subject: [PATCH 163/165] HDDS-1889. 
Add support for verifying multiline log entry (#1308) --- hadoop-hdds/common/pom.xml | 5 ++ .../ozone/audit/TestOzoneAuditLogger.java | 62 +++++++++++++++++-- 2 files changed, 61 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 5784c68ec74e..cc171f163776 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -183,6 +183,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-interface-client + + org.hamcrest + hamcrest-all + test + diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java index 55ba6ab72fa7..30cdc62c5dc3 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java @@ -25,13 +25,20 @@ import java.io.File; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.audit.AuditEventStatus.FAILURE; import static org.apache.hadoop.ozone.audit.AuditEventStatus.SUCCESS; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.StringContains.containsString; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import org.hamcrest.Matcher; +import org.hamcrest.collection.IsIterableContainingInOrder; + /** * Test Ozone Audit Logger. @@ -143,7 +150,35 @@ public void notLogReadEvents() throws IOException { verifyNoLog(); } - private void verifyLog(String expected) throws IOException { + /** + * Test to verify if multiline entries can be checked. + */ + + @Test + public void messageIncludesMultilineException() throws IOException { + String exceptionMessage = "Dummy exception message"; + TestException testException = new TestException(exceptionMessage); + AuditMessage exceptionAuditMessage = + new AuditMessage.Builder() + .setUser(USER) + .atIp(IP_ADDRESS) + .forOperation(DummyAction.CREATE_VOLUME) + .withParams(PARAMS) + .withResult(FAILURE) + .withException(testException).build(); + AUDIT.logWriteFailure(exceptionAuditMessage); + verifyLog( + "ERROR | OMAudit | user=john | " + + "ip=192.168.0.1 | op=CREATE_VOLUME " + + "{key1=value1, key2=value2} | ret=FAILURE", + "org.apache.hadoop.ozone.audit." + + "TestOzoneAuditLogger$TestException: Dummy exception message", + "at org.apache.hadoop.ozone.audit.TestOzoneAuditLogger" + + ".messageIncludesMultilineException" + + "(TestOzoneAuditLogger.java"); + } + + private void verifyLog(String... 
expectedStrings) throws IOException { File file = new File("audit.log"); List lines = FileUtils.readLines(file, (String)null); final int retry = 5; @@ -158,11 +193,11 @@ private void verifyLog(String expected) throws IOException { } i++; } - - // When log entry is expected, the log file will contain one line and - // that must be equal to the expected string - assertTrue(lines.size() != 0); - assertTrue(expected.equalsIgnoreCase(lines.get(0))); + //check if every expected string can be found in the log entry + assertThat( + lines.subList(0, expectedStrings.length), + containsInOrder(expectedStrings) + ); //empty the file lines.clear(); FileUtils.writeLines(file, lines, false); @@ -174,4 +209,19 @@ private void verifyNoLog() throws IOException { // When no log entry is expected, the log file must be empty assertEquals(0, lines.size()); } + + private class TestException extends Exception{ + TestException(String message) { + super(message); + } + } + + private Matcher> containsInOrder( + String[] expectedStrings) { + return IsIterableContainingInOrder.contains( + Arrays.stream(expectedStrings) + .map(str -> containsString(str)) + .collect(Collectors.toList()) + ); + } } From c15866fafc87dd560de7b2e1c266d4250ec51955 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 3 Sep 2020 06:50:00 +0200 Subject: [PATCH 164/165] HDDS-3762. Intermittent failure in TestDeleteWithSlowFollower (#1376) --- .../statemachine/commandhandler/DeleteBlocksCommandHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index f222daea4408..bd5e7b7c467c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -95,7 +95,6 @@ public void handle(SCMCommand command, OzoneContainer container, return; } LOG.debug("Processing block deletion command."); - invocationCount++; // move blocks to deleting state. 
// this is a metadata update, the actual deletion happens in another @@ -177,6 +176,7 @@ public void handle(SCMCommand command, OzoneContainer container, updateCommandStatus(context, command, statusUpdater, LOG); long endTime = Time.monotonicNow(); totalTime += endTime - startTime; + invocationCount++; } } From fa6a55d54e2cc84163024d3a13663573c2876a14 Mon Sep 17 00:00:00 2001 From: Rakesh Radhakrishnan Date: Fri, 10 Jul 2020 15:53:54 +0530 Subject: [PATCH 165/165] HDDS-3947: Sort DNs for client when the key is a file for #getFileStatus #listStatus APIs --- .../main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index c32e80724d9f..de395b43c5ba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -2131,9 +2131,6 @@ public List listStatus(OmKeyArgs args, boolean recursive, // No need to check if a key is deleted or not here, this is handled // when adding entries to cacheKeyMap from DB. OzoneFileStatus fileStatus = entry.getValue(); - if (fileStatus.isFile()) { - refreshPipeline(fileStatus.getKeyInfo()); - } fileStatusList.add(fileStatus); countEntries++; if (countEntries >= numEntries) {