@@ -322,6 +322,7 @@ public static boolean isReadOnly(
case RecoverLease:
case SetTimes:
case AbortExpiredMultiPartUploads:
case SetSnapshotProperty:
case UnknownCommand:
return false;
case EchoRPC:
@@ -143,7 +143,7 @@ enum Type {
PrintCompactionLogDag = 125;
ListKeysLight = 126;
AbortExpiredMultiPartUploads = 127;

SetSnapshotProperty = 128;
}

enum SafeMode {
@@ -277,6 +277,7 @@ message OMRequest {
optional PrintCompactionLogDagRequest PrintCompactionLogDagRequest = 125;

optional MultipartUploadsExpiredAbortRequest multipartUploadsExpiredAbortRequest = 126;
optional SetSnapshotPropertyRequest SetSnapshotPropertyRequest = 127;
}

message OMResponse {
@@ -395,6 +396,7 @@ message OMResponse {
optional PrintCompactionLogDagResponse PrintCompactionLogDagResponse = 125;
optional ListKeysLightResponse listKeysLightResponse = 126;
optional MultipartUploadsExpiredAbortResponse multipartUploadsExpiredAbortResponse = 127;
optional SetSnapshotPropertyResponse SetSnapshotPropertyResponse = 128;
}

enum Status {
@@ -1860,6 +1862,16 @@ message SnapshotPurgeRequest {
repeated string updatedSnapshotDBKey = 2;
}

message SetSnapshotPropertyRequest {
optional SnapshotProperty snapshotProperty = 1;
}

message SnapshotProperty {
optional string snapshotKey = 1;
optional uint64 exclusiveSize = 2;
optional uint64 exclusiveReplicatedSize = 3;
}

message DeleteTenantRequest {
optional string tenantId = 1;
}
@@ -1945,6 +1957,10 @@ message SnapshotPurgeResponse {

}

message SetSnapshotPropertyResponse {

}

message SnapshotDiffReportProto {
optional string volumeName = 1;
optional string bucketName = 2;
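For context, the two new messages above are what a caller inside the OM wraps in an OMRequest to push recomputed exclusive-size figures for a snapshot. Below is a minimal sketch of building such a request with the standard protobuf-generated builders; the snapshot key and size values are made-up placeholders, not values from this PR.

import java.util.UUID;

import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotProperty;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

public final class SetSnapshotPropertySketch {

  private SetSnapshotPropertySketch() {
  }

  public static OMRequest buildRequest() {
    // Payload: the snapshotInfoTable key plus the recomputed exclusive sizes.
    SnapshotProperty snapshotProperty = SnapshotProperty.newBuilder()
        .setSnapshotKey("/vol/bucket/snap1")   // hypothetical table key
        .setExclusiveSize(1000L)               // bytes exclusive to the snapshot
        .setExclusiveReplicatedSize(3000L)     // exclusive size x replication
        .build();

    // Wrap it in an OMRequest with the new SetSnapshotProperty command type.
    return OMRequest.newBuilder()
        .setCmdType(Type.SetSnapshotProperty)
        .setClientId(UUID.randomUUID().toString())
        .setSetSnapshotPropertyRequest(SetSnapshotPropertyRequest.newBuilder()
            .setSnapshotProperty(snapshotProperty))
        .build();
  }
}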
@@ -78,6 +78,7 @@
import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotDeleteRequest;
import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveDeletedKeysRequest;
import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotPurgeRequest;
import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest;
import org.apache.hadoop.ozone.om.request.upgrade.OMCancelPrepareRequest;
import org.apache.hadoop.ozone.om.request.upgrade.OMFinalizeUpgradeRequest;
import org.apache.hadoop.ozone.om.request.upgrade.OMPrepareRequest;
@@ -226,6 +227,8 @@ public static OMClientRequest createClientRequest(OMRequest omRequest,
return new OMSnapshotMoveDeletedKeysRequest(omRequest);
case SnapshotPurge:
return new OMSnapshotPurgeRequest(omRequest);
case SetSnapshotProperty:
return new OMSnapshotSetPropertyRequest(omRequest);
case DeleteOpenKeys:
BucketLayout bktLayout = BucketLayout.DEFAULT;
if (omRequest.getDeleteOpenKeysRequest().hasBucketLayout()) {
@@ -0,0 +1,101 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.om.request.snapshot;

import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotSetPropertyResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;

import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR;

/**
* Handles SetSnapshotProperty requests: updates the exclusive size and
* exclusive replicated size of a snapshot in the snapshot info table.
*/
public class OMSnapshotSetPropertyRequest extends OMClientRequest {
private static final Logger LOG =
LoggerFactory.getLogger(OMSnapshotSetPropertyRequest.class);

public OMSnapshotSetPropertyRequest(OMRequest omRequest) {
super(omRequest);
}

@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {

OMClientResponse omClientResponse = null;
OMMetadataManager metadataManager = ozoneManager.getMetadataManager();

OzoneManagerProtocolProtos.OMResponse.Builder omResponse =
OmResponseUtil.getOMResponseBuilder(getOmRequest());
OzoneManagerProtocolProtos.SetSnapshotPropertyRequest
setSnapshotPropertyRequest = getOmRequest()
.getSetSnapshotPropertyRequest();

SnapshotProperty snapshotProperty = setSnapshotPropertyRequest
.getSnapshotProperty();
SnapshotInfo updatedSnapInfo = null;

try {
String snapshotKey = snapshotProperty.getSnapshotKey();
long exclusiveSize = snapshotProperty.getExclusiveSize();
long exclusiveReplicatedSize = snapshotProperty
.getExclusiveReplicatedSize();
updatedSnapInfo = metadataManager.getSnapshotInfoTable()
.get(snapshotKey);

if (updatedSnapInfo == null) {
LOG.error("SnapshotInfo for Snapshot: {} is not found", snapshotKey);
throw new OMException("SnapshotInfo for Snapshot: " + snapshotKey +
" is not found", INVALID_SNAPSHOT_ERROR);
}

// Set the exclusive size and exclusive replicated size.
updatedSnapInfo.setExclusiveSize(exclusiveSize);
updatedSnapInfo.setExclusiveReplicatedSize(exclusiveReplicatedSize);
// Update Table Cache
metadataManager.getSnapshotInfoTable().addCacheEntry(
new CacheKey<>(snapshotKey),
CacheValue.get(trxnLogIndex, updatedSnapInfo));
omClientResponse = new OMSnapshotSetPropertyResponse(
omResponse.build(), updatedSnapInfo);
} catch (IOException ex) {
omClientResponse = new OMSnapshotSetPropertyResponse(
createErrorOMResponse(omResponse, ex));
} finally {
addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
omDoubleBufferHelper);
}
return omClientResponse;
}
}
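To see the whole write path in one place, here is a hedged, Mockito-based sketch of driving validateAndUpdateCache: the OM, metadata manager, snapshotInfoTable and double-buffer helper are all mocked, and the snapshot key and sizes are placeholders. It illustrates the cache-update contract of the request above; it is not an existing test in this PR.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.util.UUID;

import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotProperty;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

public final class OMSnapshotSetPropertyRequestSketch {

  private OMSnapshotSetPropertyRequestSketch() {
  }

  @SuppressWarnings("unchecked")
  public static void exerciseRequest() throws Exception {
    String snapshotKey = "/vol/bucket/snap1";   // hypothetical table key

    // Mock the OM, its metadata manager, the snapshot table and its entry.
    OzoneManager ozoneManager = mock(OzoneManager.class);
    OMMetadataManager metadataManager = mock(OMMetadataManager.class);
    Table<String, SnapshotInfo> snapshotInfoTable = mock(Table.class);
    SnapshotInfo snapshotInfo = mock(SnapshotInfo.class);

    when(ozoneManager.getMetadataManager()).thenReturn(metadataManager);
    when(metadataManager.getSnapshotInfoTable()).thenReturn(snapshotInfoTable);
    when(snapshotInfoTable.get(snapshotKey)).thenReturn(snapshotInfo);

    // Build the OMRequest carrying the new exclusive-size values.
    OMRequest omRequest = OMRequest.newBuilder()
        .setCmdType(Type.SetSnapshotProperty)
        .setClientId(UUID.randomUUID().toString())
        .setSetSnapshotPropertyRequest(SetSnapshotPropertyRequest.newBuilder()
            .setSnapshotProperty(SnapshotProperty.newBuilder()
                .setSnapshotKey(snapshotKey)
                .setExclusiveSize(1000L)
                .setExclusiveReplicatedSize(3000L)))
        .build();

    // Run the write path; the double-buffer helper is simply mocked away.
    OMClientResponse response = new OMSnapshotSetPropertyRequest(omRequest)
        .validateAndUpdateCache(ozoneManager, 1L,
            mock(OzoneManagerDoubleBufferHelper.class));

    // The request should update the SnapshotInfo and the table cache.
    verify(snapshotInfo).setExclusiveSize(1000L);
    verify(snapshotInfo).setExclusiveReplicatedSize(3000L);
    verify(snapshotInfoTable)
        .addCacheEntry(any(CacheKey.class), any(CacheValue.class));
  }
}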
@@ -0,0 +1,60 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.om.response.snapshot;

import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;

import javax.annotation.Nonnull;
import java.io.IOException;

import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE;

/**
* Response for OMSnapshotSetPropertyRequest.
*/
@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE})
public class OMSnapshotSetPropertyResponse extends OMClientResponse {
private final SnapshotInfo updatedSnapInfo;

public OMSnapshotSetPropertyResponse(
@Nonnull OMResponse omResponse,
@Nonnull SnapshotInfo updatedSnapInfo) {
super(omResponse);
this.updatedSnapInfo = updatedSnapInfo;
}

public OMSnapshotSetPropertyResponse(@Nonnull OMResponse omResponse) {
super(omResponse);
checkStatusNotOK();
this.updatedSnapInfo = null;
}

@Override
protected void addToDBBatch(OMMetadataManager omMetadataManager,
BatchOperation batchOperation)
throws IOException {
omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation,
updatedSnapInfo.getTableKey(), updatedSnapInfo);
}
}
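The response itself only carries the updated SnapshotInfo; the actual RocksDB write happens when the double buffer flushes it. A rough sketch of that flush follows, assuming the usual checkAndUpdateDB entry point on OMClientResponse and a metadata manager backed by a real DBStore; the helper name and parameters are illustrative only.

import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotSetPropertyResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;

public final class SnapshotPropertyFlushSketch {

  private SnapshotPropertyFlushSketch() {
  }

  /**
   * Writes the updated SnapshotInfo to the snapshotInfoTable through a batch,
   * mirroring what the double buffer does when it flushes this response.
   */
  public static void flush(OMMetadataManager omMetadataManager,
      OMResponse omResponse, SnapshotInfo updatedSnapInfo) throws IOException {
    OMSnapshotSetPropertyResponse response =
        new OMSnapshotSetPropertyResponse(omResponse, updatedSnapInfo);

    // checkAndUpdateDB delegates to addToDBBatch only when the response
    // status is OK; the batch is committed through the underlying DBStore.
    try (BatchOperation batchOperation =
             omMetadataManager.getStore().initBatchOperation()) {
      response.checkAndUpdateDB(omMetadataManager, batchOperation);
      omMetadataManager.getStore().commitBatchOperation(batchOperation);
    }
  }
}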