From e05b2ce22f38f0cf35e90e88279782c6fed98efd Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Fri, 15 Nov 2019 15:07:10 -0800 Subject: [PATCH 1/2] HDDS-2501. Sonar: Fix issues found in the ObjectEndpoint class. --- .../ozone/s3/endpoint/ObjectEndpoint.java | 110 +++++++++--------- 1 file changed, 52 insertions(+), 58 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 6769c526e338..fbbf4e317002 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -233,16 +233,19 @@ public Response get( String rangeHeaderVal = headers.getHeaderString(RANGE_HEADER); RangeHeader rangeHeader = null; - LOG.debug("range Header provided value is {}", rangeHeaderVal); + if (LOG.isDebugEnabled()) { + LOG.debug("range Header provided value is {}", rangeHeaderVal); + } if (rangeHeaderVal != null) { rangeHeader = RangeHeaderParserUtil.parseRangeHeader(rangeHeaderVal, length); - LOG.debug("range Header provided value is {}", rangeHeader); + if (LOG.isDebugEnabled()) { + LOG.debug("range Header provided value is {}", rangeHeader); + } if (rangeHeader.isInValidRange()) { - OS3Exception exception = S3ErrorTable.newError(S3ErrorTable - .INVALID_RANGE, rangeHeaderVal); - throw exception; + throw S3ErrorTable.newError( + S3ErrorTable.INVALID_RANGE, rangeHeaderVal); } } ResponseBuilder responseBuilder; @@ -351,8 +354,7 @@ public Response head( .header("Content-Length", key.getDataSize()) .header("Content-Type", "binary/octet-stream"); addLastModifiedDate(response, key); - return response - .build(); + return response.build(); } /** @@ -467,8 +469,8 @@ public Response initializeMultipartUpload( return Response.status(Status.OK).entity( multipartUploadInitiateResponse).build(); } 
catch (IOException ex) { - LOG.error("Error in Initiate Multipart Upload Request for bucket: " + - bucket + ", key: " + key, ex); + LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + + "key: ", bucket, key, ex); throw ex; } } @@ -494,8 +496,9 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, for (CompleteMultipartUploadRequest.Part part : partList) { partsMap.put(part.getPartNumber(), part.geteTag()); } - - LOG.debug("Parts map {}", partsMap.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("Parts map {}", partsMap); + } omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload( key, uploadID, partsMap); @@ -510,27 +513,18 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, return Response.status(Status.OK).entity(completeMultipartUploadResponse) .build(); } catch (OMException ex) { - LOG.error("Error in Complete Multipart Upload Request for bucket: " + - bucket + ", key: " + key, ex); + LOG.error("Error in Complete Multipart Upload Request for bucket: {}, " + + "key: {}", bucket, key, ex); if (ex.getResult() == ResultCodes.INVALID_PART) { - OS3Exception oex = - S3ErrorTable.newError(S3ErrorTable.INVALID_PART, key); - throw oex; + throw S3ErrorTable.newError(S3ErrorTable.INVALID_PART, key); } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) { - OS3Exception oex = - S3ErrorTable.newError(S3ErrorTable.INVALID_PART_ORDER, key); - throw oex; + throw S3ErrorTable.newError(S3ErrorTable.INVALID_PART_ORDER, key); } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { - OS3Exception os3Exception = S3ErrorTable.newError(NO_SUCH_UPLOAD, - uploadID); - throw os3Exception; + throw S3ErrorTable.newError(NO_SUCH_UPLOAD, uploadID); } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) { - OS3Exception os3Exception = S3ErrorTable.newError(ENTITY_TOO_SMALL, - key); - throw os3Exception; + throw S3ErrorTable.newError(ENTITY_TOO_SMALL, key); } else 
if(ex.getResult() == ResultCodes.INVALID_REQUEST) { - OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, - key); + OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key); os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: You must " + "specify at least one part"); @@ -546,39 +540,39 @@ private Response createMultipartKey(String bucket, String key, long length, throws IOException, OS3Exception { try { OzoneBucket ozoneBucket = getBucket(bucket); - OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( - key, length, partNumber, uploadID); - - String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); - if (copyHeader != null) { - Pair result = parseSourceHeader(copyHeader); - - String sourceBucket = result.getLeft(); - String sourceKey = result.getRight(); - - try (OzoneInputStream sourceObject = - getBucket(sourceBucket).readKey(sourceKey)) { - - String range = - headers.getHeaderString(COPY_SOURCE_HEADER_RANGE); - if (range != null) { - RangeHeader rangeHeader = - RangeHeaderParserUtil.parseRangeHeader(range, 0); - IOUtils.copyLarge(sourceObject, ozoneOutputStream, - rangeHeader.getStartOffset(), - rangeHeader.getEndOffset() - rangeHeader.getStartOffset()); - - } else { - IOUtils.copy(sourceObject, ozoneOutputStream); + String copyHeader; + OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo; + try (OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( + key, length, partNumber, uploadID)) { + copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); + if (copyHeader != null) { + Pair result = parseSourceHeader(copyHeader); + + String sourceBucket = result.getLeft(); + String sourceKey = result.getRight(); + + try (OzoneInputStream sourceObject = + getBucket(sourceBucket).readKey(sourceKey)) { + + String range = + headers.getHeaderString(COPY_SOURCE_HEADER_RANGE); + if (range != null) { + RangeHeader rangeHeader = 
+ RangeHeaderParserUtil.parseRangeHeader(range, 0); + IOUtils.copyLarge(sourceObject, ozoneOutputStream, + rangeHeader.getStartOffset(), + rangeHeader.getEndOffset() - rangeHeader.getStartOffset()); + + } else { + IOUtils.copy(sourceObject, ozoneOutputStream); + } } + } else { + IOUtils.copy(body, ozoneOutputStream); } - - } else { - IOUtils.copy(body, ozoneOutputStream); + omMultipartCommitUploadPartInfo = ozoneOutputStream + .getCommitUploadPartInfo(); } - ozoneOutputStream.close(); - OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = - ozoneOutputStream.getCommitUploadPartInfo(); String eTag = omMultipartCommitUploadPartInfo.getPartName(); if (copyHeader != null) { @@ -761,7 +755,7 @@ public static Pair parseSourceHeader(String copyHeader) if (header.startsWith("/")) { header = copyHeader.substring(1); } - int pos = header.indexOf("/"); + int pos = header.indexOf('/'); if (pos == -1) { OS3Exception ex = S3ErrorTable.newError(S3ErrorTable .INVALID_ARGUMENT, header); From b2227c89dfd5b32b9fef8ae98edf5e1e9aebd9ba Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Fri, 15 Nov 2019 15:50:27 -0800 Subject: [PATCH 2/2] HDDS-2501. Addressed review comment. 
--- .../org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index fbbf4e317002..2472fe13f29a 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -470,7 +470,7 @@ public Response initializeMultipartUpload( multipartUploadInitiateResponse).build(); } catch (IOException ex) { LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + - "key: ", bucket, key, ex); + "key: {}", bucket, key, ex); throw ex; } }