From 4ab2b9afe2f6a3a1c4782411ee3d6b0760ef083a Mon Sep 17 00:00:00 2001
From: Bryan Beaudreault
Date: Mon, 26 Jul 2021 15:21:24 -0400
Subject: [PATCH 1/3] HBASE-26122: Implement an optional maximum size for Gets, after which a partial result is returned

---
 .../org/apache/hadoop/hbase/client/Get.java   | 23 +++++
 .../hadoop/hbase/protobuf/ProtobufUtil.java   |  9 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java   |  9 +-
 .../src/main/protobuf/Client.proto            |  2 +
 hbase-protocol/src/main/protobuf/Client.proto |  2 +
 .../hadoop/hbase/regionserver/HRegion.java    | 40 +++++++--
 .../hbase/regionserver/RSRpcServices.java     | 10 ++-
 .../TestPartialResultsFromClientSide.java     | 44 ++++++++++
 .../hbase/regionserver/TestHRegion.java       | 86 +++++++++++++++++++
 9 files changed, 212 insertions(+), 13 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index a671b9f6b269..53b71548eaa6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -76,6 +76,7 @@ public class Get extends Query implements Row {
   private boolean checkExistenceOnly = false;
   private boolean closestRowBefore = false;
   private Map<byte[], NavigableSet<byte[]>> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+  private long maxResultSize = -1;
 
   /**
    * Create a Get operation for the specified row.
@@ -339,6 +340,21 @@ public Get setFilter(Filter filter) {
     return this;
   }
 
+  /**
+   * Set the maximum result size. The default is -1; this means that no specific
+   * maximum result size will be set for this Get.
+   *
+   * If set to a value greater than zero, the server may respond with a Result where
+   * {@link Result#mayHaveMoreCellsInRow()} is true. The user is required to handle
+   * this case.
+   *
+   * @param maxResultSize The maximum result size in bytes
+   */
+  public Get setMaxResultSize(long maxResultSize) {
+    this.maxResultSize = maxResultSize;
+    return this;
+  }
+
   /* Accessors */
 
   /**
@@ -458,6 +474,13 @@ public Map<String, Object> getFingerprint() {
     return map;
   }
 
+  /**
+   * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
+   */
+  public long getMaxResultSize() {
+    return maxResultSize;
+  }
+
   /**
    * Compile the details beyond the scope of getFingerprint (row, columns,
    * timestamps, etc.) into a Map along with the fingerprinted information.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 5a01af9fd2a5..07e2eeaf22ee 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -434,6 +434,9 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException {
     if (proto.hasLoadColumnFamiliesOnDemand()) {
       get.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
     }
+    if (proto.hasMaxResultSize()) {
+      get.setMaxResultSize(proto.getMaxResultSize());
+    }
     return get;
   }
 
@@ -1122,6 +1125,10 @@ public static ClientProtos.Get toGet(
       builder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand);
     }
 
+    if (get.getMaxResultSize() > 0) {
+      builder.setMaxResultSize(get.getMaxResultSize());
+    }
+
     return builder.build();
   }
 
@@ -1382,7 +1389,7 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner
     return (cells == null || cells.isEmpty()) ? (proto.getStale() ?
         EMPTY_RESULT_STALE : EMPTY_RESULT)
-        : Result.create(cells, null, proto.getStale());
+        : Result.create(cells, null, proto.getStale(), proto.getPartial());
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index c2544f6d008a..d6c7811b9dc7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -592,6 +592,9 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException {
     if (proto.hasLoadColumnFamiliesOnDemand()) {
       get.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
     }
+    if (proto.hasMaxResultSize()) {
+      get.setMaxResultSize(proto.getMaxResultSize());
+    }
     return get;
   }
 
@@ -1256,6 +1259,9 @@ public static ClientProtos.Get toGet(
     if (loadColumnFamiliesOnDemand != null) {
       builder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand);
     }
+    if (get.getMaxResultSize() > 0) {
+      builder.setMaxResultSize(get.getMaxResultSize());
+    }
     return builder.build();
   }
 
@@ -1457,6 +1463,7 @@ public static ClientProtos.Result toResultNoData(final Result result) {
     ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder();
     builder.setAssociatedCellCount(size);
     builder.setStale(result.isStale());
+    builder.setPartial(result.mayHaveMoreCellsInRow());
     return builder.build();
   }
 
@@ -1547,7 +1554,7 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner
     return (cells == null || cells.isEmpty()) ? (proto.getStale() ?
         EMPTY_RESULT_STALE : EMPTY_RESULT)
-        : Result.create(cells, null, proto.getStale());
+        : Result.create(cells, null, proto.getStale(), proto.getPartial());
   }
diff --git a/hbase-protocol-shaded/src/main/protobuf/Client.proto b/hbase-protocol-shaded/src/main/protobuf/Client.proto
index 13917b6d66cb..7081d50d4dfc 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Client.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Client.proto
@@ -90,6 +90,8 @@ message Get {
   optional Consistency consistency = 12 [default = STRONG];
   repeated ColumnFamilyTimeRange cf_time_range = 13;
   optional bool load_column_families_on_demand = 14; /* DO NOT add defaults to load_column_families_on_demand. */
+
+  optional uint64 max_result_size = 15;
 }
 
 message Result {
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index b2a6ec39fe8a..fd1aab1723e6 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -91,6 +91,8 @@ message Get {
   optional Consistency consistency = 12 [default = STRONG];
   repeated ColumnFamilyTimeRange cf_time_range = 13;
   optional bool load_column_families_on_demand = 14; /* DO NOT add defaults to load_column_families_on_demand. */
+
+  optional uint64 max_result_size = 15;
 }
 
 message Result {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6628328a8839..38415ef10e2c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -146,6 +146,7 @@
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
@@ -3864,8 +3865,7 @@ public void prepareMiniBatchOperations(MiniBatchOperationInProgress<Mutation> mi
         Result result;
         if (returnResults) {
           // convert duplicate increment/append to get
-          List<Cell> results = region.get(toGet(mutation), false, nonceGroup, nonce);
-          result = Result.create(results);
+          result = region.get(toGet(mutation), false, nonceGroup, nonce);
         } else {
           result = Result.EMPTY_RESULT;
         }
@@ -7497,9 +7497,7 @@ public static boolean rowIsInRange(RegionInfo info, final byte [] row, final int
   @Override
   public Result get(final Get get) throws IOException {
     prepareGet(get);
-    List<Cell> results = get(get, true);
-    boolean stale = this.getRegionInfo().getReplicaId() != 0;
-    return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale);
+    return get(get, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
   }
 
   void prepareGet(final Get get) throws IOException {
@@ -7518,11 +7516,35 @@ void prepareGet(final Get get) throws IOException {
   }
 
   @Override
   public List<Cell> get(Get get, boolean withCoprocessor) throws IOException {
-    return get(get, withCoprocessor, HConstants.NO_NONCE, HConstants.NO_NONCE);
+    return getInternal(get, withCoprocessor, HConstants.NO_NONCE, HConstants.NO_NONCE).getFirst();
   }
 
-  private List<Cell> get(Get get, boolean withCoprocessor, long nonceGroup, long nonce)
-    throws IOException {
+  private Result get(Get get, boolean withCoprocessor, long nonceGroup, long nonce)
+    throws IOException {
+    Pair<List<Cell>, ScannerContext> result = getInternal(get, withCoprocessor, nonceGroup, nonce);
+    boolean stale = this.getRegionInfo().getReplicaId() != 0;
+
+    return Result.create(
+      result.getFirst(),
+      get.isCheckExistenceOnly() ? !result.getFirst().isEmpty() : null,
+      stale,
+      result.getSecond().mayHaveMoreCellsInRow());
+  }
+
+  private Pair<List<Cell>, ScannerContext> getInternal(Get get, boolean withCoprocessor, long nonceGroup, long nonce)
+    throws IOException {
+    ScannerContext scannerContext = ScannerContext.newBuilder()
+      .setSizeLimit(LimitScope.BETWEEN_CELLS, get.getMaxResultSize(), get.getMaxResultSize())
+      .build();
+
+    return Pair.newPair(
+      getInternal(get, scannerContext, withCoprocessor, nonceGroup, nonce),
+      scannerContext
+    );
+  }
+
+  private List<Cell> getInternal(Get get, ScannerContext scannerContext, boolean withCoprocessor,
+    long nonceGroup, long nonce) throws IOException {
     List<Cell> results = new ArrayList<>();
     long before = EnvironmentEdgeManager.currentTime();
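For context, the region-level plumbing above reuses the same ScannerContext machinery that size-limited Scans already rely on. Before this patch, the only way to get equivalent behavior was to convert the Get into a single-row Scan. A minimal client-side sketch of that pre-existing workaround (the method name and the table/row/maxBytes parameters are placeholders, not part of the patch):

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;

  // Read one row in size-limited chunks using the long-standing Scan APIs.
  static void readRowInChunks(Table table, byte[] row, long maxBytes) throws IOException {
    Scan scan = new Scan()
      .withStartRow(row)
      .withStopRow(row, true)          // inclusive stop row: scan exactly this one row
      .setMaxResultSize(maxBytes)      // cap the bytes returned per RPC
      .setAllowPartialResults(true);   // opt in to Results that hold part of a row
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result partial : scanner) {
        // each Result may hold only a slice of the row;
        // partial.mayHaveMoreCellsInRow() tells whether the row continues
      }
    }
  }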
@@ -7539,7 +7561,7 @@ private List<Cell> get(Get get, boolean withCoprocessor, long nonceGroup, long n
     }
     try (RegionScanner scanner = getScanner(scan, null, nonceGroup, nonce)) {
       List<Cell> tmp = new ArrayList<>();
-      scanner.next(tmp);
+      scanner.next(tmp, scannerContext);
       // Copy EC to heap, then close the scanner.
       // This can be an EXPENSIVE call. It may make an extra copy from offheap to onheap buffers.
       // See more details in HBASE-26036.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 92a10c8ec991..4717f7513cb5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2651,10 +2651,15 @@ private Result get(Get get, HRegion region, RegionScannersCloseCallBack closeCal
     if (scan.getLoadColumnFamiliesOnDemandValue() == null) {
       scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
     }
+
+    ScannerContext scannerContext = ScannerContext.newBuilder()
+      .setSizeLimit(LimitScope.BETWEEN_CELLS, get.getMaxResultSize(), get.getMaxResultSize())
+      .build();
+
     RegionScannerImpl scanner = null;
     try {
       scanner = region.getScanner(scan);
-      scanner.next(results);
+      scanner.next(results, scannerContext);
     } finally {
       if (scanner != null) {
         if (closeCallBack == null) {
@@ -2679,7 +2684,8 @@ private Result get(Get get, HRegion region, RegionScannersCloseCallBack closeCal
     }
 
     region.metricsUpdateForGet(results, before);
-    return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale);
+    return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale,
+      scannerContext.mayHaveMoreCellsInRow());
   }
 
   private void checkBatchSizeAndLogLargeSize(MultiRequest request) throws ServiceException {
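With the RPC layer wired up, the client-visible contract is small: set a limit on the Get and check the returned Result for partiality. A hedged sketch of caller-side handling (the table/row variables and the 1 MB figure are illustrative, not part of the patch):

  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.Table;

  Get get = new Get(row).setMaxResultSize(1024 * 1024); // cap the response at ~1 MB
  Result result = table.get(get);
  if (result.mayHaveMoreCellsInRow()) {
    // Only part of the row came back. The caller must fetch the rest itself,
    // e.g. by issuing follow-up Gets filtered to the remaining columns.
  }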
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
index 4e2d13303923..19fb996d4250 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
@@ -31,18 +31,22 @@
 import java.util.Set;
 import org.apache.hadoop.hbase.client.ClientScanner;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
 import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
 import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter;
 import org.apache.hadoop.hbase.filter.RandomRowFilter;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -136,6 +140,46 @@ public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  @Test
+  public void testGetPartialResults() throws Exception {
+    byte[] row = ROWS[0];
+
+    Result result;
+    int cf = 0;
+    int qf = 0;
+    int total = 0;
+
+    do {
+      // this will ensure we always return only 1 result
+      Get get = new Get(row)
+        .setMaxResultSize(1);
+
+      // we want to page through the entire row; this ensures we always get the next cell
+      if (total > 0) {
+        get.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,
+          new ColumnRangeFilter(QUALIFIERS[qf], true, null, false),
+          new FamilyFilter(CompareOperator.GREATER_OR_EQUAL, new BinaryComparator(FAMILIES[cf]))));
+      }
+
+      // all values are the same, but there should be a value
+      result = TABLE.get(get);
+      assertTrue(String.format("Value for family %s (# %s) and qualifier %s (# %s)",
+        Bytes.toStringBinary(FAMILIES[cf]), cf, Bytes.toStringBinary(QUALIFIERS[qf]), qf),
+        Bytes.equals(VALUE, result.getValue(FAMILIES[cf], QUALIFIERS[qf])));
+
+      total++;
+      if (++qf >= NUM_QUALIFIERS) {
+        cf++;
+        qf = 0;
+      }
+    } while (result.mayHaveMoreCellsInRow());
+
+    // ensure we iterated all cells in row
+    assertEquals(NUM_COLS, total);
+    assertEquals(NUM_FAMILIES, cf);
+    assertEquals(0, qf);
+  }
+
   /**
    * Ensure that the expected key values appear in a result returned from a scanner that is
    * combining partial results into complete results
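The test above pages through the row by re-anchoring a filter at the last family/qualifier seen. When the partial Results for a row are instead collected in the order received, they can be stitched back together with an existing client API. A sketch assuming in-order partials for a single row:

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hbase.client.Result;

  List<Result> partials = new ArrayList<>();
  // ... add each size-limited Result for the same row, in the order received,
  // ending with one whose mayHaveMoreCellsInRow() is false ...
  Result whole = Result.createCompleteResult(partials); // throws IOException if pieces don't line up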
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 9763841ff2d7..3d00eb8539c1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -7861,4 +7861,90 @@ public void run() {
     assertFalse("Region lock holder should not have been interrupted", holderInterrupted.get());
   }
 
+  @Test
+  public void testOversizedGetsReturnPartialResult() throws IOException {
+    HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
+
+    Put p = new Put(row)
+      .addColumn(fam1, qual1, value1)
+      .addColumn(fam1, qual2, value2);
+
+    region.put(p);
+
+    Get get = new Get(row)
+      .addColumn(fam1, qual1)
+      .addColumn(fam1, qual2)
+      .setMaxResultSize(1); // 0 doesn't count as a limit, according to HBase
+
+    Result r = region.get(get);
+
+    assertTrue("Expected partial result, but result was not marked as partial",
+      r.mayHaveMoreCellsInRow());
+  }
+
+  @Test
+  public void testGetsWithoutResultSizeLimitAreNotPartial() throws IOException {
+    HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
+
+    Put p = new Put(row)
+      .addColumn(fam1, qual1, value1)
+      .addColumn(fam1, qual2, value2);
+
+    region.put(p);
+
+    Get get = new Get(row)
+      .addColumn(fam1, qual1)
+      .addColumn(fam1, qual2);
+
+    Result r = region.get(get);
+
+    assertFalse("Expected full result, but it was marked as partial", r.mayHaveMoreCellsInRow());
+    assertTrue(Bytes.equals(value1, r.getValue(fam1, qual1)));
+    assertTrue(Bytes.equals(value2, r.getValue(fam1, qual2)));
+  }
+
+  @Test
+  public void testGetsWithinResultSizeLimitAreNotPartial() throws IOException {
+    HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
+
+    Put p = new Put(row)
+      .addColumn(fam1, qual1, value1)
+      .addColumn(fam1, qual2, value2);
+
+    region.put(p);
+
+    Get get = new Get(row)
+      .addColumn(fam1, qual1)
+      .addColumn(fam1, qual2)
+      .setMaxResultSize(Long.MAX_VALUE);
+
+    Result r = region.get(get);
+
+    assertFalse("Expected full result, but it was marked as partial", r.mayHaveMoreCellsInRow());
+    assertTrue(Bytes.equals(value1, r.getValue(fam1, qual1)));
+    assertTrue(Bytes.equals(value2, r.getValue(fam1, qual2)));
+  }
+
+  @Test
+  public void testGetsWithResultSizeLimitReturnPartialResults() throws IOException {
+    HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
+
+    Put p = new Put(row)
+      .addColumn(fam1, qual1, value1)
+      .addColumn(fam1, qual2, value2);
+
+    region.put(p);
+
+    Get get = new Get(row)
+      .addColumn(fam1, qual1)
+      .addColumn(fam1, qual2)
+      .setMaxResultSize(10);
+
+    Result r = region.get(get);
+
+    assertTrue("Expected partial result, but it was marked as complete", r.mayHaveMoreCellsInRow());
+    assertTrue(Bytes.equals(value1, r.getValue(fam1, qual1)));
+    assertEquals("Got more results than expected", 1, r.size());
+  }
+
 }
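A note on sentinel values before the follow-up commit below: only values greater than zero act as a limit. The default of -1 and an explicit 0 both mean "no maximum", which is why the guards in ProtobufUtil (and, after the next commit, in HRegion) test for getMaxResultSize() > 0. A short illustration of the assumed semantics:

  Get unlimited = new Get(row);                         // default maxResultSize is -1: no limit
  Get alsoUnlimited = new Get(row).setMaxResultSize(0); // 0 is ignored, not "return nothing"
  Get limited = new Get(row).setMaxResultSize(4096);    // may produce a partial Result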
From 071aebd726fd55430b2dd656565bae972eef79a5 Mon Sep 17 00:00:00 2001
From: Bryan Beaudreault
Date: Tue, 10 Aug 2021 15:46:20 -0400
Subject: [PATCH 2/3] Only create a ScannerContext if getMaxResultSize is greater than 0

---
 .../hadoop/hbase/regionserver/HRegion.java | 36 ++++++++++----------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 38415ef10e2c..9751db8bc625 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7516,31 +7516,27 @@ void prepareGet(final Get get) throws IOException {
 
   @Override
   public List<Cell> get(Get get, boolean withCoprocessor) throws IOException {
-    return getInternal(get, withCoprocessor, HConstants.NO_NONCE, HConstants.NO_NONCE).getFirst();
+    return getInternal(get, null, withCoprocessor, HConstants.NO_NONCE, HConstants.NO_NONCE);
   }
 
   private Result get(Get get, boolean withCoprocessor, long nonceGroup, long nonce)
     throws IOException {
-    Pair<List<Cell>, ScannerContext> result = getInternal(get, withCoprocessor, nonceGroup, nonce);
+    ScannerContext scannerContext = get.getMaxResultSize() > 0
+      ? ScannerContext.newBuilder()
+        .setSizeLimit(LimitScope.BETWEEN_CELLS, get.getMaxResultSize(), get.getMaxResultSize())
+        .build()
+      : null;
+
+    List<Cell> result = getInternal(get, scannerContext, withCoprocessor, nonceGroup, nonce);
     boolean stale = this.getRegionInfo().getReplicaId() != 0;
+    boolean mayHaveMoreCellsInRow =
+      scannerContext != null && scannerContext.mayHaveMoreCellsInRow();
 
     return Result.create(
-      result.getFirst(),
-      get.isCheckExistenceOnly() ? !result.getFirst().isEmpty() : null,
+      result,
+      get.isCheckExistenceOnly() ? !result.isEmpty() : null,
       stale,
-      result.getSecond().mayHaveMoreCellsInRow());
-  }
-
-  private Pair<List<Cell>, ScannerContext> getInternal(Get get, boolean withCoprocessor, long nonceGroup, long nonce)
-    throws IOException {
-    ScannerContext scannerContext = ScannerContext.newBuilder()
-      .setSizeLimit(LimitScope.BETWEEN_CELLS, get.getMaxResultSize(), get.getMaxResultSize())
-      .build();
-
-    return Pair.newPair(
-      getInternal(get, scannerContext, withCoprocessor, nonceGroup, nonce),
-      scannerContext
-    );
+      mayHaveMoreCellsInRow);
   }
 
   private List<Cell> getInternal(Get get, ScannerContext scannerContext, boolean withCoprocessor,
@@ -7561,7 +7557,11 @@ private List<Cell> getInternal(Get get, ScannerContext scannerContext, boolean
     }
     try (RegionScanner scanner = getScanner(scan, null, nonceGroup, nonce)) {
       List<Cell> tmp = new ArrayList<>();
-      scanner.next(tmp, scannerContext);
+      if (scannerContext == null) {
+        scanner.next(tmp);
+      } else {
+        scanner.next(tmp, scannerContext);
+      }
       // Copy EC to heap, then close the scanner.
       // This can be an EXPENSIVE call. It may make an extra copy from offheap to onheap buffers.
       // See more details in HBASE-26036.

From a3473ede9d82e100f940a3e9d0fc73967cd0df27 Mon Sep 17 00:00:00 2001
From: Bryan Beaudreault
Date: Tue, 10 Aug 2021 15:56:30 -0400
Subject: [PATCH 3/3] Remove usage of non-shaded proto

---
 .../org/apache/hadoop/hbase/protobuf/ProtobufUtil.java | 7 -------
 hbase-protocol/src/main/protobuf/Client.proto          | 2 --
 2 files changed, 9 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 07e2eeaf22ee..1c17866ec258 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -434,9 +434,6 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException {
     if (proto.hasLoadColumnFamiliesOnDemand()) {
       get.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
     }
-    if (proto.hasMaxResultSize()) {
-      get.setMaxResultSize(proto.getMaxResultSize());
-    }
     return get;
   }
 
@@ -1125,10 +1122,6 @@ public static ClientProtos.Get toGet(
       builder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand);
     }
 
-    if (get.getMaxResultSize() > 0) {
-      builder.setMaxResultSize(get.getMaxResultSize());
-    }
-
     return builder.build();
   }
 
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index fd1aab1723e6..b2a6ec39fe8a 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -91,8 +91,6 @@ message Get {
   optional Consistency consistency = 12 [default = STRONG];
   repeated ColumnFamilyTimeRange cf_time_range = 13;
   optional bool load_column_families_on_demand = 14; /* DO NOT add defaults to load_column_families_on_demand. */
-
-  optional uint64 max_result_size = 15;
 }
 
 message Result {
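Because max_result_size is an optional protobuf field, the change is wire-compatible in both directions: an old client simply never sets it, and an old server ignores it and returns the complete row. Clients should therefore treat the limit as best-effort. A defensive sketch (uses only the APIs introduced in the patches above; table, row and maxBytes are placeholders):

  Result result = table.get(new Get(row).setMaxResultSize(maxBytes));
  if (result.mayHaveMoreCellsInRow()) {
    // a server with this patch honored the limit; page through the remainder
  } else {
    // either the whole row fit under the limit, or an older server returned everything
  }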