Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.metrics.Metrics;
Expand Down Expand Up @@ -55,6 +56,7 @@ public class BufferPool {
private final Metrics metrics;
private final Time time;
private final Sensor waitTime;
private boolean closed;

/**
* Create a new buffer pool
Expand Down Expand Up @@ -82,6 +84,7 @@ public BufferPool(long memory, int poolableSize, Metrics metrics, Time time, Str
metricGrpName,
"The total time an appender waits for space allocation.");
this.waitTime.add(new Meter(TimeUnit.NANOSECONDS, rateMetricName, totalMetricName));
this.closed = false;
}

/**
Expand All @@ -104,6 +107,12 @@ public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedEx

ByteBuffer buffer = null;
this.lock.lock();

if (this.closed) {
this.lock.unlock();
throw new KafkaException("Producer closed while allocating memory");
}

try {
// check if we have a free buffer of the right size pooled
if (size == poolableSize && !this.free.isEmpty())
Expand Down Expand Up @@ -138,6 +147,9 @@ public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedEx
recordWaitTime(timeNs);
}

if (this.closed)
throw new KafkaException("Producer closed while allocating memory");
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@bdbyrne @hachikuji Currently on Producer.send our javadoc mentions "If a Kafka related error occurs that does not belong to the public API exceptions." for KafkaException, and most callers treat it as fatal by default. However, if we consider the pattern where thread A is blocked on send#bufferPool and thread B calls producer.close — which unblocks thread A by throwing a KafkaException — to be a recommended pattern, should we use a different exception than KafkaException to differentiate it from other fatal exceptions?

I'm thinking for Streams if we eventually want to move to this pattern, i.e. the stream thread blocked on producer.send while the closing thread calls producer.close then stream thread would throw KafkaException that in turn would be interpreted as fatal and then the stream thread tries to shutdown itself as "shutdown unclean" whereas here since we are indeed closing we should just proceed with "shutdown clean" --- of course this is still doable with some extra check but I'm wondering if such complexity would be universal for any callers like Streams.


if (waitingTimeElapsed) {
throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms.");
}
Expand Down Expand Up @@ -316,4 +328,19 @@ public long totalMemory() {
Deque<Condition> waiters() {
return this.waiters;
}

/**
 * Closes the buffer pool. Memory will be prevented from being allocated, but may be deallocated. All allocations
 * awaiting available memory will be notified to abort.
 */
public void close() {
    this.lock.lock();
    try {
        // Set the flag inside the try so the lock is guaranteed to be released even if
        // signalling a waiter throws. Setting it under the lock ensures blocked allocate()
        // calls observe it either before parking or immediately after being signalled.
        this.closed = true;
        // Each waiter parks on its own Condition, so wake every one individually; woken
        // threads re-check the closed flag and abort their allocation with a KafkaException.
        for (Condition waiter : this.waiters)
            waiter.signal();
    } finally {
        this.lock.unlock();
    }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -802,6 +802,7 @@ public void unmutePartition(TopicPartition tp, long throttleUntilTimeMs) {
*/
public void close() {
    // Mark the accumulator closed first, then close the buffer pool to abort any threads
    // blocked waiting for memory in allocate().
    // NOTE(review): assumes `closed` is published safely (volatile or read under a lock)
    // to the append paths that check it — confirm at the field declaration.
    this.closed = true;
    this.free.close();
}

/*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
*/
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.utils.MockTime;
Expand All @@ -28,14 +29,18 @@
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Condition;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.anyLong;
Expand Down Expand Up @@ -232,7 +237,7 @@ public void testCleanupMemoryAvailabilityWaiterOnInterruption() throws Exception
// both the allocate() called by threads t1 and t2 should have been interrupted and the waiters queue should be empty
assertEquals(pool.queued(), 0);
}

@Test
public void testCleanupMemoryAvailabilityOnMetricsException() throws Exception {
BufferPool bufferPool = spy(new BufferPool(2, 1, new Metrics(), time, metricGroup));
Expand Down Expand Up @@ -377,4 +382,48 @@ public void run() {
}
}

@Test
public void testCloseAllocations() throws Exception {
    // Grab one buffer before closing so we can verify deallocation still works afterwards.
    BufferPool pool = new BufferPool(10, 1, metrics, Time.SYSTEM, metricGroup);
    ByteBuffer allocated = pool.allocate(1, maxBlockTimeMs);

    pool.close();

    // Once closed, any further allocation attempt must be rejected...
    assertThrows(KafkaException.class, () -> pool.allocate(1, maxBlockTimeMs));

    // ...but returning previously allocated memory to the pool remains legal.
    pool.deallocate(allocated);
}

@Test
public void testCloseNotifyWaiters() throws Exception {
final int numWorkers = 2;

BufferPool pool = new BufferPool(1, 1, metrics, Time.SYSTEM, metricGroup);
ByteBuffer buffer = pool.allocate(1, Long.MAX_VALUE);

CountDownLatch completed = new CountDownLatch(numWorkers);
ExecutorService executor = Executors.newFixedThreadPool(numWorkers);
Callable<Void> work = new Callable<Void>() {
public Void call() throws Exception {
assertThrows(KafkaException.class, () -> pool.allocate(1, Long.MAX_VALUE));
completed.countDown();
return null;
}
};
for (int i = 0; i < numWorkers; ++i) {
executor.submit(work);
}

assertEquals("Allocation shouldn't have happened yet, waiting on memory", numWorkers, completed.getCount());

// Close the buffer pool. This should notify all waiters.
Comment thread
bdbyrne marked this conversation as resolved.
pool.close();

completed.await(15, TimeUnit.SECONDS);

pool.deallocate(buffer);
}

}