Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -127,4 +127,4 @@ public long getEntry() {
}
}

}
}
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ public interface Code {
int UnknownBookieIdException = -107;
int OperationRejectedException = -108;
int CookieExistsException = -109;
int EntryLogMetadataMapException = -110;
}

public int getCode() {
Expand Down Expand Up @@ -124,6 +125,9 @@ public String getMessage(int code) {
case Code.CookieExistsException:
err = "Cookie already exists";
break;
case Code.EntryLogMetadataMapException:
err = "Error in accessing Entry-log metadata map";
break;
case Code.MetadataStoreException:
err = "Error performing metadata operations";
break;
Expand Down Expand Up @@ -254,6 +258,15 @@ public CookieExistException(Throwable cause) {
}
}

/**
 * Signals an error while accessing the entry-log metadata map.
 *
 * <p>Always carries the underlying cause (e.g. a failure from the backing
 * map-store) and maps to {@code Code.EntryLogMetadataMapException} (-110).
 */
public static class EntryLogMetadataMapException extends BookieException {
public EntryLogMetadataMapException(Throwable cause) {
super(Code.EntryLogMetadataMapException, cause);
}
}

/**
* Signals that an exception occurs on upgrading a bookie.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1184,6 +1184,13 @@ public static boolean format(ServerConfiguration conf,
}
}

// Clean up metadata directories if they are separate from the
// ledger dirs
File metadataDir = new File(conf.getGcEntryLogMetadataCachePath());
if (!cleanDir(metadataDir)) {
LOG.error("Formatting ledger metadata directory {} failed", metadataDir);
return false;
}
LOG.info("Bookie format completed successfully");
return true;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,24 +21,37 @@

package org.apache.bookkeeper.bookie;

import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import java.util.function.LongPredicate;

import org.apache.bookkeeper.bookie.EntryLogMetadata.EntryLogMetadataRecyclable;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongHashMap;

/**
* Records the total size, remaining size and the set of ledgers that comprise a entry log.
 * Records the total size, remaining size and the set of ledgers that comprise an
 * entry log.
*/
public class EntryLogMetadata {
private final long entryLogId;
private long totalSize;
private long remainingSize;
private final ConcurrentLongLongHashMap ledgersMap;
protected long entryLogId;
protected long totalSize;
protected long remainingSize;
protected final ConcurrentLongLongHashMap ledgersMap;
private static final short DEFAULT_SERIALIZATION_VERSION = 0;

// No-arg constructor used by the recyclable subclass: only pre-sizes the
// ledgers map; entryLogId/totalSize/remainingSize keep their long defaults
// until populated (e.g. by deserialize()).
protected EntryLogMetadata() {
ledgersMap = new ConcurrentLongLongHashMap(256, 1);
}

/**
 * Creates metadata for the given entry log.
 *
 * <p>Delegates to the no-arg constructor, which initializes the ledgers map;
 * totalSize and remainingSize start at their default of 0. (The previous body
 * re-assigned ledgersMap after {@code this()} had already initialized it and
 * redundantly zeroed the size fields.)
 *
 * @param logId id of the entry log this metadata describes
 */
public EntryLogMetadata(long logId) {
    this();
    this.entryLogId = logId;
}

public void addLedgerSize(long ledgerId, long size) {
Expand Down Expand Up @@ -96,4 +109,111 @@ public String toString() {
return sb.toString();
}

/**
* Serializes {@link EntryLogMetadata} and writes to
* {@link DataOutputStream}.
* <pre>
* schema:
* 2-bytes: schema-version
* 8-bytes: entrylog-entryLogId
* 8-bytes: entrylog-totalSize
* 8-bytes: entrylog-remainingSize
* 8-bytes: total number of ledgers
* ledgers-map
* [repeat]: (8-bytes::ledgerId, 8-bytes::size-of-ledger)
* </pre>
* @param out
* @throws IOException
* throws if it couldn't serialize metadata-fields
* @throws IllegalStateException
* throws if it couldn't serialize ledger-map
*/
public void serialize(DataOutputStream out) throws IOException, IllegalStateException {
out.writeShort(DEFAULT_SERIALIZATION_VERSION);
out.writeLong(entryLogId);
out.writeLong(totalSize);
out.writeLong(remainingSize);
out.writeLong(ledgersMap.size());
ledgersMap.forEach((ledgerId, size) -> {
try {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this try/catch should be around whole method body, i.e. out.writeShort(DEFAULT_SERIALIZATION_VERSION) as well as out.flush() can throw IOException too.
Overall, I'd either leave it at IOException and not catch anything (and pass IOException up) or catch everything and throw something like SerializationException

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

serialize method anyway throws IOException so, no need to catch IOException for out.flush(). the only reason of try/catch and throwing runtime-exception is because it's in lambda and it requires runtime-exception.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Probably this is one of the reason to avoid lamba here. Just a normal iteration should be just fine.

out.writeLong(ledgerId);
out.writeLong(size);
} catch (IOException e) {
throw new IllegalStateException("Failed to serialize entryLogMetadata", e);
}
});
out.flush();
}

/**
* Deserializes {@link EntryLogMetadataRecyclable} from given {@link DataInputStream}.
* Caller has to recycle returned {@link EntryLogMetadataRecyclable}.
* @param in
* @return
* @throws IOException
*/
/**
 * Deserializes an {@link EntryLogMetadataRecyclable} from the given
 * {@link DataInputStream}, reading the layout written by
 * {@link #serialize(DataOutputStream)}. The caller is responsible for
 * recycling the returned instance.
 *
 * @param in stream positioned at the start of a serialized metadata record
 * @return a pooled {@link EntryLogMetadataRecyclable} populated from the stream
 * @throws IOException
 *             if the stream is unreadable, truncated, or carries an unsupported
 *             serialization version; the pooled instance is recycled before the
 *             exception is rethrown, so it is never leaked
 */
public static EntryLogMetadataRecyclable deserialize(DataInputStream in) throws IOException {
    EntryLogMetadataRecyclable metadata = EntryLogMetadataRecyclable.get();
    try {
        short serVersion = in.readShort();
        if (serVersion != DEFAULT_SERIALIZATION_VERSION) {
            throw new IOException(String.format("serialization version doesn't match. expected=%d, found=%d",
                    DEFAULT_SERIALIZATION_VERSION, serVersion));
        }
        metadata.entryLogId = in.readLong();
        metadata.totalSize = in.readLong();
        metadata.remainingSize = in.readLong();
        long ledgersMapSize = in.readLong();
        // Use a long counter: the serialized count is written as a long and an
        // int loop variable would truncate/overflow for very large maps.
        for (long i = 0; i < ledgersMapSize; i++) {
            long ledgerId = in.readLong();
            // Second field of each pair is the ledger's size within this entry
            // log (see the schema in serialize()), not an entry id.
            long ledgerSize = in.readLong();
            metadata.ledgersMap.put(ledgerId, ledgerSize);
        }
        return metadata;
    } catch (IOException e) {
        metadata.recycle();
        throw e;
    } catch (Exception e) {
        // Defensive: return the pooled object and surface anything unexpected
        // (e.g. a RuntimeException from the map) as an IOException.
        metadata.recycle();
        throw new IOException(e);
    }
}

/**
 * Resets this metadata to sentinel values (-1) and empties the ledgers map,
 * so a pooled instance carries no stale state back out of the recycler.
 */
public void clear() {
    ledgersMap.clear();
    entryLogId = -1L;
    totalSize = -1L;
    remainingSize = -1L;
}

/**
* Recyclable {@link EntryLogMetadata} class.
*
*/
public static class EntryLogMetadataRecyclable extends EntryLogMetadata {

private final Handle<EntryLogMetadataRecyclable> recyclerHandle;

private EntryLogMetadataRecyclable(Handle<EntryLogMetadataRecyclable> recyclerHandle) {
this.recyclerHandle = recyclerHandle;
}

private static final Recycler<EntryLogMetadataRecyclable> RECYCLER =
new Recycler<EntryLogMetadataRecyclable>() {
protected EntryLogMetadataRecyclable newObject(Recycler.Handle<EntryLogMetadataRecyclable> handle) {
return new EntryLogMetadataRecyclable(handle);
}
};

public static EntryLogMetadataRecyclable get() {
EntryLogMetadataRecyclable metadata = RECYCLER.get();
return metadata;
}

public void recycle() {
clear();
recyclerHandle.recycle(this);
}

}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/

package org.apache.bookkeeper.bookie;

import java.io.Closeable;
import java.io.IOException;
import java.util.function.BiConsumer;

import org.apache.bookkeeper.bookie.BookieException.EntryLogMetadataMapException;

/**
 * Map-store to store Entrylogger metadata, keyed by entry-log id.
 *
 * <p>All accessors surface store failures as
 * {@link EntryLogMetadataMapException}; {@link java.io.Closeable#close()}
 * releases any underlying store resources.
 */
public interface EntryLogMetadataMap extends Closeable {

/**
 * Checks if record with entryLogId exists in the map.
 *
 * @param entryLogId id of the entry log to look up
 * @return true if a metadata record exists for the given entry log id
 * @throws EntryLogMetadataMapException if the underlying store cannot be read
 */
boolean containsKey(long entryLogId) throws EntryLogMetadataMapException;

/**
 * Adds entryLogMetadata record into the map.
 *
 * @param entryLogId id of the entry log the metadata describes
 * @param entryLogMeta metadata record to store
 * @throws EntryLogMetadataMapException if the record cannot be written
 */
void put(long entryLogId, EntryLogMetadata entryLogMeta) throws EntryLogMetadataMapException;

/**
 * Performs the given action for each entry in this map until all entries
 * have been processed or the action throws an exception.
 *
 * @param action callback invoked with each (entryLogId, metadata) pair
 * @throws EntryLogMetadataMapException if iterating the underlying store fails
 */
void forEach(BiConsumer<Long, EntryLogMetadata> action) throws EntryLogMetadataMapException;

/**
 * Removes entryLogMetadata record from the map.
 *
 * @param entryLogId id of the entry log whose record should be removed
 * @throws EntryLogMetadataMapException if the record cannot be removed
 */
void remove(long entryLogId) throws EntryLogMetadataMapException;

/**
 * Returns number of entryLogMetadata records present in the map.
 *
 * @return the current record count
 * @throws EntryLogMetadataMapException if the underlying store cannot be read
 */
int size() throws EntryLogMetadataMapException;

}
Loading