diff --git a/ivy.xml b/ivy.xml
index ab586e7..e530350 100644
--- a/ivy.xml
+++ b/ivy.xml
@@ -20,27 +20,22 @@
-
-
-
-
-
-
+
+
+
-
-
-
-
-
-
+
+
+
+
+
-
diff --git a/resources/cassandra/cassandra.yaml b/resources/cassandra/cassandra.yaml
index 05bfdde..d37de63 100644
--- a/resources/cassandra/cassandra.yaml
+++ b/resources/cassandra/cassandra.yaml
@@ -21,21 +21,13 @@ cluster_name: 'Solandra Cluster'
# a random token, which will lead to hot spots.
initial_token:
-# Set to true to make new [non-seed] nodes automatically migrate data
-# to themselves from the pre-existing nodes in the cluster. Defaults
-# to false because you can only bootstrap N machines at a time from
-# an existing cluster of N, so if you are bringing up a cluster of
-# 10 machines with 3 seeds you would have to do it in stages. Leaving
-# this off for the initial start simplifies that.
-auto_bootstrap: false
-
# See http://wiki.apache.org/cassandra/HintedHandoff
hinted_handoff_enabled: true
# this defines the maximum amount of time a dead host will have hints
# generated. After it has been dead this long, hints will be dropped.
max_hint_window_in_ms: 3600000 # one hour
-# Sleep this long after delivering each row or row fragment
-hinted_handoff_throttle_delay_in_ms: 50
+# Sleep this long after delivering each hint
+hinted_handoff_throttle_delay_in_ms: 1
# authentication backend, implementing IAuthenticator; used to identify users
authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
@@ -74,26 +66,92 @@ data_file_directories:
# commit log
commitlog_directory: /tmp/cassandra-data/commitlog
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 2 (can hold > 200000 keys). Set to 0 to disable key cache.
+key_cache_size_in_mb: 2
+
+# Duration in seconds after which Cassandra should
+# save the keys cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint. And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
# saved caches
saved_caches_directory: /tmp/cassandra-data/saved_caches
-# Size to allow commitlog to grow to before creating a new segment
-commitlog_rotation_threshold_in_mb: 128
-
# commitlog_sync may be either "periodic" or "batch."
# When in batch mode, Cassandra won't ack writes until the commit log
# has been fsynced to disk. It will wait up to
-# CommitLogSyncBatchWindowInMS milliseconds for other writes, before
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
# performing the sync.
-commitlog_sync: periodic
-
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
# the other option is "periodic" where writes may be acked immediately
# and the CommitLog is simply synced every commitlog_sync_period_in_ms
# milliseconds.
+commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000
-# any class that implements the SeedProvider interface and has a constructor that takes a Map of
-# parameters will do.
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map of parameters will do.
seed_provider:
# Addresses of hosts that are deemed contact points.
# Cassandra nodes use this list of hosts to find each other and learn
@@ -143,12 +201,16 @@ concurrent_reads: 32
concurrent_writes: 32
# Total memory to use for memtables. Cassandra will flush the largest
-# memtable when this much memory is used. Prefer using this to
-# the older, per-ColumnFamily memtable flush thresholds.
+# memtable when this much memory is used.
# If omitted, Cassandra will set it to 1/3 of the heap.
-# If set to 0, only the old flush thresholds are used.
# memtable_total_space_in_mb: 2048
+# Total space to use for commitlogs.
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.
+# commitlog_total_space_in_mb: 4096
+
# This sets the amount of memtable flush writer threads. These will
# be blocked by disk io, and each one will hold a memtable in memory
# while blocked. If you have a large heap and many data directories,
@@ -165,9 +227,21 @@ memtable_flush_queue_size: 4
# Increase this to the size of the column slices you typically perform
sliced_buffer_size_in_kb: 64
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
# TCP port, for commands and data
storage_port: 7000
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+ssl_storage_port: 7001
+
# Address to bind to and tell other Cassandra nodes to connect to. You
# _must_ change this if you want multiple nodes to be able to
# communicate!
@@ -180,6 +254,10 @@ storage_port: 7000
# Setting this to 0.0.0.0 is always wrong.
listen_address: localhost
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
# The address to bind the Thrift RPC service to -- clients connect
# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
# you want Thrift to listen on all interfaces.
@@ -193,16 +271,36 @@ rpc_port: 9160
# enable or disable keepalive on rpc connections
rpc_keepalive: true
-# Cassandra uses thread-per-client for client RPC. This can
-# be expensive in memory used for thread stack for a large
-# enough number of clients. (Hence, connection pooling is
-# very, very strongly recommended.)
-#
+# Cassandra provides three options for the RPC Server:
+#
+# sync -> One connection per thread in the rpc pool (see below).
+# For a very large number of clients, memory will be your limiting
+# factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
+# Connection pooling is very, very strongly recommended.
+#
+# async -> Nonblocking server implementation with one thread to serve
+# rpc connections. This is not recommended for high throughput use
+# cases. Async has been tested to be about 50% slower than sync
+# or hsha and is deprecated: it will be removed in the next major release.
+#
+# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool
+# (see below) is used to manage requests, but the threads are multiplexed
+# across the different clients.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+rpc_server_type: sync
+
# Uncomment rpc_min|max|thread to set request pool size.
-# You would primarily set max as a safeguard against misbehaved
-# clients; if you do hit the max, Cassandra will block until
-# one disconnects before accepting more. The defaults are
-# min of 16 and max unlimited.
+# You would primarily set max for the sync server to safeguard against
+# misbehaved clients; if you do hit the max, Cassandra will block until one
+# disconnects before accepting more. The defaults for sync are min of 16 and max
+# unlimited.
+#
+# For the Hsha server, the min and max both default to quadruple the number of
+# CPU cores.
+#
+# This configuration is ignored by the async server.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048
@@ -232,10 +330,6 @@ incremental_backups: false
# is a data format change.
snapshot_before_compaction: false
-# change this to increase the compaction thread's priority. In java, 1 is the
-# lowest priority and that is our default.
-# compaction_thread_priority: 1
-
# Add column indexes to a row after its contents reach this size.
# Increase if your column values are large, or if you have a very large
# number of columns. The competing causes are, Cassandra has to
@@ -250,21 +344,34 @@ column_index_size_in_kb: 64
# will be logged specifying the row key.
in_memory_compaction_limit_in_mb: 64
-# Number of compaction threads. This default to the number of processors,
-# enabling multiple compactions to execute at once. Using more than one
-# thread is highly recommended to preserve read performance in a mixed
-# read/write workload as this avoids sstables from accumulating during long
-# running compactions. The default is usually fine and if you experience
-# problems with compaction running too slowly or too fast, you should look at
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
# compaction_throughput_mb_per_sec first.
-# Uncomment to make compaction mono-threaded.
+#
+# This setting has no effect on LeveledCompactionStrategy.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
#concurrent_compactors: 1
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
# Throttles compaction to the given total throughput across the entire
# system. The faster you insert data, the faster you need to compact in
# order to keep the sstable count down, but in general, setting this to
# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
compaction_throughput_mb_per_sec: 16
# Track cached row keys during compaction, and re-cache their new
@@ -272,9 +379,23 @@ compaction_throughput_mb_per_sec: 16
# key caches.
compaction_preheat_key_cache: true
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
# Time to wait for a reply from other nodes before failing the command
rpc_timeout_in_ms: 10000
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming an important amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which never timeout streams.
+# streaming_socket_timeout_in_ms: 0
+
# phi value that must be reached for a host to be marked down.
# most users should never need to adjust this.
# phi_convict_threshold: 8
@@ -295,11 +416,6 @@ rpc_timeout_in_ms: 10000
# explicitly configured in cassandra-topology.properties.
endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
-# dynamic_snitch -- This boolean controls whether the above snitch is
-# wrapped with a dynamic snitch, which will monitor read latencies
-# and avoid reading from hosts that have slowed (due to compaction,
-# for instance)
-dynamic_snitch: true
# controls how often to perform the more expensive part of host score
# calculation
dynamic_snitch_update_interval_in_ms: 100
@@ -313,7 +429,7 @@ dynamic_snitch_reset_interval_in_ms: 600000
# expressed as a double which represents a percentage. Thus, a value of
# 0.2 means Cassandra would continue to prefer the static snitch values
# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.0
+dynamic_snitch_badness_threshold: 0.1
# request_scheduler -- Set this to a class that implements
# RequestScheduler, which will schedule incoming client requests
@@ -355,9 +471,16 @@ request_scheduler: org.apache.cassandra.scheduler.NoScheduler
# the request scheduling. Currently the only valid option is keyspace.
# request_scheduler_id: keyspace
-# The Index Interval determines how large the sampling of row keys
-# is for a given SSTable. The larger the sampling, the more effective
-# the index is at the cost of space.
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time. The larger the interval,
+# the smaller and less effective the sampling will be. In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample. All the sampled entries
+# must fit in memory. Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs. This value is not often changed, however if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
index_interval: 128
# Enable or disable inter-node encryption
@@ -365,14 +488,23 @@ index_interval: 128
# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
# suite for authentication, key exchange and encryption of the actual data transfers.
# NOTE: No custom encryption options are enabled at the moment
-# The available internode options are : all, none
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
#
# The passwords used in these options must match the passwords used when generating
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
encryption_options:
internode_encryption: none
keystore: conf/.keystore
keystore_password: cassandra
truststore: conf/.truststore
truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
diff --git a/resources/cassandra/solandra-env.sh b/resources/cassandra/solandra-env.sh
index d5d5784..c69d334 100644
--- a/resources/cassandra/solandra-env.sh
+++ b/resources/cassandra/solandra-env.sh
@@ -95,7 +95,7 @@ JVM_OPTS="$JVM_OPTS -ea"
check_openjdk=$(java -version 2>&1 | awk '{if (NR == 2) {print $1}}')
if [ "$check_openjdk" != "OpenJDK" ]
then
- JVM_OPTS="$JVM_OPTS -javaagent:$SOLANDRA_HOME/lib/jamm-0.2.2.jar"
+ JVM_OPTS="$JVM_OPTS -javaagent:$SOLANDRA_HOME/lib/jamm-0.2.5.jar"
fi
# enable thread priorities, primarily so we can give periodic tasks
diff --git a/src/lucandra/CassandraUtils.java b/src/lucandra/CassandraUtils.java
index df9b06a..7f30a2f 100644
--- a/src/lucandra/CassandraUtils.java
+++ b/src/lucandra/CassandraUtils.java
@@ -34,7 +34,7 @@
import java.util.zip.Inflater;
import org.apache.cassandra.config.ConfigurationException;
-import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.service.AbstractCassandraDaemon;
@@ -307,7 +307,7 @@ public static void createCassandraSchema() throws IOException
return;
}
- if (DatabaseDescriptor.getNonSystemTables().contains(keySpace))
+ if (Schema.instance.getNonSystemTables().contains(keySpace))
{
logger.info("Found Solandra specific schema");
return;
@@ -329,7 +329,7 @@ public static void createCassandraSchema() throws IOException
System.exit(2);
}
- if (DatabaseDescriptor.getNonSystemTables().contains(keySpace))
+ if (Schema.instance.getNonSystemTables().contains(keySpace))
{
logger.info("Found Solandra specific schema");
return;
@@ -388,8 +388,13 @@ public static void createCassandraSchema() throws IOException
cfs.add(cf);
- KsDef solandraKS = new KsDef().setName(keySpace).setReplication_factor(1).setStrategy_class(
- "org.apache.cassandra.locator.SimpleStrategy").setCf_defs(cfs);
+ Map strategyOptions = new HashMap();
+ strategyOptions.put("replication_factor", "1");
+ KsDef solandraKS = new KsDef()
+ .setName(keySpace)
+ .setStrategy_class("org.apache.cassandra.locator.SimpleStrategy")
+ .setStrategy_options(strategyOptions)
+ .setCf_defs(cfs);
CassandraServer cs = new CassandraServer();
diff --git a/src/lucandra/VIntType.java b/src/lucandra/VIntType.java
index 5f6db56..4030b3f 100644
--- a/src/lucandra/VIntType.java
+++ b/src/lucandra/VIntType.java
@@ -30,13 +30,18 @@
public class VIntType extends AbstractType {
public static final VIntType instance = new VIntType();
-
-
+
+ @Override
public String getString(ByteBuffer bytes) {
return Integer.toString(CassandraUtils.readVInt(bytes));
}
-
+ @Override
+ public ByteBuffer fromString(String source) {
+ return ByteBuffer.wrap(CassandraUtils.writeVInt(new Integer(source)));
+ }
+
+ @Override
public int compare(ByteBuffer o1, ByteBuffer o2) {
if(null == o1){
if(null == o2) return 0;
@@ -65,11 +70,13 @@ public int compare(ByteBuffer o1, ByteBuffer o2) {
return i1 < i2 ? -1 : 1;
}
+ @Override
public BigInteger compose(ByteBuffer bytes)
{
return new BigInteger(ByteBufferUtil.getArray(bytes));
}
+ @Override
public ByteBuffer decompose(BigInteger value)
{
return ByteBuffer.wrap(value.toByteArray());
@@ -79,7 +86,7 @@ public String toString(BigInteger bigInteger) {
return bigInteger.toString();
}
-
+ @Override
public void validate(ByteBuffer bytes) throws MarshalException
{
@@ -98,7 +105,6 @@ public boolean isCaseSensitive()
return false;
}
-
public boolean isCurrency()
{
return false;
@@ -108,7 +114,6 @@ public int getPrecision(BigInteger obj) {
return obj.toString().length();
}
-
public int getScale(BigInteger obj) {
return 0;
}
@@ -117,7 +122,6 @@ public int getJdbcType() {
return Types.BIGINT;
}
-
public boolean needsQuotes()
{
return false;
diff --git a/src/solandra/SolandraComponent.java b/src/solandra/SolandraComponent.java
index 8cc7ef7..cb498e5 100644
--- a/src/solandra/SolandraComponent.java
+++ b/src/solandra/SolandraComponent.java
@@ -35,6 +35,7 @@
import com.google.common.collect.MapMaker;
import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.Row;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.service.StorageService;
@@ -93,7 +94,7 @@ public static boolean prepare(ResponseBuilder rb) throws IOException
if(!hasSolandraSchema.get())
{
//Check is Solandra schema exists, if not die
- if(! DatabaseDescriptor.getNonSystemTables().contains(CassandraUtils.keySpace) )
+ if(! Schema.instance.getNonSystemTables().contains(CassandraUtils.keySpace) )
throw new IOException("Solandra keyspace is missing, please import then retry");
else
hasSolandraSchema.set(true);