diff --git a/.gitignore b/.gitignore
index 9bb4877..1e6f0b7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 *~
 *.exe
+_vagrant/.vagrant
diff --git a/.travis.yml b/.travis.yml
index 1c03fcf..84a85c5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,6 +3,9 @@ language: go
 go:
   - 1.3.1
 
+services:
+  - redis-server
+
 before_script:
   - source /etc/lsb-release && echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | sudo tee /etc/apt/sources.list.d/rethinkdb.list
   - wget -qO- http://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add -
diff --git a/_vagrant/Vagrantfile b/_vagrant/Vagrantfile
new file mode 100644
index 0000000..8b5447e
--- /dev/null
+++ b/_vagrant/Vagrantfile
@@ -0,0 +1,20 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+  config.vm.box = "ubuntu/trusty64"
+
+  # rethinkdb
+  config.vm.network "forwarded_port", guest: 8080, host: 8080
+  config.vm.network "forwarded_port", guest: 28015, host: 28015
+  config.vm.network "forwarded_port", guest: 29015, host: 29015
+
+  # redis
+  config.vm.network "forwarded_port", guest: 6379, host: 6379
+
+  # provision: deploy.sh installs Ansible and runs the playbook
+  config.vm.provision "shell", path: "deploy.sh"
+end
diff --git a/_vagrant/deploy.sh b/_vagrant/deploy.sh
new file mode 100644
index 0000000..95804d3
--- /dev/null
+++ b/_vagrant/deploy.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+export DEBIAN_FRONTEND=noninteractive
+
+apt-get update -qq
+apt-get install -y python2.7 python-pip
+
+pip install ansible
+
+mkdir -p /etc/ansible
+echo "localhost" > /etc/ansible/hosts
+
+cd /vagrant
+ansible-playbook -vvvv playbook.yml
diff --git a/_vagrant/playbook.yml b/_vagrant/playbook.yml
new file mode 100644
index 0000000..55e9e82
--- /dev/null
+++ b/_vagrant/playbook.yml
@@ -0,0 +1,46 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  sudo: true
+  tasks:
+    # install rethinkdb
+    - name: add rethinkdb sources
+      shell: . /etc/lsb-release && echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | tee /etc/apt/sources.list.d/rethinkdb.list
+    - name: add rethinkdb key
+      shell: wget -qO- http://download.rethinkdb.com/apt/pubkey.gpg | apt-key add -
+    - name: update apt cache
+      apt: update_cache=yes
+    - name: install rethinkdb
+      apt: pkg=rethinkdb state=present
+    # configure rethinkdb
+    - name: ensure group is present
+      group: name=rethinkdb state=present
+    - name: ensure user is present
+      user: name=rethinkdb state=present
+    - name: copy master config file
+      copy: src=./rethinkdb.conf dest=/etc/rethinkdb/instances.d/rethinkdb.conf owner=rethinkdb group=rethinkdb mode=0664
+    - name: start rethinkdb
+      service: name=rethinkdb state=restarted
+    # install redis
+    - name: prepare a directory
+      shell: mkdir -p /tmp/redis-stable
+    - name: download redis
+      get_url: url=http://download.redis.io/redis-stable.tar.gz dest=/tmp/redis-stable.tar.gz
+    - name: unpack redis
+      unarchive: src=/tmp/redis-stable.tar.gz dest=/tmp/redis-stable copy=no
+    - name: make redis
+      shell: 'cd /tmp/redis-stable/redis-stable && make && make install'
+    - name: ensure group is present
+      group: name=redis state=present
+    - name: ensure user is present
+      user: name=redis state=present
+    - name: prepare config and storage directories for redis
+      shell: mkdir -p /etc/redis /var/redis/6379
+    - name: install the init script for redis
+      shell: cp /tmp/redis-stable/redis-stable/utils/redis_init_script /etc/init.d/redis_6379
+    - name: upload the config to the vm
+      copy: src=./redis.conf dest=/etc/redis/6379.conf owner=redis group=redis mode=0664
+    - name: register the init script with update-rc.d
+      shell: update-rc.d redis_6379 defaults
+    - name: start redis_6379
+      service: name=redis_6379 state=restarted
diff --git a/_vagrant/redis.conf b/_vagrant/redis.conf
new file mode 100644
index 0000000..d176fe0
--- /dev/null
+++ b/_vagrant/redis.conf
@@ -0,0 +1,827 @@
+# Redis configuration file example
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################ GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+daemonize yes
+
+# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
+# default.
You can specify a custom pid file location here. +pidfile /var/run/redis_6379.pid + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile /var/log/redis_6379.log + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. 
+# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir /var/redis/6379 + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. 
+#
+# masterauth <master-password>
+
+# When a slave loses its connection with the master, or when the replication
+# is still in progress, the slave can act in two different ways:
+#
+# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+#    still reply to client requests, possibly with out of date data, or the
+#    data set may just be empty if this is the first synchronization.
+#
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+#    an error "SYNC with master in progress" to all the kind of commands
+#    but to INFO and SLAVEOF.
+#
+slave-serve-stale-data yes
+
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# -------------------------------------------------------
+# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
+# -------------------------------------------------------
+#
+# New slaves and reconnecting slaves that are not able to continue the replication
+# process just receiving differences, need to do what is called a "full
+# synchronization". An RDB file is transmitted from the master to the slaves.
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+#                 file on disk. Later the file is transferred by the parent
+#                 process to the slaves incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+#              RDB file to slave sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more slaves
+# can be queued and served with the RDB file as soon as the current child producing
+# the RDB file finishes its work. With diskless replication instead once
+# the transfer starts, new slaves arriving will be queued and a new transfer
+# will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple slaves
+# will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the slaves.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new slaves arriving, that will be queued for the next RDB transfer, so the server
+# waits a delay in order to let more slaves arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. 
+# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... 
if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. 
It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. 
+#
+# Note that if the AOF file will be found to be corrupted in the middle
+# the server will still exit with an error. This option only applies when
+# Redis will try to read more data from the AOF file but not enough bytes
+# will be found.
+aof-load-truncated yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet called write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# Event notification ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. 
The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. 
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
diff --git a/_vagrant/rethinkdb.conf b/_vagrant/rethinkdb.conf
new file mode 100644
index 0000000..4e6cce2
--- /dev/null
+++ b/_vagrant/rethinkdb.conf
@@ -0,0 +1,68 @@
+#
+# RethinkDB instance configuration sample
+#
+# - Give this file the extension .conf and put it in /etc/rethinkdb/instances.d in order to enable it.
+# - See http://www.rethinkdb.com/docs/guides/startup/ for the complete documentation
+# - Uncomment an option to change its value.
+#
+
+###############################
+## RethinkDB configuration
+###############################
+
+### Process options
+
+## User and group used to run rethinkdb
+## Command line default: do not change user or group
+## Init script default: rethinkdb user and group
+# runuser=rethinkdb
+# rungroup=rethinkdb
+
+## Stash the pid in this file when the process is running
+## Command line default: none
+## Init script default: /var/run/rethinkdb/<name>/pid_file (where <name> is the name of this config file without the extension)
+# pid-file=/var/run/rethinkdb/rethinkdb.pid
+
+### File path options
+
+## Directory to store data and metadata
+## Command line default: ./rethinkdb_data
+## Init script default: /var/lib/rethinkdb/<name>/ (where <name> is the name of this file without the extension)
+# directory=/var/lib/rethinkdb/default
+
+### Network options
+
+## Address of local interfaces to listen on when accepting connections
+## May be 'all' or an IP address, loopback addresses are enabled by default
+## Default: all local addresses
+bind=all
+
+## The port for rethinkdb protocol for client drivers
+## Default: 28015 + port-offset
+# driver-port=28015
+
+## The port for receiving connections from other nodes
+## Default: 29015 + port-offset
+# cluster-port=29015
+
+## The host:port of a node that rethinkdb will connect to
+## This option can be specified multiple times.
+## Default: none
+# join=example.com:29015
+
+## All ports used locally will have this value added
+## Default: 0
+# port-offset=0
+
+### Web options
+
+## Port for the http admin console
+## Default: 8080 + port-offset
+# http-port=8080
+
+### CPU options
+
+## The number of cores to use
+## Default: total number of cores of the CPU
+# cores=2
+
diff --git a/cache/cache.go b/cache/cache.go
new file mode 100644
index 0000000..a95ad01
--- /dev/null
+++ b/cache/cache.go
@@ -0,0 +1,13 @@
+package cache
+
+import "time"
+
+// Cache is the basic interface for cache implementations
+type Cache interface {
+	Get(key string, pointer interface{}) error
+	Set(key string, value interface{}, expires time.Duration) error
+	Delete(key string) error
+	DeleteMask(mask string) error
+	DeleteMulti(keys ...interface{}) error
+	Exists(key string) (bool, error)
+}
diff --git a/cache/redis_cache.go b/cache/redis_cache.go
new file mode 100644
index 0000000..d83e968
--- /dev/null
+++ b/cache/redis_cache.go
@@ -0,0 +1,160 @@
+package cache
+
+import (
+	"bytes"
+	"encoding/gob"
+	"time"
+
+	"github.com/garyburd/redigo/redis"
+)
+
+// Scripts!
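+// scriptDeleteMask deletes every key matching the glob pattern passed in
+// ARGV[1]. Note that KEYS walks the entire keyspace and blocks the server
+// while it runs, which is fine for a small cache like this one, but an
+// incremental SCAN-based loop would scale better on large datasets.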
+var ( + scriptDeleteMask = redis.NewScript(0, ` + for _, k in ipairs( redis.call( 'keys', ARGV[1] ) ) do + redis.call( 'del', k ) + end + `) +) + +// RedisCache is an implementation of Cache that uses Redis as a backend +type RedisCache struct { + pool *redis.Pool +} + +// RedisCacheOpts is used to pass options to NewRedisCache +type RedisCacheOpts struct { + Address string + Database int + Password string + MaxIdle int + IdleTimeout time.Duration +} + +// NewRedisCache creates a new cache with a redis backend +func NewRedisCache(options *RedisCacheOpts) (*RedisCache, error) { + // Default values + if options.MaxIdle == 0 { + options.MaxIdle = 3 + } + if options.IdleTimeout == 0 { + options.IdleTimeout = 240 * time.Second + } + + // Create a new redis pool + pool := &redis.Pool{ + MaxIdle: options.MaxIdle, + IdleTimeout: options.IdleTimeout, + Dial: func() (redis.Conn, error) { + c, err := redis.Dial("tcp", options.Address) + if err != nil { + return nil, err + } + + if options.Password != "" { + if _, err := c.Do("AUTH", options.Password); err != nil { + c.Close() + return nil, err + } + } + + if options.Database != 0 { + if _, err := c.Do("SELECT", options.Database); err != nil { + c.Close() + return nil, err + } + } + + return c, err + }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + } + + // Test the pool + conn := pool.Get() + defer conn.Close() + if err := pool.TestOnBorrow(conn, time.Now()); err != nil { + return nil, err + } + + // Return a new cache struct + return &RedisCache{ + pool: pool, + }, nil +} + +// Get retrieves data from the database and then decodes it into the passed pointer. +func (r *RedisCache) Get(key string, pointer interface{}) error { + conn := r.pool.Get() + defer conn.Close() + + // Perform the get + data, err := redis.Bytes(conn.Do("GET", key)) + if err != nil { + return err + } + + // Initialize a new decoder + dec := gob.NewDecoder(bytes.NewReader(data)) + + // Decode it into pointer + return dec.Decode(pointer) +} + +// Set encodes passed value and sends it to redis +func (r *RedisCache) Set(key string, value interface{}, expires time.Duration) error { + conn := r.pool.Get() + defer conn.Close() + + // Initialize a new encoder + var buffer bytes.Buffer + enc := gob.NewEncoder(&buffer) + + // Encode the value + if err := enc.Encode(value); err != nil { + return err + } + + // Save it into redis + if expires == 0 { + _, err := conn.Do("SET", key, buffer.Bytes()) + return err + } + + _, err := conn.Do("SETEX", key, expires.Seconds(), buffer.Bytes()) + return err +} + +// Delete removes data in redis by key +func (r *RedisCache) Delete(key string) error { + conn := r.pool.Get() + defer conn.Close() + _, err := redis.Int(conn.Do("DEL", key)) + return err +} + +// DeleteMask removes data using KEYS masks +func (r *RedisCache) DeleteMask(mask string) error { + conn := r.pool.Get() + defer conn.Close() + _, err := scriptDeleteMask.Do(conn, mask) + return err +} + +// DeleteMulti removes multiple keys +func (r *RedisCache) DeleteMulti(keys ...interface{}) error { + conn := r.pool.Get() + defer conn.Close() + _, err := redis.Int(conn.Do("DEL", keys...)) + return err +} + +// Exists performs a check whether a key exists +func (r *RedisCache) Exists(key string) (bool, error) { + conn := r.pool.Get() + defer conn.Close() + return redis.Bool(conn.Do("EXISTS", key)) +} diff --git a/circle.yml b/circle.yml new file mode 100644 index 0000000..3250787 --- /dev/null +++ b/circle.yml @@ -0,0 +1,22 @@ +machine: + 
timezone: + Europe/Berlin + +dependencies: + pre: + - source /etc/lsb-release && echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | sudo tee /etc/apt/sources.list.d/rethinkdb.list + - wget -qO- http://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add - + - sudo apt-get update + - sudo apt-get install rethinkdb + - wget http://download.redis.io/releases/redis-2.8.18.tar.gz + - tar xvzf redis-2.8.18.tar.gz + - cd redis-2.8.18 && make + post: + - rethinkdb --bind all: + background: true + - src/redis-server: + background: true + +test: + override: + - go test -v ./... diff --git a/db/default_crud.go b/db/default_crud.go index c3976c5..8cdfef5 100644 --- a/db/default_crud.go +++ b/db/default_crud.go @@ -35,6 +35,11 @@ func (d *Default) GetTable() gorethink.Term { return gorethink.Table(d.table) } +// GetSession returns the current session +func (d *Default) GetSession() *gorethink.Session { + return d.session +} + // Insert inserts a document into the database func (d *Default) Insert(data interface{}) error { _, err := d.GetTable().Insert(data).RunWrite(d.session) diff --git a/db/rethink_crud.go b/db/rethink_crud.go index 8c669ef..adaeaed 100644 --- a/db/rethink_crud.go +++ b/db/rethink_crud.go @@ -1,13 +1,15 @@ package db import ( - rethink "github.com/dancannon/gorethink" + "github.com/dancannon/gorethink" ) // RethinkTable contains the most basic table functions type RethinkTable interface { GetTableName() string GetDBName() string + GetTable() gorethink.Term + GetSession() *gorethink.Session } // RethinkCreator contains a function to create new instances in the table @@ -17,19 +19,19 @@ type RethinkCreator interface { // RethinkReader allows fetching resources from the database type RethinkReader interface { - Find(id string) (*rethink.Cursor, error) + Find(id string) (*gorethink.Cursor, error) FindFetchOne(id string, value interface{}) error - FindBy(key string, value interface{}) (*rethink.Cursor, error) + FindBy(key string, value interface{}) (*gorethink.Cursor, error) FindByAndFetch(key string, value interface{}, results interface{}) error FindByAndFetchOne(key string, value interface{}, result interface{}) error FindByAndCount(key string, value interface{}) (int, error) - Where(filter map[string]interface{}) (*rethink.Cursor, error) + Where(filter map[string]interface{}) (*gorethink.Cursor, error) WhereAndFetch(filter map[string]interface{}, results interface{}) error WhereAndFetchOne(filter map[string]interface{}, result interface{}) error - FindByIndex(index string, values ...interface{}) (*rethink.Cursor, error) + FindByIndex(index string, values ...interface{}) (*gorethink.Cursor, error) FindByIndexFetch(results interface{}, index string, values ...interface{}) error FindByIndexFetchOne(result interface{}, index string, values ...interface{}) error } diff --git a/db/table_tokens.go b/db/table_tokens.go index 8de409e..5703067 100644 --- a/db/table_tokens.go +++ b/db/table_tokens.go @@ -1,12 +1,96 @@ package db import ( + "time" + + "github.com/dancannon/gorethink" + + "github.com/lavab/api/cache" "github.com/lavab/api/models" ) // TokensTable implements the CRUD interface for tokens type TokensTable struct { RethinkCRUD + Cache cache.Cache + Expires time.Duration +} + +// Insert monkey-patches the DefaultCRUD method and introduces caching +func (t *TokensTable) Insert(data interface{}) error { + if err := t.RethinkCRUD.Insert(data); err != nil { + return err + } + + token, ok := data.(*models.Token) + if !ok { + return nil + } + + return 
t.Cache.Set(t.RethinkCRUD.GetTableName()+":"+token.ID, token, t.Expires)
+}
+
+// Update applies the update and then invalidates every cached key of this
+// table, since the set of changed IDs is not tracked here
+func (t *TokensTable) Update(data interface{}) error {
+	if err := t.RethinkCRUD.Update(data); err != nil {
+		return err
+	}
+
+	return t.Cache.DeleteMask(t.RethinkCRUD.GetTableName() + ":*")
+}
+
+// UpdateID updates the specified token and updates cache
+func (t *TokensTable) UpdateID(id string, data interface{}) error {
+	if err := t.RethinkCRUD.UpdateID(id, data); err != nil {
+		return err
+	}
+
+	token, err := t.GetToken(id)
+	if err != nil {
+		return err
+	}
+
+	return t.Cache.Set(t.RethinkCRUD.GetTableName()+":"+id, token, t.Expires)
+}
+
+// Delete removes from db and cache using filter
+func (t *TokensTable) Delete(cond interface{}) error {
+	result, err := t.GetTable().Filter(cond).Delete(gorethink.DeleteOpts{
+		ReturnChanges: true,
+	}).RunWrite(t.GetSession())
+	if err != nil {
+		return err
+	}
+
+	var ids []interface{}
+	for _, change := range result.Changes {
+		ids = append(ids, t.RethinkCRUD.GetTableName()+":"+change.OldValue.(map[string]interface{})["id"].(string))
+	}
+
+	// DEL fails with zero arguments, so skip the cache call if the filter
+	// matched no documents
+	if len(ids) == 0 {
+		return nil
+	}
+
+	return t.Cache.DeleteMulti(ids...)
+}
+
+// DeleteID removes from db and cache using id query
+func (t *TokensTable) DeleteID(id string) error {
+	if err := t.RethinkCRUD.DeleteID(id); err != nil {
+		return err
+	}
+
+	return t.Cache.Delete(t.RethinkCRUD.GetTableName() + ":" + id)
+}
+
+// FindFetchOne tries cache and then falls back to DefaultCRUD's fetch operation
+func (t *TokensTable) FindFetchOne(id string, value interface{}) error {
+	// cache keys are namespaced with the table name, so the lookup has to
+	// use the same prefix that Insert and UpdateID use
+	if err := t.Cache.Get(t.RethinkCRUD.GetTableName()+":"+id, value); err == nil {
+		return nil
+	}
+
+	err := t.RethinkCRUD.FindFetchOne(id, value)
+	if err != nil {
+		return err
+	}
+
+	return t.Cache.Set(t.RethinkCRUD.GetTableName()+":"+id, value, t.Expires)
+}
 
 // GetToken returns a token with specified name
diff --git a/env/config.go b/env/config.go
index 93ea52a..69b1ff9 100644
--- a/env/config.go
+++ b/env/config.go
@@ -11,7 +11,11 @@ type Flags struct {
 	ClassicRegistration bool
 	UsernameReservation bool
 
-	RethinkDBURL      string
+	RedisAddress  string
+	RedisDatabase int
+	RedisPassword string
+
+	RethinkDBAddress  string
 	RethinkDBKey      string
 	RethinkDBDatabase string
 }
diff --git a/env/env.go b/env/env.go
index 0a32a7c..bfdac13 100644
--- a/env/env.go
+++ b/env/env.go
@@ -4,6 +4,7 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/dancannon/gorethink"
 
+	"github.com/lavab/api/cache"
 	"github.com/lavab/api/db"
 )
 
@@ -14,6 +15,8 @@ var (
 	Log *logrus.Logger
 	// Rethink contains the RethinkDB session used in the API
 	Rethink *gorethink.Session
+	// Cache is the global instance of the cache interface
+	Cache cache.Cache
 	// Accounts is the global instance of AccountsTable
 	Accounts *db.AccountsTable
 	// Tokens is the global instance of TokensTable
diff --git a/main.go b/main.go
index db34c48..e7393c0 100644
--- a/main.go
+++ b/main.go
@@ -27,8 +27,18 @@ var (
 	sessionDuration     = flag.Int("session_duration", 72, "Session duration expressed in hours")
 	classicRegistration = flag.Bool("classic_registration", false, "Classic registration enabled?")
 	usernameReservation = flag.Bool("username_reservation", false, "Username reservation enabled?")
+	// Cache-related flags
+	redisAddress = flag.String("redis_address", func() string {
+		address := os.Getenv("REDIS_PORT_6379_TCP_ADDR")
+		if address == "" {
+			address = "127.0.0.1"
+		}
+		return address + ":6379"
+	}(), "Address of the redis server")
+	redisDatabase = flag.Int("redis_db", 0, "Index of redis database to use")
+	redisPassword = flag.String("redis_password",
"", "Password of the redis server") // Database-related flags - rethinkdbURL = flag.String("rethinkdb_url", func() string { + rethinkdbAddress = flag.String("rethinkdb_address", func() string { address := os.Getenv("RETHINKDB_PORT_28015_TCP_ADDR") if address == "" { address = "127.0.0.1" @@ -60,7 +70,11 @@ func main() { ClassicRegistration: *classicRegistration, UsernameReservation: *usernameReservation, - RethinkDBURL: *rethinkdbURL, + RedisAddress: *redisAddress, + RedisDatabase: *redisDatabase, + RedisPassword: *redisPassword, + + RethinkDBAddress: *rethinkdbAddress, RethinkDBKey: *rethinkdbKey, RethinkDBDatabase: *rethinkdbDatabase, } diff --git a/routes/init_test.go b/routes/init_test.go index 6d1d576..a57c5a7 100644 --- a/routes/init_test.go +++ b/routes/init_test.go @@ -1,6 +1,7 @@ package routes_test import ( + "fmt" "net/http/httptest" "time" @@ -27,14 +28,16 @@ func init() { ClassicRegistration: true, UsernameReservation: true, - RethinkDBURL: "127.0.0.1:28015", + RedisAddress: "127.0.0.1:6379", + + RethinkDBAddress: "127.0.0.1:28015", RethinkDBKey: "", RethinkDBDatabase: "test", } // Connect to the RethinkDB server rdbSession, err := gorethink.Connect(gorethink.ConnectOpts{ - Address: env.Config.RethinkDBURL, + Address: env.Config.RethinkDBAddress, AuthKey: env.Config.RethinkDBKey, MaxIdle: 10, IdleTimeout: time.Second * 10, @@ -46,7 +49,7 @@ func init() { // Clear the test database err = gorethink.DbDrop("test").Exec(rdbSession) if err != nil { - panic("removing the test database should not return an error, got " + err.Error()) + fmt.Println("removing the test database should not return an error, got " + err.Error()) } // Disconnect diff --git a/setup/setup.go b/setup/setup.go index ab93a54..07bb0fc 100644 --- a/setup/setup.go +++ b/setup/setup.go @@ -8,6 +8,7 @@ import ( "github.com/zenazn/goji/web" "github.com/zenazn/goji/web/middleware" + "github.com/lavab/api/cache" "github.com/lavab/api/db" "github.com/lavab/api/env" "github.com/lavab/api/routes" @@ -31,14 +32,28 @@ func PrepareMux(flags *env.Flags) *web.Mux { // Pass it to the environment package env.Log = log + // Initialize the cache + redis, err := cache.NewRedisCache(&cache.RedisCacheOpts{ + Address: flags.RedisAddress, + Database: flags.RedisDatabase, + Password: flags.RedisPassword, + }) + if err != nil { + log.WithFields(logrus.Fields{ + "error": err, + }).Fatal("Unable to connect to the redis server") + } + + env.Cache = redis + // Set up the database rethinkOpts := gorethink.ConnectOpts{ - Address: flags.RethinkDBURL, + Address: flags.RethinkDBAddress, AuthKey: flags.RethinkDBKey, MaxIdle: 10, IdleTimeout: time.Second * 10, } - err := db.Setup(rethinkOpts) + err = db.Setup(rethinkOpts) if err != nil { log.WithFields(logrus.Fields{ "error": err, @@ -64,6 +79,7 @@ func PrepareMux(flags *env.Flags) *web.Mux { rethinkOpts.Database, "tokens", ), + Cache: redis, } env.Accounts = &db.AccountsTable{ RethinkCRUD: db.NewCRUDTable( diff --git a/setup/setup_test.go b/setup/setup_test.go index ebbe49b..406ecbe 100644 --- a/setup/setup_test.go +++ b/setup/setup_test.go @@ -18,7 +18,9 @@ func TestSetup(t *testing.T) { SessionDuration: 72, ClassicRegistration: true, - RethinkDBURL: "127.0.0.1:28015", + RedisAddress: "127.0.0.1:6379", + + RethinkDBAddress: "127.0.0.1:28015", RethinkDBKey: "", RethinkDBDatabase: "test", }