Skip to content

From 0 to 1 - create Redis cluster

homepage-banner

Prerequisites

Start 6 Redis instances (3 masters + 3 replicas) with the following configuration; for each instance, replace dir, pidfile, logfile, and port.

reference redis.conf

# Reference redis.conf for one cluster node (example instance: port 16380).
# For each of the six instances, change dir/pidfile/logfile/port accordingly.
dir /opt/redis/16380/
pidfile /opt/redis/16380/r-server.pid
logfile /opt/redis/16380/r-server.log
port 16380

# --- Networking ---
bind 0.0.0.0
# protected-mode is off so peer nodes can connect; ensure the port is
# reachable only from trusted hosts (firewall it).
protected-mode no
tcp-backlog 511
# Close client connections idle for 600 s (0 would disable the timeout).
timeout 600
tcp-keepalive 300
supervised no
loglevel notice
databases 16
always-show-logo yes
# --- RDB snapshotting (save <seconds> <changes>) ---
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
rdb-del-sync-files no
# --- Replication ---
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-diskless-load disabled
repl-disable-tcp-nodelay no
# Generous 30-minute replication timeout to survive long full syncs.
repl-timeout 1800
repl-backlog-size 100mb
replica-priority 100
acllog-max-len 128
# --- Lazy freeing (all synchronous here) ---
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
lazyfree-lazy-user-del no
oom-score-adj no
oom-score-adj-values 0 200 800
# --- AOF persistence (disabled; RDB snapshots only) ---
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 200
# NOTE(review): 64gb is an unusually high rewrite floor — confirm this is
# intentional and not a typo for 64mb (AOF is off, so it is inert anyway).
auto-aof-rewrite-min-size 64gb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
# --- Slow log / latency monitoring ---
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
# --- Data-structure encoding thresholds ---
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
# --- Client output buffer limits ---
client-output-buffer-limit normal 0 0 0
# NOTE(review): "slave" is the legacy alias of "replica", so the next line
# overrides this one and only 256mb/64mb/60 takes effect. Keep exactly one
# of these two lines.
client-output-buffer-limit slave 4096mb 4096mb 1800
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
jemalloc-bg-thread yes
# --- Cluster ---
cluster-enabled yes
# Auto-generated per-node state file; must be unique per instance directory.
cluster-config-file nodes.conf
cluster-node-timeout 5000
# 0: a replica always qualifies for failover regardless of replication lag.
cluster-slave-validity-factor 0
# 999 effectively disables automatic replica migration between masters.
cluster-migration-barrier 999
# Keep serving the slots that are still covered even if some slots are lost.
cluster-require-full-coverage no
# --- Memory ---
maxmemory 10g
maxmemory-policy allkeys-lru

start redis-server with command line or using systemd

# Launch the six Redis instances sequentially, one per config file.
for conf_file in redis16380.conf redis16381.conf redis16382.conf \
                 redis16383.conf redis16384.conf redis16385.conf; do
  redis-server "$conf_file"
done

1. Manually create cluster

1.1 cluster meet

choose one node and have it MEET each of the others

# Join all six instances into one cluster: from a single node, send
# CLUSTER MEET for every port. Expansions are quoted (SC2086) so an
# unset or unusual host value cannot word-split or glob.
for target_port in 16380 16381 16382 16383 16384 16385; do
  redis-cli -h "${ip}" -p "${port}" cluster meet "${ip}" "${target_port}"
done

# Verify that every node now appears in the cluster view.
redis-cli -h "${ip}" -p "${port}" cluster nodes

1.2 set slot

for the 3 master nodes

# Assign all 16384 hash slots across the 3 masters.
# Ranges corrected to the canonical even split (0-5460 / 5461-10922 /
# 10923-16383) so they match the `cluster nodes` output shown later in
# this document; the original {0..5461}/{5462..10922} split disagreed
# with it. Expansions are quoted per SC2086.
redis-cli -h "${ip}" -p 16380 cluster addslots {0..5460}
redis-cli -h "${ip}" -p 16381 cluster addslots {5461..10922}
redis-cli -h "${ip}" -p 16382 cluster addslots {10923..16383}

For the 3 replica nodes, run cluster nodes to find each master's node ID, then use cluster replicate to make each remaining node a replica of one master.

# Inspect the cluster from one of the soon-to-be replicas to obtain the
# master node IDs needed by `cluster replicate`. Host quoted per SC2086.
redis-cli -h "${ip}" -p 16383 cluster nodes

01c75d49d3371484623b8fce8c6f5893c3bd6081 10.6.31.2:16390@26390 master - 0 1631867123426 3 connected 10923-16383
9f9fa66f3d4ed5caf66f5f05d6cff75424933aea 10.6.31.2:16370@26370 master - 0 1631867122425 1 connected 0-5460
9a43f689185ad2d85e45a2f469854eca71748274 10.6.31.2:16380@26380 myself,master - 0 1631867122000 2 connected 5461-10922

# Attach each remaining node as a replica of one master, identified by the
# master's node ID from `cluster nodes`. Host quoted per SC2086.
redis-cli -h "${ip}" -p 16383 cluster replicate 01c75d49d3371484623b8fce8c6f5893c3bd6081
redis-cli -h "${ip}" -p 16384 cluster replicate 9f9fa66f3d4ed5caf66f5f05d6cff75424933aea
redis-cli -h "${ip}" -p 16385 cluster replicate 9a43f689185ad2d85e45a2f469854eca71748274

2. use redis-cli to create cluster

# One-shot cluster creation. --cluster-replicas 1 makes redis-cli assign
# the first three endpoints as masters and the last three as their
# replicas; without it, all six nodes would become masters, which
# contradicts the 3-masters + 3-replicas goal of this guide.
redis-cli --cluster create "${ip}":16380 "${ip}":16381 "${ip}":16382 \
  "${ip}":16383 "${ip}":16384 "${ip}":16385 --cluster-replicas 1
>>> Performing hash slots allocation on 3 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
M: 9f9fa66f3d4ed5caf66f5f05d6cff75424933aea 10.6.31.2:16370
   slots:[0-5460] (5461 slots) master
M: 9a43f689185ad2d85e45a2f469854eca71748274 10.6.31.2:16380
   slots:[5461-10922] (5462 slots) master
M: 01c75d49d3371484623b8fce8c6f5893c3bd6081 10.6.31.2:16390
   slots:[10923-16383] (5461 slots) master
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
..
>>> Performing Cluster Check (using node 10.6.31.2:16370)
M: 9f9fa66f3d4ed5caf66f5f05d6cff75424933aea 10.6.31.2:16370
   slots:[0-5460] (5461 slots) master
M: 01c75d49d3371484623b8fce8c6f5893c3bd6081 10.6.31.2:16390
   slots:[10923-16383] (5461 slots) master
M: 9a43f689185ad2d85e45a2f469854eca71748274 10.6.31.2:16380
   slots:[5461-10922] (5462 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Leave a message