Redis documentation: http://doc.redisfans.com/

References: https://www.cnblogs.com/wuxl360/p/5920330.html

http://www.cnblogs.com/carryping/p/7447823.html

https://www.jianshu.com/p/2639549bedc8

1. Download and extract

cd /root/software
wget http://download.redis.io/releases/redis-3.2.3.tar.gz
tar -zxvf redis-3.2.3.tar.gz

2. Build and install

cd redis-3.2.3
make PREFIX=/usr/local/redis-3.2.3 install
ln -sv /usr/local/redis-3.2.3 /usr/local/redis
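
A quick sanity check that the install and the symlink worked (the exact build string will vary):

/usr/local/redis/bin/redis-server --version
# should print something like: Redis server v=3.2.3 ...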

3. Copy redis-trib.rb to /usr/local/bin

cd src
cp redis-trib.rb /usr/local/bin/  

4. Create the Redis nodes

First, create a cluster-test directory under /usr/local/redis/ on the machine:

cd /usr/local/redis
mkdir cluster-test

Inside cluster-test, create directories named 7001, 7002, 7003, 7004, 7005 and 7006, and copy the stock redis.conf (from the source tree extracted in step 1) into each of these six directories:

cd cluster-test
mkdir 7001 7002 7003 7004 7005 7006
cp /root/software/redis-3.2.3/redis.conf 7001
cp /root/software/redis-3.2.3/redis.conf 7002
cp /root/software/redis-3.2.3/redis.conf 7003
cp /root/software/redis-3.2.3/redis.conf 7004
cp /root/software/redis-3.2.3/redis.conf 7005
cp /root/software/redis-3.2.3/redis.conf 7006

Edit each of the six configuration files so that they contain the following (only the port-specific values differ from node to node):

bind 0.0.0.0
port 7001
dir /usr/local/redis/cluster-test/7001
pidfile /var/run/redis_7001.pid
cluster-enabled yes
cluster-config-file nodes_7001.conf
cluster-node-timeout 15000
appendonly yes
appendfilename "appendonly.aof"
daemonize yes
protected-mode yes
tcp-backlog 511
timeout 0
tcp-keepalive 300
supervised no
loglevel verbose
logfile ""
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes

7001 redis.conf

bind 0.0.0.0
port 7002
dir /usr/local/redis/cluster-test/7002
pidfile /var/run/redis_7002.pid
cluster-enabled yes
cluster-config-file nodes_7002.conf
cluster-node-timeout 15000
appendonly yes
appendfilename "appendonly.aof"
daemonize yes
protected-mode yes
tcp-backlog 511
timeout 0
tcp-keepalive 300
supervised no
loglevel verbose
logfile ""
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes

7002 redis.conf

bind 0.0.0.0
port 7003
dir /usr/local/redis/cluster-test/7003
pidfile /var/run/redis_7003.pid
cluster-enabled yes
cluster-config-file nodes_7003.conf
cluster-node-timeout 15000
appendonly yes
appendfilename "appendonly.aof"
daemonize yes
protected-mode yes
tcp-backlog 511
timeout 0
tcp-keepalive 300
supervised no
loglevel verbose
logfile ""
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes

7003 redis.conf

bind 0.0.0.0
port 7004
dir /usr/local/redis/cluster-test/7004
pidfile /var/run/redis_7004.pid
cluster-enabled yes
cluster-config-file nodes_7004.conf
cluster-node-timeout 15000
appendonly yes
appendfilename "appendonly.aof"
daemonize yes
protected-mode yes
tcp-backlog 511
timeout 0
tcp-keepalive 300
supervised no
loglevel verbose
logfile ""
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes

7004 redis.conf

bind 0.0.0.0
port 7005
dir /usr/local/redis/cluster-test/7005
pidfile /var/run/redis_7005.pid
cluster-enabled yes
cluster-config-file nodes_7005.conf
cluster-node-timeout 15000
appendonly yes
appendfilename "appendonly.aof"
daemonize yes
protected-mode yes
tcp-backlog 511
timeout 0
tcp-keepalive 300
supervised no
loglevel verbose
logfile ""
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes

7005 redis.conf

bind 0.0.0.0
port 7006
dir /usr/local/redis/cluster-test/7006
pidfile /var/run/redis_7006.pid
cluster-enabled yes
cluster-config-file nodes_7006.conf
cluster-node-timeout 15000
appendonly yes
appendfilename "appendonly.aof"
daemonize yes
protected-mode yes
tcp-backlog 511
timeout 0
tcp-keepalive 300
supervised no
loglevel verbose
logfile ""
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes

7006 redis.conf

In summary, the directives that matter for the cluster are:

port 7001                            # node port: 7001 through 7006
bind 0.0.0.0                         # default is 127.0.0.1; it must be changed to an address the other nodes can reach (here 0.0.0.0), otherwise the cluster cannot be created
daemonize yes                        # run Redis in the background
pidfile /var/run/redis_7001.pid      # pid file, one per node: redis_7001.pid through redis_7006.pid
cluster-enabled yes                  # enable cluster mode (remove the leading # comment)
cluster-config-file nodes_7001.conf  # cluster state file, generated automatically on first start: nodes_7001.conf through nodes_7006.conf
cluster-node-timeout 15000           # node timeout, default 15000 ms; adjust as needed
appendonly yes                       # enable the AOF log; every write operation is appended to it
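
Editing six files by hand is tedious; here is a minimal sketch that applies the same changes with sed, assuming each directory still holds an unmodified copy of the stock 3.2 redis.conf (where the cluster-* directives are commented out) and GNU sed:

cd /usr/local/redis/cluster-test
for port in 7001 7002 7003 7004 7005 7006; do
    sed -i \
        -e "s|^bind .*|bind 0.0.0.0|" \
        -e "s|^port .*|port ${port}|" \
        -e "s|^dir .*|dir /usr/local/redis/cluster-test/${port}|" \
        -e "s|^pidfile .*|pidfile /var/run/redis_${port}.pid|" \
        -e "s|^daemonize .*|daemonize yes|" \
        -e "s|^appendonly .*|appendonly yes|" \
        -e "s|^# cluster-enabled .*|cluster-enabled yes|" \
        -e "s|^# cluster-config-file .*|cluster-config-file nodes_${port}.conf|" \
        -e "s|^# cluster-node-timeout .*|cluster-node-timeout 15000|" \
        "${port}/redis.conf"
done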

5. Start the nodes and check

/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7001/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7002/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7003/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7004/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7005/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7006/redis.conf

Check that all six instances are listening; each node uses its data port plus a cluster bus port (data port + 10000):
[root@localhost 7001]# netstat -ntlp |grep redis
tcp        0      0 0.0.0.0:7005                0.0.0.0:*                   LISTEN      19225/redis-server  
tcp        0      0 0.0.0.0:7006                0.0.0.0:*                   LISTEN      19229/redis-server  
tcp        0      0 0.0.0.0:17001               0.0.0.0:*                   LISTEN      19209/redis-server  
tcp        0      0 0.0.0.0:17002               0.0.0.0:*                   LISTEN      19213/redis-server  
tcp        0      0 0.0.0.0:17003               0.0.0.0:*                   LISTEN      19215/redis-server  
tcp        0      0 0.0.0.0:17004               0.0.0.0:*                   LISTEN      19221/redis-server  
tcp        0      0 0.0.0.0:17005               0.0.0.0:*                   LISTEN      19225/redis-server  
tcp        0      0 0.0.0.0:17006               0.0.0.0:*                   LISTEN      19229/redis-server  
tcp        0      0 0.0.0.0:7001                0.0.0.0:*                   LISTEN      19209/redis-server  
tcp        0      0 0.0.0.0:7002                0.0.0.0:*                   LISTEN      19213/redis-server  
tcp        0      0 0.0.0.0:7003                0.0.0.0:*                   LISTEN      19215/redis-server  
tcp        0      0 0.0.0.0:7004                0.0.0.0:*                   LISTEN      19221/redis-server  

6. Create the cluster

redis-trib.rb  create  --replicas  1 192.168.8.102:7001  192.168.8.102:7002 192.168.8.102:7003  192.168.8.102:7004  192.168.8.102:7005 192.168.8.102:7006
redis-trib.rb is a Ruby script and needs the redis gem, but installing the gem failed:

gem install redis
ERROR:  Error installing redis:
        redis requires Ruby version >= 2.2.2.

The fix is to install RVM first and use it to upgrade Ruby to 2.4.5.

1. Install curl: sudo yum install curl
2. Install RVM: curl -L get.rvm.io | bash -s stable

The RVM install failed again, this time with a GPG error:

[root@localhost yum.repos.d]# curl -L get.rvm.io | bash -s stable
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 24173  100 24173    0     0  21587      0  0:00:01  0:00:01 --:--:--  128k
Downloading https://github.com/rvm/rvm/archive/1.29.7.tar.gz
Downloading https://github.com/rvm/rvm/releases/download/1.29.7/1.29.7.tar.gz.asc
gpg: Signature made Fri 04 Jan 2019 06:01:48 AM CST using RSA key ID 39499BDB
gpg: Can't check signature: No public key
GPG signature verification failed for '/usr/local/rvm/archives/rvm-1.29.7.tgz' - 'https://github.com/rvm/rvm/releases/download/1.29.7/1.29.7.tar.gz.asc'! Try to install GPG v2 and then fetch the public key:
    gpg2 --keyserver hkp://pool.sks-keyservers.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
or if it fails:
    command curl -sSL https://rvm.io/mpapis.asc | gpg2 --import -
    command curl -sSL https://rvm.io/pkuczynski.asc | gpg2 --import -
In case of further problems with validation please refer to https://rvm.io/rvm/security

Running the gpg2 commands suggested in the error message and retrying the installer fixes this.

3. Load RVM into the current shell: source /usr/local/rvm/scripts/rvm
4. List the Ruby versions RVM knows about: rvm list known
5. Install a Ruby version: rvm install 2.4.5
6. Switch to it: rvm use 2.4.5
7. Remove an old version if needed: rvm remove 2.0.0
8. Check the version: ruby --version
9. Install the redis gem again: gem install redis

Re-run the redis-trib.rb create command above and type yes when prompted; output like the following means the cluster was created successfully:
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
192.168.8.102:7001
192.168.8.102:7002
192.168.8.102:7003
Adding replica 192.168.8.102:7004 to 192.168.8.102:7001
Adding replica 192.168.8.102:7005 to 192.168.8.102:7002
Adding replica 192.168.8.102:7006 to 192.168.8.102:7003
M: 229393055278b1cded847e554739255905b33fb3 192.168.8.102:7001
   slots:0-5460 (5461 slots) master
M: ef175d84db52e084b5d74cf9f1c414011bf6cce9 192.168.8.102:7002
   slots:5461-10922 (5462 slots) master
M: 1bca1b7b96f3fe936ad44f254d17da26da9fd186 192.168.8.102:7003
   slots:10923-16383 (5461 slots) master
S: 837eea90f07c9cdebfa7e1924d2e2788cf5573eb 192.168.8.102:7004
   replicates 229393055278b1cded847e554739255905b33fb3
S: 0f3d5f7e78dc857efc5b58ab674faee9fba876af 192.168.8.102:7005
   replicates ef175d84db52e084b5d74cf9f1c414011bf6cce9
S: b41ae432b4bd8d7ca44bf318c7b9382f8dbd7a79 192.168.8.102:7006
   replicates 1bca1b7b96f3fe936ad44f254d17da26da9fd186
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join....
>>> Performing Cluster Check (using node 192.168.8.102:7001)
M: 229393055278b1cded847e554739255905b33fb3 192.168.8.102:7001
   slots:0-5460 (5461 slots) master
   1 additional replica(s)
M: 1bca1b7b96f3fe936ad44f254d17da26da9fd186 192.168.8.102:7003
   slots:10923-16383 (5461 slots) master
   1 additional replica(s)
M: ef175d84db52e084b5d74cf9f1c414011bf6cce9 192.168.8.102:7002
   slots:5461-10922 (5462 slots) master
   1 additional replica(s)
S: 0f3d5f7e78dc857efc5b58ab674faee9fba876af 192.168.8.102:7005
   slots: (0 slots) slave
   replicates ef175d84db52e084b5d74cf9f1c414011bf6cce9
S: 837eea90f07c9cdebfa7e1924d2e2788cf5573eb 192.168.8.102:7004
   slots: (0 slots) slave
   replicates 229393055278b1cded847e554739255905b33fb3
S: b41ae432b4bd8d7ca44bf318c7b9382f8dbd7a79 192.168.8.102:7006
   slots: (0 slots) slave
   replicates 1bca1b7b96f3fe936ad44f254d17da26da9fd186
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
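
The cluster can be re-checked at any time with redis-trib.rb, pointing it at any one reachable node; it runs the same "Performing Cluster Check" seen above:

redis-trib.rb check 192.168.8.102:7001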

7. Test

[root@localhost 7001]# ../../bin/redis-cli -c -h 192.168.8.102 -p 7001
192.168.8.102:7001>
192.168.8.102:7001>
192.168.8.102:7001> set name zhangsan
-> Redirected to slot [5798] located at 192.168.8.102:7002
OK
[root@localhost 7001]# ../../../redis/bin/redis-cli -c -p 7006
127.0.0.1:7006>
127.0.0.1:7006>
127.0.0.1:7006>
127.0.0.1:7006> get name
-> Redirected to slot [5798] located at 192.168.8.102:7002
"zhangsan" 验证数据一致性
[root@localhost 7003]# md5sum dump.rdb
2604704e38811948117ddc473d62dc55  dump.rdb [root@localhost 7001]# md5sum dump.rdb
2604704e38811948117ddc473d62dc55  dump.rdb 说明集群运作正常。
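
As an extra check (not part of the original steps), the master/slave pairing can be confirmed directly with INFO on any node; on a slave such as 7004, output like the following is expected:

/usr/local/redis/bin/redis-cli -p 7004 info replication
# role:slave
# master_host:192.168.8.102
# master_port:7001
# master_link_status:up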

8. Failover test

Take down the node at 192.168.8.102:7003:

[root@localhost 7001]# cat nodes_7001.conf
1bca1b7b96f3fe936ad44f254d17da26da9fd186 192.168.8.102:7003 master - 0 1551780190387 3 connected 10923-16383
ef175d84db52e084b5d74cf9f1c414011bf6cce9 192.168.8.102:7002 master - 0 1551780191393 2 connected 5461-10922
0f3d5f7e78dc857efc5b58ab674faee9fba876af 192.168.8.102:7005 slave ef175d84db52e084b5d74cf9f1c414011bf6cce9 0 1551780188372 5 connected
229393055278b1cded847e554739255905b33fb3 192.168.8.102:7001 myself,master - 0 0 1 connected 0-5460
837eea90f07c9cdebfa7e1924d2e2788cf5573eb 192.168.8.102:7004 slave 229393055278b1cded847e554739255905b33fb3 0 1551780187366 4 connected
b41ae432b4bd8d7ca44bf318c7b9382f8dbd7a79 192.168.8.102:7006 slave 1bca1b7b96f3fe936ad44f254d17da26da9fd186 0 1551780189379 6 connected

[root@localhost 7001]# netstat -ntlp |grep 7003
tcp        0      0 0.0.0.0:17003               0.0.0.0:*                   LISTEN      19215/redis-server  
tcp        0      0 0.0.0.0:7003                0.0.0.0:*                   LISTEN      19215/redis-server  
[root@localhost 7001]#
[root@localhost 7001]#
[root@localhost 7001]#
[root@localhost 7001]#
[root@localhost 7001]# kill -9 19215
[root@localhost 7001]#
[root@localhost 7001]# cat nodes_7001.conf
1bca1b7b96f3fe936ad44f254d17da26da9fd186 192.168.8.102:7003 master,fail - 1551784667064 1551784663541 3 disconnected
ef175d84db52e084b5d74cf9f1c414011bf6cce9 192.168.8.102:7002 master - 0 1551784682702 2 connected 5461-10922
0f3d5f7e78dc857efc5b58ab674faee9fba876af 192.168.8.102:7005 slave ef175d84db52e084b5d74cf9f1c414011bf6cce9 0 1551784681691 5 connected
229393055278b1cded847e554739255905b33fb3 192.168.8.102:7001 myself,master - 0 0 1 connected 0-5460
837eea90f07c9cdebfa7e1924d2e2788cf5573eb 192.168.8.102:7004 slave 229393055278b1cded847e554739255905b33fb3 0 1551784679675 4 connected
b41ae432b4bd8d7ca44bf318c7b9382f8dbd7a79 192.168.8.102:7006 master - 0 1551784680684 7 connected 10923-16383
vars currentEpoch 7 lastVoteEpoch 7
[root@localhost 7001]# redis-cli -c -p 7001
127.0.0.1:7001>
127.0.0.1:7001>
127.0.0.1:7001>
127.0.0.1:7001> get name
-> Redirected to slot [5798] located at 192.168.8.102:7002
"zhangsan"
The data is still accessible: as the nodes file above shows, slave 7006 was promoted to master for slots 10923-16383.

Now take down 192.168.8.102:7006 as well:

[root@localhost 7001]# redis-cli -c -p 7001
127.0.0.1:7001>
127.0.0.1:7001>
127.0.0.1:7001>
127.0.0.1:7001> get name
(error) CLUSTERDOWN The cluster is down
127.0.0.1:7001>
[root@localhost 7001]# !cat
cat nodes_7001.conf
1bca1b7b96f3fe936ad44f254d17da26da9fd186 192.168.8.102:7003 slave,fail b41ae432b4bd8d7ca44bf318c7b9382f8dbd7a79 1551785439155 1551785434725 7 disconnected
ef175d84db52e084b5d74cf9f1c414011bf6cce9 192.168.8.102:7002 master - 0 1551785536517 2 connected 5461-10922
0f3d5f7e78dc857efc5b58ab674faee9fba876af 192.168.8.102:7005 slave ef175d84db52e084b5d74cf9f1c414011bf6cce9 0 1551785537528 5 connected
229393055278b1cded847e554739255905b33fb3 192.168.8.102:7001 myself,master - 0 0 1 connected 0-5460
837eea90f07c9cdebfa7e1924d2e2788cf5573eb 192.168.8.102:7004 slave 229393055278b1cded847e554739255905b33fb3 0 1551785539547 4 connected
b41ae432b4bd8d7ca44bf318c7b9382f8dbd7a79 :0 master,fail,noaddr - 1551785518472 1551785516358 7 disconnected 10923-16383
vars currentEpoch 7 lastVoteEpoch 7

With both 7003 and 7006 down, no node is left serving slots 10923-16383, so the whole cluster reports CLUSTERDOWN. Bring the master for those slots, the 7006 node, back up:
[root@localhost 7001]# cat ../start_cluster.sh
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7001/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7002/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7003/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7004/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7005/redis.conf
/usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7006/redis.conf
[root@localhost 7001]#
[root@localhost 7001]#
[root@localhost 7001]# /usr/local/redis/bin/redis-server /usr/local/redis/cluster-test/7006/redis.conf
[root@localhost 7001]#
[root@localhost 7001]# cat nodes_7001.conf
1bca1b7b96f3fe936ad44f254d17da26da9fd186 192.168.8.102:7003 slave,fail b41ae432b4bd8d7ca44bf318c7b9382f8dbd7a79 0 1551785119857 7 connected
ef175d84db52e084b5d74cf9f1c414011bf6cce9 192.168.8.102:7002 master - 0 1551785118344 2 connected 5461-10922
0f3d5f7e78dc857efc5b58ab674faee9fba876af 192.168.8.102:7005 slave ef175d84db52e084b5d74cf9f1c414011bf6cce9 0 1551785119352 5 connected
229393055278b1cded847e554739255905b33fb3 192.168.8.102:7001 myself,master - 0 0 1 connected 0-5460
837eea90f07c9cdebfa7e1924d2e2788cf5573eb 192.168.8.102:7004 slave 229393055278b1cded847e554739255905b33fb3 0 1551785117336 4 connected
b41ae432b4bd8d7ca44bf318c7b9382f8dbd7a79 192.168.8.102:7006 master - 0 1551785786843 7 connected 10923-16383

Check again and the data is accessible once more:
[root@localhost 7001]# redis-cli -c -p 7001
127.0.0.1:7001>
127.0.0.1:7001>
127.0.0.1:7001> get name
-> Redirected to slot [5798] located at 192.168.8.102:7002
"zhangsan" 注意:恢复时,先恢复主节点 再恢复从节点

A quick look at how this works

Redis Cluster was designed from the start to be decentralized, with no middleware: every node in the cluster is an equal peer, and each node stores its own share of the data together with the state of the entire cluster. Every node keeps live connections to all the other nodes, so connecting to any single node is enough to reach the data held by the rest of the cluster.

Redis Cluster does not use traditional consistent hashing to distribute data; it uses hash slots instead. The cluster has 16384 slots in total. When you set a key, its slot is computed as CRC16(key) % 16384, and the key is stored on the node that owns that slot range. That is why, in the test above, the set and get commands were redirected straight to the node on port 7002.
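
Any node can report which slot a key maps to; for the key used in the test above, the result matches the [5798] seen in the redirect:

/usr/local/redis/bin/redis-cli -p 7001 cluster keyslot name
# (integer) 5798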

Each piece of data lives on one master node, and that master keeps its slave in sync through replication. Reads are likewise routed to the master that owns the key's hash slot. Only when a master goes down is its slave promoted to take over as the new master.

Note that a cluster must have at least three master nodes, otherwise cluster creation will fail; and once the number of surviving master nodes drops below half of the total, the cluster as a whole can no longer serve requests.
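
Cluster health can be checked at any time with CLUSTER INFO; cluster_state must be ok for the cluster to accept commands (output abridged):

/usr/local/redis/bin/redis-cli -p 7001 cluster info
# cluster_state:ok
# cluster_slots_assigned:16384
# cluster_known_nodes:6
# cluster_size:3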
