Requirements: (1) request volume too high for one node, on the order of 1,000,000 requests/s; (2) data volume too large for one node.

Data partitioning schemes (the characteristics of each):

- Hash distribution
- Sequential distribution
- Node-modulo partitioning
- Consistent hashing
- Virtual slot partitioning
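Redis Cluster itself uses virtual slot partitioning: every key maps to one of 16384 slots via HASH_SLOT = CRC16(key) mod 16384. The mapping can be inspected from any running cluster node, for example:

# Which slot does the key "name" hash to?
redis-cli -p 6379 cluster keyslot name
# (integer) 5798   <- matches the redirects seen in the tests later in this article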
Manual (native) setup on servers:
1. Node configuration

port ${port}
daemonize yes
dir ""
dbfilename "dump-${port}.rdb"
logfile "${port}.log"
# Mark this node as a cluster node
cluster-enabled yes
# Cluster config file for this node
cluster-config-file nodes-${port}.conf
2. Start Redis with this configuration
3. cluster meet <ip> <port>
redis-cli -h 127.0.0.1 -p 7000 cluster meet 127.0.0.1 7001
4. Cluster node configuration
# Mark this node as a cluster node
cluster-enabled yes
# Node timeout: 15 seconds
cluster-node-timeout 15000
# Cluster config file
cluster-config-file "nodes.conf"
# By default the cluster only serves requests when every slot is covered; set to no to keep serving with partial coverage
cluster-require-full-coverage no
5. Assign slots
# {0..5461} is bash brace expansion; addslots accepts a list of slot numbers
redis-cli -h 127.0.0.1 -p 7000 cluster addslots {0..5461}
6. Set up master-replica relationships
redis-cli -h 127.0.0.1 -p 7003 cluster replicate ${node-id-7000}
Single-machine deployment:
1. Create the configuration
mkdir cluster
vim redis-6379.conf
port 6379
daemonize yes
dir "./"
logfile "6379.log"
dbfilename "dump-6379.rdb"
# Cluster settings
cluster-enabled yes
cluster-config-file node-6379.conf
cluster-require-full-coverage no
sed "s/6379/6380/g" redis-6379.conf > redis-6380.conf sed "s/6379/6381/g" redis-6379.conf > redis-6381.conf sed "s/6379/6382/g" redis-6379.conf > redis-6382.conf sed "s/6379/6383/g" redis-6379.conf > redis-6383.conf sed "s/6379/6384/g" redis-6379.conf > redis-6384.conf
Start the instances:
redis-server redis-6379.conf
redis-server redis-6380.conf
redis-server redis-6381.conf
redis-server redis-6382.conf
redis-server redis-6383.conf
redis-server redis-6384.conf
2. Meet handshake
redis-cli -p 6379 cluster meet 127.0.0.1 6380
redis-cli -p 6379 cluster meet 127.0.0.1 6381
redis-cli -p 6379 cluster meet 127.0.0.1 6382
redis-cli -p 6379 cluster meet 127.0.0.1 6383
redis-cli -p 6379 cluster meet 127.0.0.1 6384
redis-cli -p 6380 cluster meet 127.0.0.1 6381
redis-cli -p 6380 cluster meet 127.0.0.1 6382
redis-cli -p 6380 cluster meet 127.0.0.1 6383
redis-cli -p 6380 cluster meet 127.0.0.1 6384
redis-cli -p 6381 cluster meet 127.0.0.1 6382
redis-cli -p 6381 cluster meet 127.0.0.1 6383
redis-cli -p 6381 cluster meet 127.0.0.1 6384
redis-cli -p 6382 cluster meet 127.0.0.1 6383
redis-cli -p 6382 cluster meet 127.0.0.1 6384
redis-cli -p 6383 cluster meet 127.0.0.1 6384
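Strictly speaking the full mesh is unnecessary: cluster membership propagates through the gossip protocol, so introducing every node to a single node is enough (a minimal sketch):

for port in 6380 6381 6382 6383 6384; do
    redis-cli -p 6379 cluster meet 127.0.0.1 ${port}
done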
Current state:
[root@localhost cluster]# redis-cli -p 6379
127.0.0.1:6379> cluster nodes
171ad8b979d147dfe069dc7accf183adec22e1e3 127.0.0.1:6379@16379 myself,master - 0 1553940474000 4 connected
46b59f04b4ff7e3c691e7d8561f79e75d774eae3 127.0.0.1:6381@16381 master - 0 1553940472000 1 connected
60f54b28c08b3f96e31fe532000ba0b53fffdcec 127.0.0.1:6384@16384 master - 0 1553940474552 2 connected
12718197ace83ae68d876e7dee03d8e5774aed43 127.0.0.1:6382@16382 master - 0 1553940474000 5 connected
ec6bdcea4b3244d6f2315c8a7b82b54775f1c38e 127.0.0.1:6380@16380 master - 0 1553940475557 0 connected
a8face71e2648047748980c8f2c612c1b3be7cfd 127.0.0.1:6383@16383 master - 0 1553940473542 3 connected
127.0.0.1:6379> cluster info
cluster_state:fail
cluster_slots_assigned:0
cluster_slots_ok:0
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:0
cluster_current_epoch:5
cluster_my_epoch:4
cluster_stats_messages_ping_sent:36
cluster_stats_messages_pong_sent:38
cluster_stats_messages_meet_sent:5
cluster_stats_messages_sent:79
cluster_stats_messages_ping_received:38
cluster_stats_messages_pong_received:41
cluster_stats_messages_received:79
3. Assign slots
#!/bin/bash
# addSlot.sh: assign slots $1..$2 to the node listening on port $3
start=$1
end=$2
port=$3
for slot in `seq $start $end`
do
    redis-cli -p $port cluster addslots $slot
done
./addSlot.sh 0 5461 6379
./addSlot.sh 5462 10922 6380
./addSlot.sh 10923 16383 6381
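Since cluster addslots accepts a list of slot numbers, each range can also be assigned in a single call rather than 16384 separate redis-cli invocations (a sketch, same layout as above):

redis-cli -p 6379 cluster addslots $(seq 0 5461)
redis-cli -p 6380 cluster addslots $(seq 5462 10922)
redis-cli -p 6381 cluster addslots $(seq 10923 16383)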
4. Configure master-replica pairs
redis-cli -p 6382 cluster replicate 171ad8b979d147dfe069dc7accf183adec22e1e3
redis-cli -p 6383 cluster replicate ec6bdcea4b3244d6f2315c8a7b82b54775f1c38e
redis-cli -p 6384 cluster replicate 46b59f04b4ff7e3c691e7d8561f79e75d774eae3
Current state:
[root@localhost cluster]# redis-cli -p 6379 cluster nodes
171ad8b979d147dfe069dc7accf183adec22e1e3 127.0.0.1:6379@16379 myself,master - 0 1553943779000 4 connected 0-5461
46b59f04b4ff7e3c691e7d8561f79e75d774eae3 127.0.0.1:6381@16381 master - 0 1553943779745 1 connected 10923-16383
60f54b28c08b3f96e31fe532000ba0b53fffdcec 127.0.0.1:6384@16384 slave 46b59f04b4ff7e3c691e7d8561f79e75d774eae3 0 1553943777725 2 connected
12718197ace83ae68d876e7dee03d8e5774aed43 127.0.0.1:6382@16382 slave 171ad8b979d147dfe069dc7accf183adec22e1e3 0 1553943779000 5 connected
ec6bdcea4b3244d6f2315c8a7b82b54775f1c38e 127.0.0.1:6380@16380 master - 0 1553943780000 0 connected 5462-10922
a8face71e2648047748980c8f2c612c1b3be7cfd 127.0.0.1:6383@16383 slave ec6bdcea4b3244d6f2315c8a7b82b54775f1c38e 0 1553943780753 3 connected
5. Test the cluster
redis-cli -c -p 6379 (-c enables cluster mode, so the client follows MOVED redirects)
[root@localhost cluster]# redis-cli -c -p 6379
127.0.0.1:6379> set name aa
-> Redirected to slot [5798] located at 127.0.0.1:6380
OK
127.0.0.1:6380> set key redis
-> Redirected to slot [12539] located at 127.0.0.1:6381
OK
127.0.0.1:6381> get name
-> Redirected to slot [5798] located at 127.0.0.1:6380
"aa"
127.0.0.1:6380> get key
-> Redirected to slot [12539] located at 127.0.0.1:6381
"redis"
127.0.0.1:6381>
Deployment with the official tooling (redis-trib.rb / redis-cli --cluster):

1. Ruby environment:
Here the tarball is copied up from the local machine: scp ruby-2.6.2.tar.gz root@192.168.0.109:~

On the VM (alternatively, download it directly with wget):
wget https://cache.ruby-lang.org/pub/ruby/2.6/ruby-2.6.2.tar.gz
tar -zxvf ruby-2.6.2.tar.gz -C /usr/local/
cd /usr/local/ruby-2.6.2
./configure
make && make install
cd gems/
# Download the redis gem
wget https://rubygems.org/downloads/redis-4.1.0.gem
# Install the redis gem
gem install -l redis-4.1.0.gem
# Check the installation
gem list redis
# Run redis-trib.rb
./redis-trib.rb
2. Prepare the configuration
port 6379
daemonize yes
dir "./"
logfile "6379.log"
dbfilename "dump-6379.rdb"
protected-mode no
# Cluster settings
cluster-enabled yes
cluster-config-file node-6379.conf
cluster-require-full-coverage no
Make six copies:
sed "s/6379/6380/g" redis-6379.conf > redis-6380.conf sed "s/6379/6381/g" redis-6379.conf > redis-6381.conf sed "s/6379/6382/g" redis-6379.conf > redis-6382.conf sed "s/6379/6383/g" redis-6379.conf > redis-6383.conf sed "s/6379/6384/g" redis-6379.conf > redis-6384.conf
3. Start the services:
redis-server redis-6379.conf
redis-server redis-6380.conf
redis-server redis-6381.conf
redis-server redis-6382.conf
redis-server redis-6383.conf
redis-server redis-6384.conf
Check the status:
127.0.0.1:6379> cluster nodes
91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d :6379@16379 myself,master - 0 0 0 connected
127.0.0.1:6379> cluster info
cluster_state:fail
cluster_slots_assigned:0
cluster_slots_ok:0
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:1
cluster_size:0
cluster_current_epoch:0
cluster_my_epoch:0
cluster_stats_messages_sent:0
cluster_stats_messages_received:0
127.0.0.1:6379> set name aaa
(error) CLUSTERDOWN Hash slot not served
4. Create the cluster
For testing across multiple machines, disable protected mode (protected-mode no) and open both port xxxx and port 1xxxx; e.g., for port 6379, open 6379 and 16379.
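On a firewalld host (the same tooling used in the multi-machine walkthrough later in this article), opening both ports might look like:

firewall-cmd --permanent --add-port=6379/tcp
firewall-cmd --permanent --add-port=16379/tcp
firewall-cmd --reload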
# Create the cluster; the first three nodes become masters, and --cluster-replicas 1 gives each master one replica
redis-cli --cluster create --cluster-replicas 1 127.0.0.1:6379 127.0.0.1:6380 127.0.0.1:6381 127.0.0.1:6382 127.0.0.1:6383 127.0.0.1:6384
# Response
>>> Performing hash slots allocation on 6 nodes...
# Masters are chosen
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
# Replicas are assigned
Adding replica 127.0.0.1:6383 to 127.0.0.1:6379
Adding replica 127.0.0.1:6384 to 127.0.0.1:6380
Adding replica 127.0.0.1:6382 to 127.0.0.1:6381
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
# Slot assignment
M: 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d 127.0.0.1:6379
   slots:[0-5460] (5461 slots) master
M: 9111432777a2356508706c07e44bc0340ee6e594 127.0.0.1:6380
   slots:[5461-10922] (5462 slots) master
M: 29b0cb2bd387a428bd34109efa5514d221da174b 127.0.0.1:6381
   slots:[10923-16383] (5461 slots) master
S: 442d077607428ec3116b1ae9a0c5dbea89567c7c 127.0.0.1:6382
   replicates 29b0cb2bd387a428bd34109efa5514d221da174b
S: 3912cc4baaf6964b07ca05020f6f28f4d7370f38 127.0.0.1:6383
   replicates 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d
S: 61beb8c84aed2079e8e7232b374a20bf1e4dd90c 127.0.0.1:6384
   replicates 9111432777a2356508706c07e44bc0340ee6e594
# Confirm
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
........
>>> Performing Cluster Check (using node 127.0.0.1:6379)
M: 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d 127.0.0.1:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: 9111432777a2356508706c07e44bc0340ee6e594 127.0.0.1:6380
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: 61beb8c84aed2079e8e7232b374a20bf1e4dd90c 127.0.0.1:6384
   slots: (0 slots) slave
   replicates 9111432777a2356508706c07e44bc0340ee6e594
S: 442d077607428ec3116b1ae9a0c5dbea89567c7c 127.0.0.1:6382
   slots: (0 slots) slave
   replicates 29b0cb2bd387a428bd34109efa5514d221da174b
S: 3912cc4baaf6964b07ca05020f6f28f4d7370f38 127.0.0.1:6383
   slots: (0 slots) slave
   replicates 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d
M: 29b0cb2bd387a428bd34109efa5514d221da174b 127.0.0.1:6381
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
5. Verify the cluster:
[root@localhost src]# redis-cli -c -p 6379
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:219
cluster_stats_messages_pong_sent:230
cluster_stats_messages_sent:449
cluster_stats_messages_ping_received:225
cluster_stats_messages_pong_received:219
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:449
127.0.0.1:6379> cluster nodes
9111432777a2356508706c07e44bc0340ee6e594 127.0.0.1:6380@16380 master - 0 1554094994000 2 connected 5461-10922
61beb8c84aed2079e8e7232b374a20bf1e4dd90c 127.0.0.1:6384@16384 slave 9111432777a2356508706c07e44bc0340ee6e594 0 1554094993530 6 connected
91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d 127.0.0.1:6379@16379 myself,master - 0 1554094993000 1 connected 0-5460
442d077607428ec3116b1ae9a0c5dbea89567c7c 127.0.0.1:6382@16382 slave 29b0cb2bd387a428bd34109efa5514d221da174b 0 1554094994541 4 connected
3912cc4baaf6964b07ca05020f6f28f4d7370f38 127.0.0.1:6383@16383 slave 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d 0 1554094995554 5 connected
29b0cb2bd387a428bd34109efa5514d221da174b 127.0.0.1:6381@16381 master - 0 1554094993000 3 connected 10923-16383
127.0.0.1:6379> set name aaaaaa
-> Redirected to slot [5798] located at 127.0.0.1:6380
OK
127.0.0.1:6380> get name
"aaaaaa"
127.0.0.1:6380>
6. Common issues
A Redis cluster needs more than the client port open: it also needs the cluster bus port, which is the client port + 10000. For example, if the Redis port is 7000, the bus port is 17000 (it shows up as the @17000 suffix in cluster nodes output). Every server in the cluster must therefore open both the client port and the bus port.
In cluster mode, connect with an explicit -h <ip>; the command should be: ./redis-cli -h 192.168.118.110 -c -p 7000
Scaling principle: slots, and the data in them, move between nodes.
redis-cli --cluster add-node 192.168.5.100:8007 192.168.5.100:8000
Prepare the new nodes and start them:
[root@localhost cluster]# sed "s/6379/6385/g" redis-6379.conf > redis-6385.conf
[root@localhost cluster]# sed "s/6379/6386/g" redis-6379.conf > redis-6386.conf
[root@localhost cluster]# redis-server redis-6385.conf
[root@localhost cluster]# redis-server redis-6386.conf
[root@localhost cluster]# ps -ef | grep redis
root 93290 1 0 13:34 ? 00:00:03 redis-server *:6379 [cluster]
root 93295 1 0 13:34 ? 00:00:03 redis-server *:6380 [cluster]
root 93300 1 0 13:34 ? 00:00:03 redis-server *:6381 [cluster]
root 93305 1 0 13:35 ? 00:00:03 redis-server *:6382 [cluster]
root 93310 1 0 13:35 ? 00:00:04 redis-server *:6383 [cluster]
root 93315 1 0 13:35 ? 00:00:03 redis-server *:6384 [cluster]
root 93415 1 0 14:27 ? 00:00:00 redis-server *:6385 [cluster]
root 93420 1 0 14:27 ? 00:00:00 redis-server *:6386 [cluster]
[root@localhost cluster]# redis-cli -p 6385
127.0.0.1:6385> cluster nodes
9c491c885d8ec3e885c79b3cabb9603e4c386019 :6385@16385 myself,master - 0 0 0 connected
Join the cluster:
127.0.0.1:6385  # the node being added
127.0.0.1:6379  # an existing node in the cluster
[root@localhost cluster]# redis-cli --cluster add-node 127.0.0.1:6385 127.0.0.1:6379
>>> Adding node 127.0.0.1:6385 to cluster 127.0.0.1:6379
>>> Performing Cluster Check (using node 127.0.0.1:6379)
M: 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d 127.0.0.1:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: 9111432777a2356508706c07e44bc0340ee6e594 127.0.0.1:6380
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: 61beb8c84aed2079e8e7232b374a20bf1e4dd90c 127.0.0.1:6384
   slots: (0 slots) slave
   replicates 29b0cb2bd387a428bd34109efa5514d221da174b
S: 442d077607428ec3116b1ae9a0c5dbea89567c7c 127.0.0.1:6382
   slots: (0 slots) slave
   replicates 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d
S: 3912cc4baaf6964b07ca05020f6f28f4d7370f38 127.0.0.1:6383
   slots: (0 slots) slave
   replicates 9111432777a2356508706c07e44bc0340ee6e594
M: 29b0cb2bd387a428bd34109efa5514d221da174b 127.0.0.1:6381
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 127.0.0.1:6385 to make it join the cluster.
[OK] New node added correctly.
Migrate slots and data:
redis-cli --cluster reshard 127.0.0.1:6385
# Choose how many slots to move to the node (4096 = 16384 / 4)
How many slots do you want to move (from 1 to 16384)? 4096
# Choose which node receives them
What is the receiving node ID? 9c491c885d8ec3e885c79b3cabb9603e4c386019
# Choose the source slots (all: take from every master automatically; done: finish a manual list)
Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1: all
# Confirm
Do you want to proceed with the proposed reshard plan (yes/no)? yes
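For scripting, redis-cli 5.x also accepts the reshard answers as flags; the following should be equivalent to the interactive session above (a sketch; flag support may vary by version):

redis-cli --cluster reshard 127.0.0.1:6385 \
  --cluster-from all \
  --cluster-to 9c491c885d8ec3e885c79b3cabb9603e4c386019 \
  --cluster-slots 4096 \
  --cluster-yes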
Add replica 6386 to master 6385:
# First add 6386 to the cluster
redis-cli --cluster add-node 127.0.0.1:6386 127.0.0.1:6379
# Inspect the cluster from 6386
redis-cli -c -p 6386 cluster nodes
# Use replicate to set the node's master
redis-cli -c -p 6386 cluster replicate 9c491c885d8ec3e885c79b3cabb9603e4c386019
The cluster is now four masters with four replicas.
Remove the replica node:
redis-cli --cluster del-node 127.0.0.1:6386 d327a7e60d078eeaa98122fb1c196fba7bc468b8
>>> Removing node d327a7e60d078eeaa98122fb1c196fba7bc468b8 from cluster 127.0.0.1:6386
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
Give the slots back:

They can be moved back in three passes, splitting them evenly across the remaining masters.
[root@localhost cluster]# redis-cli --cluster reshard 127.0.0.1:6385
>>> Performing Cluster Check (using node 127.0.0.1:6385)
M: 9c491c885d8ec3e885c79b3cabb9603e4c386019 127.0.0.1:6385
   slots:[11046-12287] (1242 slots) master
S: 3912cc4baaf6964b07ca05020f6f28f4d7370f38 127.0.0.1:6383
   slots: (0 slots) slave
   replicates 9111432777a2356508706c07e44bc0340ee6e594
M: 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d 127.0.0.1:6379
   slots:[0-554],[1964-2650],[4250-5460],[6802-7887],[10923-11045],[12288-13348] (4723 slots) master
   1 additional replica(s)
S: 61beb8c84aed2079e8e7232b374a20bf1e4dd90c 127.0.0.1:6384
   slots: (0 slots) slave
   replicates 29b0cb2bd387a428bd34109efa5514d221da174b
M: 29b0cb2bd387a428bd34109efa5514d221da174b 127.0.0.1:6381
   slots:[555-1963],[3237-4249],[6380-6801],[14571-16383] (4657 slots) master
   1 additional replica(s)
S: 442d077607428ec3116b1ae9a0c5dbea89567c7c 127.0.0.1:6382
   slots: (0 slots) slave
   replicates 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d
M: 9111432777a2356508706c07e44bc0340ee6e594 127.0.0.1:6380
   slots:[2651-3236],[5461-6379],[7888-10922],[13349-14570] (5762 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 1242
What is the receiving node ID? 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d
Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1: 9c491c885d8ec3e885c79b3cabb9603e4c386019
Source node #2: done
Remove the node from the cluster:
[root@localhost cluster]# redis-cli --cluster del-node 127.0.0.1:6385 9c491c885d8ec3e885c79b3cabb9603e4c386019
>>> Removing node 9c491c885d8ec3e885c79b3cabb9603e4c386019 from cluster 127.0.0.1:6385
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
Cluster state and node state:
# Cluster state
[root@localhost cluster]# redis-cli -c cluster nodes
9111432777a2356508706c07e44bc0340ee6e594 127.0.0.1:6380@16380 master - 0 1554129407823 9 connected 2651-3236 5461-6379 7888-10922 13349-14570
61beb8c84aed2079e8e7232b374a20bf1e4dd90c 127.0.0.1:6384@16384 slave 29b0cb2bd387a428bd34109efa5514d221da174b 0 1554129407000 10 connected
91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d 127.0.0.1:6379@16379 myself,master - 0 1554129407000 11 connected 0-554 1964-2650 4250-5460 6802-7887 10923-13348
442d077607428ec3116b1ae9a0c5dbea89567c7c 127.0.0.1:6382@16382 slave 91d2f29b7bf974ebbaeeb4ac90a2232f9a1d126d 0 1554129406000 11 connected
3912cc4baaf6964b07ca05020f6f28f4d7370f38 127.0.0.1:6383@16383 slave 9111432777a2356508706c07e44bc0340ee6e594 0 1554129406812 9 connected
29b0cb2bd387a428bd34109efa5514d221da174b 127.0.0.1:6381@16381 master - 0 1554129405000 10 connected 555-1963 3237-4249 6380-6801 14571-16383
# Node state: having left the cluster, the node has been shut down
[root@localhost cluster]# redis-cli -c -p 6385
Could not connect to Redis at 127.0.0.1:6385: Connection refused
not connected>
Multi-machine deployment:

# VMs
192.168.0.109
192.168.0.110
192.168.0.111
# Target topology
192.168.0.109:6379 master <- 192.168.0.110:6380 replica
192.168.0.110:6379 master <- 192.168.0.111:6380 replica
192.168.0.111:6379 master <- 192.168.0.109:6380 replica
1. Install Redis on all three machines [see the Redis installation notes]
2. Create the config files [identical on all three machines]
[root@localhost redis-5.0.4]# mkdir vm-cluster
[root@localhost redis-5.0.4]# cd vm-cluster
[root@localhost vm-cluster]# vim redis-6379.conf

port 6379
daemonize yes
dir "./"
logfile "6379.log"
dbfilename "dump-6379.rdb"
protected-mode no
# Cluster settings
cluster-enabled yes
cluster-config-file node-6379.conf
cluster-require-full-coverage no

[root@localhost vm-cluster]# sed "s/6379/6380/g" redis-6379.conf > redis-6380.conf
# Open the ports
[root@localhost vm-cluster]# firewall-cmd --permanent --add-port=6379/tcp
[root@localhost vm-cluster]# firewall-cmd --permanent --add-port=16379/tcp
[root@localhost vm-cluster]# firewall-cmd --permanent --add-port=6380/tcp
[root@localhost vm-cluster]# firewall-cmd --permanent --add-port=16380/tcp
[root@localhost vm-cluster]# firewall-cmd --reload
# Start Redis
[root@localhost vm-cluster]# redis-server redis-6379.conf
[root@localhost vm-cluster]# redis-server redis-6380.conf
3. Create the cluster:
[root@localhost vm-cluster]# redis-cli --cluster create --cluster-replicas 1 192.168.0.109:6379 192.168.0.110:6379 192.168.0.111:6379 192.168.0.110:6380 192.168.0.111:6380 192.168.0.109:6380
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.0.110:6380 to 192.168.0.109:6379
Adding replica 192.168.0.111:6380 to 192.168.0.110:6379
Adding replica 192.168.0.109:6380 to 192.168.0.111:6379
M: cd831b6cb2e1de4933c7e57a2e6dd9f7c3179879 192.168.0.109:6379
   slots:[0-5460] (5461 slots) master
M: 418bcea8d67d4194c6ea0019c16536683a1385e7 192.168.0.110:6379
   slots:[5461-10922] (5462 slots) master
M: c99f237c3f795577fcfbb4d9f44aa974c2f7cc10 192.168.0.111:6379
   slots:[10923-16383] (5461 slots) master
S: 3c28541b32771a5316f9c52cbfc0ad66729d2eb7 192.168.0.110:6380
   replicates cd831b6cb2e1de4933c7e57a2e6dd9f7c3179879
S: a1f239e706445494f685ebaf4dd0032bb66d9060 192.168.0.111:6380
   replicates 418bcea8d67d4194c6ea0019c16536683a1385e7
S: b38161698f89cf41f2a7a49b7a8c33506f0c95f0 192.168.0.109:6380
   replicates c99f237c3f795577fcfbb4d9f44aa974c2f7cc10
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
......
>>> Performing Cluster Check (using node 192.168.0.109:6379)
M: cd831b6cb2e1de4933c7e57a2e6dd9f7c3179879 192.168.0.109:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: a1f239e706445494f685ebaf4dd0032bb66d9060 192.168.0.111:6380
   slots: (0 slots) slave
   replicates 418bcea8d67d4194c6ea0019c16536683a1385e7
M: 418bcea8d67d4194c6ea0019c16536683a1385e7 192.168.0.110:6379
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: 3c28541b32771a5316f9c52cbfc0ad66729d2eb7 192.168.0.110:6380
   slots: (0 slots) slave
   replicates cd831b6cb2e1de4933c7e57a2e6dd9f7c3179879
M: c99f237c3f795577fcfbb4d9f44aa974c2f7cc10 192.168.0.111:6379
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: b38161698f89cf41f2a7a49b7a8c33506f0c95f0 192.168.0.109:6380
   slots: (0 slots) slave
   replicates c99f237c3f795577fcfbb4d9f44aa974c2f7cc10
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
4. Test the cluster
[root@localhost vm-cluster]# redis-cli -c -p 6379
127.0.0.1:6379> set name aa
-> Redirected to slot [5798] located at 192.168.0.110:6379
OK
192.168.0.110:6379> set key value
-> Redirected to slot [12539] located at 192.168.0.111:6379
OK
192.168.0.111:6379> get name
-> Redirected to slot [5798] located at 192.168.0.110:6379
"aa"
192.168.0.110:6379> get key
-> Redirected to slot [12539] located at 192.168.0.111:6379
"value"
Failover

Fault detection:
1. Failures are discovered through ping/pong messages.
2. Subjective offline (pfail) and objective offline (fail).

Fault recovery:
1. Eligibility check: a replica that has been disconnected from its master for longer than cluster-node-timeout * cluster-slave-validity-factor is disqualified from promotion.
2. Election voting.
3. Replace the master:
- run slaveof no one to turn itself into a master;
- run clusterDelSlot to revoke the slots held by the failed master, then clusterAddSlot to assign those slots to itself.

Note: cluster-require-full-coverage defaults to yes.
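For reference, a sketch of the two settings involved, with their usual defaults (values assumed for illustration):

# In redis.conf (defaults shown)
cluster-node-timeout 15000             # 15 s before a node is considered failing
cluster-slave-validity-factor 10       # replicas disconnected > 15000 * 10 ms are not promoted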
Operations commands:

redis-trib.rb info ip:port — show the distribution of nodes, slots, and keys.
redis-trib.rb rebalance — rebalance slots across masters (use with caution).
cluster countkeysinslot {slot} — show the number of keys stored in a given slot.
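As a quick sanity check, countkeysinslot can be run against the node that owns the slot; a sketch against the single-machine cluster above, where 6380 owns slot 5798:

redis-cli -p 6380 cluster countkeysinslot 5798
# (integer) 1   <- assumes the key "name" written during the earlier test is still present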