Machine Planning
Redis version 5.0.10, OS CentOS 7.7.x
192.168.228.28  one Redis node, port 7000, host machine A
192.168.228.29  one Redis node, port 7000, host machine B
192.168.228.30  one Redis node, port 7000, host machine A
192.168.228.31  one Redis node, port 7000, host machine B
192.168.228.32  one Redis node, port 7000, host machine A
192.168.228.33  one Redis node, port 7000, host machine B
Directory Planning
[root@resdis-1 ~]# mkdir soft              # software packages
[root@resdis-1 ~]# mkdir -p /app/redis     # Redis configuration directory
Installing from Source
Download page: http://redis.io/download
Download the stable 5.0.10 release.
[root@resdis-1 ~]# cd soft/
[root@resdis-1 soft]# wget https://download.redis.io/releases/redis-5.0.10.tar.gz
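Optionally, verify the integrity of the downloaded tarball by comparing its SHA-256 hash against the value published by the Redis project (for example in the redis-hashes repository linked from the download page); the expected value is not reproduced here:
[root@resdis-1 soft]# sha256sum redis-5.0.10.tar.gz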
Extract, Compile, and Install
1. Install dependencies
[root@resdis-1 soft]# yum -y install gcc gcc-c++ libstdc++-devel
2. Extract and compile
[root@resdis-1 soft]# tar -xzvf redis-5.0.10.tar.gz
[root@resdis-1 soft]# cd redis-5.0.10
[root@resdis-1 redis-5.0.10]# make
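Optionally, run the bundled test suite before installing; it requires Tcl 8.5 or newer, so install the tcl package first:
[root@resdis-1 redis-5.0.10]# yum -y install tcl
[root@resdis-1 redis-5.0.10]# make test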
3. Install the compiled binaries
[root@resdis-1 redis-5.0.10]# cd src/
[root@resdis-1 src]# make install
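make install copies the binaries (redis-server, redis-cli, and so on) into /usr/local/bin by default, so they end up on the PATH. A quick check:
[root@resdis-1 src]# redis-server --version
[root@resdis-1 src]# redis-cli --version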
Cluster Setup
Run the following steps on every machine.
[root@resdis-1 ~]# mkdir -p /app/redis/cluster/
[root@resdis-1 ~]# cp ~/soft/redis-5.0.10/redis.conf /app/redis/cluster/
[root@resdis-1 ~]# vi /app/redis/cluster/redis.conf
Modify the following settings:
#bind 127.0.0.1
protected-mode no
port 7000
daemonize yes                        # run Redis in the background
pidfile /var/run/redis_7000.pid
cluster-enabled yes                  # enable cluster mode
cluster-config-file nodes_7000.conf  # cluster state file, maintained by Redis itself
cluster-node-timeout 15000           # node timeout in milliseconds, 15000 (15 s) by default; adjust as needed
appendonly yes                       # enable AOF persistence; every write operation is appended to the log
appendfilename "appendonly7000.aof"
dbfilename dump7000.rdb
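A quick sanity check on each node to confirm the edits took effect before starting the server:
[root@resdis-1 ~]# grep -E '^(port|daemonize|cluster-|appendonly|appendfilename|dbfilename)' /app/redis/cluster/redis.conf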
Create the start script
[root@resdis-1 cluster]# cd /app/redis/cluster/
[root@resdis-1 cluster]# vi start.sh
#!/bin/bash
# Start the Redis node with the cluster configuration (absolute path, so the script works from any directory)
redis-server /app/redis/cluster/redis.conf
Create the stop script
[root@resdis-6 cluster]# vi stop.sh
#!/bin/bash
# Stop the Redis node listening on port 7000 (SIGTERM triggers a clean shutdown)
kill `ps -ef | grep redis-server | grep 7000 | awk '{print $2}'`
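Make both scripts executable before running them:
[root@resdis-1 cluster]# chmod +x start.sh stop.sh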
Start the service and verify
[root@resdis-1 cluster]# ./start.sh
[root@resdis-1 cluster]# ps -ef|grep redis
root 2521 1 0 21:47 ? 00:00:00 redis-server *:7000 [cluster]
root 2526 2389 0 21:47 pts/0 00:00:00 grep --color=auto redis
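In cluster mode each node also listens on a cluster bus port equal to the client port plus 10000 (17000 here), used for node-to-node gossip. Both ports must be reachable between all six machines. The commands below check the listening sockets and, assuming firewalld is enabled on CentOS 7, open the two ports:
[root@resdis-1 cluster]# ss -lntp | grep redis-server
[root@resdis-1 cluster]# firewall-cmd --permanent --add-port=7000/tcp --add-port=17000/tcp
[root@resdis-1 cluster]# firewall-cmd --reload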
Create the Cluster
Run this step on one machine only.
[root@resdis-1 cluster]# redis-cli --cluster-replicas 1 --cluster create 192.168.228.28:7000 192.168.228.29:7000 192.168.228.30:7000 192.168.228.31:7000 192.168.228.32:7000 192.168.228.33:7000
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.228.32:7000 to 192.168.228.28:7000
Adding replica 192.168.228.33:7000 to 192.168.228.29:7000
Adding replica 192.168.228.31:7000 to 192.168.228.30:7000
M: 72b70ee170f98c698299541f58fb649c4d37df4b 192.168.228.28:7000
slots:[0-5460] (5461 slots) master
M: 0b0e54c2b82d3cf0e00428a8a0291a68c5114e1c 192.168.228.29:7000
slots:[5461-10922] (5462 slots) master
M: 7c6ad9fb18309f4707b2db10fcb29417e95cc495 192.168.228.30:7000
slots:[10923-16383] (5461 slots) master
S: c1b8e8b2577cb87203987c294f274ba0658aebdf 192.168.228.31:7000
replicates 7c6ad9fb18309f4707b2db10fcb29417e95cc495
S: 282d12b10fa09d4273dbddf6fe79ad89c8e44975 192.168.228.32:7000
replicates 72b70ee170f98c698299541f58fb649c4d37df4b
S: 609fbb0263c938909c320d0010af4c5f84ad2cde 192.168.228.33:7000
replicates 0b0e54c2b82d3cf0e00428a8a0291a68c5114e1c
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
>>> Performing Cluster Check (using node 192.168.228.28:7000)
M: 72b70ee170f98c698299541f58fb649c4d37df4b 192.168.228.28:7000
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: 7c6ad9fb18309f4707b2db10fcb29417e95cc495 192.168.228.30:7000
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: 0b0e54c2b82d3cf0e00428a8a0291a68c5114e1c 192.168.228.29:7000
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: 282d12b10fa09d4273dbddf6fe79ad89c8e44975 192.168.228.32:7000
slots: (0 slots) slave
replicates 72b70ee170f98c698299541f58fb649c4d37df4b
S: 609fbb0263c938909c320d0010af4c5f84ad2cde 192.168.228.33:7000
slots: (0 slots) slave
replicates 0b0e54c2b82d3cf0e00428a8a0291a68c5114e1c
S: c1b8e8b2577cb87203987c294f274ba0658aebdf 192.168.228.31:7000
slots: (0 slots) slave
replicates 7c6ad9fb18309f4707b2db10fcb29417e95cc495
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
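The cluster is up. As a quick smoke test, write and read a throwaway key from any node; the -c flag makes redis-cli follow MOVED redirections, so a key whose hash slot belongs to another master is handled transparently:
[root@resdis-1 cluster]# redis-cli -c -p 7000 set foo bar
[root@resdis-1 cluster]# redis-cli -c -p 7000 get foo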
Check Cluster Status
[root@resdis-1 cluster]# redis-cli -h 127.0.0.1 -p 7000
127.0.0.1:7000> CLUSTER INFO
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:177
cluster_stats_messages_pong_sent:188
cluster_stats_messages_sent:365
cluster_stats_messages_ping_received:183
cluster_stats_messages_pong_received:177
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:365
127.0.0.1:7000> CLUSTER nodes
7c6ad9fb18309f4707b2db10fcb29417e95cc495 192.168.228.30:7000@17000 master - 0 1618927000000 3 connected 10923-16383
0b0e54c2b82d3cf0e00428a8a0291a68c5114e1c 192.168.228.29:7000@17000 master - 0 1618927000378 2 connected 5461-10922
72b70ee170f98c698299541f58fb649c4d37df4b 192.168.228.28:7000@17000 myself,master - 0 1618926997000 1 connected 0-5460
282d12b10fa09d4273dbddf6fe79ad89c8e44975 192.168.228.32:7000@17000 slave 72b70ee170f98c698299541f58fb649c4d37df4b 0 1618926999376 5 connected
609fbb0263c938909c320d0010af4c5f84ad2cde 192.168.228.33:7000@17000 slave 0b0e54c2b82d3cf0e00428a8a0291a68c5114e1c 0 1618926999000 6 connected
c1b8e8b2577cb87203987c294f274ba0658aebdf 192.168.228.31:7000@17000 slave 7c6ad9fb18309f4707b2db10fcb29417e95cc495 0 1618926998374 4 connected
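The cluster manager built into redis-cli can also rerun, against the live cluster, the same consistency checks that were performed at creation time:
[root@resdis-1 cluster]# redis-cli --cluster check 192.168.228.28:7000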