package docker

//redis集群，主从扩容案例
//3主3从已经扛不住了，现在需要变成4主4从
// .17 和 .18 怎么加入集群，且 .18 作为 .17 的从机

/*
	目录：
		1.新建 .17 .18 两个节点 + 新建后启动 + 查看是否为8个节点
		2.进入 .17容器实例内部
		3. 将新增的 .17 节点(空槽位)作为master节点加入原集群
		4.查看集群情况第1次
		5.重新分配哈希槽号
				槽号的分配说明
				 	.17有3个新的区间，而以前是连续的
				 	因为全部重新分配的成本太高，所以以前的3个master各自匀出/分配了约 1365个槽位给 .17（合计4096个）
		6.检查集群情况第2次
		7.为主节点 .17分配从节点 .18
		8.检查集群情况第3次

		构图查看：
			images/docker-125-redis集群-主从扩容-01.png
			images/docker-125-redis集群-主从扩容-02.png
			images/docker-125-redis集群-主从扩容-03.png




-------------------------1.新建 .17 .18 两个节点 + 新建后启动 + 查看是否为8个节点-------------------------

------新建 .17 .18 两个节点
		步骤根据 docker-110-redis集群 来


	以下是编写的shell脚本
root@VM-4-8-debian:/home/zydh/docker/etc# pwd
/home/zydh/docker/etc
root@VM-4-8-debian:/home/zydh/docker/etc#
root@VM-4-8-debian:/home/zydh/docker/etc# touch redis-cluster-add-node-shell
root@VM-4-8-debian:/home/zydh/docker/etc# vim redis-cluster-add-node-shell
root@VM-4-8-debian:/home/zydh/docker/etc#
root@VM-4-8-debian:/home/zydh/docker/etc# chmod 777 redis-cluster-add-node-shell
root@VM-4-8-debian:/home/zydh/docker/etc# cat redis-cluster-add-node-shell
#! /bin/bash
for port in $(seq 7 8);
do
mkdir -p /home/zydh/docker/etc/redis/node-${port}/conf
touch /home/zydh/docker/etc/redis/node-${port}/conf/redis.conf
cat << EOF >/home/zydh/docker/etc/redis/node-${port}/conf/redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 173.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done
root@VM-4-8-debian:/home/zydh/docker/etc#


root@VM-4-8-debian:/home/zydh/docker/etc# ./redis-cluster-add-node-shell    ###执行
root@VM-4-8-debian:/home/zydh/docker/etc#
root@VM-4-8-debian:/home/zydh/docker/etc/redis# pwd
/home/zydh/docker/etc/redis
root@VM-4-8-debian:/home/zydh/docker/etc/redis#
root@VM-4-8-debian:/home/zydh/docker/etc/redis# ll
total 216
drwxr-xr-x 3 root root  4096 Sep 29 19:55 node-1
drwxr-xr-x 3 root root  4096 Sep 29 19:55 node-2
drwxr-xr-x 3 root root  4096 Sep 29 19:55 node-3
drwxr-xr-x 3 root root  4096 Sep 29 19:55 node-4
drwxr-xr-x 3 root root  4096 Sep 29 19:55 node-5
drwxr-xr-x 3 root root  4096 Sep 29 19:55 node-6
drwxr-xr-x 3 root root  4096 Oct  9 17:05 node-7
drwxr-xr-x 3 root root  4096 Oct  9 17:05 node-8						###已经8个节点
-rw-r--r-- 1 root root 93838 Oct  7 17:00 redis.conf
-rw-r--r-- 1 root root 93837 Oct  7 16:29 redis.conf.back
root@VM-4-8-debian:/home/zydh/docker/etc/redis#




------新建后启动并查看

	注意更改两个 -p ,--name ,-v 的目录 和 --ip

	docker run -p 56377:6379 -p 57377:16379 --privileged=true  --name redis-7 \
	-v /home/zydh/docker/volume/redis/node-7/data:/data \
	-v /home/zydh/docker/etc/redis/node-7/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.17 redis redis-server /etc/redis/redis.conf


	docker run -p 56378:6379 -p 57378:16379 --privileged=true  --name redis-8 \
	-v /home/zydh/docker/volume/redis/node-8/data:/data \
	-v /home/zydh/docker/etc/redis/node-8/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.18 redis redis-server /etc/redis/redis.conf



root@VM-4-8-debian:/home/zydh/docker/etc/redis#
root@VM-4-8-debian:/home/zydh/docker/etc/redis#
root@VM-4-8-debian:/home/zydh/docker/etc/redis# docker ps
CONTAINER ID   IMAGE     COMMAND                  CREATED         STATUS          PORTS                                                                                        NAMES
b258e205ea91   redis     "docker-entrypoint.s…"   5 seconds ago   Up 4 seconds    0.0.0.0:56378->6379/tcp, :::56378->6379/tcp, 0.0.0.0:57378->16379/tcp, :::57378->16379/tcp   redis-8
8232cff01091   redis     "docker-entrypoint.s…"   6 seconds ago   Up 5 seconds    0.0.0.0:56377->6379/tcp, :::56377->6379/tcp, 0.0.0.0:57377->16379/tcp, :::57377->16379/tcp   redis-7
c76ca5b0a058   redis     "docker-entrypoint.s…"   2 hours ago     Up 2 hours      0.0.0.0:56376->6379/tcp, :::56376->6379/tcp, 0.0.0.0:57376->16379/tcp, :::57376->16379/tcp   redis-6
66717a63d9ce   redis     "docker-entrypoint.s…"   2 hours ago     Up 30 minutes   0.0.0.0:56375->6379/tcp, :::56375->6379/tcp, 0.0.0.0:57375->16379/tcp, :::57375->16379/tcp   redis-5
83f317518d49   redis     "docker-entrypoint.s…"   2 hours ago     Up 2 hours      0.0.0.0:56374->6379/tcp, :::56374->6379/tcp, 0.0.0.0:57374->16379/tcp, :::57374->16379/tcp   redis-4
3220a45570a3   redis     "docker-entrypoint.s…"   2 hours ago     Up 2 hours      0.0.0.0:56373->6379/tcp, :::56373->6379/tcp, 0.0.0.0:57373->16379/tcp, :::57373->16379/tcp   redis-3
a173e090b68a   redis     "docker-entrypoint.s…"   2 hours ago     Up 2 hours      0.0.0.0:56372->6379/tcp, :::56372->6379/tcp, 0.0.0.0:57372->16379/tcp, :::57372->16379/tcp   redis-2
3d185c900b30   redis     "docker-entrypoint.s…"   2 hours ago     Up 38 minutes   0.0.0.0:56371->6379/tcp, :::56371->6379/tcp, 0.0.0.0:57371->16379/tcp, :::57371->16379/tcp   redis-1
root@VM-4-8-debian:/home/zydh/docker/etc/redis#


		## redis-7 redis-8 已经启动



-------------------------2.进入 .17容器实例内部-------------------------

	docker exec -it redis-7 /bin/bash


-------------------------3. 将新增的 .17 节点(空槽位)作为master节点加入原集群-------------------------

	redis-cli --cluster add-node  自己的ip[.17]:端口  需要加入集群主机ip[.11]:端口

	redis-cli --cluster add-node  173.38.0.17:6379  173.38.0.11:6379

		.17:6379 就是需要加入集群的 ip:端口号
		.11:6379 就是原来集群节点里面的领路人 ip:端口号
					相当于通过ip：端口号，把 .17加入集群中



root@VM-4-8-debian:/home/zydh/docker/etc/redis# docker exec -it redis-7 /bin/bash
root@8232cff01091:/data#
root@8232cff01091:/data#
root@8232cff01091:/data# redis-cli --cluster add-node  173.38.0.17:6379  173.38.0.11:6379
>>> Adding node 173.38.0.17:6379 to cluster 173.38.0.11:6379						#### 新增节点 .17  到 集群 .11
>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 173.38.0.17:6379 to make it join the cluster.		###加入集群
[OK] New node added correctly.													###加入集群成功
root@8232cff01091:/data#






-------------------------4.查看集群情况第1次-------------------------

		redis-cli --cluster check ip:端口号
		redis-cli --cluster check 宿主机ip/docker自定义网络分配的ip:端口号

		redis-cli --cluster check  173.38.0.11:6379	 //这里用的是docker network自定义网络的ip



root@8232cff01091:/data# redis-cli --cluster check  173.38.0.11:6379
173.38.0.11:6379 (835210e0...) -> 1 keys | 5461 slots | 1 slaves.
173.38.0.12:6379 (def95dae...) -> 0 keys | 5462 slots | 1 slaves.
173.38.0.17:6379 (636d3970...) -> 0 keys | 0 slots | 0 slaves.			## .17 已经加入   0 slots | 0 slaves.  暂时没有哈希槽位，
173.38.0.13:6379 (bc371156...) -> 1 keys | 5461 slots | 1 slaves.
[OK] 2 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
M: 636d39709bf414075fb45c923b7dd8398a888c3f 173.38.0.17:6379		## .17
   slots: (0 slots) master
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@8232cff01091:/data#




	173.38.0.17:6379 (636d3970...) -> 0 keys | 0 slots | 0 slaves.
	## .17 已经加入   0 slots | 0 slaves.  暂时没有分配哈希槽位




-------------------------5.重新分配哈希槽号-------------------------

	重新分配哈希槽号/位
	redis-cli --cluster reshard ip:端口号

	redis-cli --cluster reshard 173.38.0.11:6379
				##注意，这里以 某ip:端口号 重新分配时，最好还是用原集群的master[0]的ip地址和端口


	注意点：
		看图：
			images/docker-128-redis集群-主从扩容-注意点-04.png
			images/docker-129-redis集群-主从扩容-注意点-05.png
			images/docker-130-redis集群-主从扩容-注意点-06.png


		槽号的分配说明
				.17有3个新的区间，而以前是连续的
				因为全部重新分配的成本太高，所以以前的3个master各自匀出/分配了约 1365个槽位给 .17（合计4096个）

			看图：images/docker-131-redis集群-主从扩容-新的哈希槽分配规则.png



root@8232cff01091:/data# redis-cli --cluster reshard 173.38.0.11:6379		##以原集群master[0]的ip重新分配槽位
>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
M: 636d39709bf414075fb45c923b7dd8398a888c3f 173.38.0.17:6379			  ## .17  的唯一标识等会要用
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)?        ### 想以什么方式分配，我们用平均 现在有4个master,平均分配 16384/4 = 4096


How many slots do you want to move (from 1 to 16384)? 4096
What is the receiving node ID?								###想分配给谁，这里要给 .17 分配，上面显示 .17的唯一标识 636d39709bf414075fb45c923b7dd8398a888c3f



How many slots do you want to move (from 1 to 16384)? 4096
What is the receiving node ID? 636d39709bf414075fb45c923b7dd8398a888c3f
Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1: all													######选择all
	........................
    Moving slot 13650 from bc371156139e77e245427e278100744a497e3c0b
    Moving slot 13651 from bc371156139e77e245427e278100744a497e3c0b
    Moving slot 13652 from bc371156139e77e245427e278100744a497e3c0b
Do you want to proceed with the proposed reshard plan (yes/no)?		yes   ###是否继续执行拟定的重新分片(reshard)计划
.....
Moving slot 13650 from 173.38.0.13:6379 to 173.38.0.17:6379:
Moving slot 13651 from 173.38.0.13:6379 to 173.38.0.17:6379:
Moving slot 13652 from 173.38.0.13:6379 to 173.38.0.17:6379:
root@8232cff01091:/data#

		分配完毕



-------------------------6.检查集群情况第2次-------------------------

	redis-cli --cluster check  173.38.0.11:6379

root@8232cff01091:/data# redis-cli --cluster check  173.38.0.11:6379
173.38.0.11:6379 (835210e0...) -> 1 keys | 4096 slots | 1 slaves.
173.38.0.12:6379 (def95dae...) -> 0 keys | 4096 slots | 1 slaves.
173.38.0.17:6379 (636d3970...) -> 0 keys | 4096 slots | 0 slaves.	## .17 分配到了 4096 个槽位
173.38.0.13:6379 (bc371156...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 2 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
M: 636d39709bf414075fb45c923b7dd8398a888c3f 173.38.0.17:6379
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master    ### 这里的槽位段，是由原集群三段每个都分配了一些出来给.17
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@8232cff01091:/data#



-------------------------7.为主节点 .17分配从节点 .18-------------------------

	redis-cli --cluster add-node  新slave-ip:端口号  新master-ip:端口号 --cluster-slave --cluster-master-id 新主机节点id

	//redis-cli --cluster add-node 192.168.111.147:6388 192.168.111.147:6387 --cluster-slave --cluster-master-id e4781f644d4a4e4d4b4d107157b9ba8144631451    //------这个是6387的编号，按照自己实际情况

									.18 从机    加入到   .17 主机										 .17主机的唯一标识，上面用 redis-cli --cluster check  173.38.0.11:6379 查出来的
	redis-cli --cluster add-node 173.38.0.18:6379 173.38.0.17:6379 --cluster-slave --cluster-master-id 636d39709bf414075fb45c923b7dd8398a888c3f   //------这个是 .17 的编号，按照自己实际情况



root@8232cff01091:/data# redis-cli --cluster add-node 173.38.0.18:6379 173.38.0.17:6379 --cluster-slave --cluster-master-id 636d39709bf414075fb45c923b7dd8398a888c3f
>>> Adding node 173.38.0.18:6379 to cluster 173.38.0.17:6379			### 加入到 .17
>>> Performing Cluster Check (using node 173.38.0.17:6379)
M: 636d39709bf414075fb45c923b7dd8398a888c3f 173.38.0.17:6379
   slots:[0-2730],[5461-8191],[10923-13652] (4096 slots) master
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[13653-16383] (4096 slots) master
   1 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[8192-10922] (4096 slots) master
   1 additional replica(s)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[2731-5460] (4096 slots) master
   1 additional replica(s)
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 173.38.0.18:6379 to make it join the cluster.
Waiting for the cluster to join

>>> Configure node as replica of 173.38.0.17:6379.
[OK] New node added correctly.									###加入成功
root@8232cff01091:/data#



-------------------------8.检查集群情况第3次-------------------------

	redis-cli --cluster check  173.38.0.11:6379


root@8232cff01091:/data# redis-cli --cluster check  173.38.0.11:6379
173.38.0.11:6379 (835210e0...) -> 1 keys | 4096 slots | 1 slaves.
173.38.0.12:6379 (def95dae...) -> 0 keys | 4096 slots | 1 slaves.
173.38.0.17:6379 (636d3970...) -> 0 keys | 4096 slots | 1 slaves.		##有slave挂载了
173.38.0.13:6379 (bc371156...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 2 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[2731-5460] (4096 slots) master
   1 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[8192-10922] (4096 slots) master
   1 additional replica(s)
M: 636d39709bf414075fb45c923b7dd8398a888c3f 173.38.0.17:6379
   slots:[0-2730],[5461-8191],[10923-13652] (4096 slots) master
   1 additional replica(s)
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[13653-16383] (4096 slots) master
   1 additional replica(s)
S: 93f82e0655769655262b7a69945331e88c4b8882 173.38.0.18:6379
   slots: (0 slots) slave
   replicates 636d39709bf414075fb45c923b7dd8398a888c3f
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@8232cff01091:/data#



*/
