package docker

// redis 集群 — 主从缩容案例（将一对主从节点 .17 / .18 下线）

/*
	目的  .17 和 .18下线

	目录：
		1.检查集群情况第1次，获得 .18 的节点id
		2.将 .18删除，从集群中将 .17 的从节点 .18 删除
		3.将 .17 的槽号清空，重新分配槽号，本例将所有的槽号都给了 .11
		4.检查集群情况第2次
		5.将 .17 删除
		6.检查集群情况第3次


------------------1.检查集群情况第1次，获得 .18 的节点id------------------

		redis-cli --cluster check  173.38.0.11:6379


root@8232cff01091:/data#
root@8232cff01091:/data#        redis-cli --cluster check  173.38.0.11:6379
173.38.0.11:6379 (835210e0...) -> 0 keys | 2730 slots | 1 slaves.
173.38.0.12:6379 (def95dae...) -> 0 keys | 2731 slots | 1 slaves.
173.38.0.17:6379 (636d3970...) -> 2 keys | 8192 slots | 1 slaves.
173.38.0.13:6379 (bc371156...) -> 0 keys | 2731 slots | 1 slaves.
[OK] 2 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[2731-5460] (2730 slots) master
   1 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[8192-10922] (2731 slots) master
   1 additional replica(s)
M: 636d39709bf414075fb45c923b7dd8398a888c3f 173.38.0.17:6379
   slots:[0-2730],[5461-8191],[10923-13652] (8192 slots) master
   1 additional replica(s)
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[13653-16383] (2731 slots) master
   1 additional replica(s)
S: 93f82e0655769655262b7a69945331e88c4b8882 173.38.0.18:6379		## 获取 .18 的节点id
   slots: (0 slots) slave
   replicates 636d39709bf414075fb45c923b7dd8398a888c3f
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@8232cff01091:/data#



------------------2.将 .18删除，从集群中将 .17 的从节点 .18 删除------------------

	redis-cli --cluster del-node 从机-ip:端口号  从机 .18节点id

	//示例（视频中的环境，ip/端口与本例不同）：redis-cli --cluster del-node 192.168.111.147:6388  <从机节点id>

	redis-cli --cluster del-node  173.38.0.18:6379  93f82e0655769655262b7a69945331e88c4b8882


root@8232cff01091:/data# redis-cli --cluster del-node  173.38.0.18:6379  93f82e0655769655262b7a69945331e88c4b8882
>>> Removing node 93f82e0655769655262b7a69945331e88c4b8882 from cluster 173.38.0.18:6379
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.
root@8232cff01091:/data#

	使用  redis-cli --cluster check  173.38.0.11:6379 检查，发现只剩下了七台机器





------------------3.将 .17 的槽号清空，重新分配槽号，本例将所有的槽号都给了 .11------------------


	redis-cli --cluster reshard 173.38.0.11:6379




root@8232cff01091:/data# redis-cli --cluster reshard 173.38.0.11:6379		### 重新分配
>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379				## .11 的节点id  835210e0718b8cb17ec5174234642d83d315b3fa
   slots:[2731-5460] (2730 slots) master
   1 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[8192-10922] (2731 slots) master
   1 additional replica(s)
M: 636d39709bf414075fb45c923b7dd8398a888c3f 173.38.0.17:6379				## .17 的节点id  636d39709bf414075fb45c923b7dd8398a888c3f
   slots:[0-2730],[5461-8191],[10923-13652] (8192 slots) master
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[13653-16383] (2731 slots) master
   1 additional replica(s)
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096					### 这里输入了 4096，而不是 16384/3≈5461；注意：若要一次性清空 .17，应输入它当前持有的全部 8192 个槽，只移 4096 会剩下一半
What is the receiving node ID? 835210e0718b8cb17ec5174234642d83d315b3fa   	### 这里选择的接受者是 .11，把槽位给 .11 /由 这个节点id来接收空出来的槽位
Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1: 636d39709bf414075fb45c923b7dd8398a888c3f					###需要清空谁  这里清空 .17,删除 .17 的槽位
Source node #2: done
......
    Moving slot 6824 from 636d39709bf414075fb45c923b7dd8398a888c3f
    Moving slot 6825 from 636d39709bf414075fb45c923b7dd8398a888c3f
Do you want to proceed with the proposed reshard plan (yes/no)?yes			###yes 执行
.....
Moving slot 6824 from 173.38.0.17:6379 to 173.38.0.11:6379:
Moving slot 6825 from 173.38.0.17:6379 to 173.38.0.11:6379:
root@8232cff01091:/data#


	完成







------------------4.检查集群情况第2次------------------

	 redis-cli --cluster check  173.38.0.11:6379



.....
Moving slot 13651 from 173.38.0.17:6379 to 173.38.0.11:6379:
Moving slot 13652 from 173.38.0.17:6379 to 173.38.0.11:6379:
Node 173.38.0.17:6379 replied with error:									## 迁移的时候报错
ERR Please use SETSLOT only with masters.
root@8232cff01091:/data#  redis-cli --cluster check  173.38.0.11:6379
173.38.0.11:6379 (835210e0...) -> 2 keys | 10922 slots | 2 slaves.			## .17 的槽位被清空后变成了 .11 的从节点（推测是 Redis Cluster 的副本迁移机制：空 master 会被自动转为 replica），这也解释了上面的 "ERR Please use SETSLOT only with masters" —— SETSLOT 只能对 master 执行；该现象不影响后续删除 .17
173.38.0.12:6379 (def95dae...) -> 0 keys | 2731 slots | 1 slaves.
173.38.0.13:6379 (bc371156...) -> 0 keys | 2731 slots | 1 slaves.
[OK] 2 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[0-8191],[10923-13652] (10922 slots) master
   2 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[8192-10922] (2731 slots) master
   1 additional replica(s)
S: 636d39709bf414075fb45c923b7dd8398a888c3f 173.38.0.17:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[13653-16383] (2731 slots) master
   1 additional replica(s)
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@8232cff01091:/data#








------------------5.将 .17 删除------------------


	redis-cli --cluster del-node  173.38.0.17:6379  636d39709bf414075fb45c923b7dd8398a888c3f



root@8232cff01091:/data# redis-cli --cluster del-node  173.38.0.17:6379  636d39709bf414075fb45c923b7dd8398a888c3f
>>> Removing node 636d39709bf414075fb45c923b7dd8398a888c3f from cluster 173.38.0.17:6379
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.
root@8232cff01091:/data#




------------------6.检查集群情况第3次------------------

		redis-cli --cluster check  173.38.0.11:6379

root@8232cff01091:/data#  redis-cli --cluster check  173.38.0.11:6379
173.38.0.11:6379 (835210e0...) -> 2 keys | 10922 slots | 1 slaves.
173.38.0.12:6379 (def95dae...) -> 0 keys | 2731 slots | 1 slaves.
173.38.0.13:6379 (bc371156...) -> 0 keys | 2731 slots | 1 slaves.
[OK] 2 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[0-8191],[10923-13652] (10922 slots) master
   1 additional replica(s)
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[8192-10922] (2731 slots) master
   1 additional replica(s)
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[13653-16383] (2731 slots) master
   1 additional replica(s)
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@8232cff01091:/data#



*/
