package docker

//redis 3主3从集群配置

/*

		本章节和docker-110-redis集群一模一样。

	目录：
		1.启动docker systemctl start docker
		2.新建6个docker容器实例
		3.进入容器redis-node-1并为6台机器构建集群关系
		4.进入集群




-------------------------1.启动docker-------------------------
	如果报错：
	cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
	表示docker未启动

	systemctl start docker


-------------------------2.新建6个docker容器实例-------------------------
	查看并使用  docker-110-redis集群.go
	创建网络
	使用shell脚本创建配置文件后，使用docker启动容器

	和docker-110*相比，-p的端口发生了变化

	docker run -p 56371:6379 -p 57371:16379 --privileged=true  --name redis-1 \
	-v /home/zydh/docker/volume/redis/node-1/data:/data \
	-v /home/zydh/docker/etc/redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.11 redis redis-server /etc/redis/redis.conf


	docker run -p 56372:6379 -p 57372:16379 --privileged=true  --name redis-2 \
	-v /home/zydh/docker/volume/redis/node-2/data:/data \
	-v /home/zydh/docker/etc/redis/node-2/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.12 redis redis-server /etc/redis/redis.conf

	docker run -p 56373:6379 -p 57373:16379 --privileged=true   --name redis-3 \
	-v /home/zydh/docker/volume/redis/node-3/data:/data \
	-v /home/zydh/docker/etc/redis/node-3/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.13 redis redis-server /etc/redis/redis.conf

	docker run -p 56374:6379 -p 57374:16379 --privileged=true  --name redis-4 \
	-v /home/zydh/docker/volume/redis/node-4/data:/data \
	-v /home/zydh/docker/etc/redis/node-4/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.14 redis redis-server /etc/redis/redis.conf

	docker run -p 56375:6379 -p 57375:16379 --privileged=true  --name redis-5 \
	-v /home/zydh/docker/volume/redis/node-5/data:/data \
	-v /home/zydh/docker/etc/redis/node-5/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.15 redis redis-server /etc/redis/redis.conf

	docker run -p 56376:6379 -p 57376:16379 --privileged=true  --name redis-6 \
	-v /home/zydh/docker/volume/redis/node-6/data:/data \
	-v /home/zydh/docker/etc/redis/node-6/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.16 redis redis-server /etc/redis/redis.conf


-------------------------3.进入容器redis-node-1并为6台机器构建集群关系-------------------------

	docker exec -it redis-1 /bin/bash

	//创建集群  使用的自定义的网络ip
	redis-cli --cluster create 173.38.0.11:6379 173.38.0.12:6379 173.38.0.13:6379 173.38.0.14:6379 173.38.0.15:6379 173.38.0.16:6379 --cluster-replicas 1

	--cluster create  			创建一个集群
	--cluster-replicas 1		表示主从比例1，即一个主节点对应一个从节点，先主后从


	Node 173.38.0.12:6379 is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0.
	https://blog.csdn.net/qq_37300273/article/details/77267377
	删除nodes-xxx.conf配置文件，删除pid文件，删除各节点aof,rdb文件,杀掉所有redis进程，然后重启redis集群搞定
	https://blog.csdn.net/xinyuanjieyiwife/article/details/109581272
		解决办法：
		1，先kill redis创建的集群节点进程
		2，删除每个redis节点的appendonly.aof文件，dump.rdb文件，nodes.conf文件
		并且执行./redis-cli flushdb 命令，清空每个redis里面的数据。
		3，重启每个redis节点，再执行集群操作即可

	docker exec -it redis-[123456] /bin/bash
	ls
	//rm -f /data/*
	退出，再删除之前用代码创建的/mydata文件夹






root@3d185c900b30:/data# redis-cli --cluster create 173.38.0.11:6379 173.38.0.12:6379 173.38.0.13:6379 173.38.0.14:6379 173.38.0.15:6379 173.38.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...		## 哈希槽 hash slots 对着6个节点进行了分配
Master[0] -> Slots 0 - 5460								## 哈希槽  看图 images/docker-121-redis集群-3主3从-哈希槽hash slot-02.png
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 173.38.0.15:6379 to 173.38.0.11:6379
Adding replica 173.38.0.16:6379 to 173.38.0.12:6379
Adding replica 173.38.0.14:6379 to 173.38.0.13:6379
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[0-5460] (5461 slots) master
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[5461-10922] (5462 slots) master
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[10923-16383] (5461 slots) master
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   replicates bc371156139e77e245427e278100744a497e3c0b
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join

>>> Performing Cluster Check (using node 173.38.0.11:6379)
M: 835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: 4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379
   slots: (0 slots) slave
   replicates def95daeb6a1a0ac5c037a826f1d660cc82bd87e
S: 0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379
   slots: (0 slots) slave
   replicates bc371156139e77e245427e278100744a497e3c0b
M: bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379
   slots: (0 slots) slave
   replicates 835210e0718b8cb17ec5174234642d83d315b3fa
M: def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.			### 16384 个哈希槽全部覆盖
root@3d185c900b30:/data#


	哈希槽为 看 docker-1000-1003*




-------------------------4.进入集群-------------------------

	操作，一定要加【-c】，-c 表示以集群模式连接（key 不在本节点时自动重定向到对应节点）；不加 -c 的 redis-cli 只是单机连接
	root@75332c57aac0:/data#  redis-cli -c
	127.0.0.1:6379>
					cluster info
					cluster nodes


root@3d185c900b30:/data# redis-cli -c
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384					##哈希槽 16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6					##已经知道的节点 6 个
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:1089
cluster_stats_messages_pong_sent:1118
cluster_stats_messages_sent:2207
cluster_stats_messages_ping_received:1113
cluster_stats_messages_pong_received:1089
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:2207
127.0.0.1:6379>

127.0.0.1:6379> cluster nodes
4604d2508991c66160c6e0629b5d950f87b8b9f1 173.38.0.16:6379@16379 slave def95daeb6a1a0ac5c037a826f1d660cc82bd87e 0 1665300770000 2 connected
0e7f087ee4b8a5ea8ec4a38f0e724f20eb6b27bb 173.38.0.14:6379@16379 slave bc371156139e77e245427e278100744a497e3c0b 0 1665300770000 3 connected
bc371156139e77e245427e278100744a497e3c0b 173.38.0.13:6379@16379 master - 0 1665300769764 3 connected 10923-16383
8ca526361a34855123de34902c49838fe1a5a46f 173.38.0.15:6379@16379 slave 835210e0718b8cb17ec5174234642d83d315b3fa 0 1665300770767 1 connected
def95daeb6a1a0ac5c037a826f1d660cc82bd87e 173.38.0.12:6379@16379 master - 0 1665300769563 2 connected 5461-10922
835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379@16379 myself,master - 0 1665300768000 1 connected 0-5460
127.0.0.1:6379>

第4行和第6行 提炼出来==>

										 173.38.0.15:6379@16379 slave 835210e0718b8cb17ec5174234642d83d315b3fa 0 1665300770767 1 connected
835210e0718b8cb17ec5174234642d83d315b3fa 173.38.0.11:6379@16379 myself,master - 0 1665300768000 1 connected 0-5460

		==>
			173.38.0.15:6379@16379 slave 835210e071
835210e071  173.38.0.11:6379@16379 myself,master

		==>
			slave 835210e071
835210e071  myself,master

		==>
			非常明显的可以看到 谁是主机，谁是从机

*/
