package docker


//docker 网络

func net(){
	//docker 网络原理部分看视频和doc文档
	docker network --help
			[root@VM_0_13_centos ~]# docker network --help
			Usage:  docker network COMMAND
			Manage networks
			Commands:
			connect     Connect a container to a network
			create      Create a network
			disconnect  Disconnect a container from a network
			inspect     Display detailed information on one or more networks
			ls          List networks
			prune       Remove all unused networks
			rm          Remove one or more networks

			Run 'docker network COMMAND --help' for more information on a command.
			[root@VM_0_13_centos ~]#

	docker run -it --name app01 images1
	docker run -it --name app01 images1 ip addr

	//自定义网络：容器互联
	网络模式
	bridge : 桥接 docker (默认，自己创建也使用bridge模式)
	none : 不配置网络
	host : 和宿主机共享网络
	container : 容器网络连通（用得少，局限大）

	//创建一个自定义网络
	docker network create --driver bridge --subnet 192.168.0.1/16 --gateway 192.168.0.1 mynet
		--driver bridge 使用桥接模式
		--subnet 192.168.0.1/16 子网 其中/16 表示
				192		 168		0		 1
			00000000   00000000  00000000  00000000
			每一个都是8位，/16 表示后16位是主机位，可用地址数 = 2^16 - 2 = 65534 个（去掉网络地址和广播地址）
						/24 则为 2^8 - 2 = 254 个
			范围 192.168.0.2 - 192.168.255.254

		--gateway 192.168.0.1  网关，从哪里出去

	//查看网络情况
	docker network inspect mynet
		[root@VM_0_13_centos ~]# docker network ls
		NETWORK ID     NAME      DRIVER    SCOPE
		2d0791fbd230   bridge    bridge    local
		3fa9081e776b   host      host      local
		f51aef503967   mynet     bridge    local
		f6051ab217a7   none      null      local
		[root@VM_0_13_centos ~]# docker network inspect mynet
		[
		{
		"Name": "mynet",
		"Id": "f51aef503967d2bc3af251fa0b577356c3eacba196457ce9f443e0b3426199a3",
		"Created": "2021-02-25T21:39:52.638664844+08:00",
		"Scope": "local",
		"Driver": "bridge",
		"EnableIPv6": false,
		"IPAM": {
		"Driver": "default",
		"Options": {},
		"Config": [
		{
		"Subnet": "192.168.0.1/16",
		"Gateway": "192.168.0.1"
		}
		]
		},
		"Internal": false,
		"Attachable": false,
		"Ingress": false,
		"ConfigFrom": {
		"Network": ""
		},
		"ConfigOnly": false,
		"Containers": {},
		"Options": {},
		"Labels": {}
		}
		]
		[root@VM_0_13_centos ~]#
		以上，我们自己的网络就创建好了

		启动容器01
		docker run -d -it -P --name mysql-net-01 --net mynet mysql /bin/bash
		启动容器02
		docker run -d -it -P --name mysql-net-02 --net mynet mysql /bin/bash


			[root@VM_0_13_centos ~]# docker network inspect mynet
			[
			{
			"Name": "mynet",
			"Id": "f51aef503967d2bc3af251fa0b577356c3eacba196457ce9f443e0b3426199a3",
			"Created": "2021-02-25T21:39:52.638664844+08:00",
			"Scope": "local",
			"Driver": "bridge",
			"EnableIPv6": false,
			"IPAM": {
			"Driver": "default",
			"Options": {},
			"Config": [
			{
			"Subnet": "192.168.0.1/16",
			"Gateway": "192.168.0.1"
			}
			]
			},
			"Internal": false,
			"Attachable": false,
			"Ingress": false,
			"ConfigFrom": {
			"Network": ""
			},
			"ConfigOnly": false,
			"Containers": {
				"5c54c6d1f52660cbc40e1b943191b39f57e0f7ebba9e3c093db6510e138b586d": {
				"Name": "mysql-net-02",
				"EndpointID": "d0da4e34089d68572fd77a5f4a24f96d7656d5c21e7acfd143ba24e6e844bc40",
				"MacAddress": "02:42:c0:a8:00:03",
				"IPv4Address": "192.168.0.3/16",
				"IPv6Address": ""
				},
				"8bf707397e35db235ebe0f6c63ec6d2c371e85bfdd2f32d5c622b45d58cbd60f": {
				"Name": "mysql-net-01",
				"EndpointID": "60bbeffc838614bb84b053f670dbc4b2bc48894e5c1fc8f537aa5b82613f22c3",
				"MacAddress": "02:42:c0:a8:00:02",
				"IPv4Address": "192.168.0.2/16",
				"IPv6Address": ""
				}
			},
			"Options": {},
			"Labels": {}
			}
			]
			[root@VM_0_13_centos ~]#

			发现mynet下已经载入了两个容器，因为上面的mysql镜像里没有ping命令，以下继续使用tomcat
			docker exec -it tomcat01 ping 192.168.0.3
			成功ping通
			docker exec -it tomcat01 ping tomcat02
			成功ping通

			自定义网络修复了docker --link的缺点
			我们自定义的网络docker都已经帮我们维护好了对应的关系，推荐我们平时这样使用网络
			好处：
						redis集群
						mysql集群
				不同的集群使用不同的网络，保证集群是安全和健康的



		网络连通
			此时已经有了docker0和mynet两个网卡，在docker0上面的容器想要去 ping mynet 上的容器
			不是网卡[docker0和mynet]连通，而是docker0上面的容器去连接mynet网卡
			具体网络看doc文档

			docker network --help
			docker network connect --help  //链接一个容器到一个网络

			[root@VM_0_13_centos ~]# docker network connect --help

			Usage:  docker network connect [OPTIONS] NETWORK CONTAINER
														网卡	容器名
			Connect a container to a network

			Options:
			--alias strings           Add network-scoped alias for the container
			--driver-opt strings      driver options for the network
			--ip string               IPv4 address (e.g., 172.30.100.104)
			--ip6 string              IPv6 address (e.g., 2001:db8::33)
			--link list               Add link to another container
			--link-local-ip strings   Add a link-local address for the container
			[root@VM_0_13_centos ~]#


			测试打通docker0上的容器tomcat01 - mynet上的tomcat-net-01容器

			docker network connect mynet tomcat01

			docker network inspect mynet
			会发现 containers中会新增一个 tomcat01
			此时，tomcat01在两个网卡上都有ip，一个容器，两个ip

}

//redis 集群
func redisColony(){
	//先为集群建立一个网卡
	docker network create --driver bridge --subnet 173.38.0.0/16 --gateway 173.38.0.1 redis-colony
	docker network create  --subnet 173.38.0.0/16 redis-colony1


	//集群

	for port in $(seq 1 6); \
	do \
	mkdir -p /mydata/redis/node-${port}/conf
	touch /mydata/redis/node-${port}/conf/redis.conf
	cat << EOF >/mydata/redis/node-${port}/conf/redis.conf
	port 6379
	bind 0.0.0.0
	cluster-enabled yes
	cluster-config-file nodes.conf
	cluster-node-timeout 5000
	cluster-announce-ip 173.38.0.1${port}
	cluster-announce-port 6379
	cluster-announce-bus-port 16379
	appendonly yes
	EOF
	done

	docker run -p 6371:6379 -p 16371:16379 --name redis-1 \
	-v /mydata/redis/node-1/data:/data \
	-v /mydata/redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.11 redis redis-server /etc/redis/redis.conf

	/**

		[root@VM_0_13_centos conf]# docker run -p 6371:6379 -p 16371:16379 --name redis-1 \
		> redis.conf -v /mydata/redis/node-1/data:/data \
		> redis.conf -v /mydata/redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
		> redis.conf -d --net redis-colony --ip 173.38.0.11 redis redis-server /etc/redis/redis.conf
		Unable to find image 'redis.conf:latest' locally
		docker: Error response from daemon: pull access denied for redis.conf, repository does not exist or may require 'docker login': denied: requested access to the resource is denied.
		See 'docker run --help'.
		[root@VM_0_13_centos conf]# cd /	跳出当前文件夹

	 */




	docker run -p 6372:6379 -p 16372:16379 --name redis-2 \
	-v /mydata/redis/node-2/data:/data \
	-v /mydata/redis/node-2/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.12 redis redis-server /etc/redis/redis.conf

	docker run -p 6373:6379 -p 16373:16379 --name redis-3 \
	-v /mydata/redis/node-3/data:/data \
	-v /mydata/redis/node-3/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.13 redis redis-server /etc/redis/redis.conf

	docker run -p 6374:6379 -p 16374:16379 --name redis-4 \
	-v /mydata/redis/node-4/data:/data \
	-v /mydata/redis/node-4/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.14 redis redis-server /etc/redis/redis.conf

	docker run -p 6375:6379 -p 16375:16379 --name redis-5 \
	-v /mydata/redis/node-5/data:/data \
	-v /mydata/redis/node-5/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.15 redis redis-server /etc/redis/redis.conf

	docker run -p 6376:6379 -p 16376:16379 --name redis-6 \
	-v /mydata/redis/node-6/data:/data \
	-v /mydata/redis/node-6/conf/redis.conf:/etc/redis/redis.conf \
	-d --net redis-colony --ip 173.38.0.16 redis redis-server /etc/redis/redis.conf

	//进入redis容器1
	docker exec -it redis-1 /bin/bash

	//创建集群
	redis-cli --cluster create 173.38.0.11:6379 173.38.0.12:6379 173.38.0.13:6379 173.38.0.14:6379 173.38.0.15:6379 173.38.0.16:6379 --cluster-replicas 1

	Node 173.38.0.12:6379 is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0.
	https://blog.csdn.net/qq_37300273/article/details/77267377
	删除nodes-xxx.conf配置文件，删除pid文件，删除各节点aof,rdb文件,杀掉所有redis进程，然后重启redis集群搞定
	https://blog.csdn.net/xinyuanjieyiwife/article/details/109581272
		解决办法：
		1，先kill redis创建的集群节点进程
		2，删除每个redis节点的appendonly.aof文件，dump.rdb文件，nodes.conf文件
		并且执行./redis-cli flushdb 命令，清空每个redis里面的数据。
		3，重启每个redis节点，再执行集群操作即可

	docker exec -it redis-[123456] /bin/bash
	ls
	//rm -f /data/*
	退出，再删除之前用代码创建的/mydata文件夹


	集群成功
	操作，一定要【-c】，-c代表的是集群，redis-cli只是单机
	root@c9bd957ed44c:/data# redis-cli -c
	查看集群信息
	127.0.0.1:6379> cluster info
	cluster_state:ok
	cluster_slots_assigned:16384
	cluster_slots_ok:16384
	cluster_slots_pfail:0
	cluster_slots_fail:0
	cluster_known_nodes:6
	cluster_size:3  集群数量3，主从复制
	cluster_current_epoch:6
	cluster_my_epoch:1
	cluster_stats_messages_ping_sent:1831
	cluster_stats_messages_pong_sent:1858
	cluster_stats_messages_sent:3689
	cluster_stats_messages_ping_received:1853
	cluster_stats_messages_pong_received:1831
	cluster_stats_messages_meet_received:5
	cluster_stats_messages_received:3689
	查看节点，三主三从
	127.0.0.1:6379> cluster nodes
	5a6a79b50cd829e712d88a0ecf26fb46d4445663 173.38.0.16:6379@16379 slave 84b22a0592dc25fc3a3b21b613b27a1e5dc22792 0 1614308838000 2 connected
	e729d5d2069c74bf61cadf47fc370d9d1bcca2eb 173.38.0.13:6379@16379 master - 0 1614308838380 3 connected 10923-16383
	bc6a2184d0596f97c1c0440f1062f33169343365 173.38.0.15:6379@16379 slave 4ddf8e94a52400a28f040782958bdf5a983641d4 0 1614308837377 1 connected
	84b22a0592dc25fc3a3b21b613b27a1e5dc22792 173.38.0.12:6379@16379 master - 0 1614308838000 2 connected 5461-10922
	e268d75b2744663e19367c1140e6b63b5df06afa 173.38.0.14:6379@16379 slave e729d5d2069c74bf61cadf47fc370d9d1bcca2eb 0 1614308838582 3 connected
	4ddf8e94a52400a28f040782958bdf5a983641d4 173.38.0.11:6379@16379 myself,master - 0 1614308836000 1 connected 0-5460
	127.0.0.1:6379> set a 1
	-> Redirected to slot [15495] located at 173.38.0.13:6379  是.13主机处理了请求
	OK
	173.38.0.13:6379>

	然后另开一个xshell连接，停掉.13的容器redis-3（docker stop redis-3）

	然后回到该xshell，退出，再连接，获取a
	173.38.0.13:6379>
		root@c9bd957ed44c:/data# redis-cli -c
	127.0.0.1:6379> get a
	-> Redirected to slot [15495] located at 173.38.0.14:6379  是.14处理
	"1"
	173.38.0.14:6379>

	集群高可用成功

	另一个再次开启redis-3
	[root@VM_0_13_centos /]# docker start redis-3
	redis-3

	另一个xshell链接中

	[root@VM_0_13_centos /]#
	173.38.0.14:6379>
	root@c9bd957ed44c:/data# redis-cli -c
	127.0.0.1:6379> cluster nodes
	5a6a79b50cd829e712d88a0ecf26fb46d4445663 173.38.0.16:6379@16379 slave 84b22a0592dc25fc3a3b21b613b27a1e5dc22792 0 1614309563939 2 connected
	e729d5d2069c74bf61cadf47fc370d9d1bcca2eb 173.38.0.13:6379@16379 slave e268d75b2744663e19367c1140e6b63b5df06afa 0 1614309565000 7 connected
	bc6a2184d0596f97c1c0440f1062f33169343365 173.38.0.15:6379@16379 slave 4ddf8e94a52400a28f040782958bdf5a983641d4 0 1614309564943 1 connected
	84b22a0592dc25fc3a3b21b613b27a1e5dc22792 173.38.0.12:6379@16379 master - 0 1614309565946 2 connected 5461-10922
	e268d75b2744663e19367c1140e6b63b5df06afa 173.38.0.14:6379@16379 master - 0 1614309565000 7 connected 10923-16383
	4ddf8e94a52400a28f040782958bdf5a983641d4 173.38.0.11:6379@16379 myself,master - 0 1614309564000 1 connected 0-5460
	127.0.0.1:6379> get a
	-> Redirected to slot [15495] located at 173.38.0.14:6379
	"1"
	173.38.0.14:6379> set b
	(error) ERR wrong number of arguments for 'set' command
	173.38.0.14:6379> set b2
	(error) ERR wrong number of arguments for 'set' command
	173.38.0.14:6379> set b 2
	-> Redirected to slot [3300] located at 173.38.0.11:6379
	OK
	173.38.0.11:6379> get b
	"2"
	173.38.0.11:6379>












	集群整体代码整体如下
	[root@VM_0_13_centos /]# docker ps
	CONTAINER ID   IMAGE     COMMAND   CREATED   STATUS    PORTS     NAMES
	[root@VM_0_13_centos /]# docker ps -a
	CONTAINER ID   IMAGE     COMMAND   CREATED   STATUS    PORTS     NAMES
	[root@VM_0_13_centos /]# docker images
	REPOSITORY   TAG       IMAGE ID       CREATED       SIZE
	mysql        latest    c8562eaf9d81   5 weeks ago   546MB
	redis        latest    621ceef7494a   6 weeks ago   104MB
	golang       latest    5f46b413e8f5   6 weeks ago   839MB
	[root@VM_0_13_centos /]# clear
	创建本地映射文件
	[root@VM_0_13_centos /]# for port in $(seq 1 6); \
	> do \
	> mkdir -p /mydata/redis/node-${port}/conf
	> touch /mydata/redis/node-${port}/conf/redis.conf
	> cat << EOF >/mydata/redis/node-${port}/conf/redis.conf
	> port 6379
	> bind 0.0.0.0
	> cluster-enabled yes
	> cluster-config-file nodes.conf
	> cluster-node-timeout 5000
	> cluster-announce-ip 173.38.0.1${port}
	> cluster-announce-port 6379
	> cluster-announce-bus-port 16379
	> appendonly yes
	> EOF
	> done
	启动六个redis容器
	[root@VM_0_13_centos /]# docker run -p 6371:6379 -p 16371:16379 --name redis-1 \
	> -v /mydata/redis/node-1/data:/data \
	> -v /mydata/redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
	> -d --net redis-colony --ip 173.38.0.11 redis redis-server /etc/redis/redis.conf
	c9bd957ed44cfd30880b0d338d8c844ab203ada7224928d0bc78104c041c5154
	[root@VM_0_13_centos /]# docker run -p 6372:6379 -p 16372:16379 --name redis-2 \
	> -v /mydata/redis/node-2/data:/data \
	> -v /mydata/redis/node-2/conf/redis.conf:/etc/redis/redis.conf \
	> -d --net redis-colony --ip 173.38.0.12 redis redis-server /etc/redis/redis.conf
	d1416df1a96437456fc48dc5c973894dd55a91527cbfc8cf6529cad04c1b7c14
	[root@VM_0_13_centos /]# docker run -p 6373:6379 -p 16373:16379 --name redis-3 \
	> -v /mydata/redis/node-3/data:/data \
	> -v /mydata/redis/node-3/conf/redis.conf:/etc/redis/redis.conf \
	> -d --net redis-colony --ip 173.38.0.13 redis redis-server /etc/redis/redis.conf
	855f036a42d3814e7361af35677f25dbb4f7348da6a286eb1710201359c0a108
	[root@VM_0_13_centos /]# docker run -p 6374:6379 -p 16374:16379 --name redis-4 \
	> -v /mydata/redis/node-4/data:/data \
	> -v /mydata/redis/node-4/conf/redis.conf:/etc/redis/redis.conf \
	> -d --net redis-colony --ip 173.38.0.14 redis redis-server /etc/redis/redis.conf
	4b8fb5acd10286f7869c923c13233f56292c16b2b78779f415c9400faed376c8
	[root@VM_0_13_centos /]# docker run -p 6375:6379 -p 16375:16379 --name redis-5 \
	> -v /mydata/redis/node-5/data:/data \
	> -v /mydata/redis/node-5/conf/redis.conf:/etc/redis/redis.conf \
	> -d --net redis-colony --ip 173.38.0.15 redis redis-server /etc/redis/redis.conf
	ef12597cec64dc97cbc322d92cec596dda4d940770dedb5446d29056346223d3
	[root@VM_0_13_centos /]# docker run -p 6376:6379 -p 16376:16379 --name redis-6 \
	> -v /mydata/redis/node-6/data:/data \
	> -v /mydata/redis/node-6/conf/redis.conf:/etc/redis/redis.conf \
	> -d --net redis-colony --ip 173.38.0.16 redis redis-server /etc/redis/redis.conf
	71f11587d1f945b5a33ed5f1f66907df498e4cc98699e343f69be0686c1e5d17
	进入redis-1中开始集群
	[root@VM_0_13_centos /]# docker exec -it redis-1 /bin/bash
	root@c9bd957ed44c:/data# ls
	appendonly.aof	nodes.conf
	root@c9bd957ed44c:/data# redis-cli --cluster create 173.38.0.11:6379 173.38.0.12:6379 173.38.0.13:6379 173.38.0.14:6379 173.38.0.15:6379 173.38.0.16:6379 --cluster-replicas 1
	>>> Performing hash slots allocation on 6 nodes...
	Master[0] -> Slots 0 - 5460
	Master[1] -> Slots 5461 - 10922
	Master[2] -> Slots 10923 - 16383
	Adding replica 173.38.0.15:6379 to 173.38.0.11:6379
	Adding replica 173.38.0.16:6379 to 173.38.0.12:6379
	Adding replica 173.38.0.14:6379 to 173.38.0.13:6379
	M: 4ddf8e94a52400a28f040782958bdf5a983641d4 173.38.0.11:6379
	   slots:[0-5460] (5461 slots) master
	M: 84b22a0592dc25fc3a3b21b613b27a1e5dc22792 173.38.0.12:6379
	   slots:[5461-10922] (5462 slots) master
	M: e729d5d2069c74bf61cadf47fc370d9d1bcca2eb 173.38.0.13:6379
	   slots:[10923-16383] (5461 slots) master
	S: e268d75b2744663e19367c1140e6b63b5df06afa 173.38.0.14:6379
	   replicates e729d5d2069c74bf61cadf47fc370d9d1bcca2eb
	S: bc6a2184d0596f97c1c0440f1062f33169343365 173.38.0.15:6379
	   replicates 4ddf8e94a52400a28f040782958bdf5a983641d4
	S: 5a6a79b50cd829e712d88a0ecf26fb46d4445663 173.38.0.16:6379
	   replicates 84b22a0592dc25fc3a3b21b613b27a1e5dc22792
	Can I set the above configuration? (type 'yes' to accept): yes
	>>> Nodes configuration updated
	>>> Assign a different config epoch to each node
	>>> Sending CLUSTER MEET messages to join the cluster
	Waiting for the cluster to join
	..
	>>> Performing Cluster Check (using node 173.38.0.11:6379)
	M: 4ddf8e94a52400a28f040782958bdf5a983641d4 173.38.0.11:6379
	   slots:[0-5460] (5461 slots) master
	   1 additional replica(s)
	S: 5a6a79b50cd829e712d88a0ecf26fb46d4445663 173.38.0.16:6379
	   slots: (0 slots) slave
	   replicates 84b22a0592dc25fc3a3b21b613b27a1e5dc22792
	M: e729d5d2069c74bf61cadf47fc370d9d1bcca2eb 173.38.0.13:6379
	   slots:[10923-16383] (5461 slots) master
	   1 additional replica(s)
	S: bc6a2184d0596f97c1c0440f1062f33169343365 173.38.0.15:6379
	   slots: (0 slots) slave
	   replicates 4ddf8e94a52400a28f040782958bdf5a983641d4
	M: 84b22a0592dc25fc3a3b21b613b27a1e5dc22792 173.38.0.12:6379
	   slots:[5461-10922] (5462 slots) master
	   1 additional replica(s)
	S: e268d75b2744663e19367c1140e6b63b5df06afa 173.38.0.14:6379
	   slots: (0 slots) slave
	   replicates e729d5d2069c74bf61cadf47fc370d9d1bcca2eb
	[OK] All nodes agree about slots configuration.
	>>> Check for open slots...
	>>> Check slots coverage...
	[OK] All 16384 slots covered.
	root@c9bd957ed44c:/data#
	集群成功






	}