# --- k8s-master01 ---
# Per-node parameters; referenced below (previously assigned but never used,
# which forced hard-coded copies and caused the port mix-ups in the worker blocks).
var1=k8s-master01        # VM / host name
var2=10.160.169.161      # target static IP
port=17061               # fixed graphics-console port
## clone vm from the centos7-2c4g template
virt-clone -o centos7-2c4g -n "${var1}" -f "/var/lib/libvirt/images/${var1}.img"
# pin the console to a fixed port instead of autoport, then re-define the domain
sed -i "s#port='-1' autoport='yes'#port='${port}' autoport='no'#g" "/etc/libvirt/qemu/${var1}.xml";
virsh define "/etc/libvirt/qemu/${var1}.xml";
# optional resize (memory 4G->8G in KiB, vcpu 2->4); re-define after editing
# sed -i 's/4194304/8388608/g' /etc/libvirt/qemu/k8s-master01.xml;
# sed -i 's#2</vcpu>#4</vcpu>#g' /etc/libvirt/qemu/k8s-master01.xml;
# virsh define /etc/libvirt/qemu/k8s-master01.xml;
virsh start "${var1}";

## the fresh clone still boots with the template address 10.160.169.220
## rename hostname
ssh root@10.160.169.220 "sed -i 's#localhost#${var1}#g' /etc/hostname"
ssh root@10.160.169.220 "sed -i 's#localdomain#1718281828.com#g' /etc/hostname"

## modify ip, then reboot so hostname + IP take effect
ssh root@10.160.169.220 "sed -i 's#IPADDR=\"10.160.169.220\"#IPADDR=\"${var2}\"#g' /etc/sysconfig/network-scripts/ifcfg-eth0"
ssh root@10.160.169.220 "reboot"

# --- k8s-master02 ---
# Per-node parameters; referenced below instead of hard-coded copies.
var1=k8s-master02        # VM / host name
var2=10.160.169.162      # target static IP
port=17062               # fixed graphics-console port
## clone vm from the centos7-2c4g template
virt-clone -o centos7-2c4g -n "${var1}" -f "/var/lib/libvirt/images/${var1}.img"
# pin the console to a fixed port instead of autoport, then re-define the domain
sed -i "s#port='-1' autoport='yes'#port='${port}' autoport='no'#g" "/etc/libvirt/qemu/${var1}.xml";
virsh define "/etc/libvirt/qemu/${var1}.xml";
# optional resize (memory 4G->8G in KiB, vcpu 2->4); re-define after editing
# sed -i 's/4194304/8388608/g' /etc/libvirt/qemu/k8s-master02.xml;
# sed -i 's#2</vcpu>#4</vcpu>#g' /etc/libvirt/qemu/k8s-master02.xml;
# virsh define /etc/libvirt/qemu/k8s-master02.xml;
virsh start "${var1}";

## the fresh clone still boots with the template address 10.160.169.220
## rename hostname
ssh root@10.160.169.220 "sed -i 's#localhost#${var1}#g' /etc/hostname"
ssh root@10.160.169.220 "sed -i 's#localdomain#1718281828.com#g' /etc/hostname"

## modify ip, then reboot so hostname + IP take effect
ssh root@10.160.169.220 "sed -i 's#IPADDR=\"10.160.169.220\"#IPADDR=\"${var2}\"#g' /etc/sysconfig/network-scripts/ifcfg-eth0"
ssh root@10.160.169.220 "reboot"

# --- k8s-master03 ---
# Per-node parameters; referenced below instead of hard-coded copies.
var1=k8s-master03        # VM / host name
var2=10.160.169.163      # target static IP
port=17063               # fixed graphics-console port
## clone vm from the centos7-2c4g template
virt-clone -o centos7-2c4g -n "${var1}" -f "/var/lib/libvirt/images/${var1}.img"
# pin the console to a fixed port instead of autoport, then re-define the domain
sed -i "s#port='-1' autoport='yes'#port='${port}' autoport='no'#g" "/etc/libvirt/qemu/${var1}.xml";
virsh define "/etc/libvirt/qemu/${var1}.xml";
# optional resize (memory 4G->8G in KiB, vcpu 2->4); re-define after editing
# sed -i 's/4194304/8388608/g' /etc/libvirt/qemu/k8s-master03.xml;
# sed -i 's#2</vcpu>#4</vcpu>#g' /etc/libvirt/qemu/k8s-master03.xml;
# virsh define /etc/libvirt/qemu/k8s-master03.xml;
virsh start "${var1}";

## the fresh clone still boots with the template address 10.160.169.220
## rename hostname
ssh root@10.160.169.220 "sed -i 's#localhost#${var1}#g' /etc/hostname"
ssh root@10.160.169.220 "sed -i 's#localdomain#1718281828.com#g' /etc/hostname"

## modify ip, then reboot so hostname + IP take effect
ssh root@10.160.169.220 "sed -i 's#IPADDR=\"10.160.169.220\"#IPADDR=\"${var2}\"#g' /etc/sysconfig/network-scripts/ifcfg-eth0"
ssh root@10.160.169.220 "reboot"

######
# --- k8s-worker01 ---
# Per-node parameters; referenced below instead of hard-coded copies.
var1=k8s-worker01        # VM / host name
var2=10.160.169.181      # target static IP
port=17081               # fixed: the sed below previously hard-coded 17063 (master03's port)
## clone vm from the centos7-2c4g template
virt-clone -o centos7-2c4g -n "${var1}" -f "/var/lib/libvirt/images/${var1}.img"
# pin the console to a fixed port instead of autoport, then re-define the domain
sed -i "s#port='-1' autoport='yes'#port='${port}' autoport='no'#g" "/etc/libvirt/qemu/${var1}.xml";
virsh define "/etc/libvirt/qemu/${var1}.xml";
# optional resize (memory 4G->8G in KiB, vcpu 2->4); re-define after editing
# sed -i 's/4194304/8388608/g' /etc/libvirt/qemu/k8s-worker01.xml;
# sed -i 's#2</vcpu>#4</vcpu>#g' /etc/libvirt/qemu/k8s-worker01.xml;
# virsh define /etc/libvirt/qemu/k8s-worker01.xml;
virsh start "${var1}";

## the fresh clone still boots with the template address 10.160.169.220
## rename hostname
ssh root@10.160.169.220 "sed -i 's#localhost#${var1}#g' /etc/hostname"
ssh root@10.160.169.220 "sed -i 's#localdomain#1718281828.com#g' /etc/hostname"

## modify ip, then reboot so hostname + IP take effect
ssh root@10.160.169.220 "sed -i 's#IPADDR=\"10.160.169.220\"#IPADDR=\"${var2}\"#g' /etc/sysconfig/network-scripts/ifcfg-eth0"
ssh root@10.160.169.220 "reboot"
######

######
# --- k8s-worker02 ---
# Per-node parameters; referenced below instead of hard-coded copies.
var1=k8s-worker02        # VM / host name
var2=10.160.169.182      # target static IP
port=17082               # fixed: the sed below previously hard-coded 17064
## clone vm from the centos7-2c4g template
virt-clone -o centos7-2c4g -n "${var1}" -f "/var/lib/libvirt/images/${var1}.img"
# pin the console to a fixed port instead of autoport, then re-define the domain
sed -i "s#port='-1' autoport='yes'#port='${port}' autoport='no'#g" "/etc/libvirt/qemu/${var1}.xml";
virsh define "/etc/libvirt/qemu/${var1}.xml";
# optional resize (memory 4G->8G in KiB, vcpu 2->4); re-define after editing
# sed -i 's/4194304/8388608/g' /etc/libvirt/qemu/k8s-worker02.xml;
# sed -i 's#2</vcpu>#4</vcpu>#g' /etc/libvirt/qemu/k8s-worker02.xml;
# virsh define /etc/libvirt/qemu/k8s-worker02.xml;
virsh start "${var1}";

## the fresh clone still boots with the template address 10.160.169.220
## rename hostname
ssh root@10.160.169.220 "sed -i 's#localhost#${var1}#g' /etc/hostname"
ssh root@10.160.169.220 "sed -i 's#localdomain#1718281828.com#g' /etc/hostname"

## modify ip, then reboot so hostname + IP take effect
ssh root@10.160.169.220 "sed -i 's#IPADDR=\"10.160.169.220\"#IPADDR=\"${var2}\"#g' /etc/sysconfig/network-scripts/ifcfg-eth0"
ssh root@10.160.169.220 "reboot"
######

######
# --- k8s-worker03 ---
# Per-node parameters; referenced below instead of hard-coded copies.
var1=k8s-worker03        # VM / host name
var2=10.160.169.183      # target static IP
port=17083               # fixed: the sed below previously hard-coded 17065
## clone vm from the centos7-2c4g template
virt-clone -o centos7-2c4g -n "${var1}" -f "/var/lib/libvirt/images/${var1}.img"
# pin the console to a fixed port instead of autoport, then re-define the domain
sed -i "s#port='-1' autoport='yes'#port='${port}' autoport='no'#g" "/etc/libvirt/qemu/${var1}.xml";
virsh define "/etc/libvirt/qemu/${var1}.xml";
# optional resize (memory 4G->8G in KiB, vcpu 2->4); re-define after editing
# sed -i 's/4194304/8388608/g' /etc/libvirt/qemu/k8s-worker03.xml;
# sed -i 's#2</vcpu>#4</vcpu>#g' /etc/libvirt/qemu/k8s-worker03.xml;
# virsh define /etc/libvirt/qemu/k8s-worker03.xml;
virsh start "${var1}";

## the fresh clone still boots with the template address 10.160.169.220
## rename hostname
ssh root@10.160.169.220 "sed -i 's#localhost#${var1}#g' /etc/hostname"
ssh root@10.160.169.220 "sed -i 's#localdomain#1718281828.com#g' /etc/hostname"

## modify ip, then reboot so hostname + IP take effect
ssh root@10.160.169.220 "sed -i 's#IPADDR=\"10.160.169.220\"#IPADDR=\"${var2}\"#g' /etc/sysconfig/network-scripts/ifcfg-eth0"
ssh root@10.160.169.220 "reboot"

# @all or DNSPod
# Append cluster name resolution to /etc/hosts on every node.
# fixed: '<-' is not a redirection operator; the here-document operator is '<<'
# (quoted 'EOF' keeps the body literal, with no expansion).
cat >> /etc/hosts <<'EOF'
#add k8s hosts
10.160.169.161 k8s-master01.1718281828.com
10.160.169.162 k8s-master02.1718281828.com
10.160.169.163 k8s-master03.1718281828.com
10.160.169.181 k8s-worker01.1718281828.com
10.160.169.182 k8s-worker02.1718281828.com
10.160.169.183 k8s-worker03.1718281828.com
EOF

# @all
# Install kubernetes prerequisite tools and prepare the Docker repo on every node.
yum -y install socat conntrack ebtables ipset ipvsadm
# firewalld interferes with kube-proxy/CNI traffic; keep it from starting on boot
systemctl disable firewalld.service
# remove distro docker packages, then add the Aliyun docker-ce mirror repo
yum -y remove docker* && yum -y install yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# list available docker-ce versions, newest first
yum list docker-ce --showduplicates | sort -r
yum -y upgrade

# @master01
# Install the kubekey (kk) CLI.
curl -sfLk https://get-kk.kubesphere.io | sh -
# Alternative: fetch a pinned release tarball directly.
# fixed: '-oL' parses as '-o L' and wrote the tarball to a file literally
# named "L"; '-O' keeps the remote file name (matches the usage at the
# acme.sh download later in this runbook).
curl -OL https://github.com/kubesphere/kubekey/releases/download/v3.0.10/kubekey-v3.0.10-linux-amd64.tar.gz
# tar -xzf kubekey-v3.0.10-linux-amd64.tar.gz   # extract kk when using the tarball path
mv kk /usr/local/bin/

# all-in-one @master01
# kk create cluster --container-manager containerd --with-kubernetes v1.19.8 --with-kubesphere v3.1.0 

# Use the CN download zone for images/binaries (required behind the GFW).
export KKZONE=cn
# kk create config --with-kubernetes v1.19.8 --with-kubesphere v3.1.0 -f config-sample.yaml
# Generate a sample multi-node config, work on a copy, then create the cluster from it.
kk create config -f k8s-cluster-config-sample.yaml
cp k8s-cluster-config-sample.yaml k8s-cluster-config-test.yaml
kk create cluster -f k8s-cluster-config-test.yaml


# @master
# Smoke-test the new cluster with a throwaway nginx workload.
kubectl run nginx --labels="app=brokereye" --image=uhub.service.ucloud.cn/basic/nginx:1.16.1 --port=80
# fixed: the image must be passed via the --image flag
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=32188 --type=NodePort --target-port=80 --name=nginx-service
# fixed: multiple resource types are comma-separated with NO space
kubectl get pod,service
kubectl get cs
kubectl get nodes
kubectl apply -f sample.yaml

# fixed: extension typo .yam -> .yaml (must match the file used by the next command)
kk create config --with-kubesphere -f k8s-cluster-config-sphere.yaml
kk create cluster -f k8s-cluster-config-sphere.yaml

#####################################################
###              Welcome to KubeSphere!           ###
#####################################################

Console: http://10.160.169.161:30880
Account: admin
Password: P@88w0rd
newPassword: Pi.1415926

NOTES：
  1. After you log into the console, please check the
     monitoring status of service components in
     "Cluster Management". If any service is not
     ready, please wait patiently until all components
     are up and running.
  2. Please change the default password after login.

#####################################################
https://kubesphere.io             2023-11-03 10:25:29
#####################################################
10:25:32 CST skipped: [k8s-master03]
10:25:32 CST skipped: [k8s-master02]
10:25:32 CST success: [k8s-master01]
10:25:32 CST Pipeline[CreateClusterPipeline] execute successfully
Installation is complete.

Please check the result using the command:
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f

# Locate the DevOps Argo CD service, port-forward it, and recover the
# auto-generated initial admin password from its secret.
kubectl get services -A | grep argo
kubectl port-forward --address 0.0.0.0 -n argocd services/devops-argocd-server 30080:80
kubectl get secret -A |grep argo
kubectl get secret -n argocd argocd-initial-admin-secret -o yaml | awk '/password/ {print $2}' | base64 -d
# same password, extracted directly via jsonpath (no awk needed)
kubectl get secret -n argocd argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d

# Expose the Kubernetes dashboard and find its login token.
kubectl get services -A | grep dash
kubectl get secret -n kubesphere-system |grep token
kubectl port-forward --address 0.0.0.0 -n kubernetes-dashboard services/kubernetes-dashboard 30443:443
# NOTE(review): the token secret suffix (-4llrg) is cluster-specific — look it up first
kubectl describe secret -n kubesphere-system kubesphere-token-4llrg

# install jenkins @ldap user admin/Pi.1415926
http://10.160.169.161:30180/

# install argoCD @user admin/4rPSK5d-6ucfLDzf
http://10.160.169.161:30080/

# install harbor @user admin / Harbor12345
helm pull harbor/harbor --version=1.13.1
# point the chart's external URL at our domain before installing
sed -i 's/core.harbor.domain/core.harbor.1718281828.com/g' values.yaml
helm install harbor-registry ./ -n kubesphere-devops-system
# After the manifest file changes, the apiserver restarts automatically; if sed
# does not take effect, apply the same change manually with vi on every master node.
# (first sed adds the flag after --advertise-address; second sed removes it again)
sed -i '/--advertise-address=/a\    - --service-node-port-range=1-65535' /etc/kubernetes/manifests/kube-apiserver.yaml
sed -i '/- --service-node-port-range=1-65535/d' /etc/kubernetes/manifests/kube-apiserver.yaml

# The two ':NN,MMs/...' lines below are vi ex-commands to run inside the editor,
# remapping the router's nodePorts to plain 80/443.
kubectl edit service -n kubesphere-controls-system kubesphere-router-kubesphere-system
:37, 43s/nodePort: 30xxx/nodePort: 80/g
:44, 49s/nodePort: 30xxx/nodePort: 443/g

https://core.harbor.1718281828.com/
@user ruoyi / Ruoyi123 

# Issue a Let's Encrypt certificate for the harbor domain from inside a
# throwaway pod (manual DNS-01 challenge), then copy it out of the pod.
kubectl run acme --rm -it --restart='Never' --image=uhub.service.ucloud.cn/basic/oraclejdk:17u06 -n x-public --command -- bash
# kubectl run acme --rm -it --restart='Never' --image=uhub.service.ucloud.cn/basic/nginx:1.16.1 -n x-public --command -- bash
# curl  https://get.acme.sh | sh
# git clone https://gitee.com/neilpang/acme.sh.git
# fetch acme.sh from the gitee mirror (commands below run inside the pod)
curl -k -OL https://gitee.com/neilpang/acme.sh/repository/archive/master.zip
unzip master.zip && cd acme.sh-master/
mkdir /root/.acme.sh
acme.sh --set-default-ca --server letsencrypt
acme.sh --install -m harold.zhou@1718281828.com
# first run prints the TXT record to publish; second run (--renew) completes issuance
acme.sh --issue --dns -d core.harbor.1718281828.com --yes-I-know-dns-manual-mode-enough-go-ahead-please
# add dns record "_acme-challenge.core.harbor TXT hU30MVl4bq6-xBOjXJDBtKSYfHHgoJQA1sw2U8NOM68"
acme.sh --renew -d core.harbor.1718281828.com --yes-I-know-dns-manual-mode-enough-go-ahead-please
# copy the issued cert directory from the pod to the local host
kubectl cp -n x-public acme:/root/.acme.sh/core.harbor.1718281828.com /tmp/core.harbor.1718281828.com/

# Replace the harbor ingress TLS secret with the new cert (base64-encode each file,
# then paste the values into the secret with kubectl edit).
kubectl get secrets -n kubesphere-devops-system harbor-registry-ingress -o yaml > harbor-registry-ingress-secrets.yml
cp harbor-registry-ingress-secrets.yml harbor-registry-ingress-secrets-origin.yml
cat ca.cer | base64 -w 0
cat core.harbor.1718281828.com.cer | base64 -w 0
cat core.harbor.1718281828.com.key | base64 -w 0
#cat root_bundle.crt | base64 -w 0
#cat core.harbor.1718281828.com.crt | base64 -w 0
#cat core.harbor.1718281828.com.key | base64 -w 0
kubectl edit secrets -n kubesphere-devops-system harbor-registry-ingress
# x509: certificate signed by unknown authority
# cp ca.cer /etc/docker/certs.d/core.harbor.1718281828.com/

# install nexus
helm pull sonatype/nexus-repository-manager --version=61.0.2
# NOTE(review): the line-addressed sed (110) is tied to chart 61.0.2's
# values.yaml layout — verify before running.
sed -i 's/repo.demo/nexus3.1718281828.com/g' values.yaml
sed -i '110s/enabled: false/enabled: true/g' values.yaml
sed -i 's/ingressClassName: nginx/ingressClassName: ""/g' values.yaml
helm install nexus-registry ./ -n nexus
# rollback helper if the install goes wrong
helm uninstall nexus-registry -n nexus
kubectl describe ingress -n nexus
# throwaway curl pod for probing the ingress from inside the cluster
kubectl run curl -it --rm --image=radial/busyboxplus:curl -n nexus

admin / 2c677066-d84a-4b61-8156-03cbb7adf369

# install mysql
helm pull bitnami/mysql --version=9.14.3
# NOTE(review): line-addressed seds assume chart 9.14.3's values.yaml layout
sed -i '115s/rootPassword: ""/rootPassword: "123456"/g' values.yaml
sed -i '123s/database: "my_database"/database: "db_test"/g' values.yaml
sed -i '127s/username: ""/username: "test"/g' values.yaml
sed -i '130s/password: ""/password: "123456"/g' values.yaml
kubectl create namespace x-public
helm install x-mysql ./ -n x-public
kubectl get pods -w --namespace x-public
echo Username: root MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace x-public x-mysql -o jsonpath="{.data.mysql-root-password}" | base64 -d)
# throwaway client pod; the mysql/SQL statements below run inside it
kubectl run --rm -it --restart='Never' x-mysql-client --image docker.io/bitnami/mysql:8.0.35-debian-11-r0 -n x-public --env MYSQL_ROOT_PASSWORD=123456 --command -- bash
mysql -hx-mysql.x-public.svc.cluster.local -uroot -p"$MYSQL_ROOT_PASSWORD"
# nacos database (SQL, run inside the mysql client as root)
create database nacos;
create user 'nacos'@'%' IDENTIFIED BY '123456';
grant all privileges ON nacos.* TO 'nacos'@'%';
alter user 'nacos'@'%' IDENTIFIED BY 'nacos123';
# reconnect as the nacos user and load the schema
mysql -h x-mysql.x-public.svc.cluster.local -unacos -pnacos123
create table t_test (c1 varchar(255), c2 int);
source ry_config_20220929.sql; # with an external database the tables must be created manually, otherwise Nacos fails with "No DataSource set"
# ruoyi database
create database db_ruoyi;
create database db_seata;
create user 'ruoyi'@'%' IDENTIFIED BY 'ruoyi123';
grant all privileges ON db_ruoyi.* TO 'ruoyi'@'%';
grant all privileges ON db_seata.* TO 'ruoyi'@'%';
mysql -hx-mysql.x-public.svc.cluster.local -uruoyi -pruoyi123
create table t_test (c1 varchar(255), c2 int);


# install nacos
# Patch the upstream chart's values.yaml for this cluster before installing.
# NOTE(review): the numeric sed addresses (59, 68, 30, 31-38, ...) are tied to
# one specific revision of values.yaml — verify line numbers before running.
git clone https://github.com/nacos-group/nacos-k8s.git
sed -i 's/namespace: default/namespace: x-public/g' values.yaml
# expose via ClusterIP + ingress instead of NodePort
sed -i 's/#type: ClusterIP/type: ClusterIP/g' values.yaml
sed -i 's/type: NodePort/#type: NodePort/g' values.yaml
sed -i 's/nodePort: 30000/#nodePort: 30000/g' values.yaml
sed -i '59s/enabled: false/enabled: true/g' values.yaml
sed -i '68s/ingressClassName: "nginx"/ingressClassName: ""/g' values.yaml
sed -i 's/nacos.example.com/nacos.1718281828.com/g' values.yaml
# switch storage from embedded to the external MySQL created above
sed -i '30s/type: embedded/#type: embedded/g' values.yaml
sed -i '31,38s/#//g' values.yaml
sed -i '33s/localhost/x-mysql.x-public.svc.cluster.local/g' values.yaml
sed -i '36s/username: usernmae/username: nacos/g' values.yaml
sed -i '37s/password: password/password: nacos123/g' values.yaml
# dry-run to review rendered manifests, then install for real
helm install nacos ./ -n x-public --dry-run --debug
helm install nacos ./ -n x-public
http://nacos.1718281828.com/

# install redis
helm pull bitnami/redis --version=18.3.2
# NOTE(review): line-addressed seds assume chart 18.3.2's values.yaml layout
sed -i '24s/password: ""/password: "redis123"/g' values.yaml
# single-node standalone instead of master+replicas
sed -i '119s/replication/standalone/g' values.yaml
helm install x-redis ./ -n x-public --dry-run --debug
helm install x-redis ./ -n x-public
kubectl get pods -w -n x-public
# in-cluster DNS name of the master service:
x-redis-master.x-public.svc.cluster.local
echo REDIS_PASSWORD=$(kubectl get secret --namespace x-public x-redis -o jsonpath="{.data.redis-password}" | base64 -d)
# throwaway client pod; the redis-cli line below runs inside it
kubectl run --rm -it --restart='Never' redis-client --image docker.io/bitnami/redis:7.2.3-debian-11-r1 -n x-public --env REDIS_PASSWORD=redis123 --command -- bash
REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h x-redis-master
kubectl port-forward -n x-public svc/x-redis-master 6379:6379

# create ruoyi pipelines
create 企业空间freebao -> create DevOps项目ruoyi-cloud-test -> create 流水线ruoyi-monitor -> 编辑流水线 -> 持续集成(CI) -> 

DO：
1. kubernetes & kubesphere & jenkins & argocd & nexus & NFS? harbor? Ingress? nacos？redis？

TODO：
1. 