# Shared VPC network for all clusters.
module "vpc" {
  source = "../module/vpc"

  secret_id  = var.secret_id
  secret_key = var.secret_key
}

# cluster-2: CVM instance (2 vCPU / 2 GiB) that will host the second k3s server.
module "cvm_2" {
  source = "../module/cvm"

  # Cloud credentials.
  secret_id  = var.secret_id
  secret_key = var.secret_key
  password   = var.password

  # Instance sizing.
  instance_name = "cluster-2"
  cpu           = 2
  memory        = 2
}

# k3s server for cluster-2, installed on cvm_2 once the instance exists.
module "k3s_2" {
  source     = "../module/k3s"
  depends_on = [module.cvm_2]

  # Addresses of the backing CVM instance.
  public_ip  = module.cvm_2.public_ip
  private_ip = module.cvm_2.private_ip

  # Cluster identity and non-overlapping network ranges.
  server_name  = "k3s-hongkong-2"
  pods_cidr    = "10.2.0.0/16"
  service_cidr = "10.21.0.0/16"
}

# cluster-3: CVM instance (2 vCPU / 2 GiB) that will host the third k3s server.
module "cvm_3" {
  source = "../module/cvm"

  # Cloud credentials.
  secret_id  = var.secret_id
  secret_key = var.secret_key
  password   = var.password

  # Instance sizing.
  instance_name = "cluster-3"
  cpu           = 2
  memory        = 2
}

# k3s server for cluster-3, installed on cvm_3 once the instance exists.
module "k3s_3" {
  source     = "../module/k3s"
  depends_on = [module.cvm_3]

  # Addresses of the backing CVM instance.
  public_ip  = module.cvm_3.public_ip
  private_ip = module.cvm_3.private_ip

  # Cluster identity and non-overlapping network ranges.
  server_name  = "k3s-hongkong-3"
  pods_cidr    = "10.3.0.0/16"
  service_cidr = "10.31.0.0/16"
}

# Persist cluster-2's kubeconfig next to this module (restrictive file mode
# is applied automatically by local_sensitive_file).
resource "local_sensitive_file" "kubeconfig2" {
  filename = "${path.module}/k3s-config2.yaml"
  content  = module.k3s_2.kube_config
}

# Persist cluster-3's kubeconfig next to this module (restrictive file mode
# is applied automatically by local_sensitive_file).
resource "local_sensitive_file" "kubeconfig3" {
  filename = "${path.module}/k3s-config3.yaml"
  content  = module.k3s_3.kube_config
}

# Registers the member clusters with Argo CD and bootstraps the Karmada
# control plane on the host node over SSH. Runs only after both k3s
# clusters are up.
resource "null_resource" "argocd-cluster" {
  depends_on = [module.k3s_3, module.k3s_2]

  connection {
    host     = var.public_ip
    password = var.password
    type     = "ssh"
    user     = "ubuntu"
  }

  triggers = {
    # uuid() yields a new value on every plan, forcing this resource to be
    # replaced — and its provisioners re-run — on every `terraform apply`.
    # Note: while this trigger is present, script_hash below is redundant
    # (the resource is already replaced unconditionally). Kept for
    # backward compatibility; drop `id` if re-runs should only happen when
    # the Jenkinsfile changes.
    id = uuid()
    # Anchor the hash to the module directory instead of the process
    # working directory, so plans do not fail when terraform is invoked
    # from a different path.
    script_hash = filemd5("${path.module}/Jenkinsfile")
  }

  # Ship both member-cluster kubeconfigs and the ingress values to the host.
  provisioner "file" {
    source      = "${path.module}/k3s-config2.yaml"
    destination = "/tmp/k3s-config2.yaml"
  }

  provisioner "file" {
    source      = "${path.module}/k3s-config3.yaml"
    destination = "/tmp/k3s-config3.yaml"
  }

  provisioner "file" {
    source      = "${path.module}/ingress-values.yaml"
    destination = "/tmp/ingress-values.yaml"
  }

  provisioner "remote-exec" {
    inline = [<<EOF

      # Block until both clusters' system pods are Ready before registering.
      kubectl wait --for=condition=Ready pod --all -n kube-system --timeout=900s --kubeconfig=/tmp/k3s-config2.yaml
      kubectl wait --for=condition=Ready pod --all -n kube-system --timeout=900s --kubeconfig=/tmp/k3s-config3.yaml

      # NOTE(review): /tmp/k3s-config1.yaml is not uploaded by this resource;
      # presumably another resource/module places it on the host — verify.
      kubectl config set-context host --kubeconfig=/tmp/k3s-config1.yaml --cluster=cluster.local --user=master-user
      kubectl config set-context cluster2 --kubeconfig=/tmp/k3s-config2.yaml --cluster=cluster.local --user=master-user
      kubectl config set-context cluster3 --kubeconfig=/tmp/k3s-config3.yaml --cluster=cluster.local --user=master-user

      # SECURITY(review): hard-coded Argo CD admin credentials; move the
      # password into a sensitive variable or secret store.
      echo y | argocd login 127.0.0.1:30090 --username admin --password password123

      echo y | argocd cluster add host --kubeconfig=/tmp/k3s-config1.yaml
      echo y | argocd cluster add cluster2 --kubeconfig=/tmp/k3s-config2.yaml
      echo y | argocd cluster add cluster3 --kubeconfig=/tmp/k3s-config3.yaml

      # Install the Karmada control plane.
      karmadactl init --kubeconfig=/tmp/k3s-config1.yaml \
        --karmada-data=./karmada \
        --karmada-pki=./karmada/pki \
        --karmada-apiserver-advertise-address ${var.public_ip} --cert-external-ip ${var.public_ip}

      # Join the member clusters to Karmada.
      karmadactl --kubeconfig ./karmada/karmada-apiserver.config  join cluster2 --cluster-kubeconfig=/tmp/k3s-config2.yaml
      karmadactl --kubeconfig ./karmada/karmada-apiserver.config  join cluster3 --cluster-kubeconfig=/tmp/k3s-config3.yaml

      EOF
    ]
  }
}