package cmd

import (
	"bytes"
	ckeadm "ckeadm/cmd/apis"
	"ckeadm/cmd/apis/v1alpha1"
	"ckeadm/cmd/executor"
	logscmd "ckeadm/cmd/logs"
	"ckeadm/cmd/runner"
	"ckeadm/constants"
	"ckeadm/util"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli"
	"gopkg.in/yaml.v2"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"text/template"
	"time"
)

// CreateCommand returns the "create" subcommand, which brings up a new
// cluster from the configuration file given via its flags.
func CreateCommand() cli.Command {
	return cli.Command{
		Name:   "create",
		Usage:  "Bring the cluster create",
		Action: clusterCreateFromCli,
		Flags: []cli.Flag{
			runner.ConfigPathFlag,
			runner.PrivateKey,
			runner.LogLevelFlag,
			runner.AddressFlag,
			runner.SkipFlag,
		},
	}
}

// clusterCreateFromCli is the action behind "ckeadm create". It installs the
// key components initially; "wait" runs as a service on the default port, so
// no port needs to be passed in.
//
// Flow:
//  1. hash every component CR yaml and compare with the sums recorded by the
//     previous run; a changed CR gets the force-install annotation
//  2. legacy path (configuration without components): render the ansible
//     files and hand off to ClusterCreate (runs in the background)
//  3. component path: validate component dependencies, run the base ansible
//     install (unless --skip=init), back up the CRs as sources, install the
//     base components, and finally apply the CRs on the first master node.
func clusterCreateFromCli(ctx *cli.Context) error {
	logrus.Infof("running ckeadm up")
	logrus.SetLevel(logrus.Level(ctx.Int("v")))

	var path = ctx.String("f")
	var skip = ctx.String("skip")
	var privatekey string
	var err error
	var config *ckeadm.CkeAdmKubernetesConfig

	if ctx.String("address") == "" {
		return fmt.Errorf("arguments --address must be defined for ckeadm create")
	}

	// Read the configuration files and determine whether they contain
	// component definitions (this also drives the vip handling downstream).
	ckeComponents, ckeCluster, ckeNodes, err := runner.GetComponents(path)
	if err != nil {
		return fmt.Errorf("failed to get components: %s", err.Error())
	}

	// Walk the component list, hash each component's CR yaml and compare it
	// with the md5 recorded by the previous run; a mismatch forces that
	// component to be re-installed via the ForceInstall annotation.
	//
	// FIX: this section dereferences ckeComponents, which is nil for legacy
	// configurations (handled further below) — guard it so the legacy path
	// does not panic on a nil pointer.
	if ckeComponents != nil {
		var filePaths []string
		if filePaths, err = util.ListAllFilesInPath(path); err != nil {
			return err
		}

		crMd5File := filepath.Join(constants.CkeCompInstalled, ckeCluster.Name, "cke-cr-md5.txt")
		// best effort: a missing md5 file simply means this is a first install
		var oldCompMd5 []string
		md5Bytes, _ := ioutil.ReadFile(crMd5File)
		if md5Bytes != nil {
			oldCompMd5 = strings.Split(string(md5Bytes), "\n")
		}

		compMd5 := ""
		for index := range *ckeComponents {
			for _, filePath := range filePaths {
				if strings.Contains(filePath, (*ckeComponents)[index].Name+".yaml") {
					var fileBytes []byte
					if fileBytes, err = ioutil.ReadFile(filePath); err != nil {
						return fmt.Errorf("%s: %s", filePath, err)
					}
					md5Ctx := md5.New()
					md5Ctx.Write(fileBytes)
					newMd5 := hex.EncodeToString(md5Ctx.Sum(nil))

					compMd5 += (*ckeComponents)[index].Name + ".yaml:" + newMd5 + "\n"
					// ranging over a nil slice is a no-op, so no nil check needed
					for _, value := range oldCompMd5 {
						if strings.Contains(value, (*ckeComponents)[index].Name+".yaml") && !strings.Contains(value, newMd5) {
							logrus.Infof("CR-File:%s,[Current MD5]:%s,[Previous MD5]:%s", (*ckeComponents)[index].Name+".yaml", newMd5, value)
							if (*ckeComponents)[index].Annotations == nil {
								(*ckeComponents)[index].Annotations = map[string]string{}
							}
							(*ckeComponents)[index].Annotations[constants.ForceInstall] = "true"
						}
					}
				}
			}
		}
		// record the current sums for the next run
		if err = util.WriteFile(crMd5File, compMd5); err != nil {
			return err
		}
	}

	// Legacy configuration without components: fall back to the original
	// ansible-only install flow.
	if ckeComponents == nil {
		if config, err = runner.GetConfigFromFile(path); err != nil {
			return fmt.Errorf("error while get config from %s, err: %s", path, err.Error())
		}
		logrus.Debugf("configration get from %s, list here: \n%+v\n", path, config)

		// --address was already validated at the top of this function
		var watch = &ckeadm.WatchConfig{ListenAddress: ctx.String("address"), ListenPort: constants.WaitPort, LogCacheDir: constants.CkeadmLogCacheDir, CluserID: config.ClusterName, ConfigPath: path}
		config.WatchConfig = watch

		if privatekey, err = runner.GetPrivateKeyFromFile(ctx.String("private-key")); err != nil {
			return fmt.Errorf("error while get connection private key %s, err: %s", ctx.String("private-key"), err.Error())
		}

		// invFile == my_inventory.cfg, allFile == all.yml,
		// sourceFile == sources.yml (backup of cluster.yml)
		invFile, allFile, sourceFile := GetAnsibleConfigPath(config)
		if err = WriteConfigFile(config, invFile, allFile); err != nil {
			return fmt.Errorf("error while write config: %s", err.Error())
		}

		// used later to create the crd sources
		if err = runner.BackUpConfigurations(path, sourceFile); err != nil {
			return fmt.Errorf("error while backup config: %s", err.Error())
		}
		logrus.Infof("installing configration create at: %s, %s, %s\n", invFile, allFile, sourceFile)

		var ansibleArgs = &ckeadm.AnsibleDockerPerms{
			ContainerName: fmt.Sprintf("%s-%s", constants.CkeDeployContainerName, config.ClusterName),
			ImageName:     ImageGetFromConfig(config),
			PrivateKey:    privatekey,
			InvFilePath:   invFile,
			AllFilePath:   allFile,
			SourcesPath:   sourceFile,
		}
		return ClusterCreate(ansibleArgs, config)
	}

	// Component-based flow: check that every declared dependency refers to a
	// component that is actually part of the configuration.
	baseComponents := make([]v1alpha1.CkeComponent, 0)
	ckecomponentsDepend := make(map[string][]string)
	for _, ckecomponent := range *ckeComponents {
		if ckecomponent.Labels["baseComponent"] == "true" {
			baseComponents = append(baseComponents, ckecomponent)
			if len(ckecomponent.Spec.Depend) == 0 {
				ckecomponentsDepend[ckecomponent.Name] = nil
				continue
			}
		}
		ckecomponentsDepend[ckecomponent.Name] = ckecomponent.Spec.Depend
	}
	missingComponent := util.Check(ckecomponentsDepend)
	if len(missingComponent) != 0 {
		return fmt.Errorf("some ckecomponents' depend are missing: %+v ", missingComponent)
	}
	// base components are validated separately: their dependencies must be
	// satisfiable among the base components alone
	baseComponentsDepend := make(map[string][]string)
	for _, ckecomponent := range baseComponents {
		if len(ckecomponent.Spec.Depend) == 0 {
			baseComponentsDepend[ckecomponent.Name] = nil
		} else {
			baseComponentsDepend[ckecomponent.Name] = ckecomponent.Spec.Depend
		}
	}
	missingBaseComponent := util.Check(baseComponentsDepend)
	if len(missingBaseComponent) != 0 {
		return fmt.Errorf("some base ckecomponents' depend are missing: %+v ", missingBaseComponent)
	}

	var watchConfig = &ckeadm.WatchConfig{ListenAddress: ctx.String("address"), ListenPort: constants.WaitPort,
		LogCacheDir: constants.CkeadmLogCacheDir, CluserID: ckeCluster.ObjectMeta.Name, ConfigPath: path}
	keyFile, _ := runner.GetPrivateKeyFile(ctx.String("private-key"))

	// record the start time and mark every node as "preparing"
	executor.RecordBeginTime(ckeCluster.Name)
	for _, node := range *ckeNodes {
		executor.RecordStatus(ckeCluster.Name, node.Name, constants.CkeStatusPrepare, 0, nil)
	}
	if skip != "init" {
		// run the initial ansible install
		if config, err = runner.GetConfigFromComponentFile(ckeComponents, ckeCluster, ckeNodes); err != nil {
			return fmt.Errorf("error while get config from %s, err: %s", path, err.Error())
		}
		logrus.Debugf("configration get from %s, list here: \n%+v\n", path, config)

		config.WatchConfig = watchConfig

		if privatekey, err = runner.GetPrivateKeyFromFile(ctx.String("private-key")); err != nil {
			return fmt.Errorf("error while get connection private key %s, err: %s", ctx.String("private-key"), err.Error())
		}

		// invFile == my_inventory.cfg, allFile == all.yml,
		// sourceFile == sources.yml (backup of cluster.yml)
		invFile, allFile, sourceFile := GetAnsibleConfigPath(config)
		// render the inventory and all.yml files from config
		if err = WriteConfigFile(config, invFile, allFile); err != nil {
			return fmt.Errorf("error while write config: %s", err.Error())
		}

		var ansibleArgs = &ckeadm.AnsibleDockerPerms{
			ContainerName: fmt.Sprintf("%s-%s", constants.CkeDeployContainerName, config.ClusterName),
			ImageName:     ImageGetFromConfig(config),
			PrivateKey:    privatekey,
			InvFilePath:   invFile,
			AllFilePath:   allFile,
			SourcesPath:   sourceFile,
		}
		// this variant waits for the ansible run to finish before returning
		if err = ClusterCreateWithComponent(ansibleArgs, config); err != nil {
			logrus.Errorf("ClusterCreate failed: %s", err.Error())
			return fmt.Errorf("ClusterCreate failed: %s", err.Error())
		}
	}
	// back up the component CRs so they can be applied as crd sources
	sourceFile := constants.SourcesDir + ckeCluster.Name + "/" + constants.SourcesFile
	if err = runner.BackUpComponentConfigurations(ckeComponents, &ckeCluster, ckeNodes, sourceFile); err != nil {
		return fmt.Errorf("error while create crd sources: %s", err.Error())
	}
	logrus.Infof("installing cr create at: %s\n", sourceFile)

	// install the base components first
	err = executor.CreateComponent(&baseComponents, ckeCluster, ckeNodes, keyFile, *watchConfig)
	if err != nil {
		return fmt.Errorf("creating components failed: %s", err.Error())
	}

	// sort the nodes, then pick the first master as the node to apply CRs on
	sort.Sort(util.CkeNodeSort(*ckeNodes))

	clusterAddress := ""
	for _, node := range *ckeNodes {
		if executor.InArray("master", node.Spec.Roles) {
			clusterAddress = node.Spec.Address
			break
		}
	}

	if err = CreateCR(clusterAddress, keyFile, sourceFile, constants.SourcesDir); err != nil {
		return fmt.Errorf("CreateCR failed: %s", err.Error())
	}

	return nil
}

// ClusterCreateWithComponent runs the base ansible install for a
// component-based configuration. Unlike ClusterCreate it runs the
// "cluster-component" playbook and waits for the ansible container to finish
// before returning.
func ClusterCreateWithComponent(ansibleArgs *ckeadm.AnsibleDockerPerms, clusterconfig *ckeadm.CkeAdmKubernetesConfig) error {
	var watchConfig = clusterconfig.WatchConfig

	// clean "done" marker log files of any previous run for this cluster id
	if err := util.DeleteMatchFiles(fmt.Sprintf("%s/%s", watchConfig.LogCacheDir, watchConfig.CluserID), fmt.Sprintf(".*%s", strings.ReplaceAll(constants.AnsibleUpDoneFile, ".", "\\."))); err != nil {
		logrus.Debugf("error while delete logs files: %s", err.Error())
	}

	// make sure the ckeadm-wait progress service is up
	ckeadmwait := logscmd.NewCkeadmWaitService(constants.CkeadmWatchServiceName, constants.WhichCkeadm, watchConfig.ListenAddress, watchConfig.ListenPort, watchConfig.LogCacheDir, false, "", "")
	if !ckeadmwait.IsRunning() {
		if err := ckeadmwait.CreateAndStartService(logscmd.CkeadmWaitServiceTemplate, os.Args[0]); err != nil {
			return fmt.Errorf("error while set up ckeadm wait service: %s", err.Error())
		}
	}

	// configure and start docker if it has not been configured yet
	if docker := runner.NewDockerService("docker", constants.DockerdPath, runner.GetDockerCgroupDriver(), runner.GetRegistry(clusterconfig.Registry.Domain, clusterconfig.Registry.Port)); !docker.CheckServiceConfig() {
		if err := docker.CreateAndStartService(runner.DockerServiceTemplate, constants.WhichDockerd); err != nil {
			// FIX: previous message wrongly referred to the ckeadm wait service
			return fmt.Errorf("error while set up docker service: %s", err.Error())
		}
	}

	// run the ansible "cluster-component" playbook inside a docker container
	if err := runner.CreateRunerWithComponent(ansibleArgs.ContainerName, ansibleArgs.ImageName, ansibleArgs.InvFilePath, ansibleArgs.AllFilePath, "cluster-component", ansibleArgs.PrivateKey, fmt.Sprintf("%s:%s", ansibleArgs.SourcesPath, constants.SourceContainerPath)); err != nil {
		return fmt.Errorf("error while run container: %s", err.Error())
	}

	logrus.Infof("see logs under \"%s\", or container [%s] and [%s]\n", constants.AnsibleLogDir, constants.CkectlContainerName, ansibleArgs.ContainerName)

	logrus.Infof("view the progress can use: \n")
	logrus.Infof("\tcommand: ckeadm status -f %s", watchConfig.ConfigPath)
	logrus.Infof("or\trequest: curl %s:%d%s%s", watchConfig.ListenAddress, watchConfig.ListenPort, constants.CkeadmWaitProgressPattern, watchConfig.CluserID)

	return nil
}

// CreateCR copies the generated CR source file onto the cluster node at
// clusterAddress via scp, waits for the in-cluster svc-ckeadm wait service to
// become reachable on its NodePort, and then applies the CRs with kubectl
// over ssh, streaming the command output into the log.
//
// clusterAddress: address of a master node reachable over ssh
// keyFile:        ssh private key used for both the ssh session and scp
// sourceFile:     local path of the CR yaml to copy
// sourceDir:      remote directory to copy into and apply from
func CreateCR(clusterAddress, keyFile, sourceFile, sourceDir string) error {
	// create the remote target directory first
	var mkdirCmd = fmt.Sprintf("mkdir -p %s", constants.SourcesDir)
	_, _, _, err := util.SetupSSH(clusterAddress, keyFile).Run(mkdirCmd, 60*time.Second)
	if err != nil {
		return fmt.Errorf("make crs dir failed: %s", err.Error())
	}

	// copy the CR file over by shelling out to scp (not via the ssh session)
	scpCmd := fmt.Sprintf("scp -o StrictHostKeyChecking=no -i %s -r %s %s:%s", keyFile, sourceFile, clusterAddress, sourceDir)
	// #nosec
	cmd := exec.Command("sh", "-c", scpCmd)
	output, err := cmd.CombinedOutput()
	logrus.Infof("scp crs output: %s", string(output))
	if err != nil {
		return fmt.Errorf("execute scp cmd failed, err: %s", err.Error())
	}

	// discover the NodePort of the svc-ckeadm service on the remote cluster
	var waitPort int
	getPortCmd := `kubectl get svc -n kube-system svc-ckeadm -o yaml | grep nodePort: | awk '{print $3}'`
	stdout, _, _, err := util.SetupSSH(clusterAddress, keyFile).Run(getPortCmd, 60*time.Second)
	if err != nil {
		return fmt.Errorf("get service svc-ckeadm port failed, err: %s", err.Error())
	} else {
		// a port of 0 means the grep/awk pipeline produced no usable value
		waitPort, _ = strconv.Atoi(strings.TrimSpace(stdout))
		if waitPort == 0 {
			return fmt.Errorf("get service svc-ckeadm port failed ")
		}
	}
	logrus.Infof("address: %s, port: %v", clusterAddress, waitPort)
	// block until the remote ckeadm-wait endpoint answers.
	// NOTE(review): this loop has no timeout — it spins forever if the
	// service never comes up; consider bounding the retries.
	wait := logscmd.NewCkeadmWaitService("", "", clusterAddress, waitPort, "", false, "", "")
	for {
		if wait.IsRunning() == true {
			break
		}
		logrus.Infof("wait for ckeadm-wait working")
		time.Sleep(5 * time.Second)
	}

	var ckectlCmd = fmt.Sprintf(`kubectl apply -f %s`, sourceDir)
	// stream the kubectl output; log lines are not wrapped/decorated yet
	stdoutChan, stderrChan, doneChan, errChan, err := util.SetupSSH(clusterAddress, keyFile).Stream(ckectlCmd, 60*time.Second)
	if err != nil {
		return err
	} else {
		// read from the output channels until the done signal is passed.
		// NOTE(review): the flag received from doneChan appears to be false
		// on timeout — confirm against util.SetupSSH's Stream contract.
		isTimeout := true
	loop1:
		for {
			select {
			case isTimeout = <-doneChan:
				break loop1
			case outline := <-stdoutChan:
				logrus.Info(outline)
			case errline := <-stderrChan:
				logrus.Info(errline)
			case err = <-errChan:
			}
		}

		// get exit code or command error.
		if err != nil {
			return err
		}

		// command time out
		if !isTimeout {
			return fmt.Errorf("create cr failed, err: command timeout")
		}
	}
	return nil
}

// ClusterCreate starts the legacy (component-less) cluster installation. The
// ansible configuration must already have been generated before calling it;
// the ansible container is started detached and this function returns while
// the install keeps running in the background.
//
// Steps:
//  1. remove old log "done" marker files for this cluster id
//  2. create the watch service used to report install progress
//  3. configure and start docker if it has not been configured yet
//  4. run the ansible "cluster" playbook inside a docker container
func ClusterCreate(ansibleArgs *ckeadm.AnsibleDockerPerms, clusterconfig *ckeadm.CkeAdmKubernetesConfig) error {
	var watchConfig = clusterconfig.WatchConfig

	// clean logs belonging to this clusterID first
	if err := util.DeleteMatchFiles(fmt.Sprintf("%s/%s", watchConfig.LogCacheDir, watchConfig.CluserID), fmt.Sprintf(".*%s", strings.ReplaceAll(constants.AnsibleUpDoneFile, ".", "\\."))); err != nil {
		logrus.Debugf("error while delete logs files: %s", err.Error())
	}

	// unconditionally (re)create and start the ckeadm-wait progress service
	if err := logscmd.NewCkeadmWaitService(constants.CkeadmWatchServiceName, constants.WhichCkeadm, watchConfig.ListenAddress, watchConfig.ListenPort, watchConfig.LogCacheDir, false, "", "").
		CreateAndStartService(logscmd.CkeadmWaitServiceTemplate, os.Args[0]); err != nil {
		return fmt.Errorf("error while set up ckeadm wait service: %s", err.Error())
	}

	// configure and start docker if it has not been configured yet
	if docker := runner.NewDockerService("docker", constants.DockerdPath, runner.GetDockerCgroupDriver(), runner.GetRegistry(clusterconfig.Registry.Domain, clusterconfig.Registry.Port)); !docker.CheckServiceConfig() {
		if err := docker.CreateAndStartService(runner.DockerServiceTemplate, constants.WhichDockerd); err != nil {
			// FIX: previous message wrongly referred to the ckeadm wait service
			return fmt.Errorf("error while set up docker service: %s", err.Error())
		}
	}

	// run the ansible "cluster" playbook inside a docker container (detached)
	if err := runner.CreateRuner(ansibleArgs.ContainerName, ansibleArgs.ImageName, ansibleArgs.InvFilePath, ansibleArgs.AllFilePath, "cluster", ansibleArgs.PrivateKey, fmt.Sprintf("%s:%s", ansibleArgs.SourcesPath, constants.SourceContainerPath)); err != nil {
		return fmt.Errorf("error while run container: %s", err.Error())
	}

	logrus.Infof("running in background ...\n")
	logrus.Infof("see logs under \"%s\", or container [%s] and [%s]\n", constants.AnsibleLogDir, constants.CkectlContainerName, ansibleArgs.ContainerName)

	logrus.Infof("view the progress can use: \n")
	logrus.Infof("\tcommand: ckeadm status -f %s", watchConfig.ConfigPath)
	logrus.Infof("or\trequest: curl %s:%d%s%s", watchConfig.ListenAddress, watchConfig.ListenPort, constants.CkeadmWaitProgressPattern, watchConfig.CluserID)

	return nil
}

// GetAnsibleConfigPath returns the full paths of the three generated ansible
// configuration files (inventory, all.yml, sources) for the given cluster.
func GetAnsibleConfigPath(config *ckeadm.CkeAdmKubernetesConfig) (inv, all, source string) {
	base := fmt.Sprintf("%s/%s", constants.CkeDeployConfigPath, config.ClusterName)
	inv = base + "/myinventory.cfg"
	all = base + "/all.yml"
	source = base + "/" + constants.SourcesFile
	return inv, all, source
}

// WriteConfigFile renders the ansible inventory and all.yml files from the
// given configuration. A nil configuration is rejected up front.
func WriteConfigFile(config *ckeadm.CkeAdmKubernetesConfig, invPath string, allPath string) (err error) {
	if config != nil {
		return writeConfigFile(config, invPath, allPath)
	}
	return fmt.Errorf("configration is null, cannot use it")
}

// WriteComponentConfigFile renders the ansible inventory and all.yml files
// from the given configuration.
// NOTE(review): the components parameter is currently unused — the output is
// identical to WriteConfigFile; confirm whether component-specific rendering
// was intended here.
func WriteComponentConfigFile(config *ckeadm.CkeAdmKubernetesConfig, invPath string, allPath string, components *[]v1alpha1.CkeComponent) (err error) {
	if config != nil {
		return writeConfigFile(config, invPath, allPath)
	}
	return fmt.Errorf("configration is null, cannot use it")
}

// ImageGetFromConfig resolves the ansible deploy image name from the cluster
// configuration; a nil configuration yields an empty image name.
func ImageGetFromConfig(cfg *ckeadm.CkeAdmKubernetesConfig) string {
	if cfg != nil {
		return runner.GetImage(cfg.Registry.Domain, cfg.Registry.Port, constants.AnsibleImageRepo, cfg.Version, cfg.ClusterParameters.ImageManifestEnabled)
	}
	return ""
}

// writeConfigFile generates the inventory file and the all.yml variable file
// from config and writes them to the given paths, backing up any previous
// versions first (backup failure is non-fatal).
func writeConfigFile(config *ckeadm.CkeAdmKubernetesConfig, nodesFilepath string, allFilepath string) error {
	// best-effort backup of previously generated files
	if err := util.BackupFiles(nodesFilepath, allFilepath); err != nil {
		logrus.Warningf("unable to back up config file: %s, %s\n", nodesFilepath, allFilepath)
	}

	inventory, err := GetInventoryFile(config)
	if err != nil {
		return err
	}
	if err = util.WriteFile(nodesFilepath, inventory); err != nil {
		return err
	}

	allContent, err := GetAllConfigFile(config)
	if err != nil {
		return err
	}
	return util.WriteFile(allFilepath, allContent)
}

// AddSubAnnotations appends " name=value" to *all, where value is the
// annotation override when non-empty and dvalue otherwise. When emptyskip is
// set and both candidate values are empty, nothing is appended. A nil all
// pointer is a no-op.
func AddSubAnnotations(all *string, emptyskip bool, name string, dvalue string, annotation string) {
	if all == nil {
		return
	}
	if emptyskip && dvalue == "" && annotation == "" {
		return
	}
	value := annotation
	if value == "" {
		value = dvalue
	}
	*all += " " + name + "=" + value
}

// GetInventoryFile renders the ansible inventory content for config: every
// node goes into the "all" group with its annotation-driven variables, each
// node role maps to a kube-* group, and "k8s-cluster" is composed of the
// master and worker groups.
func GetInventoryFile(config *ckeadm.CkeAdmKubernetesConfig) (string, error) {
	groups := runner.NewGroupMap()

	// renderNode builds the "all" group entry for one node:
	// "<name> ip=<address>" plus optional annotation-derived key=value pairs.
	renderNode := func(node *ckeadm.CkeConfigNode) string {
		entry := fmt.Sprintf("%s ip=%s", node.Name, node.Address)
		if node.Annotations == nil {
			node.Annotations = make(map[string]string, 0)
		}

		AddSubAnnotations(&entry, true, "ansible_host", node.Address, node.Annotations["install_net_address"])
		AddSubAnnotations(&entry, true, "physical_machine_ip", node.Address, node.Annotations["physical_machine_ip"])
		AddSubAnnotations(&entry, true, "node_resources", "", node.Annotations["resources"])
		AddSubAnnotations(&entry, true, "instanceID", "", node.Annotations["instanceID"])
		AddSubAnnotations(&entry, true, "ansible_ssh_port", "22", util.ItoaEmptyZeao(config.AnsibleSshPort))
		AddSubAnnotations(&entry, true, "max_pods", "", util.ItoaEmptyZeao(node.MaxPods))
		return entry
	}

	// groupFor maps a node role to its ansible inventory group name.
	groupFor := func(role string) string {
		switch role {
		case "worker":
			return "kube-node"
		case "scale-down":
			return "scale_down"
		case "scale", "master", "monitor", "node":
			return fmt.Sprintf("kube-%s", role)
		}
		return role
	}

	for i := range config.Nodes {
		groups.AppendGroup("all", renderNode(&config.Nodes[i]))
		for _, role := range config.Nodes[i].Roles {
			groups.AppendGroup(groupFor(role), config.Nodes[i].Name)
		}
	}

	groups.AppendSubGroup(groupFor("k8s-cluster"), groupFor("master"), groupFor("worker"))
	return groups.GetContext(), nil
}

// GetAllConfigFile renders the ansible "all.yml" variable content from the
// cluster configuration: insecure registries, node labels/taints, apiserver
// vip handling, yaml-marshalled sub-configs, addon/plugin switches and
// component switches.
func GetAllConfigFile(config *ckeadm.CkeAdmKubernetesConfig) (string, error) {

	// build the docker --insecure-registry flags from the registry list
	var all, externalRegistry string
	for _, value := range config.Docker.Registries {
		if len(externalRegistry) != 0 {
			externalRegistry += " "
		}
		externalRegistry += "--insecure-registry=" + value
	}

	// collect per-node labels and taints into the all.yml label list
	var labels []ckeadm.AllLabel
	for _, node := range config.Nodes {
		for _, label := range node.Labels {
			labels = append(labels, ckeadm.AllLabel{
				Detail:  label,
				Members: []string{node.Name},
				Taint:   "",
			})
		}
		for _, taint := range node.Taints {
			labels = append(labels, ckeadm.AllLabel{
				Detail:  "",
				Members: []string{node.Name},
				Taint:   taint,
			})
		}
	}

	var vipDisabled = false
	// only the vip used by the apiserver is supported here; other vips are
	// not used as the master vip
	var clusterVip = ckeadm.AllClusterVip{
		MasterVip:       config.ClusterVip.Ip,
		VirtualRouterId: config.ClusterVip.RouterId,
		VipInterface:    config.ClusterVip.Interface,
	}
	// backward compatibility with the old vip configuration format: take the
	// apiserver vip from the list, keep the rest in VipList
	if clusterVip.MasterVip == "" && clusterVip.VirtualRouterId == 0 {
		for _, vip := range config.ClusterVips {
			if vip.IsApiserver {
				clusterVip.MasterVip = vip.Ip
				clusterVip.VipInterface = vip.Interface
				clusterVip.VirtualRouterId = vip.RouterId
			} else {
				clusterVip.VipList = append(clusterVip.VipList, vip)
			}
		}
	}
	if clusterVip.MasterVip == "" {
		vipDisabled = true
	}

	// miscellaneous top-level all.yml variables not covered by the
	// sub-configs marshalled below
	var otherConfig = struct {
		Preconfig              []string           `yaml:"docker_preconfiged,omitempty"`
		ClusterId              string             `yaml:"cluster_id,omitempty"`
		AnsibleImageName       string             `yaml:"ansible_image_name,omitempty"`
		ExternalRegistry       string             `yaml:"external_registry,omitempty"`
		Label                  []ckeadm.AllLabel  `yaml:"labels,omitempty"`
		VipDisabled            bool               `yaml:"loadbalancer_apiserver_localhost"`
		CkeadmWaitAddress      string             `yaml:"ckeadm_wait_address,omitempty"`
		NginxKubeApiserverPort int                `yaml:"nginx_kube_apiserver_port,omitempty"`
		BaseImageRepo          string             `yaml:"base_image_repo,omitempty"`
		RegistryProxy          string             `yaml:"registry_proxy,omitempty"`
		LoadbalancerApiserver  ckeadm.LBApiserver `yaml:"loadbalancer_apiserver,omitempty"`
	}{
		Preconfig:              config.DockerPreconfiged,
		ClusterId:              config.ClusterName,
		AnsibleImageName:       ImageGetFromConfig(config),
		ExternalRegistry:       externalRegistry,
		Label:                  labels,
		VipDisabled:            vipDisabled,
		NginxKubeApiserverPort: constants.NginxKubeApiserverPort,
		BaseImageRepo:          "{{ registry_domain }}",
		RegistryProxy:          "{{ base_image_repo }}",
	}
	if config.WatchConfig != nil {
		otherConfig.CkeadmWaitAddress = fmt.Sprintf("http://%s:%d%s", config.WatchConfig.ListenAddress, config.WatchConfig.ListenPort, constants.CkeadmWaitInfoPattern)
	}
	if config.Registry.Port > 0 {
		otherConfig.BaseImageRepo = "{{ registry_domain }}:{{ registry_port }}"
	}
	if !otherConfig.VipDisabled {
		otherConfig.LoadbalancerApiserver.Address = "{{ master_vip }}"
		otherConfig.LoadbalancerApiserver.Port = "{{ nginx_kube_apiserver_port }}"
	}

	// backward compatibility with the old api field names
	if len(config.ClusterParameters.NtpServers) == 0 && len(config.ClusterParameters.ExternalNtpServer) != 0 {
		config.ClusterParameters.NtpServers = config.ClusterParameters.ExternalNtpServer
	}
	if len(config.Dns.ExternalServers) == 0 && len(config.Dns.ExternalDns) != 0 {
		config.Dns.ExternalServers = config.Dns.ExternalDns
	}

	// marshal each sub-config to yaml and concatenate the fragments
	for _, part := range []interface{}{config.ClusterParameters, clusterVip, config.Registry, config.Calico, config.Docker, config.Dns, config.Etcd, otherConfig, util.TypeToInterface(config.ExtraArgs)} {
		if constants.Debug {
			fmt.Printf("%+v\n", part)
		}

		// marshal errors are silently skipped; only valid fragments are appended
		if cfg, err := yaml.Marshal(part); part != nil && err == nil {
			all = fmt.Sprintf("%s\n%s", all, string(cfg))
		}
	}

	// template for the per-addon enable switches and parameters
	var AddonsTemplate = template.Must(template.New("addons").Parse(`
use_{{ .Name }}: true
{{ .Name }}_enabled: true

{{- if .Parameters }}
{{ .Name }}_parameters:
{{- range $key, $value := .Parameters }}
  {{ $key }}: {{ $value }}
{{- end }}
{{- end }}

`[1:]))

	// addons && plugins
	for _, addon := range append(config.Addons, config.Plugins...) {
		var buff = bytes.NewBuffer([]byte{})
		_ = AddonsTemplate.Execute(buff, addon)
		all = fmt.Sprintf("%s%s", all, buff.String())
	}

	// process component switches (enable flags only, no parameters)
	var ComponentTemplate = template.Must(template.New("component").Parse(`
use_{{ .Name }}: true
{{ .Name }}_enabled: true
`[1:]))

	for _, component := range config.Components {
		var buff = bytes.NewBuffer([]byte{})
		_ = ComponentTemplate.Execute(buff, component)
		all = fmt.Sprintf("%s%s", all, buff.String())
	}

	// fix: when dns has no data the generated all.yaml contains a bare "{}"
	// line, which breaks parsing — strip it out
	return strings.ReplaceAll(all, "\n{}\n", "\n"), nil
}
