package gcloud_k8s

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"go.uber.org/zap"
	"io"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"os"
	"os/exec"
	"path/filepath"
	logger "platon-tools/go-service/go-logger"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"google.golang.org/api/container/v1"
	"google.golang.org/api/option"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	"k8s.io/client-go/tools/clientcmd"
	metrics "k8s.io/metrics/pkg/client/clientset/versioned"
)

const (
	// minUpdateTimer is the default (and minimum) pod-list refresh
	// interval, used when GCloudConfig.Delay is outside (30, 300] seconds.
	minUpdateTimer = 30 * time.Second
)

// ClustersEle describes one GKE cluster as listed by the container API:
// its name, lifecycle status, endpoint IP and position in the listing.
type ClustersEle struct {
	Name   string `json:"name"`   // cluster name
	Status string `json:"status"` // lifecycle status string from the API
	IP     string `json:"ip"`     // cluster endpoint address
	Index  int    `json:"index"`  // position within the listing response
}

// GCloudConfig is the JSON-serializable configuration handed to Start:
// gcloud credentials, project/region identifiers, the target cluster and
// the pod-list refresh delay.
type GCloudConfig struct {
	Account       string   `json:"account"`        // gcloud service-account email
	KeyFile       string   `json:"key_file"`       // path to the service-account key file
	ProjectID     string   `json:"project_id"`     // GCP project ID
	ComputeRegion string   `json:"compute_region"` // GCP compute region
	Cluster       string   `json:"cluster"`        // cluster identifier (usage not visible here — verify against callers)
	YamlPath      string   `json:"yaml_path"`      // directory holding the cloudbuild YAML files; used as runCmd's working dir
	Chains        []string `json:"chains"`         // chain names (usage not visible here — verify against callers)
	ClusterName   string   `json:"cluster_name"`   // cluster used for kubeconfig credentials in Start
	Delay         int64    `json:"delay"`          // pod-list refresh delay in seconds; (30, 300] accepted, otherwise minUpdateTimer
}

// GCPlatonK8s manages access to a GKE-hosted PlatON cluster: it activates
// gcloud auth, builds kubernetes/REST clients and keeps a periodically
// refreshed pod list in the background.
type GCPlatonK8s struct {
	// Config holds the settings supplied to Start.
	Config *GCloudConfig

	// clusterList caches clusters returned by getClusters.
	clusterList []ClustersEle

	// Kubernetes API handles built during initialization.
	k8sClientset *kubernetes.Clientset
	restClient   *rest.RESTClient
	config       *rest.Config

	// ctx/docancel bound the lifetime of the background refresh loop.
	ctx      context.Context
	docancel context.CancelFunc

	// metricsList and podList cache the latest metrics/pod data;
	// podList is guarded by rw. delay is the refresh interval.
	metricsList *PodMetricsList
	podList     []PodEle
	rw          sync.RWMutex
	delay       time.Duration

	// running is 1 while the service is active; accessed atomically.
	running int32
}

// Start binds the service to ctx, validates the refresh delay, performs
// gcloud auth and kubernetes client initialization, and launches the
// background pod-list refresh loop.
//
// Delay values outside (30, 300] seconds fall back to minUpdateTimer.
func (gc *GCPlatonK8s) Start(ctx context.Context, config GCloudConfig) error {
	gc.ctx, gc.docancel = context.WithCancel(ctx)
	gc.Config = &config

	if config.Delay <= 30 || config.Delay > 300 {
		gc.delay = minUpdateTimer
	} else {
		gc.delay = time.Duration(config.Delay) * time.Second
	}

	// todo: the cluster must already exist; only a single cluster is
	// supported for now.
	if _, err := gc.getClusters(); err != nil {
		return err
	}

	if _, err := gc.InitGCloud(); err != nil {
		logger.Logger.Error("init gcloud auth failed", zap.Error(err))
		return err
	}

	// todo: should eventually be user-driven; use the configured default.
	if err := gc.initK8sConfig(gc.Config.ClusterName); err != nil {
		return err
	}

	if err := gc.initRestK8s(); err != nil {
		return err
	}

	// Safe without the lock: runLoop has not been started yet.
	gc.podList = gc.GetPodsFromRest()

	// BUG FIX: mark the service as running before launching the loop.
	// Previously this flag was never set, so IsRunning always reported
	// false and Close never cancelled the context.
	atomic.StoreInt32(&gc.running, 1)
	go gc.runLoop()

	return nil
}

// runLoop refreshes the cached pod list every gc.delay until the service
// context is cancelled, then closes the service.
func (gc *GCPlatonK8s) runLoop() {
	defer gc.Close()

	sugar := logger.Logger.Sugar()
	ticker := time.NewTicker(gc.delay)
	defer ticker.Stop()

	for {
		select {
		case <-gc.ctx.Done():
			sugar.Infof("platon k8s loop quit timer: %s", time.Now().String())
			return
		case <-ticker.C:
			gc.rw.Lock()
			gc.podList = gc.GetPodsFromRest()
			gc.rw.Unlock()
			// No Reset needed: a Ticker fires repeatedly on its own;
			// the previous Reset(gc.delay) call was redundant.
		}
	}
}

// Close stops the background loop and cancels the service context exactly
// once. The compare-and-swap makes concurrent Close calls safe: the
// original check-then-store sequence allowed two callers to both observe
// running==1.
func (gc *GCPlatonK8s) Close() error {
	if atomic.CompareAndSwapInt32(&gc.running, 1, 0) {
		gc.docancel()
	}

	return nil
}

// Name reports the fixed identifier of this service component.
func (gc *GCPlatonK8s) Name() string {
	const serviceName = "PLATON"
	return serviceName
}

// IsRunning reports whether the background refresh loop is active.
// The boolean expression replaces the original if-true/return-false form.
func (gc *GCPlatonK8s) IsRunning() bool {
	return atomic.LoadInt32(&gc.running) == 1
}

// getClusters lists the GKE clusters of the configured project across all
// zones and converts them into the package's ClustersEle representation.
func (gc *GCPlatonK8s) getClusters() ([]ClustersEle, error) {
	containerService, err := container.NewService(gc.ctx, option.WithCredentialsFile(gc.Config.KeyFile))
	if err != nil {
		logger.Logger.Error("new gcloud container service failed", zap.Error(err))
		return nil, err
	}

	// Zone "-" asks the API for clusters in every zone of the project.
	list, err := containerService.Projects.Zones.Clusters.List(gc.Config.ProjectID, "-").Do()
	if err != nil {
		logger.Logger.Error("get clusters list failed", zap.Error(err))
		return nil, err
	}

	// Pre-size: the number of clusters is known up front.
	clusters := make([]ClustersEle, 0, len(list.Clusters))
	for i, v := range list.Clusters {
		clusters = append(clusters, ClustersEle{
			Name:   v.Name,
			Status: v.Status,
			IP:     v.Endpoint,
			Index:  i,
		})
	}

	return clusters, nil
}

// InitGCloud activates the configured service account so that subsequent
// gcloud invocations are authenticated against the configured project.
// It returns the raw command output on success.
func (gc *GCPlatonK8s) InitGCloud() ([]byte, error) {
	keyFileArg := fmt.Sprintf("--key-file=%s", gc.Config.KeyFile)
	projectArg := fmt.Sprintf("--project=%s", gc.Config.ProjectID)

	out, err := gc.runCmd("gcloud", "auth", "activate-service-account", gc.Config.Account, keyFileArg, projectArg)
	if err != nil {
		logger.Logger.Error("init gcloud", zap.Error(err))
		return nil, err
	}

	return out, nil
}

// runShell executes the given shell script via /bin/bash inside dir,
// streaming its stdout and stderr to the service logger.
func (gc *GCPlatonK8s) runShell(file, dir string, args ...string) error {
	argv := append([]string{file}, args...)
	cmd := exec.Command("/bin/bash", argv...)
	cmd.Dir = dir

	logger.Logger.Info("run /bin/bash", zap.String("shell file", file), zap.Strings("shell args", args))

	// BUG FIX: pipe creation errors were previously discarded with _,
	// which would hand nil readers to syncLog.
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		logger.Logger.Error("stdout pipe", zap.Error(err))
		return err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		logger.Logger.Error("stderr pipe", zap.Error(err))
		return err
	}

	go syncLog("", stdout)
	go syncLog("", stderr)

	if err := cmd.Start(); err != nil {
		logger.Logger.Error("cmd start", zap.Error(err))
		return err
	}

	if err := cmd.Wait(); err != nil {
		logger.Logger.Error("cmd run err", zap.Error(err))
		return err
	}

	return nil
}

// runCmd runs command with args inside the configured YamlPath directory,
// capturing stdout and stderr. On success it returns stdout with a single
// trailing newline removed; on failure it returns an error built from
// stderr (or the exec error when stderr is empty).
func (gc *GCPlatonK8s) runCmd(command string, args ...string) ([]byte, error) {
	cmd := exec.Command(command, args...)
	logger.Logger.Info("run cmd", zap.Strings("CMD", cmd.Args))

	var (
		out    bytes.Buffer
		errOut bytes.Buffer
	)

	cmd.Stdout = &out
	cmd.Stderr = &errOut
	cmd.Dir = gc.Config.YamlPath

	// Run combines Start and Wait.
	if err := cmd.Run(); err != nil {
		logger.Logger.Error("cmd run err", zap.Error(err))
		// BUG FIX: the original returned errors.New(errOut.String())
		// even when stderr was empty (e.g. a Start failure), losing the
		// real error. Fall back to the exec error in that case.
		if msg := errOut.String(); msg != "" {
			return nil, errors.New(msg)
		}
		return nil, err
	}

	// BUG FIX: the original stripped the last byte unconditionally
	// whenever len > 1, even when it was not a newline.
	return bytes.TrimSuffix(out.Bytes(), []byte("\n")), nil
}

// BuileImage grants the Cloud Build service account the IAM roles it needs
// and then submits the image build job defined by
// cloudbuild-build-image.yaml under the configured YamlPath.
//
// NOTE: the exported name keeps its historical typo ("Buile") so existing
// callers remain compatible; it means "BuildImage".
func (gc *GCPlatonK8s) BuileImage() error {
	projectNumber, err := gc.getProjectNumber()
	if err != nil {
		logger.Logger.Error("build image", zap.Error(err))
		return err
	}

	member := fmt.Sprintf("--member=serviceAccount:%s@cloudbuild.gserviceaccount.com", projectNumber)
	aipb := "add-iam-policy-binding"

	// Bind each required role in turn; the order matches the original
	// hand-unrolled sequence.
	roles := []string{
		"roles/iam.serviceAccountTokenCreator",
		"roles/container.clusterAdmin",
		"roles/container.admin",
		"roles/compute.admin",
		"roles/iam.serviceAccountUser",
		"roles/compute.storageAdmin",
	}
	for _, role := range roles {
		out, err := gc.runCmd("gcloud", "projects", aipb, gc.Config.ProjectID, member, "--role", role)
		if err != nil {
			logger.Logger.Error("add-iam-policy-binding failed", zap.Error(err))
			return err
		}
		logger.Logger.Debug("add-iam-policy-binding", zap.String("logs", string(out)))
	}

	out, err := gc.runCmd("gcloud", "builds", "submit",
		fmt.Sprintf("--config=%s/cloudbuild-build-image.yaml", gc.Config.YamlPath),
		fmt.Sprintf("--substitutions=_PROJECT_ID=%s", gc.Config.ProjectID))
	if err != nil {
		logger.Logger.Error("builds failed", zap.Error(err))
		return err
	}

	outs := strings.Split(string(out), "\n")
	logger.Logger.Debug("build image", zap.Strings("logs", outs))

	return nil
}

// AddNode submits the add-node Cloud Build job, passing the substitution
// variables that identify the project, region, chain, cluster and the
// name of the node to add.
func (gc *GCPlatonK8s) AddNode(nodeName, clusterName, chainName string) error {
	subs := fmt.Sprintf(
		"--substitutions=_PROJECT_ID=%s,_NODE_NAME=%s,_GCP_REGION=%s,_CHAIN_NAME=%s,_CLUSTER_NAME=%s",
		gc.Config.ProjectID, nodeName, gc.Config.ComputeRegion, chainName, clusterName,
	)

	out, err := gc.runCmd("gcloud", "builds", "submit", "--config=./add-node/cloudbuild.yaml", subs)
	if err != nil {
		logger.Logger.Error("add node failed", zap.Error(err))
		return err
	}

	logger.Logger.Debug("add node", zap.Strings("logs", strings.Split(string(out), "\n")))

	return nil
}

// getProject reads the active gcloud project and verifies it matches the
// project this instance was configured with; a mismatch is an error.
func (gc *GCPlatonK8s) getProject() (string, error) {
	out, err := gc.runCmd("gcloud", "config", "get-value", "project")
	if err != nil {
		return "", err
	}

	current := string(out)
	if current == gc.Config.ProjectID {
		return gc.Config.ProjectID, nil
	}

	mismatch := fmt.Errorf("The configuration parameters (project=%s) are inconsistent with the initialization parameters(project=%s)", current, gc.Config.ProjectID)
	logger.Logger.Error("get project failed", zap.Error(mismatch))
	return "", mismatch
}

// initK8sConfig fetches kubeconfig credentials for the named cluster into
// the local kubeconfig via `gcloud container clusters get-credentials`.
func (gc *GCPlatonK8s) initK8sConfig(clusterName string) error {
	if _, err := gc.runCmd(
		"gcloud", "container", "clusters", "get-credentials", clusterName,
		"--region", gc.Config.ComputeRegion,
		"--project", gc.Config.ProjectID,
	); err != nil {
		logger.Logger.Error("init k8s Config to local failed", zap.Error(err))
		return err
	}

	return nil
}

// getProjectNumber resolves the numeric project number belonging to the
// configured (and verified) project ID.
func (gc *GCPlatonK8s) getProjectNumber() (string, error) {
	project, err := gc.getProject()
	if err != nil {
		return "", err
	}

	filter := fmt.Sprintf("--filter=%s", project)
	number, err := gc.runCmd("gcloud", "projects", "list", filter, "--format=value(PROJECT_NUMBER)")
	if err != nil {
		return "", err
	}

	return string(number), nil
}

// getRegion queries gcloud for its configured compute/region value.
// The raw command output is returned alongside any execution error.
func (gc *GCPlatonK8s) getRegion() (string, error) {
	output, err := gc.runCmd("gcloud", "config", "get-value", "compute/region")
	return string(output), err
}

// StartImage pins gcloud's active project and compute region to the
// configured values, then submits the cloudbuild-demo.yaml build with the
// project/region/node/cluster substitution variables.
func (gc *GCPlatonK8s) StartImage(clusterName, nodeName string) error {
	if _, err := gc.runCmd("gcloud", "config", "set", "project", gc.Config.ProjectID); err != nil {
		logger.Logger.Error("Start image failed", zap.Error(err))
		return err
	}

	if _, err := gc.runCmd("gcloud", "config", "set", "compute/region", gc.Config.ComputeRegion); err != nil {
		logger.Logger.Error("Start image failed", zap.Error(err))
		return err
	}

	subs := strings.Join([]string{
		fmt.Sprintf("--substitutions=_PROJECT_ID=%s", gc.Config.ProjectID),
		fmt.Sprintf("_GCP_REGION=%s", gc.Config.ComputeRegion),
		fmt.Sprintf("_NODE_NAME=%s", nodeName),
		fmt.Sprintf("_CLUSTER_NAME=%s", clusterName),
	}, ",")

	out, err := gc.runCmd("gcloud", "builds", "submit", "--config=./cloudbuild-demo.yaml", subs)
	if err != nil {
		logger.Logger.Error("build failed", zap.Error(err))
		return err
	}

	logger.Logger.Debug("start images", zap.String("logs", string(out)))

	return nil
}

// MetricsK8s connects to the cluster's metrics API using the local
// kubeconfig and logs the pod metrics of the "default" namespace as JSON
// at debug level.
func (gc *GCPlatonK8s) MetricsK8s() error {
	// BUG FIX: flag.String panics on duplicate registration; if another
	// init path already registered "kubeconfig", reuse its value.
	var kubeconfigPath string
	if f := flag.Lookup("kubeconfig"); f != nil {
		kubeconfigPath = f.Value.String()
	} else {
		var kubeconfig *string
		if home := homeDir(); home != "" {
			// BUG FIX: default path used ".kube/Config" (capital C);
			// use the conventional lowercase "config" like the rest
			// of this file.
			kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
		} else {
			kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
		}
		flag.Parse()
		kubeconfigPath = *kubeconfig
	}

	var master string
	config, err := clientcmd.BuildConfigFromFlags(master, kubeconfigPath)
	if err != nil {
		logger.Logger.Error("build k8s config failed", zap.Error(err))
		return err
	}

	mc, err := metrics.NewForConfig(config)
	if err != nil {
		logger.Logger.Error("create k8s failed", zap.Error(err))
		return err
	}

	pods, err := mc.MetricsV1beta1().PodMetricses("default").List(gc.ctx, metav1.ListOptions{})
	if err != nil {
		// BUG FIX: this error was previously overwritten by the
		// json.Marshal call without ever being checked.
		logger.Logger.Error("list pod metrics failed", zap.Error(err))
		return err
	}

	buf, err := json.Marshal(pods)
	if err != nil {
		logger.Logger.Error("to json failed", zap.Error(err))
		return err
	}
	logger.Logger.Sugar().Debug("metrics k8s ", string(buf))
	return nil
}

// initK8s builds a kubernetes clientset from the local kubeconfig and
// stores it on the receiver for later use.
func (gc *GCPlatonK8s) initK8s() error {
	// BUG FIX: flag.String panics on duplicate registration; if another
	// init path already registered "kubeconfig", reuse its value.
	var kubeconfigPath string
	if f := flag.Lookup("kubeconfig"); f != nil {
		kubeconfigPath = f.Value.String()
	} else {
		var kubeconfig *string
		if home := homeDir(); home != "" {
			kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
		} else {
			kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
		}
		flag.Parse()
		kubeconfigPath = *kubeconfig
	}

	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		logger.Logger.Error("build k8s config failed", zap.Error(err))
		return err
	}

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		logger.Logger.Error("create k8s failed", zap.Error(err))
		return err
	}

	gc.k8sClientset = clientset

	return nil
}

// initRestK8s builds a core/v1 REST client from the local kubeconfig and
// stores both the client and the rest.Config on the receiver.
func (gc *GCPlatonK8s) initRestK8s() error {
	// BUG FIX: flag.String panics on duplicate registration; if another
	// init path already registered "kubeconfig", reuse its value.
	var kubeconfigPath string
	if f := flag.Lookup("kubeconfig"); f != nil {
		kubeconfigPath = f.Value.String()
	} else {
		var kubeconfig *string
		if home := homeDir(); home != "" {
			kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
		} else {
			kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
		}
		flag.Parse()
		kubeconfigPath = *kubeconfig
	}

	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		logger.Logger.Error("build k8s config failed", zap.Error(err))
		return err
	}

	// Target the core ("legacy") API group with the client-go scheme codecs.
	config.APIPath = "api"
	config.GroupVersion = &corev1.SchemeGroupVersion
	config.NegotiatedSerializer = scheme.Codecs

	restClient, err := rest.RESTClientFor(config)
	if err != nil {
		logger.Logger.Error("rest client", zap.Error(err))
		return err
	}

	gc.restClient = restClient
	gc.config = config

	return nil
}

//func (gc *GCPlatonK8s) execPod(name string) (string, error)  {
//
//	req := gc.restClient.
//		Post().
//		Resource("pods").
//		Name("full-node-platon-test-nw-2021090321-b7df9bc6-z887v").
//		Namespace("default").
//		SubResource("exec").
//		VersionedParams(&corev1.PodExecOptions{
//			Command: []string{"echo", "hello world"},
//			Stdin:   true,
//			Stdout:  true,
//			Stderr:  true,
//			TTY:     false,
//		}, scheme.ParameterCodec)
//	exec, err := remotecommand.NewSPDYExecutor(gc.config, "POST", req.URL())
//
//	if !terminal.IsTerminal(0) || !terminal.IsTerminal(1) {
//		fmt.Errorf("stdin/stdout should be terminal")
//	}
//
//	oldState, err := terminal.MakeRaw(0)
//	if err != nil {
//		fmt.Println(err)
//	}
//
//	defer terminal.Restore(0, oldState)
//
//	screen := struct {
//		io.Reader
//		io.Writer
//	}{os.Stdin, os.Stdout}
//
//	if err = exec.Stream(remotecommand.StreamOptions{
//		Stdin: screen,
//		Stdout: screen,
//		Stderr: screen,
//		Tty:    false,
//	}); err != nil {
//		fmt.Print(err)
//	}
//
//}

// homeDir returns the current user's home directory, preferring the POSIX
// HOME variable and falling back to USERPROFILE on Windows. Returns "" when
// neither is set.
func homeDir() string {
	for _, key := range []string{"HOME", "USERPROFILE"} {
		if dir := os.Getenv(key); dir != "" {
			return dir
		}
	}
	return ""
}

// syncLog streams reader line-by-line into the service logger. Because the
// logger appends a newline per call, partial lines are buffered in cache
// until the terminating newline (or EOF) arrives.
//
// NOTE: the prefix parameter is currently unused but kept for interface
// compatibility with existing callers.
func syncLog(prefix string, reader io.ReadCloser) {
	cache := ""
	buf := make([]byte, 1024)
	for {
		n, err := reader.Read(buf)
		if n > 0 {
			// BUG FIX: prepend the buffered partial line before
			// splitting, so a chunk without a newline no longer
			// flushes the old cache prematurely.
			parts := strings.Split(cache+string(buf[:n]), "\n")
			if len(parts) > 1 {
				logger.Logger.Sugar().Infof("%s", strings.Join(parts[:len(parts)-1], "\n"))
			}
			cache = parts[len(parts)-1]
		}
		if err != nil {
			// Flush any trailing partial line before exiting.
			if cache != "" {
				logger.Logger.Sugar().Infof("%s", cache)
			}
			// BUG FIX: the original cleared err and kept looping,
			// spinning forever on a closed pipe and leaking the
			// goroutine. Exit on any error; EOF and closed-pipe
			// errors are the normal end of stream.
			if err != io.EOF && !strings.Contains(err.Error(), "file already closed") {
				logger.Logger.Sugar().Errorf("syncLog read: %v", err)
			}
			return
		}
	}
}
