// Copyright (c) 2022 by Duguang.IO Inc. All Rights Reserved.
// Author: Ethan Liu
// Date: 2022-06-05 09:23:34

package command

import (
	"context"
	"errors"
	"fmt"
	"jianmu-worker-kube/livelog"
	logger "jianmu-worker-kube/logging"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	"os"
	"os/signal"
	"runtime/debug"
	"syscall"
	"time"

	"jianmu-worker-kube/client"
	"jianmu-worker-kube/engine"
	"jianmu-worker-kube/poller"
	"jianmu-worker-kube/runtime"
	"jianmu-worker-kube/untils/kube"

	"github.com/joho/godotenv"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
	"k8s.io/client-go/kubernetes"
)

// DaemonCmd is the CLI sub-command that starts the worker daemon.
// Envfile optionally points at a dotenv file that is loaded before the
// configuration is read from the environment.
type DaemonCmd struct {
	Envfile string `arg:"" optional:"" name:"envfile" help:"load the environment variable file." type:"path"`
}

// emptyContext is the root context the daemon derives its contexts from.
var emptyContext = context.Background()

// run builds the kubernetes client and engine, registers this worker with
// the remote server (pinging until it answers or the register timeout
// expires), re-attaches to pods that survived a restart, and then polls
// the server for new tasks until ctx is cancelled.
func (d *DaemonCmd) run(ctx context.Context, config Config) error {
	cli := client.New(
		config.Client.Address,
		config.Client.Secret,
		false,
		config.Worker.ID,
	)

	var (
		kubeClient *kubernetes.Clientset
		restConfig *rest.Config
		err        error
	)

	if path := config.Worker.Config; path != "" {
		// An explicit configuration path is used primarily for local
		// out-of-cluster testing.
		kubeClient, restConfig, err = kube.NewFromConfig((*kube.ClientConfig)(&config.KubernetesClient), path)
		if err != nil {
			logrus.WithError(err).
				Errorln("cannot load the kubernetes client from config")
			// Return instead of Fatalln so deferred cleanup in callers runs
			// and the caller decides how to exit.
			return fmt.Errorf("loading kubernetes client from %q: %w", path, err)
		}
	} else {
		// Without a configuration path, assume we run inside the cluster
		// and use the in-cluster service-account configuration.
		kubeClient, restConfig, err = kube.NewInCluster((*kube.ClientConfig)(&config.KubernetesClient))
		if err != nil {
			logrus.WithError(err).
				Errorln("cannot load the in-cluster kubernetes client")
			return fmt.Errorf("loading in-cluster kubernetes client: %w", err)
		}
	}

	// Log connectivity details; failures inside are informational only.
	TestKubeClient(kubeClient)

	kubeEngine := engine.New(kubeClient, restConfig,
		time.Duration(config.Engine.ContainerStartTimeout)*time.Second)

	worker := &runtime.Worker{
		Client: cli,
		Engine: kubeEngine,
		ID:     config.Worker.ID,
		Name:   config.Worker.Name,
		Type:   config.Worker.Type,
		Mask:   config.Worker.Mask,
		Resources: &runtime.Resources{
			HelperResources: &engine.Resources{
				Limits: &engine.ResourceObject{
					CPU:    config.Worker.HelperCpuLimits,
					Memory: config.Worker.HelperMemoryLimits,
				},
				Requests: &engine.ResourceObject{
					CPU:    config.Worker.HelperCpuRequests,
					Memory: config.Worker.HelperMemoryRequests,
				},
			},
			BuildResources: &engine.Resources{
				Limits: &engine.ResourceObject{
					CPU:    config.Worker.BuildCpuLimits,
					Memory: config.Worker.BuildMemoryLimits,
				},
				Requests: &engine.ResourceObject{
					CPU:    config.Worker.BuildCpuRequests,
					Memory: config.Worker.BuildMemoryRequests,
				},
			},
		},
	}

	// Renamed from "poller", which shadowed the poller package.
	taskPoller := &poller.Poller{
		Client:      cli,
		Dispatch:    worker.Run,
		Filter:      nil,
		NodeName:    config.Worker.NodeName,
		NameSpace:   config.Worker.NameSpace,
		DataDir:     config.Worker.DataDir,
		HelperImage: config.Worker.HelperImage,
	}

	var g errgroup.Group

	timeout := time.Duration(config.Worker.RegisterTimeout) * time.Second
	// Zero means "ping forever": use the maximum representable duration.
	if timeout == 0 {
		timeout = 1<<63 - 1
	}

	registerCtx, cancelFunc := context.WithTimeout(ctx, timeout)
	defer cancelFunc()

	// Ping the server until it answers or the register context ends.
	for {
		err := cli.Ping(registerCtx)
		if ctxErr := registerCtx.Err(); ctxErr != nil {
			if errors.Is(ctxErr, context.DeadlineExceeded) {
				return fmt.Errorf("注册到%s超时", config.Client.Address)
			}
			return ctxErr
		}
		if err == nil {
			logrus.WithField("address", config.Client.Address).
				Debugln("successfully pinged the remote server")
			break
		}
		logrus.WithError(err).
			WithField("address", config.Client.Address).
			Errorln("cannot ping the remote server")
		// Back off for a second, waking early on cancellation instead of
		// sleeping unconditionally.
		select {
		case <-registerCtx.Done():
		case <-time.After(time.Second):
		}
	}

	w := &engine.Worker{
		ID:   config.Worker.ID,
		Name: config.Worker.Name,
		Type: config.Worker.Type,
		Tag:  config.Worker.Tags,
	}

	if err := cli.Join(registerCtx, w); err != nil {
		logrus.WithError(err).
			WithField("address", config.Client.Address).
			Errorln("cannot join the server")
		return err
	}

	// Re-attach to any pods left running by a previous worker instance.
	processExistsPods(ctx, config.Worker.ID, config.Worker.NodeName, config.Worker.HelperImage, config.Worker.NameSpace, worker, cli)

	g.Go(func() error {
		logrus.WithField("capacity", config.Worker.Capacity).
			WithField("endpoint", config.Client.Address).
			WithField("os", config.Platform.OS).
			WithField("arch", config.Platform.Arch).
			Infoln("polling the remote server")

		taskPoller.Poll(ctx, config.Worker.Capacity)
		return nil
	})

	if err := g.Wait(); err != nil {
		logrus.WithError(err).
			Errorln("shutting down the server")
		return err
	}
	return nil
}

// processExistsPods re-attaches to pods that were still running when the
// worker restarted: for every running pod labelled with a task-id it looks
// the task up on the server and resumes log streaming and task supervision
// in a background goroutine.
func processExistsPods(ctx context.Context, workerId, nodeName, helperImage, nameSpace string, w *runtime.Worker, cli *client.HTTPClient) {
	pods, err := w.Engine.GetRunningPods(workerId, nameSpace)
	if err != nil {
		// Recovery is best-effort: log the failure and start fresh instead
		// of silently swallowing the error.
		logrus.WithError(err).
			WithField("namespace", nameSpace).
			Errorln("cannot list running pods for recovery")
		return
	}

	// NOTE(review): string-typed context keys trip `go vet`; they are kept
	// because downstream readers presumably look values up by these exact
	// string keys — confirm before switching to a private key type.
	valueCtx := context.WithValue(ctx, "nodeName", nodeName)
	valueCtx = context.WithValue(valueCtx, "nameSpace", nameSpace)
	valueCtx = context.WithValue(valueCtx, "helperImage", helperImage)

	for _, pod := range pods {
		// Was a bare fmt.Println debug print; log it properly instead.
		logrus.WithField("pod", pod.Name).
			WithField("phase", pod.Status.Phase).
			Debugln("recovering running pod")

		taskId := pod.Labels["task-id"]
		if taskId == "" {
			logrus.WithField("pod", pod.Name).
				WithField("labels", pod.Labels).
				Warnln("pod has no task-id label")
			continue
		}

		unit, err := cli.FindById(valueCtx, taskId)
		if err != nil {
			logrus.WithError(err).
				WithField("taskId", taskId).
				WithField("pod", pod.Name).
				WithField("labels", pod.Labels).
				Errorln("cannot find the task")
			continue
		}

		// Shadow the loop variable so the goroutine below sees this
		// iteration's pod (pre-Go 1.22 loop-variable capture bug).
		pod := pod
		go func() {
			defer func() {
				if r := recover(); r != nil {
					logger.FromContext(ctx).Errorf("panic while recovering task err: %v", r)
					logger.FromContext(ctx).Errorf("%s", debug.Stack())
				}
			}()

			unit.PodSpec.Name = pod.Name
			unit.Stop = make(chan struct{})
			// lw: renamed from "livelog", which shadowed the livelog package.
			lw := livelog.New(w.Client, "build", unit.TaskInstanceId)
			writer := runtime.NewReplacer(lw, unit.Secrets, w.Mask)
			unit.Log = writer
			defer writer.Close()
			w.RunTaskReal(ctx, unit, unit.Runners[0])
		}()
	}
}

// Run loads the optional dotenv file, builds the configuration from the
// environment, installs signal handling for graceful shutdown and starts
// the daemon loop.
func (d *DaemonCmd) Run() error {
	// Loading the env file is best-effort (config may come entirely from
	// the environment), but a failure is no longer silently ignored.
	if d.Envfile != "" {
		if err := godotenv.Load(d.Envfile); err != nil {
			logrus.WithError(err).
				WithField("envfile", d.Envfile).
				Warnln("cannot load the environment variable file")
		}
	}

	// load the configuration from the environment.
	config, err := FromEnviron()
	if err != nil {
		return err
	}

	// setup the global logrus logger.
	setupLogger(config)

	ctx, cancel := context.WithCancel(emptyContext)
	defer cancel()

	// Listen for termination signals to shut down gracefully.
	ctx = WithContextFunc(ctx, func() {
		println("received signal, terminating process")
		cancel()
	})

	return d.run(ctx, config)
}

// setupLogger configures the global logrus logger from config: trace wins
// over debug when both flags are set, and log output uses an unquoted
// text format.
func setupLogger(config Config) {
	switch {
	case config.Trace:
		logrus.SetLevel(logrus.TraceLevel)
	case config.Debug:
		logrus.SetLevel(logrus.DebugLevel)
	}

	formatter := &logrus.TextFormatter{
		ForceQuote:       false,
		DisableQuote:     true,
		QuoteEmptyFields: false,
	}
	logrus.SetFormatter(formatter)
}

// WithContextFunc 收到OS中断信号时调用回调函数f，之后调用cancel
func WithContextFunc(ctx context.Context, f func()) context.Context {
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(c)

		select {
		case <-ctx.Done():
		case <-c:
			f()
			cancel()
		}
	}()

	return ctx
}

// TestKubeClient probes kubernetes API connectivity by logging the server
// version and the kubelet version of every node. Failures are logged but
// never fatal: the daemon may still start and retry later.
func TestKubeClient(kubeClient *kubernetes.Clientset) {
	if version, err := kubeClient.ServerVersion(); err != nil {
		// BUG fix: the original called logrus.WithError(err) without a
		// finishing log call, so the error was built but never emitted.
		logrus.WithError(err).Errorln("cannot get kubernetes server version")
	} else {
		logrus.
			WithField("version", version).
			WithField("BuildDate", version.BuildDate).
			WithField("GoVersion", version.GoVersion).
			Infoln("kubernetes server info")
	}

	list, err := kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		logrus.WithError(err).Errorln("get nodes error")
		return
	}
	for _, item := range list.Items {
		logrus.WithField("node", item.Name).
			WithField("kubelet version", item.Status.NodeInfo.KubeletVersion).
			Infoln("node info")
	}
}
