package K8Sapi

import (
	"context"
	"flag"
	"fmt"
	log "github.com/sirupsen/logrus"
	"io/ioutil"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
	"k8s.io/client-go/util/workqueue"
	"os"
	"path/filepath"
	"queue-gateway/config"
	"queue-gateway/tokenBuket"
	"reflect"
	"strconv"
	"strings"
	"time"
)

// serverConfig caches the gateway service configuration loaded by the config package.
var serverConfig = config.ServiceConfig

// ResolverMap maps "name.namespace:port" keys to the most recently resolved
// pod addresses for each watched service.
// NOTE(review): written by the watcher goroutine and readable by others with
// no synchronization — confirm callers tolerate this or add a lock.
var ResolverMap = make(map[string]K8sServiceResolver)

// localInit builds a Kubernetes clientset for local (out-of-cluster)
// development, reading credentials from a kubeconfig file.
//
// NOTE(review): the -kubeconfig flag is registered here but flag.Parse is
// never called in this file, so the default path is always what is used;
// calling localInit twice would panic on flag redefinition — confirm it is
// only invoked once at startup.
func localInit(masterUrl string) (*kubernetes.Clientset, error) {
	var kubeconfig *string
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}

	cfg, err := clientcmd.BuildConfigFromFlags(masterUrl, *kubeconfig)
	if err != nil {
		// Wrap with %w so callers can inspect the cause via errors.Is/As.
		return nil, fmt.Errorf("building kubernetes config: %w", err)
	}
	// Raise the client-side rate limits above the defaults and use protobuf
	// encoding for lower serialization overhead on core API objects.
	cfg.QPS = 50
	cfg.Burst = 50
	cfg.ContentConfig.ContentType = runtime.ContentTypeProtobuf
	return kubernetes.NewForConfig(cfg)
}

// K8sServiceResolver holds the resolved state of one watched service:
// its identity and the current list of "ip:port" pod addresses.
type K8sServiceResolver struct {
	Service *ServiceName
	PodIps  []string
}

// K8sServiceWatcher watches Kubernetes Service and Endpoints objects via
// informers, keeps local caches of both, and pushes endpoint updates for
// registered services through a delaying work queue.
type K8sServiceWatcher struct {
	ctx              context.Context
	masterUrl        string
	kubeConfigPath   string
	defaultNamespace string
	endpointCache    map[string]*v1.Endpoints
	svcCache         map[string]*v1.Service
	endpointQueue    workqueue.DelayingInterface    // updates for watched services are pushed into this delaying queue
	eventList        map[string]UpdateStateFuncList // per-service registrations: callback plus service identity
	isRunning        bool
}

// ServiceName identifies a watched service by namespace, name, and the
// service port (numeric string or named port).
type ServiceName struct {
	Namespace string
	Name      string
	Port      string
}

// getEndpointKey derives the "name.namespace" cache key for an Endpoints object.
func getEndpointKey(ep *v1.Endpoints) string {
	return getKey(ep.Namespace, ep.Name)
}

// getSvcKey derives the "name.namespace" cache key for a Service object.
func getSvcKey(svc *v1.Service) string {
	return getKey(svc.Namespace, svc.Name)
}

// getKey builds the canonical cache key for a service: "<name>.<namespace>".
// Note the argument order (namespace first) is the reverse of the key layout.
func getKey(namespace string, name string) string {
	return fmt.Sprintf("%s.%s", name, namespace)
}

// UpdateStateFunc pairs a watched service's identity with the callback to
// invoke when its endpoint state changes.
type UpdateStateFunc struct {
	svc *ServiceName
	f   func(resolver K8sServiceResolver)
}

// UpdateStateFuncList collects all registrations for one service key.
type UpdateStateFuncList []UpdateStateFunc

// parseService splits a service spec of the form
// "name[.namespace][:port][/connSize]" into its parts.
// Defaults: connSize 1, port "80", namespace k.defaultNamespace.
// When the name carries multiple dots ("a.b.c"), only the second segment
// ("b") is taken as the namespace, matching the original Split-based logic.
// The error-first return order is kept for compatibility with existing
// callers; on error all other values are zero.
func (k *K8sServiceWatcher) parseService(serviceName string) (error, string, string, string, int) {
	connSize := 1
	hostPort, sizeStr, hasSize := strings.Cut(serviceName, "/")
	if hasSize {
		var err error
		connSize, err = strconv.Atoi(sizeStr)
		// Reject non-numeric and non-positive sizes: a connection count
		// of zero or less is meaningless.
		if err != nil || connSize < 1 {
			return fmt.Errorf("invalid client connection size:%s", sizeStr), "", "", "", 0
		}
	}

	host, port, hasPort := strings.Cut(hostPort, ":")
	if !hasPort {
		port = "80"
	}

	svc, remainder, hasNS := strings.Cut(host, ".")
	ns := k.defaultNamespace
	if hasNS {
		// Keep only the segment up to the next dot, if any.
		ns, _, _ = strings.Cut(remainder, ".")
	}
	return nil, svc, ns, port, connSize
}

// endpointToResolverState converts a watched Endpoints object into a
// K8sServiceResolver (the list of "ip:port" pod addresses), publishes it in
// ResolverMap, and refreshes the token bucket's service map.
func (k *K8sServiceWatcher) endpointToResolverState(ep *v1.Endpoints, svc *ServiceName) {
	log.Println("update endpoints resource version:", ep.ResourceVersion)
	k8sresolver := K8sServiceResolver{Service: svc}

	s, ok := k.svcCache[getKey(svc.Namespace, svc.Name)]
	if !ok {
		// Was log.Fatal, which both ignored the %s format verb (Fatal does
		// not format) and terminated the whole process on a cache miss.
		log.Errorf("service not found:%s", getKey(svc.Namespace, svc.Name))
		return
	}

	// Resolve the container-side port that matches the requested service
	// port, matching by port number or by port name.
	var endpointPort int32
	for _, p := range s.Spec.Ports {
		if strconv.Itoa(int(p.Port)) != svc.Port && p.Name != svc.Port {
			continue
		}
		switch {
		case p.TargetPort.IntVal > 0:
			endpointPort = p.TargetPort.IntVal
		case len(p.TargetPort.StrVal) > 0:
			// Named target port: look it up in the endpoint subset ports.
			if len(ep.Subsets) == 0 {
				// Guard: the original logged this error and then indexed
				// ep.Subsets[0] anyway, panicking with index out of range.
				log.Errorf("invalid endpoint:%s", getEndpointKey(ep))
				continue
			}
			for _, epp := range ep.Subsets[0].Ports {
				if epp.Name == p.TargetPort.StrVal {
					endpointPort = epp.Port
				}
			}
		default:
			// Was log.Fatal — one malformed port spec must not kill the gateway.
			log.Error("resolve endpoint port error")
		}
	}

	for _, sub := range ep.Subsets {
		for _, addr := range sub.Addresses {
			log.Printf("k8s resolver,resolve service:%s,addr: %s:%d", svc.Name, addr.IP, endpointPort)
			k8sresolver.PodIps = append(k8sresolver.PodIps, fmt.Sprintf("%s:%d", addr.IP, endpointPort))
		}
	}
	service_s := fmt.Sprintf("%s.%s:%v", svc.Name, svc.Namespace, svc.Port)
	ResolverMap[service_s] = k8sresolver
	// Refresh the pod-IP cache inside the token bucket's service map.
	tokenBuket.UpdateServiceMap(k.ctx, service_s, k8sresolver.PodIps, ep.ResourceVersion)
}

// AddWatch registers a service spec ("name[.ns][:port][/connSize]") for
// endpoint-state tracking: it resolves the current endpoints immediately and
// records the callback so later endpoint updates re-resolve the service.
func (k *K8sServiceWatcher) AddWatch(serviceName string, f func(resolver K8sServiceResolver)) {
	if len(serviceName) == 0 {
		return
	}
	// Split the spec into service name, namespace, port, and connection size.
	err, svc, ns, port, connSize := k.parseService(serviceName)
	if err != nil {
		// The parse error used to be silently dropped (the log call was
		// commented out), making bad configs invisible.
		log.Error(err.Error())
		return
	}
	log.Infof("k8s resolver,svc:%s,ns:%s,port:%s,conn size:%d", svc, ns, port, connSize)

	key := getKey(ns, svc)
	ep, ok := k.endpointCache[key]
	if !ok {
		// Was log.Fatal, which terminated the process (and made the return
		// below unreachable); a missing endpoint should not be fatal.
		log.Errorf("endpoint not found:%s", key)
		return
	}
	updateState := UpdateStateFunc{
		svc: &ServiceName{Namespace: ns, Name: svc, Port: port},
		f:   f, // update-state callback, reserved for future use
	}
	k.endpointToResolverState(ep, updateState.svc)

	if list, exists := k.eventList[key]; exists {
		log.Infof("append event for svc:%s", key)
		k.eventList[key] = append(list, updateState)
	} else {
		log.Infof("add event for svc:%s", key)
		k.eventList[key] = UpdateStateFuncList{updateState}
	}
}

// NewWatcher builds a K8sServiceWatcher bound to ctx. The default namespace
// is taken from POD_NAMESPACE, then from the in-cluster service-account
// namespace file, and finally falls back to "default".
func NewWatcher(ctx context.Context) *K8sServiceWatcher {
	defaultNS := os.Getenv("POD_NAMESPACE")
	if len(defaultNS) == 0 {
		const nsFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
		// NOTE(review): ioutil.ReadFile is deprecated in favor of os.ReadFile;
		// kept to match the file's current import set.
		if ns, err := ioutil.ReadFile(nsFile); err == nil {
			// Trim in case the file carries trailing whitespace/newline,
			// which would corrupt every "name.namespace" cache key.
			defaultNS = strings.TrimSpace(string(ns))
		} else {
			defaultNS = "default"
		}
	}

	return &K8sServiceWatcher{
		ctx:              ctx,
		svcCache:         make(map[string]*v1.Service),
		endpointCache:    make(map[string]*v1.Endpoints),
		eventList:        make(map[string]UpdateStateFuncList),
		defaultNamespace: defaultNS,
		endpointQueue:    workqueue.NewNamedDelayingQueue("endpoints"), // delaying queue for endpoint update events
		isRunning:        false,
		kubeConfigPath:   os.Getenv("KUBE_CONFIG_PATH"),
	}
}

// Start builds the Kubernetes client (kubeconfig-based when RUN_ENV is
// "local", in-cluster otherwise), wires up Service/Endpoints informers that
// keep the local caches fresh, and blocks until the informer caches sync.
//
// NOTE(review): the informer handlers write svcCache/endpointCache from the
// informer goroutine while AddWatch and work read them — confirm the call
// ordering makes this safe or guard the maps with a mutex.
func (k *K8sServiceWatcher) Start() error {
	var kubeClient *kubernetes.Clientset
	var err error
	if config.RUN_ENV == "local" {
		// Local debugging: resolve credentials from a kubeconfig file.
		kubeClient, err = localInit(k.masterUrl)
	} else {
		// Deployed in-cluster: use the mounted service account.
		kubeConfig, cfgErr := rest.InClusterConfig()
		if cfgErr != nil {
			// Previously this error was discarded and a nil config was
			// handed straight to NewForConfig.
			return fmt.Errorf("error building in-cluster config:%s", cfgErr.Error())
		}
		kubeClient, err = kubernetes.NewForConfig(kubeConfig)
	}
	if err != nil {
		return fmt.Errorf("error building kubernetes client:%s", err.Error())
	}

	factory := informers.NewSharedInformerFactory(kubeClient, 0)
	epInformer := factory.Core().V1().Endpoints()
	svcInformer := factory.Core().V1().Services()

	// Cache every Endpoints object; queue updates whose address set changed.
	epInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(cur interface{}) {
				if endpoint, ok := cur.(*v1.Endpoints); ok {
					k.endpointCache[getEndpointKey(endpoint)] = endpoint
				}
			},
			UpdateFunc: func(originalEndpoint, newEndpoint interface{}) {
				ep1, ok1 := originalEndpoint.(*v1.Endpoints)
				ep2, ok2 := newEndpoint.(*v1.Endpoints)
				// Only react when the subsets (addresses/ports) really changed.
				if ok1 && ok2 && !reflect.DeepEqual(ep1.Subsets, ep2.Subsets) {
					k.endpointCache[getEndpointKey(ep2)] = ep2
					k.endpointQueue.Add(ep2)
				}
			},
			DeleteFunc: func(cur interface{}) {
				// do nothing
			},
		})

	// Cache every Service; when its port spec changes, requeue the matching
	// endpoints so resolvers pick up the new target port.
	svcInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(cur interface{}) {
				if svc, ok := cur.(*v1.Service); ok {
					k.svcCache[getSvcKey(svc)] = svc
				}
			},
			UpdateFunc: func(originalSvc, newSvc interface{}) {
				// (A leftover debug fmt.Println was removed here.)
				svc1, ok1 := originalSvc.(*v1.Service)
				svc2, ok2 := newSvc.(*v1.Service)
				if ok1 && ok2 {
					k.svcCache[getSvcKey(svc2)] = svc2
					if !reflect.DeepEqual(svc1.Spec.Ports, svc2.Spec.Ports) {
						if ep, ok := k.endpointCache[getSvcKey(svc2)]; ok {
							k.endpointQueue.Add(ep)
						}
					}
				}
			},
			DeleteFunc: func(cur interface{}) {
				// do nothing
			},
		})

	factory.Start(k.ctx.Done())
	if !cache.WaitForCacheSync(k.ctx.Done(), svcInformer.Informer().HasSynced, epInformer.Informer().HasSynced) {
		return fmt.Errorf("waiting for cached sync timeout")
	}
	log.Println("k8s service watcher is ready")
	return nil
}

// Run starts draining the endpoint delay queue and blocks until the watcher
// context is cancelled. A second call while already running returns
// immediately.
// NOTE(review): isRunning is read and written without synchronization —
// confirm Run is only invoked from a single goroutine.
func (k *K8sServiceWatcher) Run() {
	if k.isRunning {
		return
	}
	k.isRunning = true
	log.Printf("start k8s service watcher")
	defer log.Printf("shutdown k8s service watcher ")
	// wait.Until invokes work every 10ms until ctx is done; the final
	// receive keeps Run blocked until cancellation.
	wait.Until(k.work, time.Millisecond*10, k.ctx.Done())
	<-k.ctx.Done()
}

// work drains the endpoint delay queue, re-resolving every registered
// service whose endpoints changed. It returns once the queue is shut down.
func (k *K8sServiceWatcher) work() {
	for {
		// The inner closure scopes the deferred Done call to one queue item
		// and reports whether the queue has been shut down.
		shutdown := func() bool {
			key, quit := k.endpointQueue.Get()
			if quit {
				return true
			}
			defer k.endpointQueue.Done(key)
			ep, ok := key.(*v1.Endpoints)
			if !ok {
				// Guard: the original ignored the assertion result, so a
				// non-Endpoints item flowed into getEndpointKey as nil.
				return false
			}
			if e, exists := k.eventList[getEndpointKey(ep)]; exists {
				for _, f := range e {
					k.endpointToResolverState(ep, &ServiceName{Namespace: f.svc.Namespace, Name: f.svc.Name, Port: f.svc.Port})
					//f.f(k.endpointToResolverState(ep, f.svc))
				}
			}
			return false
		}()
		if shutdown {
			// Previously only the closure returned here, so the outer loop
			// kept busy-spinning on a shut-down queue forever.
			return
		}
	}
}

// WatchInit creates and starts the shared service watcher, launches its
// worker goroutine, and registers every service from the gateway config.
func WatchInit(ctx context.Context) error {
	w := NewWatcher(ctx)
	if err := w.Start(); err != nil {
		return err
	}
	go w.Run()
	for _, v := range serverConfig.Services {
		v := v // per-iteration copy: pre-Go 1.22, all callbacks would otherwise capture the last element
		w.AddWatch(v.Name, func(resolver K8sServiceResolver) {
			log.Printf("update resolver state func!, service:%s", v.Name)
		})
	}
	return nil
}

/*func main() {
	//w := NewWatcher(context.Background())
	//err := w.Start()
	//if err != nil {
	//	return
	//}
	//go w.Run()
	//w.AddWatch("nginx-app.default:80", func(resolver K8sServiceResolver) {
	//	fmt.Println("update resolver state func!")
	//})
	WatchInit()
	for {
		time.Sleep(time.Second * 2)
		v, _ := tokenBuket.ServiceMap.Load("nginx-app.default:80")
		ip, tokenid := tokenBuket.GetToken(context.Background(), []string{"nginx-app.default:80"}, []float32{1})
		//ip, err := tokenBuket.GetPodIp(context.Background(), "infer-balance_nginx-app.default:801", 5)

		fmt.Println(ResolverMap["nginx-app.default:80"].PodIps, v, ip, tokenid)
		service := v.(*tokenBuket.Service)
		tokenBuket.RevertToken(context.Background(), service.HashName, ip, tokenid)
	}
}*/
