/*
Copyright 2021.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"
	nodev1 "k8s.io/api/node/v1beta1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
	"strings"
	"time"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	nodesv1 "gitee.com/jinmingzhi/k8s-crd/node-pool/api/v1"
)

// nodeFinalizer is the finalizer key placed on NodePool objects so that
// Kubernetes defers their deletion until this controller has cleaned up
// the pool's nodes (see Reconcile and the nodeFinalizer method).
const nodeFinalizer = "node.finalizers.node-pool.xiaobaiskill.cn"

// NodePoolReconciler reconciles a NodePool object
type NodePoolReconciler struct {
	client.Client
	// Log is the controller's structured logger.
	Log      logr.Logger
	// Scheme is used to set owner references on objects created by the controller.
	Scheme   *runtime.Scheme
	// Recorder emits Kubernetes Events for NodePool resources.
	Recorder record.EventRecorder
}

//+kubebuilder:rbac:groups=nodes.xiaobaiskill.cn,resources=nodepools,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=nodes.xiaobaiskill.cn,resources=nodepools/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=nodes.xiaobaiskill.cn,resources=nodepools/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get
//+kubebuilder:rbac:groups=node.k8s.io,resources=runtimeclasses,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch;delete

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// It compares the state specified by the NodePool object against the actual
// cluster state: it applies the pool's labels/taints to its member nodes,
// aggregates the nodes' allocatable resources into the pool status, and keeps
// a RuntimeClass in sync with the pool.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.2/pkg/reconcile
func (r *NodePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	_ = r.Log.WithValues("nodepool", req.NamespacedName)

	pool := &nodesv1.NodePool{}
	// NotFound is deliberately swallowed here; the empty-Name check below
	// detects that case.
	if err := r.Client.Get(ctx, req.NamespacedName, pool); client.IgnoreNotFound(err) != nil {
		return ctrl.Result{}, err
	}
	if pool.Name == "" {
		// Get hit NotFound: the pool no longer exists, nothing to do.
		r.Log.Info("deleted")
		return ctrl.Result{}, nil
	}

	r.Log.Info("pool info", "data", pool)

	// Register our finalizer so Kubernetes defers the actual deletion: when
	// the object is deleted, the API server only removes it once the
	// Finalizers list is empty, giving the controller a chance to clean up
	// the pool's nodes first (see the deletion branch below).
	if !containsString(pool.Finalizers, nodeFinalizer) {
		pool.Finalizers = append(pool.Finalizers, nodeFinalizer)
		err := r.Client.Update(ctx, pool)
		// Return immediately; the Update re-triggers reconciliation.
		return ctrl.Result{}, err
	}

	// List the nodes currently labeled as members of this pool.
	var nodes corev1.NodeList
	err := r.Client.List(ctx, &nodes, &client.ListOptions{
		LabelSelector: pool.NodeLabelSelector(),
	})
	if client.IgnoreNotFound(err) != nil {
		return ctrl.Result{}, err
	}
	// When Kubernetes deletes a resource it first sets DeletionTimestamp and
	// only performs the real delete once Finalizers is empty. A non-zero
	// timestamp therefore means the pre-delete cleanup must run now.
	if !pool.DeletionTimestamp.IsZero() {
		r.Log.Info("delete handler")
		return ctrl.Result{}, r.nodeFinalizer(ctx, pool, nodes.Items)
	}

	if len(nodes.Items) > 0 {
		r.Log.Info("find nodes, will merge data ", "nodes", len(nodes.Items))
		// Rebuild the aggregate status from scratch on every pass.
		pool.Status.Allocatable = corev1.ResourceList{}
		pool.Status.NodeCount = len(nodes.Items)
		for _, n := range nodes.Items {
			node := n // copy so ApplyNode does not alias the shared loop variable (pre-Go 1.22)
			// Apply the pool's labels/taints to the member node.
			err := r.Client.Update(ctx, pool.Spec.ApplyNode(node))
			if err != nil {
				return ctrl.Result{}, err
			}
			// Sum each node's allocatable resources into the pool status.
			for name, quantity := range n.Status.Allocatable {
				q, ok := pool.Status.Allocatable[name]
				if ok {
					q.Add(quantity)
					pool.Status.Allocatable[name] = q
					continue
				}
				pool.Status.Allocatable[name] = quantity
			}
		}
	}
	r.Log.Info("runtimeClass handler")

	// Ensure a RuntimeClass mirroring this pool exists and is up to date.
	runtimeClass := nodev1.RuntimeClass{}
	err = r.Client.Get(ctx, client.ObjectKeyFromObject(pool.RuntimeClass()), &runtimeClass)
	if client.IgnoreNotFound(err) != nil {
		return ctrl.Result{}, err
	}
	r.Log.Info("get runtimeclass", "data", runtimeClass)

	if runtimeClass.Name == "" {
		// Empty Name means the Get above hit NotFound: create the
		// RuntimeClass, owned by the pool so it is garbage-collected with it.
		r.Log.Info("create runtimeclass")
		runtimeClassNow := pool.RuntimeClass()
		err = controllerutil.SetOwnerReference(pool, runtimeClassNow, r.Scheme)
		if err != nil {
			return ctrl.Result{}, err
		}
		err = r.Client.Create(ctx, runtimeClassNow)
		if err != nil {
			return ctrl.Result{}, err
		}
	} else {
		r.Log.Info("update runtimeclass")
		runtimeClass.Scheduling = pool.RuntimeClass().Scheduling
		runtimeClass.Handler = pool.RuntimeClass().Handler
		err = r.Client.Update(ctx, &runtimeClass)
		if err != nil {
			return ctrl.Result{}, err
		}
	}

	r.Log.Info("add status & add Finalizers")
	// A status change alone does not trigger an update event on the main
	// resource, so an explicit Event is recorded for visibility.
	pool.Status.Status = 200
	err = r.Status().Update(ctx, pool)
	r.Recorder.Event(pool, corev1.EventTypeNormal, "status", "200")
	return ctrl.Result{}, err
}

// SetupWithManager sets up the controller with the Manager.
func (r *NodePoolReconciler) SetupWithManager(mgr ctrl.Manager) error {
	builder := ctrl.NewControllerManagedBy(mgr).
		For(&nodesv1.NodePool{}).
		Owns(&corev1.Node{})
	// Also watch raw Node updates so label changes re-enqueue the affected pools.
	builder = builder.Watches(
		&source.Kind{Type: &corev1.Node{}},
		handler.Funcs{UpdateFunc: r.nodeUpdateHandler},
	)
	return builder.Complete(r)
}

// nodeUpdateHandler maps a Node update event to reconcile requests for the
// NodePools referenced by the node's old and new label sets. Enqueuing both
// ensures that a node moving between pools re-triggers reconciliation of the
// pool it left as well as the pool it joined.
func (r *NodePoolReconciler) nodeUpdateHandler(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	r.enqueuePoolForLabels(ctx, q, e.ObjectOld.GetLabels())
	r.enqueuePoolForLabels(ctx, q, e.ObjectNew.GetLabels())
}

// enqueuePoolForLabels looks up the NodePool matching the given node labels
// and, if one exists (and is not being deleted), adds a reconcile request for
// it. Lookup errors are logged and otherwise ignored so a failure on one
// label set does not block handling of the other.
func (r *NodePoolReconciler) enqueuePoolForLabels(ctx context.Context, q workqueue.RateLimitingInterface, labels map[string]string) {
	pool, err := r.getNodePoolByLabels(ctx, labels)
	if err != nil {
		r.Log.Error(err, "get node pool err")
	}
	if pool != nil {
		q.Add(reconcile.Request{
			NamespacedName: types.NamespacedName{Name: pool.Name},
		})
	}
}

// getNodePoolByLabels extracts a pool name from the node's role label
// ("node-role.kubernetes.io/<pool-name>") and fetches the corresponding
// NodePool. It returns (nil, nil) when no role label is present, the pool
// does not exist, or the pool is already being deleted.
func (r *NodePoolReconciler) getNodePoolByLabels(ctx context.Context, labels map[string]string) (*nodesv1.NodePool, error) {
	const rolePrefix = "node-role.kubernetes.io/"

	pool := &nodesv1.NodePool{}
	for k := range labels {
		// Anchor the match at the start of the key. The previous
		// strings.Split-based check also (wrongly) matched keys that merely
		// contained the prefix mid-string, e.g. "x-node-role.kubernetes.io/y".
		if !strings.HasPrefix(k, rolePrefix) {
			continue
		}
		name := strings.TrimPrefix(k, rolePrefix)
		if err := r.Client.Get(ctx, types.NamespacedName{Name: name}, pool); err != nil {
			if client.IgnoreNotFound(err) != nil {
				return nil, err
			}
			// Pool not found for this role label; treat as "no pool".
			return nil, nil
		}
		// A pool that is being deleted must not be re-enqueued.
		if !pool.DeletionTimestamp.IsZero() {
			return nil, nil
		}
		return pool, nil
	}
	return nil, nil
}

// nodeFinalizer runs the pool's pre-delete logic: it clears the pool's
// labels/taints from every member node, then removes our finalizer so the
// API server can complete the NodePool deletion.
func (r *NodePoolReconciler) nodeFinalizer(ctx context.Context, pool *nodesv1.NodePool, nodes []corev1.Node) error {
	// Detach every node that still belongs to the pool.
	for i := range nodes {
		if err := r.Client.Update(ctx, pool.Spec.CleanNode(nodes[i])); err != nil {
			return err
		}
	}

	// Pre-delete work done: drop the finalizer so Kubernetes can finish
	// deleting the resource.
	pool.Finalizers = removeString(pool.Finalizers, nodeFinalizer)
	return r.Client.Update(ctx, pool)
}

// containsString reports whether s is present in slice.
func containsString(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}

// removeString returns a new slice with every occurrence of s removed from
// slice. The result is nil when no elements remain.
func removeString(slice []string, s string) []string {
	var kept []string
	for _, item := range slice {
		if item != s {
			kept = append(kept, item)
		}
	}
	return kept
}
