package creation

import (
	"context"
	"encoding/json"
	"fmt"
	"github.com/jasonlvhit/gocron"
	"github.com/nuttech/bell"
	"gitverse.ru/synapse/kubelatte/pkg/api/v1alpha1"
	"gitverse.ru/synapse/kubelatte/pkg/kubeapi"
	"gitverse.ru/synapse/kubelatte/pkg/observability/logs"
	"gitverse.ru/synapse/kubelatte/pkg/operator"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
	"sync"
)

// WatcherController periodically reconciles the status of watched
// TriggerInstance objects against the Kubernetes resources they created,
// driven by a gocron job and updated via bell events.
type WatcherController struct {
	// ReSyncPeriod is the reconciliation interval in seconds; init applies
	// a default of 5 when it is zero.
	ReSyncPeriod uint64
	// job is the scheduled UpdateStatus task; scheduler drives it.
	job          *gocron.Job
	scheduler    *gocron.Scheduler
	// watchObjects maps a TriggerInstance's namespaced name to the instance
	// being watched.
	watchObjects map[string]v1alpha1.TriggerInstance
	// rwMutex guards watchObjects against concurrent access from the cron
	// job and the bell handlers. NOTE(review): despite the name this is a
	// plain sync.Mutex, not an RWMutex.
	rwMutex      sync.Mutex
}

// init configures the controller: it registers the bell event listeners,
// schedules the periodic UpdateStatus job, and starts the scheduler.
// It blocks on the scheduler channel for the controller's lifetime, so
// callers must run it in a goroutine (see Start).
func (w *WatcherController) init() {
	// ReSyncPeriod is unsigned, so only the zero value needs the default
	// (the previous `<= 0` comparison could never be true for negatives
	// and is flagged by staticcheck SA4003).
	if w.ReSyncPeriod == 0 {
		w.ReSyncPeriod = 5
	}

	w.watchObjects = map[string]v1alpha1.TriggerInstance{}

	// Subscribe to the operator events that mutate the watch set and the
	// resync period.
	bell.Listen(operator.WatcherUpdateReSyncPeriodEvent, w.UpdateSyncPeriod)
	bell.Listen(operator.WatcherAddNewObjectEvent, w.AddNewObject)
	bell.Listen(operator.WatcherRemoveObjectEvent, w.RemoveObject)

	logs.Debugf("WatcherController: Init with SyncPeriod %d", w.ReSyncPeriod)

	w.scheduler = gocron.NewScheduler()
	w.job = w.scheduler.Every(w.ReSyncPeriod).Second()
	err := w.job.Do(w.UpdateStatus)
	if err != nil {
		logs.Error("WatcherController: Init failed: " + err.Error())
		return
	}
	// Start returns a channel that yields when the scheduler stops;
	// receiving keeps this goroutine alive while the job runs.
	<-w.scheduler.Start()
}

// UpdateStatus is the periodic reconciliation job. For every watched
// TriggerInstance it re-reads the resources recorded in the instance's
// status from the Kubernetes API, refreshes their phase/status fields, and
// persists the TriggerInstance status when its hash changed. Instances
// that were deleted, or whose resources are all gone, are dropped from the
// watch set.
func (w *WatcherController) UpdateStatus() {
	w.rwMutex.Lock()
	defer w.rwMutex.Unlock()

	var mustDeleted []string
	ctx := context.Background()
	clt := kubeapi.GetClient()

	for tiName, triggerInstance := range w.watchObjects {
		ti, err := operator.Cl.TriggerInstance().Get(ctx, triggerInstance.Namespace, triggerInstance.Name, metav1.GetOptions{})
		if err != nil {
			if errors.IsNotFound(err) {
				// The TriggerInstance itself is gone; stop watching it.
				mustDeleted = append(mustDeleted, tiName)
				continue
			}
			// Any other error leaves ti unusable (nil); skip this instance
			// until the next resync instead of dereferencing a nil pointer.
			logs.Warn(fmt.Sprintf("WatcherController: Get trigger instance failed, for this ti: %s error %s", tiName, err))
			continue
		}
		originalStatusHash := ti.GetStatusHash()
		logs.Debug(fmt.Sprintf("WatcherController: Watch for %s", ti.GetNamespacedName()))
		if ti.Status.ResourceStatus != nil {
			allRemoved := true
			for i := 0; i < len(ti.Status.ResourceStatus); i++ {
				name := ti.Status.ResourceStatus[i].Name
				kind := ti.Status.ResourceStatus[i].Kind
				version := ti.Status.ResourceStatus[i].ApiVersion
				// Already-removed or never-created resources need no polling.
				if ti.Status.ResourceStatus[i].Status == operator.FactoryResourceRemoved ||
					ti.Status.ResourceStatus[i].Phase == operator.FactoryResourceCreateFailed {
					continue
				}
				resourceMeta := kubeapi.GetResourceMeta(clt, kind, version)
				raw, errKube := kubeapi.GetRawResourceFromKubeApi(ctx, clt.RESTClient(), resourceMeta, name, triggerInstance.Namespace)
				if errKube != nil {
					// if resource is not exist then we set status removed
					if errors.IsNotFound(errKube) {
						ti.UpdateResourceStatus(name, "", operator.FactoryResourceRemoved, operator.FactoryResourceRemoved, kind, version)
						logs.Infof("WatcherController: Resource %s deleted, set status REMOVED for this ti: %s", name, tiName)
					} else {
						// Transient API error: the resource may still exist,
						// so it must not count towards "all removed" — that
						// would wrongly evict the instance from the watch set.
						allRemoved = false
						logs.Warn(fmt.Sprintf("WatcherController: Get resource %s failed, for this ti: %s error %s", name, tiName, errKube))
					}
					continue
				}

				var resource map[string]interface{}
				err = json.Unmarshal(raw, &resource)
				if err != nil {
					// One malformed payload must not abort the whole resync
					// (the old code returned here, skipping every remaining
					// instance); skip just this resource.
					logs.Error("WatcherController: Update trigger status failed: " + err.Error())
					allRemoved = false
					continue
				}

				// Derive the phase: a missing status block means the resource
				// was just applied; a missing or non-string phase is treated
				// as empty (the old unchecked assertion could panic here).
				phase := ""
				status, ok := resource["status"].(map[string]interface{})
				if ok {
					if phs, isString := status["phase"].(string); isString {
						phase = phs
					}
				} else {
					phase = "Apply"
				}

				data, errYaml := yaml.Marshal(status)
				if errYaml != nil {
					logs.Error("WatcherController: Update trigger status failed: " + errYaml.Error())
				}
				allRemoved = false
				ti.UpdateResourceStatus(name, "", phase, string(data), kind, version)
			}
			if allRemoved {
				logs.Info("WatcherController: All resources deleted, mark to delete this ti: " + tiName)
				mustDeleted = append(mustDeleted, tiName)
			}
		}
		actualStatusHash := ti.GetStatusHash()
		if originalStatusHash == actualStatusHash {
			logs.Debug("WatcherController: Ti status hash equal, no need update ti: " + tiName)
		} else {
			_, err = operator.Cl.TriggerInstance().UpdateStatus(ctx, triggerInstance.Namespace, ti, metav1.UpdateOptions{})
			if err != nil {
				logs.Error("WatcherController: Save trigger status failed: " + err.Error())
			}
		}
	}

	// Drop every instance marked for removal from the watch set.
	for _, tiName := range mustDeleted {
		delete(w.watchObjects, tiName)
		logs.Debug("WatcherController: Clear trigger instance: " + tiName)
	}
}

// Start launches the controller's initialization and resync loop in a
// background goroutine, because init blocks on the scheduler channel.
func (w *WatcherController) Start() {
	logs.Debugf("WatcherController: Start")
	go w.init()
}

// Stop cancels the periodic resync job. It is safe to call even if Start's
// background init has not yet created the scheduler.
func (w *WatcherController) Stop() {
	logs.Debugf("WatcherController: Stop")
	// init runs asynchronously (see Start); guard against Stop racing
	// ahead of it and dereferencing a nil scheduler.
	if w.scheduler != nil {
		w.scheduler.Remove(w.job)
	}
}

// UpdateSyncPeriod is the bell handler for WatcherUpdateReSyncPeriodEvent.
// It stores the new resync period in seconds.
// NOTE(review): the already-scheduled gocron job keeps its old interval;
// the new value only takes effect if init schedules again — confirm.
func (w *WatcherController) UpdateSyncPeriod(message bell.Message) {
	// A comma-ok assertion keeps a malformed event payload from panicking
	// the handler (the old bare assertion would).
	period, ok := message.Value.(uint64)
	if !ok {
		logs.Warn(fmt.Sprintf("WatcherController: UpdateSyncPeriod got unexpected payload %v", message.Value))
		return
	}
	w.ReSyncPeriod = period
	logs.Debugf("WatcherController: UpdateSyncPeriod %d", w.ReSyncPeriod)
}

// AddNewObject is the bell handler for WatcherAddNewObjectEvent: it stores
// the received TriggerInstance in the watch set under its namespaced name.
func (w *WatcherController) AddNewObject(message bell.Message) {
	w.rwMutex.Lock()
	defer w.rwMutex.Unlock()

	instance := message.Value.(v1alpha1.TriggerInstance)
	key := instance.GetNamespacedName()
	w.watchObjects[key] = instance
	logs.Debug(fmt.Sprintf("WatcherController: addNewObject %s", key))
}

// RemoveObject is the bell handler for WatcherRemoveObjectEvent: it drops
// the received TriggerInstance from the watch set.
func (w *WatcherController) RemoveObject(message bell.Message) {
	w.rwMutex.Lock()
	defer w.rwMutex.Unlock()

	instance := message.Value.(v1alpha1.TriggerInstance)
	key := instance.GetNamespacedName()
	delete(w.watchObjects, key)
	logs.Debug(fmt.Sprintf("WatcherController: RemoveObject %s", key))
}
