/*
Copyright 2024.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"log"

	v1 "k8s.io/api/apps/v1"
	core "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	batchv1 "myoperator.testbuilder.io/project/api/v1"
)

// TestControllerReconciler reconciles a TestController object.
// It embeds client.Client for read/write access to the cluster and holds
// the Scheme used to map Go types to Kubernetes GroupVersionKinds.
type TestControllerReconciler struct {
	client.Client
	// Scheme is the runtime scheme shared with the manager; needed when
	// setting owner references on objects this controller creates.
	Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=batch.myoperator.testbuilder.io,resources=testcontrollers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch.myoperator.testbuilder.io,resources=testcontrollers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=batch.myoperator.testbuilder.io,resources=testcontrollers/finalizers,verbs=update
// BUG FIX: the controller gets/lists/creates/updates/deletes Deployments, so
// it needs RBAC on the apps group as well.
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete

// Reconcile moves the cluster toward the state declared by a TestController:
// it deletes owned Deployments whose name no longer matches
// spec.deploymentName, creates the desired Deployment when it is missing,
// aligns its replica count with spec.replicas (defaulting to 1), and mirrors
// the Deployment's readyReplicas into the TestController status.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile
func (r *TestControllerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Fetch the TestController that triggered this event.
	job := batchv1.TestController{}
	if err := r.Get(ctx, req.NamespacedName, &job); err != nil {
		// NotFound means the resource was deleted; drop it from the queue.
		log.Print("unable to fetch TestController resource")
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// List the Deployments controlled by this TestController via the
	// serviceOwnerKey field index (must be registered in SetupWithManager).
	var deployments v1.DeploymentList
	if err := r.List(ctx, &deployments, client.InNamespace(job.Namespace), client.MatchingFieldsSelector{
		Selector: fields.OneTermEqualSelector(serviceOwnerKey, job.Name),
	}); err != nil {
		// BUG FIX: the error was silently swallowed (returned nil), hiding
		// list failures; return it so the request is requeued and retried.
		log.Print("unable to list owned Deployments")
		return ctrl.Result{}, err
	}

	// Garbage-collect owned Deployments that no longer match the spec.
	deleted := 0
	for i := range deployments.Items {
		// Index into the slice instead of aliasing the range variable
		// (safe address, and avoids pre-Go-1.22 loop-variable capture).
		dep := &deployments.Items[i]
		if dep.Name == job.Spec.DeploymentName {
			continue
		}
		if err := r.Delete(ctx, dep); err != nil {
			log.Print("something wrong with deleting deployment")
			return ctrl.Result{}, err
		}
		deleted++
	}
	// BUG FIX: the original logged string(delete), which converts the int
	// count to a rune (garbage output) — and these are deployments, not
	// services. Print the number with Printf.
	log.Printf("deleted %d stale deployments", deleted)

	// Ensure the desired Deployment exists, creating it when absent.
	// NOTE(review): this Get uses job.Name while the cleanup loop above keeps
	// job.Spec.DeploymentName; if the two differ, the Deployment created here
	// is deleted on the next pass — confirm which name is intended.
	deployment := v1.Deployment{}
	err := r.Get(ctx, client.ObjectKey{Namespace: job.Namespace, Name: job.Name}, &deployment)
	if apierrors.IsNotFound(err) {
		log.Print("no existing Deployment found, creating a new one")
		deployment = *buildDeployment(job)
		if err := r.Create(ctx, &deployment); err != nil {
			// BUG FIX: surface create failures instead of returning nil so
			// the reconcile is retried with backoff.
			log.Print("something wrong with creating deployment")
			return ctrl.Result{}, err
		}
		log.Print("successfully created deployment")
		return ctrl.Result{}, nil
	}
	if err != nil {
		// BUG FIX: surface unexpected Get failures instead of returning nil.
		log.Print("failed to get deployment")
		return ctrl.Result{}, err
	}
	log.Print("desired deployment already exists, checking the replica count")

	// Default to a single replica when spec.replicas is unset.
	expectReplicas := int32(1)
	if job.Spec.Replicas != nil {
		expectReplicas = *job.Spec.Replicas
	}
	// BUG FIX: the original compared pointer addresses
	// (deployment.Spec.Replicas != &expectReplicas), which is always true and
	// forced a spurious Update on every reconcile. Compare the values.
	if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != expectReplicas {
		log.Print("reconciling replica count to the desired state")
		deployment.Spec.Replicas = &expectReplicas
		if err := r.Update(ctx, &deployment); err != nil {
			log.Print("failed to reconcile replica count")
			return ctrl.Result{}, err
		}
		log.Print("successfully reconciled replica count")
	}

	// Mirror the observed readiness into the TestController's status.
	job.Status.ReadyReplicas = deployment.Status.ReadyReplicas
	if err := r.Status().Update(ctx, &job); err != nil {
		log.Print("failed to update TestController status")
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}

var (
	// serviceOwnerKey is the field-index key used to look up Deployments by
	// the name of their controlling TestController (see the List call in
	// Reconcile). NOTE(review): an index for this key must be registered
	// with the manager's FieldIndexer, or the MatchingFieldsSelector List
	// will fail at runtime — confirm it is registered in SetupWithManager.
	serviceOwnerKey = "metadata.controller"
	// Container image references for the pod template built in
	// buildDeployment. NOTE(review): image repository names must be
	// lowercase; "myServer"/"myClient" are not valid references — confirm
	// the real image names before deploying.
	serverImageName = "myServer"
	clientImageName = "myClient"
)

// buildDeployment constructs the Deployment a TestController should own:
// named and namespaced after the TestController itself, owner-referenced so
// the field index resolves it and deletion cascades, and running the
// client/server container pair. Replicas is copied verbatim from the spec
// (nil lets the API server default it).
func buildDeployment(job batchv1.TestController) *v1.Deployment {
	// Shared by the selector and the pod template; the only requirement is
	// that the two match.
	labels := map[string]string{
		"deploymentName": job.Spec.DeploymentName,
	}
	deployment := v1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      job.Name,
			Namespace: job.Namespace,
			// Mark the TestController as the controller of this Deployment.
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(&job, batchv1.GroupVersion.WithKind("TestController")),
			},
		},
		Spec: v1.DeploymentSpec{
			Replicas: job.Spec.Replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Template: core.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: core.PodSpec{
					Containers: []core.Container{
						{
							// BUG FIX: container names must be valid DNS-1123
							// labels (lowercase alphanumerics and '-'); the
							// original "clientContianer"/"serverContainer"
							// are rejected by the API server.
							Name:  "client-container",
							Image: clientImageName,
						},
						{
							Name:  "server-container",
							Image: serverImageName,
						},
					},
				},
			},
		},
	}
	return &deployment
}

// SetupWithManager sets up the controller with the Manager.
// It registers the serviceOwnerKey field index so Reconcile can list
// Deployments by their controlling TestController, and watches owned
// Deployments so changes to them trigger reconciliation.
func (r *TestControllerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// BUG FIX: Reconcile lists Deployments with a MatchingFieldsSelector on
	// serviceOwnerKey, but no index for that key was ever registered, so the
	// List fails at runtime. Index each Deployment by the name of its
	// controlling TestController owner.
	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1.Deployment{}, serviceOwnerKey,
		func(obj client.Object) []string {
			owner := metav1.GetControllerOf(obj.(*v1.Deployment))
			if owner == nil || owner.Kind != "TestController" || owner.APIVersion != batchv1.GroupVersion.String() {
				return nil
			}
			return []string{owner.Name}
		}); err != nil {
		return err
	}
	return ctrl.NewControllerManagedBy(mgr).
		For(&batchv1.TestController{}).
		// BUG FIX: also watch the Deployments this controller owns, so
		// external changes to them (e.g. scaling) are reconciled promptly.
		Owns(&v1.Deployment{}).
		Complete(r)
}
