/*
Copyright 2024 KubeAGI.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package evaluation

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/env"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/kubeagi/arcadia/api/base/v1alpha1"
	evav1alpha1 "github.com/kubeagi/arcadia/api/evaluation/v1alpha1"
	"github.com/kubeagi/arcadia/pkg/config"
	"github.com/kubeagi/arcadia/pkg/llms"
	"github.com/kubeagi/arcadia/pkg/utils"
)

const (
	// defaultPVCMountPath is the mount point of the shared evaluation PVC in
	// every phase's container; all phases exchange files through it.
	defaultPVCMountPath = "/data/evaluations"
	// defaultTestRagFile is the merged CSV test dataset produced by the
	// generate phase and consumed by the judge phase.
	defaultTestRagFile  = "ragas.csv"
	// defaultMCImage is the MinIO client image used to upload results.
	defaultMCImage      = "kubeagi/minio-mc:RELEASE.2023-01-28T20-29-38Z"
	// defaultEvalImage provides arctl/kubeagi-cli for the download, generate
	// and judge phases.
	defaultEvalImage    = "kubeagi/arcadia-eval:v0.2.0"

	// The clusterrolebinding required for the rag evaluation process is ragas-eval-clusterrolebinding by default,
	// and can be changed via environment variable RAG_EVAL_CLUSTERROLEBINDING.
	RAGClusterRoleBindingEnv = "RAG_EVAL_CLUSTERROLEBINDING"
	RAGJobClusterRoleBinding = "ragas-eval-clusterrolebinding"

	// The serviceaccount used by the job during rag evaluation, which is ragas-eval-sa by default,
	// can be changed via the environment variable RAG_EVAL_SERVICEACCOUNT.
	RAGServiceAccountEnv = "RAG_EVAL_SERVICEACCOUNT"
	RAGJobServiceAccount = "ragas-eval-sa"
)

// PhaseJobName returns the deterministic name of the Job that executes the
// given phase for a RAG instance, in the form "<rag-name>-phase-<phase>".
func PhaseJobName(instance *evav1alpha1.RAG, phase evav1alpha1.RAGPhase) string {
	return instance.Name + "-phase-" + string(phase)
}

// DownloadJob assembles the Job for the download-files phase of a RAG
// evaluation. It runs `arctl eval download` in the evaluation image to fetch
// the evaluated application's dataset files into the shared PVC mounted at
// defaultPVCMountPath. The error result is always nil; it exists so the
// function matches the common job-generator signature.
func DownloadJob(instance *evav1alpha1.RAG) (*batchv1.Job, error) {
	arctlArgs := []string{
		fmt.Sprintf("-n=%s", instance.Namespace),
		"eval", "download",
		fmt.Sprintf("--rag=%s", instance.Name),
		fmt.Sprintf("--application=%s", instance.Spec.Application.Name),
		fmt.Sprintf("--dir=%s", defaultPVCMountPath),
		fmt.Sprintf("--system-conf-namespace=%s", utils.GetCurrentNamespace()),
		fmt.Sprintf("--system-conf-name=%s", env.GetString(config.EnvConfigKey, config.EnvConfigDefaultValue)),
	}

	// Shared PVC (named after the RAG instance) that every phase reads from
	// and writes to.
	dataVolume := v1.Volume{
		Name: "data",
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: instance.Name,
				ReadOnly:  false,
			},
		},
	}

	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: instance.Namespace,
			Name:      PhaseJobName(instance, evav1alpha1.DownloadFilesPhase),
			Labels:    map[string]string{evav1alpha1.EvaluationJobLabels: instance.Name},
		},
		Spec: batchv1.JobSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					RestartPolicy:      v1.RestartPolicyNever,
					ServiceAccountName: instance.Spec.ServiceAccountName,
					Containers: []v1.Container{{
						Name:    "download-dataset-files",
						Image:   defaultEvalImage,
						Command: []string{"arctl"},
						Args:    arctlArgs,
						VolumeMounts: []v1.VolumeMount{{
							Name:      "data",
							MountPath: defaultPVCMountPath,
						}},
					}},
					Volumes: []v1.Volume{dataVolume},
				},
			},
			BackoffLimit: pointer.Int32(1),
			Completions:  pointer.Int32(1),
			Parallelism:  pointer.Int32(1),
			Suspend:      &instance.Spec.Suspend,
		},
	}, nil
}

// GenTestDataJob assembles the Job for the generate-test-files phase. It runs
// `arctl eval gen_test_dataset` over the files previously downloaded into the
// shared PVC and merges the output into a single CSV (defaultTestRagFile) for
// the judge phase. The error result is always nil; it exists so the function
// matches the common job-generator signature.
func GenTestDataJob(instance *evav1alpha1.RAG) (*batchv1.Job, error) {
	arctlArgs := []string{
		fmt.Sprintf("-n=%s", instance.Namespace),
		"eval", "gen_test_dataset",
		fmt.Sprintf("--application=%s", instance.Spec.Application.Name),
		fmt.Sprintf("--input-dir=%s", defaultPVCMountPath),
		"--output=csv",
		"--merge=true",
		fmt.Sprintf("--merge-file=%s", filepath.Join(defaultPVCMountPath, defaultTestRagFile)),
	}

	// Shared PVC (named after the RAG instance) that every phase reads from
	// and writes to.
	dataVolume := v1.Volume{
		Name: "data",
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: instance.Name,
				ReadOnly:  false,
			},
		},
	}

	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: instance.Namespace,
			Name:      PhaseJobName(instance, evav1alpha1.GenerateTestFilesPhase),
			Labels:    map[string]string{evav1alpha1.EvaluationJobLabels: instance.Name},
		},
		Spec: batchv1.JobSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					RestartPolicy:      v1.RestartPolicyNever,
					ServiceAccountName: instance.Spec.ServiceAccountName,
					Containers: []v1.Container{{
						Name:    "gen-test-files",
						Image:   defaultEvalImage,
						Command: []string{"arctl"},
						Args:    arctlArgs,
						VolumeMounts: []v1.VolumeMount{{
							Name:      "data",
							MountPath: defaultPVCMountPath,
						}},
					}},
					Volumes: []v1.Volume{dataVolume},
				},
			},
			BackoffLimit: pointer.Int32(1),
			Completions:  pointer.Int32(1),
			Parallelism:  pointer.Int32(1),
			Suspend:      &instance.Spec.Suspend,
		},
	}, nil
}

// JudgeJobGenerator returns a factory that builds the Job for the judge-LLM
// phase of a RAG evaluation. The Job runs `kubeagi-cli evaluate` against the
// merged test dataset (defaultTestRagFile) in the shared PVC, using the judge
// LLM referenced by instance.Spec.JudgeLLM and the cluster's system embedder.
//
// The judge LLM is looked up in the namespace from JudgeLLM.Namespace, falling
// back to the instance's own namespace. Its API base URL, auth key and model
// name are passed to the evaluator via CLI flags.
func JudgeJobGenerator(ctx context.Context, c client.Client) func(*evav1alpha1.RAG) (*batchv1.Job, error) {
	return func(instance *evav1alpha1.RAG) (*batchv1.Job, error) {
		var (
			apiBase, model, apiKey string
			err                    error
		)
		llm := v1alpha1.LLM{}
		ns := instance.Namespace
		if instance.Spec.JudgeLLM.Namespace != nil {
			ns = *instance.Spec.JudgeLLM.Namespace
		}
		// Use the caller-supplied ctx (previously context.TODO()) so that
		// cancellation and deadlines propagate to the API server call, as
		// already done for AuthAPIKey below.
		if err = c.Get(ctx, types.NamespacedName{Namespace: ns, Name: instance.Spec.JudgeLLM.Name}, &llm); err != nil {
			return nil, err
		}

		apiBase = llm.Get3rdPartyLLMBaseURL()
		apiKey, err = llm.AuthAPIKey(ctx, c)
		if err != nil {
			return nil, err
		}

		// Pick a provider-specific default model; it is overridden below when
		// the LLM resource declares explicit models.
		switch llm.Spec.Type {
		case llms.OpenAI:
			// Fixed from "gtp4", which is not a valid OpenAI model name.
			model = "gpt-4"
		case llms.ZhiPuAI:
			model = "glm-4"
		case llms.Gemini:
			model = "gemini-pro"
		default:
			return nil, fmt.Errorf("unsupported llm type %s", llm.Spec.Type)
		}
		if r := llm.Get3rdPartyModels(); len(r) > 0 {
			model = r[0]
		}

		// Metric kinds selected on the RAG instance, joined into a comma list
		// for the --metrics flag.
		metrics := make([]string, 0, len(instance.Spec.Metrics))
		for _, m := range instance.Spec.Metrics {
			metrics = append(metrics, string(m.Kind))
		}
		systemEmbedder, _, err := config.GetSystemEmbeddingSuite(ctx)
		if err != nil {
			return nil, err
		}
		embedderModelList := systemEmbedder.GetModelList()
		if len(embedderModelList) == 0 {
			return nil, fmt.Errorf("embedder don't have a model")
		}
		job := &batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: instance.Namespace,
				Name:      PhaseJobName(instance, evav1alpha1.JudgeLLMPhase),
				Labels: map[string]string{
					evav1alpha1.EvaluationJobLabels: instance.Name,
				},
			},

			Spec: batchv1.JobSpec{
				Template: v1.PodTemplateSpec{
					Spec: v1.PodSpec{
						RestartPolicy:      v1.RestartPolicyNever,
						ServiceAccountName: instance.Spec.ServiceAccountName,
						Containers: []v1.Container{
							{
								Name:       "judge-llm",
								Image:      defaultEvalImage,
								WorkingDir: defaultPVCMountPath,
								Command: []string{
									"kubeagi-cli",
								},
								Args: []string{
									"evaluate",
									fmt.Sprintf("--apibase=%s", apiBase),
									fmt.Sprintf("--embedding-apibase=%s", apiBase),
									fmt.Sprintf("--llm-model=%s", model),
									fmt.Sprintf("--apikey=%s", apiKey),
									fmt.Sprintf("--embedding-apikey=%s", apiKey),
									fmt.Sprintf("--dataset=%s", filepath.Join(defaultPVCMountPath, defaultTestRagFile)),
									fmt.Sprintf("--metrics=%s", strings.Join(metrics, ",")),
									fmt.Sprintf("--embedding-model=%s", embedderModelList[0]),
								},
								VolumeMounts: []v1.VolumeMount{
									{
										Name:      "data",
										MountPath: defaultPVCMountPath,
									},
								},
							},
						},
						// Shared PVC (named after the RAG instance) carrying
						// the dataset and the evaluation output.
						Volumes: []v1.Volume{
							{
								Name: "data",
								VolumeSource: v1.VolumeSource{
									PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
										ClaimName: instance.Name,
										ReadOnly:  false,
									},
								},
							},
						},
					},
				},
				BackoffLimit: pointer.Int32(1),
				Completions:  pointer.Int32(1),
				Parallelism:  pointer.Int32(1),
				Suspend:      &instance.Spec.Suspend,
			},
		}
		return job, nil
	}
}

// UploadJobGenerator returns a factory that builds the Job for the
// upload-files phase. The Job uses the MinIO client (mc) to copy everything
// under the shared PVC to the system datasource bucket at
// oss/<namespace>/evals/<application>/<rag-name>, authenticating with the
// datasource's auth secret exposed through environment variables.
func UploadJobGenerator(ctx context.Context, cli client.Client) func(*evav1alpha1.RAG) (*batchv1.Job, error) {
	return func(instance *evav1alpha1.RAG) (*batchv1.Job, error) {
		datasource, err := config.GetSystemDatasource(ctx)
		if err != nil {
			return nil, err
		}
		// Build the endpoint URL with the scheme implied by the Insecure flag.
		scheme := "https://"
		if datasource.Spec.Endpoint.Insecure {
			scheme = "http://"
		}
		endpoint := scheme + datasource.Spec.Endpoint.URL

		// The auth secret may live in a different namespace than the datasource.
		authNS := datasource.Namespace
		if datasource.Spec.Endpoint.AuthSecret.Namespace != nil {
			authNS = *datasource.Spec.Endpoint.AuthSecret.Namespace
		}
		authData, err := datasource.Spec.Endpoint.AuthData(ctx, authNS, cli)
		if err != nil {
			return nil, err
		}

		uploadScript := fmt.Sprintf(`echo "upload result"
mc alias set oss $MINIO_ENDPOINT $MINIO_ACCESS_KEY $MINIO_SECRET_KEY --insecure
mc --insecure cp -r %s/ oss/%s/evals/%s/%s`, defaultPVCMountPath, instance.Namespace, instance.Spec.Application.Name, instance.Name)

		return &batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: instance.Namespace,
				Name:      PhaseJobName(instance, evav1alpha1.UploadFilesPhase),
				Labels:    map[string]string{evav1alpha1.EvaluationJobLabels: instance.Name},
			},
			Spec: batchv1.JobSpec{
				Template: v1.PodTemplateSpec{
					Spec: v1.PodSpec{
						RestartPolicy:      v1.RestartPolicyNever,
						ServiceAccountName: instance.Spec.ServiceAccountName,
						Containers: []v1.Container{{
							Name:    "upload-result",
							Image:   defaultMCImage,
							Command: []string{"/bin/bash", "-c", uploadScript},
							VolumeMounts: []v1.VolumeMount{{
								Name:      "data",
								MountPath: defaultPVCMountPath,
							}},
							Env: []v1.EnvVar{
								{Name: "MINIO_ENDPOINT", Value: endpoint},
								{Name: "MINIO_ACCESS_KEY", Value: string(authData["rootUser"])},
								{Name: "MINIO_SECRET_KEY", Value: string(authData["rootPassword"])},
							},
						}},
						Volumes: []v1.Volume{{
							Name: "data",
							VolumeSource: v1.VolumeSource{
								PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
									ClaimName: instance.Name,
									ReadOnly:  false,
								},
							},
						}},
					},
				},
				BackoffLimit: pointer.Int32(1),
				Completions:  pointer.Int32(1),
				Parallelism:  pointer.Int32(1),
				Suspend:      &instance.Spec.Suspend,
			},
		}, nil
	}
}
