// Copyright (c) 2024 Huawei Technologies Co., Ltd.
// openFuyao is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
//          http://license.coscl.org.cn/MulanPSL2
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
// See the Mulan PSL v2 for more details.

package controller

import (
	"context"
	"fmt"
	"github.com/agiledragon/gomonkey/v2"
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	coordinationv1 "k8s.io/api/coordination/v1"
	corev1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/client-go/kubernetes/scheme"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	v1 "openfuyao.com/npu-operator/api/v1"
	"reflect"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"strings"
	"testing"
	"volcano.sh/apis/pkg/apis/batch/v1alpha1"
	busv1alpha1 "volcano.sh/apis/pkg/apis/bus/v1alpha1"
	flowv1alpha1 "volcano.sh/apis/pkg/apis/flow/v1alpha1"
	nodeinfov1alpha1 "volcano.sh/apis/pkg/apis/nodeinfo/v1alpha1"
	"volcano.sh/apis/pkg/apis/scheduling"
)

// getInstance builds the sample NPUClusterPolicy the tests below reconcile
// against: every component is marked managed except Trainer.
func getInstance() *v1.NPUClusterPolicy {
	policy := &v1.NPUClusterPolicy{}
	policy.TypeMeta = metav1.TypeMeta{
		Kind:       "NPUClusterPolicy",
		APIVersion: v1.GroupVersion.String(),
	}
	policy.ObjectMeta = metav1.ObjectMeta{Name: "cluster"}
	policy.Spec = v1.NPUClusterPolicySpec{
		Driver:       v1.DriverSpec{Managed: true},
		OCIRuntime:   v1.OCIRuntimeSpec{Managed: true},
		VCScheduler:  v1.SchedulerSpec{Managed: true},
		VCController: v1.ControllerSpec{Managed: true},
		DevicePlugin: v1.DevicePluginSpec{Managed: true},
		Trainer:      v1.TrainerSpec{Managed: false},
		NodeD:        v1.NodeDSpec{Managed: true},
		ClusterD:     v1.ClusterDSpec{Managed: true},
		RSController: v1.RSControllerSpec{Managed: true},
		Exporter:     v1.ExporterSpec{Managed: true},
		MindIOTFT:    v1.MindIOTFTSpec{Managed: true},
		MindIOACP:    v1.MindIOACPSpec{Managed: true},
	}
	return policy
}

// getManagerAndReconciler builds a fake client (optionally customized by f and
// seeded with instance), a controller-runtime manager over that client's
// scheme, and a reconciler wired to the fake client. On manager-creation
// failure it returns a nil manager and a zero-value reconciler.
func getManagerAndReconciler(
	f func(*fake.ClientBuilder) *fake.ClientBuilder,
	instance *v1.NPUClusterPolicy,
) (ctrl.Manager, NPUClusterPolicyReconciler) {
	// Named sch/cl so the locals do not shadow the imported
	// "scheme" and "client" packages.
	sch := runtime.NewScheme()
	utilruntime.Must(v1.AddToScheme(sch))
	utilruntime.Must(corev1.AddToScheme(sch))
	utilruntime.Must(apiextensionsv1.AddToScheme(sch))
	utilruntime.Must(clientgoscheme.AddToScheme(sch))
	builder := fake.NewClientBuilder().WithScheme(sch)
	if f != nil {
		builder = f(builder)
	}
	if instance != nil {
		// Register the instance as both an object and a status subresource so
		// status updates through the fake client succeed.
		builder = builder.WithObjects(instance).WithStatusSubresource(instance)
	}
	cl := builder.Build()

	mgr, err := ctrl.NewManager(&rest.Config{}, ctrl.Options{
		Scheme: cl.Scheme(),
	})
	if err != nil {
		return nil, NPUClusterPolicyReconciler{}
	}
	r := NPUClusterPolicyReconciler{
		Client:   cl,
		Scheme:   cl.Scheme(),
		instance: instance,
	}
	return mgr, r
}

// TestClone checks that clone() returns a deep-equal copy of a
// typedComponentResource wrapping a Namespace.
func TestClone(t *testing.T) {
	ns := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: "test"},
	}
	res := typedComponentResource[corev1.Namespace, *corev1.Namespace]{
		obj:                             ns,
		componentResourceReconcileHooks: namespaceReconcileHooks{},
	}
	cases := []struct {
		name string
	}{
		{name: "ok"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cloned := res.clone()
			if !reflect.DeepEqual(cloned, res) {
				t.Errorf("clone() = %v, want %v", cloned, res)
			}
		})
	}
}

// TestGVK verifies that resource creators report a gvk string of the form
// "Kind.group/version" that round-trips back to the GroupVersionKind the
// resource was created with.
func TestGVK(t *testing.T) {
	test := []struct {
		name string
		obj  client.Object
		gvk  schema.GroupVersionKind
	}{
		{
			name: "deployment",
			obj: &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test",
				},
			},
			gvk: gvkDeployment,
		},
		{
			name: "daemonset",
			obj:  &appsv1.DaemonSet{},
			gvk:  gvkDaemonSet,
		},
	}
	for _, tt := range test {
		t.Run(tt.name, func(t *testing.T) {
			tt.obj.GetObjectKind().SetGroupVersionKind(tt.gvk)
			rc := resourceCreators[tt.gvk](tt.obj)
			got := rc.gvk()
			// Expected format: "<Kind>.<group>/<version>". Guard the splits so a
			// malformed string fails the test instead of panicking with an
			// index-out-of-range.
			parts := strings.Split(got, "/")
			if len(parts) != 2 {
				t.Fatalf("gvk() = %q, want format \"Kind.group/version\"", got)
			}
			gk, v := parts[0], parts[1]
			gkparts := strings.SplitN(gk, ".", 2)
			if len(gkparts) != 2 {
				t.Fatalf("gvk() = %q, missing \".\" between kind and group", got)
			}
			k, g := gkparts[0], gkparts[1]
			if g != tt.gvk.Group || k != tt.gvk.Kind || v != tt.gvk.Version {
				t.Errorf("no equal got %v, want %v", got, tt.gvk)
			}
		})
	}
}

// TestResourceReconcile drives resource reconciliation for every supported
// GVK through resourceCreators and expects a nil state and nil error for
// each case (want/wantErr are left at their zero values).
//
// Fixes over the previous version: the mindIOACP case was mislabeled
// "reconcile driver daemonset" (copy-paste), the second runtime-daemonset
// case duplicated an earlier subtest name, and "pog group" was a typo.
func TestResourceReconcile(t *testing.T) {
	tests := []struct {
		name        string
		gvk         schema.GroupVersionKind
		obj         client.Object
		managed     bool
		transforErr error
		inspectErr  error
		want        *v1.ComponentState
		wantErr     error
	}{
		{
			name: "reconcile namespace",
			obj: &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test",
					Namespace: "default",
				},
			},
			gvk:     gvkNamespace,
			managed: true,
		},
		{
			name: "reconcile daemonset",
			obj: &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{
				Name:      nodedDaemonsetName,
				Namespace: placeholderValue,
			}},
			gvk:     gvkDaemonSet,
			managed: true,
		},
		{
			name: "reconcile driver daemonset",
			obj: &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{
				Name:      driverDaemonSetName,
				Namespace: placeholderValue,
			}},
			gvk:     gvkDaemonSet,
			managed: false,
		},
		{
			name: "reconcile device plugin daemonset",
			obj: &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{
				Name:      devicePluginDaemonSetName,
				Namespace: placeholderValue,
			}},
			gvk:     gvkDaemonSet,
			managed: false,
		},
		{
			name: "reconcile exporter daemonset",
			obj: &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{
				Name:      exporterDaemonsetName,
				Namespace: placeholderValue,
			}},
			gvk:     gvkDaemonSet,
			managed: false,
		},
		{
			name: "reconcile mindio acp daemonset",
			obj: &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{
				Name:      mindIOACPDaemonsetName,
				Namespace: placeholderValue,
			}},
			gvk:     gvkDaemonSet,
			managed: false,
		},
		{
			name: "reconcile runtime daemonset",
			obj: &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{
				Name:      runtimeDaemonSetNamePrefix,
				Namespace: placeholderValue,
			}},
			gvk:     gvkDaemonSet,
			managed: false,
		},
		{
			name: "reconcile controller deployment",
			obj: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{
				Name:      vcControllerDeploymentName,
				Namespace: placeholderValue,
			},
			},
			gvk:     gvkDeployment,
			managed: false,
		},
		{
			name: "reconcile managed runtime daemonset",
			obj: &appsv1.DaemonSet{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "ascend-runtime-",
					Namespace: "def",
				},
				Status: appsv1.DaemonSetStatus{
					DesiredNumberScheduled: 1,
					NumberUnavailable:      0,
				},
				Spec: appsv1.DaemonSetSpec{
					Selector: &metav1.LabelSelector{
						MatchLabels: map[string]string{},
					},
					Template: corev1.PodTemplateSpec{
						Spec: corev1.PodSpec{
							NodeSelector: map[string]string{"openfuyao.com/container.runtime": "containerd"},
						},
					},
				},
			},
			gvk:     gvkDaemonSet,
			managed: true,
		},
		{
			name:    "reconcile CRD",
			obj:     &apiextensionsv1.CustomResourceDefinition{},
			gvk:     gvkCRD,
			managed: false,
		},
		{
			name:    "reconcile runtimeclass",
			obj:     &nodev1.RuntimeClass{},
			gvk:     gvkRuntimeClass,
			managed: false,
		},
		{
			name:    "reconcile serviceaccount",
			obj:     &corev1.ServiceAccount{},
			gvk:     gvkServiceAccount,
			managed: false,
		},
		{
			name:    "reconcile clusterrole",
			obj:     &rbacv1.ClusterRole{},
			gvk:     gvkClusterRole,
			managed: false,
		},
		{
			name:    "reconcile clusterrolebinding",
			obj:     &rbacv1.ClusterRoleBinding{},
			gvk:     gvkClusterRoleBinding,
			managed: false,
		},
		{
			name:    "reconcile servicemonitor",
			obj:     &monitoringv1.ServiceMonitor{},
			gvk:     gvkServiceMonitor,
			managed: false,
		},
		{
			name:    "reconcile service",
			obj:     &corev1.Service{},
			gvk:     gvkService,
			managed: false,
		},
		{
			name:    "reconcile lease",
			obj:     &coordinationv1.Lease{},
			gvk:     gvkLease,
			managed: false,
		},
		{
			name:    "reconcile configmap",
			obj:     &corev1.ConfigMap{},
			gvk:     gvkConfigMap,
			managed: false,
		},
		{
			name:    "reconcile volcano.batch.job",
			obj:     &v1alpha1.Job{},
			gvk:     gvkVolcanoBatchJob,
			managed: false,
		},
		{
			name:    "reconcile volcano.bus.command",
			obj:     &busv1alpha1.Command{},
			gvk:     gvkVolcanoBusCommand,
			managed: false,
		},
		{
			name:    "reconcile pod group",
			obj:     &scheduling.PodGroup{},
			gvk:     gvkSchedulingPodgroup,
			managed: false,
		},
		{
			name:    "reconcile queue",
			gvk:     gvkSchedulerQueue,
			obj:     &scheduling.Queue{},
			managed: false,
		},
		{
			gvk:     gvkNodeInfo,
			obj:     &nodeinfov1alpha1.Numatopology{},
			name:    "reconcile numatopology",
			managed: false,
		},
		{
			name:    "reconcile jobtemplate",
			gvk:     gvkJobTemplate,
			obj:     &flowv1alpha1.JobTemplate{},
			managed: false,
		},
		{
			name:    "reconcile jobflow",
			gvk:     gvkJobFlow,
			obj:     &flowv1alpha1.JobFlow{},
			managed: false,
		},
	}
	instance := getInstance()
	_, r := getManagerAndReconciler(nil, instance)
	// Pretend NPU hardware and NFD labels were detected so reconciliation of
	// hardware-gated components is exercised.
	r.hasNPUNodes = true
	r.hasNFDLabels = true
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.obj.GetObjectKind().SetGroupVersionKind(tt.gvk)
			res := resourceCreators[tt.gvk](tt.obj)
			state, err := res.reconcile(context.Background(), &r, tt.managed)
			assert.Equal(t, tt.want, state)
			assert.Equal(t, tt.wantErr, err)
		})
	}
}

// TestDaemonsetInspect checks the inspect hooks for Deployments and
// DaemonSets: with only a bare pod (no Running status) visible to the
// reconciler, both hooks are expected to report a "pending" state with
// reason PodNotRunning mentioning that pod.
func TestDaemonsetInspect(t *testing.T) {
	test := []struct {
		name    string
		obj     client.Object
		gvk     schema.GroupVersionKind
		want    *v1.ComponentState
		wantErr error
	}{
		{
			name: "deployment",
			obj: &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test",
				},
				Spec: appsv1.DeploymentSpec{
					Selector: &metav1.LabelSelector{
						MatchLabels: map[string]string{},
					},
				},
			},
			gvk:  gvkDeployment,
			want: &v1.ComponentState{Phase: "pending", Reason: "PodNotRunning", Message: "Pod pod1 ,state "},
		},
		{
			name: "daemonset",
			obj: &appsv1.DaemonSet{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "ok",
					Namespace: "def",
				},
				Status: appsv1.DaemonSetStatus{
					DesiredNumberScheduled: 1,
					NumberUnavailable:      0,
				},
				Spec: appsv1.DaemonSetSpec{
					Selector: &metav1.LabelSelector{
						MatchLabels: map[string]string{},
					},
				},
			},
			gvk:  gvkDaemonSet,
			want: &v1.ComponentState{Phase: "pending", Reason: "PodNotRunning", Message: "Pod pod1 ,state "},
		},
	}
	for _, tt := range test {
		t.Run(tt.name, func(t *testing.T) {
			// pod1 has no status, so inspect should classify it as not running.
			pod1 := &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod1",
				},
			}
			tt.obj.GetObjectKind().SetGroupVersionKind(tt.gvk)
			scheme := runtime.NewScheme()
			_ = corev1.AddToScheme(scheme)
			_ = clientgoscheme.AddToScheme(scheme)
			fakeClient := fake.NewClientBuilder().
				WithScheme(scheme).
				WithObjects(pod1, tt.obj).
				Build()
			r := mockReconciler()
			r.Client = fakeClient
			// Patch the reconciler's Get/List to no-ops that always succeed,
			// so inspect never fails on client lookups. NOTE(review): the
			// expected message still references pod1, presumably served by the
			// fake client rather than these stubs — confirm which path inspect
			// actually takes.
			a := gomonkey.ApplyMethod(r, "Get", func(_ *NPUClusterPolicyReconciler, _ context.Context, _ client.ObjectKey, _ client.Object, _ ...client.GetOption) error {
				return nil
			})
			b := gomonkey.ApplyMethod(r, "List", func(_ *NPUClusterPolicyReconciler, _ context.Context, _ client.ObjectList, _ ...client.ListOption) error {
				return nil
			})
			defer a.Reset()
			defer b.Reset()
			// Dispatch to the hook type matching the object's concrete type.
			switch obj := tt.obj.(type) {
			case *appsv1.DaemonSet:
				h := &daemonSetReconcileHooks{}
				state, err := h.inspect(context.Background(), r, obj)
				assert.Equal(t, tt.want, state)
				assert.Equal(t, tt.wantErr, err)
			case *appsv1.Deployment:
				h := &deploymentReconcileHooks{}
				state, err := h.inspect(context.Background(), r, obj)
				assert.Equal(t, tt.want, state)
				assert.Equal(t, tt.wantErr, err)
			case *corev1.Namespace:
				h := &namespaceReconcileHooks{}
				state, err := h.inspect(context.Background(), r, obj)
				assert.Equal(t, tt.want, state)
				assert.Equal(t, tt.wantErr, err)
			default:
				t.Fatalf("unsupported object type: %T", tt.obj)
			}
		})
	}
}

// TestDelete exercises the canDelete hook of defaultResourceReconcileHooks:
// deletion is allowed only when the object exists in the cluster and carries
// an OwnerReference whose UID matches the reconciler's instance.
func TestDelete(t *testing.T) {
	_ = corev1.AddToScheme(scheme.Scheme)

	owner := &v1.NPUClusterPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name: "cluster",
			UID:  uuid.NewUUID(),
		},
		TypeMeta: metav1.TypeMeta{
			Kind: "NPU ClusterPolicy",
		},
	}

	ctx := context.TODO()

	cases := []struct {
		name       string
		obj        *corev1.Namespace
		existing   client.Object
		ownerRef   bool
		shouldPass bool
	}{
		{
			name: "OwnerReference matches",
			obj: &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{Name: "test"},
			},
			existing: &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test",
					OwnerReferences: []metav1.OwnerReference{
						{
							Kind: "NPU ClusterPolicy",
							Name: "cluster",
							UID:  owner.UID,
						},
					},
				},
			},
			shouldPass: true,
		},
		{
			name: "OwnerReference does not match",
			obj: &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{Name: "test"},
			},
			existing: &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test",
					OwnerReferences: []metav1.OwnerReference{
						{
							Kind: "OtherKind",
							Name: "other",
							UID:  "xxxx",
						},
					},
				},
			},
			shouldPass: false,
		},
		{
			name: "No OwnerReference",
			obj: &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{Name: "test"},
			},
			existing: &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{Name: "test"},
			},
			shouldPass: false,
		},
		{
			name: "NotFound error",
			obj: &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{Name: "not-exist"},
			},
			existing:   nil,
			shouldPass: false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			b := fake.NewClientBuilder().WithScheme(scheme.Scheme)
			if tc.existing != nil {
				b = b.WithObjects(tc.existing)
			}
			fakeClient := b.Build()

			r := &NPUClusterPolicyReconciler{
				Client:    fakeClient,
				Scheme:    scheme.Scheme,
				namespace: "default",
				instance:  owner,
			}
			hook := defaultResourceReconcileHooks[*corev1.Namespace]{}
			got, ok := hook.canDelete(ctx, r, tc.obj)

			// Guard-clause style: bail out on the negative expectation first.
			if !tc.shouldPass {
				assert.False(t, ok)
				assert.Nil(t, got)
				return
			}
			assert.True(t, ok)
			assert.NotNil(t, got)
		})
	}
}

// Test_runtimeDaemonSetTransformer verifies that the transformer returns
// errSkipReconciliation for daemonsets whose runtime suffix is not among the
// reconciler's detected runtimes, and otherwise sets the runtime node
// selector on the pod template (leaving the endpoint annotation empty).
func Test_runtimeDaemonSetTransformer(t *testing.T) {
	cases := []struct {
		name       string
		dsName     string
		runtimes   map[string]struct{}
		expectSkip bool
	}{
		{
			name:     "runtime exists",
			dsName:   "ascend-runtime-containerd",
			runtimes: map[string]struct{}{"containerd": {}},
		},
		{
			name:       "runtime not found",
			dsName:     "ascend-runtime-docker",
			runtimes:   map[string]struct{}{"containerd": {}},
			expectSkip: true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			daemonSet := &appsv1.DaemonSet{
				ObjectMeta: metav1.ObjectMeta{Name: tc.dsName},
			}
			reconciler := &NPUClusterPolicyReconciler{runtimes: tc.runtimes}

			err := runtimeDaemonSetTransformer(context.TODO(), reconciler, daemonSet)

			if tc.expectSkip {
				assert.Equal(t, errSkipReconciliation, err)
				return
			}
			assert.NoError(t, err)
			wantRuntime := strings.TrimPrefix(tc.dsName, runtimeDaemonSetNamePrefix)
			assert.Equal(t, wantRuntime, daemonSet.Spec.Template.Spec.NodeSelector[runtimeLabelKey])
			assert.Equal(t, "", daemonSet.Spec.Template.Annotations[runtimeEndpointAnnotationKey])
		})
	}
}

// Test_runtimeDaemonSetInspector verifies that the inspector annotates pods
// selected by the daemonset with the runtime endpoint recorded for the node
// they run on, and leaves pods on nodes absent from the endpoint map alone.
func Test_runtimeDaemonSetInspector(t *testing.T) {
	const (
		nodeName   = "npu-node"
		podName    = "npu-pod"
		endpoint   = "unix:///var/run/containerd.sock"
		ns         = "test-ns"
		labelKey   = "app"
		labelValue = "npu"
	)

	selector := map[string]string{labelKey: labelValue}

	cases := []struct {
		name          string
		pods          []corev1.Pod
		nodeEndpoints map[string]string
		expectUpdated bool
		expectErr     bool
	}{
		{
			name: "pod matches node endpoint and gets annotation",
			pods: []corev1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      podName,
						Namespace: ns,
						Labels:    selector,
					},
					Spec: corev1.PodSpec{
						NodeName: nodeName,
					},
				},
			},
			nodeEndpoints: map[string]string{
				nodeName: endpoint,
			},
			expectUpdated: true,
		},
		{
			name: "pod node not in endpoint map, no update",
			pods: []corev1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      podName,
						Namespace: ns,
						Labels:    selector,
					},
					Spec: corev1.PodSpec{
						NodeName: "unknown-node",
					},
				},
			},
			nodeEndpoints: map[string]string{
				nodeName: endpoint,
			},
			expectUpdated: false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			_ = corev1.AddToScheme(scheme.Scheme)

			// Seed the fake client with pointers to the table's pods.
			objs := make([]client.Object, 0, len(tc.pods))
			for i := range tc.pods {
				objs = append(objs, &tc.pods[i])
			}

			fakeClient := fake.NewClientBuilder().
				WithScheme(scheme.Scheme).
				WithObjects(objs...).
				Build()

			reconciler := &NPUClusterPolicyReconciler{
				Client:               fakeClient,
				Scheme:               scheme.Scheme,
				nodeRuntimeEndpoints: tc.nodeEndpoints,
				namespace:            ns,
			}

			ds := &appsv1.DaemonSet{
				Spec: appsv1.DaemonSetSpec{
					Selector: &metav1.LabelSelector{MatchLabels: selector},
				},
			}

			err := runtimeDaemonSetInspector(context.TODO(), reconciler, ds)
			if tc.expectErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}

			if !tc.expectUpdated {
				return
			}
			// Re-read the pod and confirm the endpoint annotation landed.
			var got corev1.Pod
			err = fakeClient.Get(context.TODO(), client.ObjectKey{Namespace: ns, Name: podName}, &got)
			assert.NoError(t, err)
			assert.Equal(t, endpoint, got.Annotations[runtimeEndpointAnnotationKey])
		})
	}
}

// mockLog is a log-config provider for transformer tests; it returns a fixed
// LogRotate configuration regardless of the spec.
func mockLog(*v1.NPUClusterPolicySpec) *v1.LogRotate {
	rotate := &v1.LogRotate{}
	rotate.LogLevel = v1.LogInfo
	rotate.LogFile = "/var/log/npu.log"
	rotate.Rotate = 5
	rotate.MaxAge = 7
	rotate.Compress = true
	return rotate
}

func Test_LogRotateTransformer_DaemonSet(t *testing.T) {
	ds := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
		},
		Spec: appsv1.DaemonSetSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name: "npu",
							Env: []corev1.EnvVar{
								{Name: logRotateLogLevelEnvName, Value: "old"},
								{Name: logRotateLogFileEnvName, Value: "old"},
								{Name: logRotateRotateEnvName, Value: "old"},
								{Name: logRotateMaxAgeEnvName, Value: "old"},
								{Name: logRotateCompressEnvName, Value: "old"},
							},
						},
					},
				},
			},
		},
	}

	r := mockReconciler()
	r.instance = getInstance()
	transformer := newWorkloadLogRotateTransformer[appsv1.DaemonSet](mockLog)
	err := transformer(context.TODO(), r, ds)
	assert.NoError(t, err)
	envMap := map[string]string{}
	for _, env := range ds.Spec.Template.Spec.Containers[0].Env {
		envMap[env.Name] = env.Value
	}
	assert.Equal(t, fmt.Sprintf("%d", v1.LogInfoValue), envMap[logRotateLogLevelEnvName])
	assert.Equal(t, "/var/log/npu.log", envMap[logRotateLogFileEnvName])
	assert.Equal(t, "5", envMap[logRotateRotateEnvName])
	assert.Equal(t, "7", envMap[logRotateMaxAgeEnvName])
	assert.Equal(t, "true", envMap[logRotateCompressEnvName])
}

// Test_configMapReconcileHooks_transform_volcanoConfig verifies that the
// configmap transform rewrites the volcano scheduler config so the NPU plugin
// line matches the operator node's architecture and the detected volcano
// version, replacing any stale plugin line.
func Test_configMapReconcileHooks_transform_volcanoConfig(t *testing.T) {
	const (
		configMapName          = "volcano-scheduler-configmap"
		namespace              = "test-ns"
		nodeName               = "node01"
		arch                   = "amd64"
		volcanoVersion         = "7.1.RC1"
		expectedLine           = "  - name: volcano-npu_7.1.RC1_linux-x86_64"
		initialSchedulerConfig = `actions: "enqueue, allocate"
tiers:
- plugins:
  - name: volcano-npu_OLD_linux-aarch64`
	)
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      configMapName,
			Namespace: namespace,
		},
		Data: map[string]string{},
	}
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: nodeName},
		Status: corev1.NodeStatus{
			NodeInfo: corev1.NodeSystemInfo{Architecture: arch},
		},
	}
	sch := scheme.Scheme
	_ = corev1.AddToScheme(sch)
	_ = clientgoscheme.AddToScheme(sch)
	_ = v1.AddToScheme(sch)
	fakeClient := fake.NewClientBuilder().WithScheme(sch).WithObjects(cm, node).Build()
	reconciler := &NPUClusterPolicyReconciler{
		Client:    fakeClient,
		Scheme:    sch,
		namespace: namespace,
		instance: &v1.NPUClusterPolicy{
			Spec: v1.NPUClusterPolicySpec{
				VCScheduler: v1.SchedulerSpec{
					SchedulerConfigMap: initialSchedulerConfig,
				},
			},
		},
	}
	// The transform reads the operator's node name from the environment.
	t.Setenv("OPERATOR_NODE_NAME", nodeName)
	// Pin the detected volcano version so the expected plugin line is stable.
	verPatch := gomonkey.ApplyFuncReturn(ObtainVolcanoVersion, volcanoVersion, nil)
	defer verPatch.Reset()
	hooks := configMapReconcileHooks{}
	assert.NoError(t, hooks.transform(context.TODO(), reconciler, cm))
	result := cm.Data["volcano-scheduler.conf"]
	t.Logf("configmap:\n%s", result)
	assert.NotEmpty(t, result, "ConfigMap should not be empty")
	assert.Contains(t, result, expectedLine)
	assert.NotContains(t, result, "volcano-npu_OLD_linux-aarch64")
}
