// Copyright (c) 2024 Huawei Technologies Co., Ltd.
// openFuyao is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
//          http://license.coscl.org.cn/MulanPSL2
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
// See the Mulan PSL v2 for more details.

package controller

import (
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"hash/fnv"
	"maps"
	"os"
	"reflect"
	"strings"

	"volcano.sh/apis/pkg/apis/batch/v1alpha1"
	busv1alpha1 "volcano.sh/apis/pkg/apis/bus/v1alpha1"
	flowv1alpha1 "volcano.sh/apis/pkg/apis/flow/v1alpha1"
	nodeinfov1alpha1 "volcano.sh/apis/pkg/apis/nodeinfo/v1alpha1"
	"volcano.sh/apis/pkg/apis/scheduling"

	"github.com/davecgh/go-spew/spew"
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	appsv1 "k8s.io/api/apps/v1"
	coordinationv1 "k8s.io/api/coordination/v1"
	corev1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	v1 "openfuyao.com/npu-operator/api/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

const (
	// hashAnnotationKey stores a hash of the last applied workload spec so
	// reconciliation can skip redundant updates (see workloadHashTransformer).
	hashAnnotationKey = "openfuyao.com/last-applied-hash"
	// placeholderValue marks fields in bundled manifests that the operator
	// fills in at reconcile time (namespace, runtime class, command, args).
	placeholderValue  = "FILLED_BY_OPERATOR"

	// Well-known names of the workloads managed by this operator.
	operatorName                  = "npu-operator"
	runtimeDaemonSetNamePrefix    = "ascend-runtime-"
	driverDaemonSetName           = "npu-driver"
	devicePluginDaemonSetName     = "ascend-device-plugin"
	trainerDeploymentName         = "ascend-operator-manager"
	clusterdDeploymentName        = "clusterd"
	nodedDaemonsetName            = "noded"
	exporterDaemonsetName         = "npu-exporter"
	rsControllerDeploymentName    = "resilience-controller"
	vcControllerDeploymentName    = "volcano-controllers"
	vcSchedulerDeploymentName     = "volcano-scheduler"
	mindIOACPDaemonsetName        = "mindio-acp"
	mindIOTFTDaemonsetName        = "mindio-tft"
	volcanoSchedulerConfigMapName = "volcano-scheduler-configmap"

	// Environment variable names through which log-rotation settings are
	// injected into managed containers (see newWorkloadLogRotateTransformer).
	logRotateLogLevelEnvName = "NPU_OPERATOR_LOG_LEVEL"
	logRotateLogFileEnvName  = "NPU_OPERATOR_LOG_FILE"
	logRotateRotateEnvName   = "NPU_OPERATOR_ROTATE"
	logRotateMaxAgeEnvName   = "NPU_OPERATOR_MAX_AGE"
	logRotateCompressEnvName = "NPU_OPERATOR_COMPRESS"

	operatorNodeEnvName = "OPERATOR_NODE_NAME"
)

var (
	// GroupVersionKinds of every resource type the operator can manage.
	// Used as keys of resourceCreators below.
	gvkNamespace = schema.GroupVersionKind{
		Version: "v1",
		Kind:    "Namespace",
	}
	gvkRuntimeClass = schema.GroupVersionKind{
		Group:   "node.k8s.io",
		Version: "v1",
		Kind:    "RuntimeClass",
	}
	gvkDaemonSet = schema.GroupVersionKind{
		Group:   "apps",
		Version: "v1",
		Kind:    "DaemonSet",
	}
	gvkDeployment = schema.GroupVersionKind{
		Group:   "apps",
		Version: "v1",
		Kind:    "Deployment",
	}
	gvkServiceAccount = schema.GroupVersionKind{
		Version: "v1",
		Kind:    "ServiceAccount",
	}
	gvkClusterRole = schema.GroupVersionKind{
		Group:   "rbac.authorization.k8s.io",
		Version: "v1",
		Kind:    "ClusterRole",
	}
	gvkClusterRoleBinding = schema.GroupVersionKind{
		Group:   "rbac.authorization.k8s.io",
		Version: "v1",
		Kind:    "ClusterRoleBinding",
	}
	gvkServiceMonitor = schema.GroupVersionKind{
		Group:   "monitoring.coreos.com",
		Version: "v1",
		Kind:    "ServiceMonitor",
	}
	gvkService = schema.GroupVersionKind{
		Version: "v1",
		Kind:    "Service",
	}
	gvkLease = schema.GroupVersionKind{
		Group:   "coordination.k8s.io",
		Version: "v1",
		Kind:    "Lease",
	}
	gvkVolcanoBatchJob = schema.GroupVersionKind{
		Group:   "batch.volcano.sh",
		Version: "v1alpha1",
		Kind:    "Job",
	}
	gvkVolcanoBusCommand = schema.GroupVersionKind{
		Group:   "bus.volcano.sh",
		Version: "v1alpha1",
		Kind:    "Command",
	}
	gvkSchedulingPodgroup = schema.GroupVersionKind{
		Group:   "scheduling.volcano.sh",
		Version: "v1beta1",
		Kind:    "PodGroup",
	}
	gvkSchedulerQueue = schema.GroupVersionKind{
		Group:   "scheduling.volcano.sh",
		Version: "v1beta1",
		Kind:    "Queue",
	}
	gvkNodeInfo = schema.GroupVersionKind{
		Group:   "nodeinfo.volcano.sh",
		Version: "v1alpha1",
		Kind:    "Numatopology",
	}
	gvkJobTemplate = schema.GroupVersionKind{
		Group:   "flow.volcano.sh",
		Version: "v1alpha1",
		Kind:    "JobTemplate",
	}
	gvkJobFlow = schema.GroupVersionKind{
		Group:   "flow.volcano.sh",
		Version: "v1alpha1",
		Kind:    "JobFlow",
	}
	gvkConfigMap = schema.GroupVersionKind{
		Version: "v1",
		Kind:    "ConfigMap",
	}
	gvkCRD = schema.GroupVersionKind{
		Group:   "apiextensions.k8s.io",
		Version: "v1",
		Kind:    "CustomResourceDefinition",
	}

	// resourceCreators maps each supported GVK to a factory that wraps a
	// decoded manifest object together with the type-specific reconcile
	// hooks. Each factory asserts the concrete type, so callers must pass
	// an object whose type matches the GVK key.
	resourceCreators = map[schema.GroupVersionKind]func(client.Object) componentResource{
		gvkCRD: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*apiextensionsv1.CustomResourceDefinition), CRDReconcileHooks{})
		},
		gvkNamespace: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*corev1.Namespace), namespaceReconcileHooks{})
		},
		gvkRuntimeClass: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*nodev1.RuntimeClass), runtimeClassReconcileHooks{})
		},
		gvkDaemonSet: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*appsv1.DaemonSet), newDaemonSetReconcileHooks(obj))
		},
		gvkDeployment: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*appsv1.Deployment), newDeploymentReconcileHooks(obj))
		},
		gvkServiceAccount: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*corev1.ServiceAccount), serviceAccountReconcileHooks{})
		},
		gvkClusterRole: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*rbacv1.ClusterRole), clusterRoleReconcileHooks{})
		},
		gvkClusterRoleBinding: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*rbacv1.ClusterRoleBinding), clusterRoleBindingReconcileHooks{})
		},
		gvkServiceMonitor: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*monitoringv1.ServiceMonitor), serviceMoniterReconcileHooks{})
		},
		gvkService: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*corev1.Service), serviceReconcileHooks{})
		},
		gvkLease: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*coordinationv1.Lease), leaseReconcileHooks{})
		},
		gvkConfigMap: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*corev1.ConfigMap), configMapReconcileHooks{})
		},
		gvkVolcanoBatchJob: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*v1alpha1.Job), jobReconcileHooks{})
		},
		gvkVolcanoBusCommand: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*busv1alpha1.Command), commandReconcileHooks{})
		},
		gvkSchedulingPodgroup: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*scheduling.PodGroup), podGroupReconcileHooks{})
		},
		gvkSchedulerQueue: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*scheduling.Queue), queueReconcileHooks{})
		},
		gvkNodeInfo: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*nodeinfov1alpha1.Numatopology), numatopologyReconcileHooks{})
		},
		gvkJobTemplate: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*flowv1alpha1.JobTemplate), jobTemplateReconcileHooks{})
		},
		gvkJobFlow: func(obj client.Object) componentResource {
			return newComponentResource(obj.(*flowv1alpha1.JobFlow), jobFlowReconcileHooks{})
		},
	}

	// Sentinel errors used by transform hooks to signal control flow to
	// reconcile: skip just the create/update step, or the whole pass.
	errSkipCreation       = errors.New("skip resource creation")
	errSkipReconciliation = errors.New("skip resource reconciliation")
)

// componentResource is a single managed Kubernetes resource that knows how to
// reconcile itself against the cluster. Implementations are created through
// resourceCreators and are cloned before each reconcile pass.
type componentResource interface {
	clone() componentResource
	gvk() string
	reconcile(ctx context.Context, r *NPUClusterPolicyReconciler, isManaged bool) (*v1.ComponentState, error)
}

// componentResourceReconcileHooks customizes the reconcile behavior for a
// concrete resource type T.
type componentResourceReconcileHooks[T any] interface {
	canDelete(context.Context, *NPUClusterPolicyReconciler, *T) (client.Object, bool)     // reports whether the live resource is owned by this operator instance and may be deleted
	transform(context.Context, *NPUClusterPolicyReconciler, *T) error                     // mutates the desired object (e.g. sets the OwnerReference marking the policy instance as controller)
	inspect(context.Context, *NPUClusterPolicyReconciler, *T) (*v1.ComponentState, error) // checks component health after create/update
}

type (
	// componentResourceTransformer mutates a desired resource before it is applied.
	componentResourceTransformer[T any] func(context.Context, *NPUClusterPolicyReconciler, *T) error
	// componentResourceInspector performs an extra health/side-effect check on a live resource.
	componentResourceInspector[T any]   func(context.Context, *NPUClusterPolicyReconciler, *T) error
)

// clientObject constrains PT to be *T while also implementing client.Object,
// letting generic code work with typed Kubernetes objects by pointer.
type clientObject[T any] interface {
	*T
	client.Object
}

// typedComponentResource pairs a typed resource object with its reconcile
// hooks; it is the sole implementation of componentResource.
type typedComponentResource[T any, PT clientObject[T]] struct {
	componentResourceReconcileHooks[T]
	obj PT
}

// clone returns a deep copy of the resource so a reconcile pass can mutate
// the copy without touching the cached manifest template.
func (res typedComponentResource[T, PT]) clone() componentResource {
	copied := res.obj.DeepCopyObject().(PT)
	out := typedComponentResource[T, PT]{
		componentResourceReconcileHooks: res.componentResourceReconcileHooks,
		obj:                             copied,
	}
	return out
}

// gvk renders the resource's GroupVersionKind as "Kind.group/version",
// used for log and error messages.
func (res typedComponentResource[T, PT]) gvk() string {
	kind := res.obj.GetObjectKind().GroupVersionKind()
	return kind.GroupKind().String() + "/" + kind.Version
}

// reconcile drives a single resource to its desired state.
//
// When isManaged is false, the resource is deleted if (and only if) the live
// object is owned by this policy instance. Otherwise the desired object is
// transformed, created-or-updated (unless unchanged), and inspected.
//
// Returns the component state reported by inspect (may be nil when healthy
// or skipped) and any error encountered.
func (res typedComponentResource[T, PT]) reconcile(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	isManaged bool,
) (*v1.ComponentState, error) {
	gvk := res.gvk()
	logger := log.FromContext(ctx, gvk, res.obj.GetName())
	ctx = log.IntoContext(ctx, logger)
	mutated := true

	if !isManaged {
		// If component is not managed by operator and resource can be deleted
		realObj, ok := res.canDelete(ctx, r, res.obj)
		if ok {
			if err := r.Delete(ctx, realObj); client.IgnoreNotFound(err) != nil {
				return nil, fmt.Errorf("delete %s resource: %w", gvk, err)
			}
		}
		return nil, nil
	}

	if err := res.transform(ctx, r, res.obj); err != nil {
		if errors.Is(err, errSkipReconciliation) {
			return nil, nil
		}
		if !errors.Is(err, errSkipCreation) {
			return nil, fmt.Errorf("transform %s resource: %w", gvk, err)
		}
		// errSkipCreation: skip apply but still run inspect below.
		mutated = false
	}

	if mutated {
		// Create new object to receive transformed resource
		o, err := r.Scheme.New(res.obj.GetObjectKind().GroupVersionKind()) // fresh empty object for this GVK
		if err != nil {
			return nil, fmt.Errorf("new %s object: %w", gvk, err)
		}
		obj := o.(client.Object)
		obj.SetNamespace(res.obj.GetNamespace())
		obj.SetName(res.obj.GetName())
		/*
		   For DaemonSets and Deployments, compare the hash annotation of the
		   live object with the desired one. If they match, the resource has
		   not changed, so return errSkipCreation to skip the update.
		*/
		op, err := controllerutil.CreateOrUpdate(ctx, r.Client, obj, func() error {
			// Check if resource needs update
			gvk := obj.GetObjectKind().GroupVersionKind()
			if (gvk == gvkDaemonSet || gvk == gvkDeployment) &&
				obj.GetAnnotations()[hashAnnotationKey] == res.obj.GetAnnotations()[hashAnnotationKey] {
				return errSkipCreation
			}
			res.obj.SetResourceVersion(obj.GetResourceVersion()) // Kubernetes optimistic locking: only updates based on the latest ResourceVersion succeed.
			res.into(obj)                                        // copy the desired content of res.obj into obj
			return nil
		})
		if err != nil && !errors.Is(err, errSkipCreation) {
			return nil, fmt.Errorf("create or update %s resource: %w", gvk, err)
		}
		logger.V(1).Info(fmt.Sprintf("Resource %s", op))
	}

	state, err := res.inspect(ctx, r, res.obj)
	if err != nil && state == nil {
		// Inspect failed without providing a state: synthesize a Terminated one.
		state = &v1.ComponentState{
			Phase:   v1.ComponentTerminated,
			Reason:  reconcileFailedReason,
			Message: err.Error(),
		}
	}
	return state, err
}

// into copies the full desired content of res.obj into receiver, which must
// be a PT (same concrete type); used inside CreateOrUpdate's mutate callback.
func (res typedComponentResource[T, PT]) into(receiver client.Object) {
	*receiver.(PT) = *res.obj
}

// newComponentResource wraps a decoded manifest object and its type-specific
// reconcile hooks into a componentResource.
func newComponentResource[T any, PT clientObject[T]](
	obj *T,
	hooks componentResourceReconcileHooks[T],
) componentResource {
	return typedComponentResource[T, PT]{
		componentResourceReconcileHooks: hooks,
		obj:                             obj,
	}
}

// defaultResourceReconcileHooks provides the baseline hook behavior (owner
// reference on transform, no-op inspect, ownership-checked delete) that the
// type-specific hooks embed and selectively override.
type defaultResourceReconcileHooks[T client.Object] struct{}

// canDelete fetches the live counterpart of obj and reports whether it is
// owned by the current policy instance (kind, name and UID all match an owner
// reference). Only owned resources are returned for deletion; resources not
// found or not owned yield (nil, false).
func (defaultResourceReconcileHooks[T]) canDelete(ctx context.Context, r *NPUClusterPolicyReconciler, obj T) (client.Object, bool) {
	logger := log.FromContext(ctx)
	// Bundled manifests may carry a placeholder namespace; resolve it first.
	if obj.GetNamespace() == placeholderValue {
		obj.SetNamespace(r.namespace)
	}
	key := client.ObjectKeyFromObject(obj)
	// Allocate a fresh value of obj's concrete type via reflection so the Get
	// below does not overwrite the desired object.
	currentResource := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(T)
	if err := r.Get(ctx, key, currentResource); err != nil {
		if !apierrors.IsNotFound(err) {
			logger.Error(err, "Failed to get resource")
		}
		return nil, false
	}
	references := currentResource.GetOwnerReferences()
	logger.Info("Reference", "reference", references)
	if references == nil {
		return nil, false
	}
	for _, ref := range references {
		// Match kind, name and UID to be certain it is exactly this instance.
		if ref.Kind == r.instance.Kind && ref.Name == r.instance.Name && ref.UID == r.instance.UID {
			return currentResource, true
		}
	}
	return nil, false
}

// transform marks the policy instance as the controlling owner of obj so that
// ownership checks (canDelete) and garbage collection recognize the resource.
func (defaultResourceReconcileHooks[T]) transform(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj T,
) error {
	// set ownerReference.controller to true
	err := controllerutil.SetControllerReference(r.instance, obj, r.Scheme)
	if err != nil {
		return fmt.Errorf("set owner as controller: %w", err)
	}
	return nil
}

// inspect is the default no-op health check: components without a specific
// inspector are always considered healthy.
func (defaultResourceReconcileHooks[T]) inspect(
	_ context.Context,
	_ *NPUClusterPolicyReconciler,
	_ T,
) (*v1.ComponentState, error) {
	return nil, nil
}

// namespaceReconcileHooks handles Namespace resources: it only adopts a
// namespace (sets the owner reference) when the namespace does not yet exist.
type namespaceReconcileHooks struct {
	defaultResourceReconcileHooks[*corev1.Namespace]
}

// transform only sets the controller owner reference when the namespace does
// not exist yet; a pre-existing namespace is left untouched so the operator
// never adopts (and later deletes) a namespace it did not create.
func (h namespaceReconcileHooks) transform(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj *corev1.Namespace,
) error {
	existing := &corev1.Namespace{}
	err := r.Get(ctx, client.ObjectKeyFromObject(obj), existing)
	switch {
	case err == nil:
		// Namespace already present: do not take ownership.
		return nil
	case apierrors.IsNotFound(err):
		return h.defaultResourceReconcileHooks.transform(ctx, r, obj)
	default:
		return err
	}
}

// runtimeClassReconcileHooks handles RuntimeClass resources, substituting the
// configured runtime class name into the bundled manifest.
type runtimeClassReconcileHooks struct {
	defaultResourceReconcileHooks[*nodev1.RuntimeClass]
}

// transform fills the runtime class name and handler from the policy spec.
// The manifest's Name is a placeholder that is replaced with the configured
// value; Handler is always taken from the spec.
func (h runtimeClassReconcileHooks) transform(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj *nodev1.RuntimeClass,
) error {
	configured := r.instance.Spec.Operator.RuntimeClass
	if obj.Name == placeholderValue {
		obj.Name = configured
	}
	obj.Handler = configured
	return h.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// daemonSetReconcileHooks handles DaemonSet resources. Per-workload behavior
// is composed from transformer/inspector chains built by
// newDaemonSetReconcileHooks based on the DaemonSet name.
type daemonSetReconcileHooks struct {
	defaultResourceReconcileHooks[*appsv1.DaemonSet]
	transformers []componentResourceTransformer[appsv1.DaemonSet]
	inspectors   []componentResourceInspector[appsv1.DaemonSet]
}

// transform prepares a DaemonSet for apply: it skips clusters without NPU
// nodes, resolves the placeholder namespace, merges common labels,
// annotations and tolerations from the policy spec, then runs the
// per-workload transformer chain and finally sets the owner reference.
func (h daemonSetReconcileHooks) transform(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj *appsv1.DaemonSet,
) error {
	if !r.hasNPUNodes {
		log.FromContext(ctx).V(1).Info("No supported NPU node in cluster, skip DaemonSet creation")
		return errSkipReconciliation
	}
	if obj.Namespace == placeholderValue {
		obj.Namespace = r.namespace
	}

	// Apply shared labels/annotations to both the DaemonSet and its pod template.
	dsSpec := &r.instance.Spec.Daemonsets
	podTmpl := &obj.Spec.Template
	setCommonDaemonSetMetadata(&obj.ObjectMeta, dsSpec)
	setCommonDaemonSetMetadata(&podTmpl.ObjectMeta, dsSpec)
	if podTmpl.Spec.Tolerations == nil {
		podTmpl.Spec.Tolerations = []corev1.Toleration{}
	}
	// Appending an empty list is a no-op, so no length guard is needed.
	podTmpl.Spec.Tolerations = append(podTmpl.Spec.Tolerations, dsSpec.Tolerations...)

	for _, apply := range h.transformers {
		if err := apply(ctx, r, obj); err != nil {
			return err
		}
	}

	return h.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// inspect checks DaemonSet health: the live object must exist, have no
// unavailable pods, and every selected pod must be Running. Component-specific
// inspectors run first. A (nil, nil) return means healthy (or nothing
// scheduled); a non-nil state with nil error reports a Pending component.
func (h daemonSetReconcileHooks) inspect(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj *appsv1.DaemonSet,
) (*v1.ComponentState, error) {
	logger := log.FromContext(ctx)
	// Refresh obj with the live cluster state before reading its status.
	if err := r.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
		return nil, fmt.Errorf("get DaemonSet: %w", err)
	}

	for _, inspect := range h.inspectors {
		if err := inspect(ctx, r, obj); err != nil {
			return nil, err
		}
	}

	if obj.Status.DesiredNumberScheduled == 0 {
		logger.V(1).Info("DaemonSet selects 0 node")
		return nil, nil
	}
	if obj.Status.NumberUnavailable != 0 {
		logger.V(1).Info(
			fmt.Sprintf("DaemonSet %s/%s not ready", obj.Namespace, obj.Name),
			"NumberUnavailable",
			obj.Status.NumberUnavailable,
		)
		return nil, fmt.Errorf("DaemonSet %s/%s not ready", obj.Namespace, obj.Name)
	}
	podList := &corev1.PodList{}
	// Scope the List to the DaemonSet's own namespace: matching on labels
	// alone would also pick up pods with the same labels in other namespaces
	// and could wrongly report the component as Pending. This matches how
	// runtimeDaemonSetInspector lists pods.
	if err := r.Client.List(ctx, podList,
		client.InNamespace(obj.Namespace),
		client.MatchingLabels(obj.Spec.Selector.MatchLabels),
	); err != nil {
		return nil, fmt.Errorf("list pods: %w", err)
	}
	for _, pod := range podList.Items {
		if pod.Status.Phase != corev1.PodRunning {
			return &v1.ComponentState{
				Phase:   v1.ComponentPending,
				Reason:  "PodNotRunning",
				Message: fmt.Sprintf("Pod %s ,state %s", pod.Name, pod.Status.Phase),
			}, nil
		}
	}

	return nil, nil
}

// setCommonDaemonSetMetadata merges the shared labels and annotations from
// the policy's DaemonSet spec into metadata, overriding colliding keys.
func setCommonDaemonSetMetadata(metadata *metav1.ObjectMeta, spec *v1.DaemonsetsSpec) {
	if metadata.Labels == nil {
		metadata.Labels = map[string]string{}
	}
	// maps.Copy is a no-op for an empty source, so no length guard is needed.
	maps.Copy(metadata.Labels, spec.Labels)

	if metadata.Annotations == nil {
		metadata.Annotations = map[string]string{}
	}
	maps.Copy(metadata.Annotations, spec.Annotations)
}

// newDaemonSetReconcileHooks assembles the transformer and inspector chains
// for a DaemonSet based on its manifest name (decorator-style composition:
// each transformer adds one concern — image, log rotation, command, ...).
func newDaemonSetReconcileHooks(obj client.Object) daemonSetReconcileHooks {
	var hooks daemonSetReconcileHooks
	name := obj.GetName()

	// CRI runtime DaemonSets are matched by prefix; they additionally get an
	// inspector that injects per-node runtime endpoints into their pods.
	if strings.HasPrefix(name, runtimeDaemonSetNamePrefix) {
		hooks.transformers = append(hooks.transformers,
			runtimeDaemonSetTransformer,
			newWorkloadImageTransformer[appsv1.DaemonSet](runtimeImageConfigGetter),
		)
		hooks.inspectors = append(hooks.inspectors, runtimeDaemonSetInspector)
	}

	switch name {
	case driverDaemonSetName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadLogRotateTransformer[appsv1.DaemonSet](driverLogRotateConfigGetter),
			newWorkloadImageTransformer[appsv1.DaemonSet](driverImageConfigGetter),
		)
	case devicePluginDaemonSetName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadLogRotateTransformer[appsv1.DaemonSet](devicePluginLogRotateConfigGetter),
			newWorkloadImageTransformer[appsv1.DaemonSet](devicePluginImageConfigGetter),
			newWorkloadCommandParameterTransformer[appsv1.DaemonSet](devicePluginCommandParametersGetter),
			newWorkloadResourceRequirementsTransformer[appsv1.DaemonSet](devicePluginResourceRequirementsConfigGetter),
			newWorkloadVolumesTransformer[appsv1.DaemonSet](devicePluginVolumesGetter),
		)
	case nodedDaemonsetName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadLogRotateTransformer[appsv1.DaemonSet](nodeDLogRotateConfigGetter),
			newWorkloadImageTransformer[appsv1.DaemonSet](nodeDImageConfigGetter),
			newWorkloadCommandParameterTransformer[appsv1.DaemonSet](nodeDCommandParametersGetter),
			newWorkloadResourceRequirementsTransformer[appsv1.DaemonSet](nodeDResourceConfigGetter),
		)
	case exporterDaemonsetName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadLogRotateTransformer[appsv1.DaemonSet](exporterLogRotateConfigGetter),
			newWorkloadImageTransformer[appsv1.DaemonSet](exporterImageConfigGetter),
			newWorkloadResourceRequirementsTransformer[appsv1.DaemonSet](exporterResourceRequirementsConfigGetter),
			newWorkloadCommandParameterTransformer[appsv1.DaemonSet](exporterCommandParametersGetter),
		)
	case mindIOTFTDaemonsetName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadImageTransformer[appsv1.DaemonSet](mindioTFTImageConfigGetter),
		)
	case mindIOACPDaemonsetName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadImageTransformer[appsv1.DaemonSet](mindioACPImageConfigGetter),
		)
	}

	// Every DaemonSet ends with the hash transformer so unchanged specs can
	// be detected and skipped at apply time.
	hooks.transformers = append(hooks.transformers, workloadHashTransformer[appsv1.DaemonSet])

	return hooks
}

// runtimeDaemonSetTransformer adapts a CRI runtime DaemonSet to the runtimes
// actually present in the cluster: it skips reconciliation entirely when no
// node uses this runtime, pins the pods to nodes with that runtime via a node
// selector, and seeds the runtime-endpoint annotation (filled per pod later
// by runtimeDaemonSetInspector).
func runtimeDaemonSetTransformer(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj *appsv1.DaemonSet,
) error {
	logger := log.FromContext(ctx)
	// The runtime name is encoded in the DaemonSet name after the prefix.
	runtime := strings.TrimPrefix(obj.Name, runtimeDaemonSetNamePrefix)
	if _, ok := r.runtimes[runtime]; !ok {
		// Fix typo in log message: "clustre" -> "cluster".
		logger.V(1).Info(fmt.Sprintf("No node with CRI runtime %s found in cluster", runtime))
		return errSkipReconciliation
	}

	selector := obj.Spec.Template.Spec.NodeSelector
	if selector == nil {
		selector = map[string]string{}
	}
	selector[runtimeLabelKey] = runtime
	obj.Spec.Template.Spec.NodeSelector = selector

	anno := obj.Spec.Template.Annotations
	if anno == nil {
		anno = map[string]string{}
	}
	// Placeholder; the actual per-node endpoint is injected pod-by-pod.
	anno[runtimeEndpointAnnotationKey] = ""
	obj.Spec.Template.Annotations = anno

	return nil
}

// runtimeDaemonSetInspector injects each pod's node-specific CRI runtime
// endpoint into the pod's annotations. Note that despite being an inspector,
// it mutates (updates) the pods it examines.
func runtimeDaemonSetInspector(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj *appsv1.DaemonSet,
) error {
	logger := log.FromContext(ctx)

	podList := &corev1.PodList{}
	if err := r.List(ctx, podList, client.InNamespace(r.namespace), client.MatchingLabels(obj.Spec.Selector.MatchLabels)); err != nil {
		return fmt.Errorf("list pods: %w", err)
	}

	for _, pod := range podList.Items {
		logger := logger.WithValues("pod", pod.Name)
		anno := pod.Annotations
		if anno == nil {
			anno = map[string]string{}
		}
		update := false
		node := pod.Spec.NodeName
		// Look up the CRI endpoint recorded for the pod's node.
		if endpoint, ok := r.nodeRuntimeEndpoints[node]; ok {
			logger.V(1).Info(fmt.Sprintf("CRI container runtime endpoint: %s", endpoint))
			// Only update the pod if the annotation actually changed.
			update = addAnnotation(anno, runtimeEndpointAnnotationKey, endpoint)
		} else {
			logger.V(-1).Info(fmt.Sprintf("Pod running on unrecognized node %s", node))
		}
		pod.Annotations = anno
		if update {
			if err := r.Update(ctx, &pod); err != nil {
				return fmt.Errorf("inject endpoint to pod: %w", err)
			}
		}
	}

	return nil
}

// deploymentReconcileHooks handles Deployment resources. Per-workload
// behavior is composed from a transformer chain built by
// newDeploymentReconcileHooks based on the Deployment name.
type deploymentReconcileHooks struct {
	defaultResourceReconcileHooks[*appsv1.Deployment]
	transformers []componentResourceTransformer[appsv1.Deployment]
}

// transform prepares a Deployment for apply: it resolves the placeholder
// namespace, runs the per-workload transformer chain, then sets the owner
// reference via the default hook.
func (h deploymentReconcileHooks) transform(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj *appsv1.Deployment,
) error {
	if obj.Namespace == placeholderValue {
		obj.Namespace = r.namespace
	}

	for _, apply := range h.transformers {
		if err := apply(ctx, r, obj); err != nil {
			return err
		}
	}

	return h.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// inspect checks Deployment health: the live object must exist, have no
// unavailable replicas, and every selected pod must be Running. A (nil, nil)
// return means healthy; a non-nil state with nil error reports Pending.
func (deploymentReconcileHooks) inspect(
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj *appsv1.Deployment,
) (*v1.ComponentState, error) {
	logger := log.FromContext(ctx)
	// Refresh obj with the live cluster state before reading its status.
	if err := r.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
		return nil, fmt.Errorf("get Deployment: %w", err)
	}
	if obj.Status.UnavailableReplicas != 0 {
		logger.V(1).Info(
			fmt.Sprintf("Deployment %s/%s not ready", obj.Namespace, obj.Name),
			"NumberUnavailable",
			obj.Status.UnavailableReplicas,
		)
		return nil, fmt.Errorf("Deployment %s/%s not ready", obj.Namespace, obj.Name)
	}
	podList := &corev1.PodList{}
	// Scope the List to the Deployment's own namespace: matching on labels
	// alone would also pick up pods with the same labels in other namespaces
	// and could wrongly report the component as Pending (consistent with the
	// DaemonSet inspector and runtimeDaemonSetInspector).
	if err := r.Client.List(ctx, podList,
		client.InNamespace(obj.Namespace),
		client.MatchingLabels(obj.Spec.Selector.MatchLabels),
	); err != nil {
		return nil, fmt.Errorf("list pods: %w", err)
	}

	for _, pod := range podList.Items {
		if pod.Status.Phase != corev1.PodRunning {
			return &v1.ComponentState{
				Phase:   v1.ComponentPending,
				Reason:  "PodNotRunning",
				Message: fmt.Sprintf("Pod %s ,state %s", pod.Name, pod.Status.Phase),
			}, nil
		}
	}
	return nil, nil
}

// newDeploymentReconcileHooks assembles the transformer chain for a
// Deployment based on its manifest name (decorator-style composition).
func newDeploymentReconcileHooks(obj client.Object) deploymentReconcileHooks {
	var hooks deploymentReconcileHooks

	switch obj.GetName() {
	case trainerDeploymentName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadLogRotateTransformer[appsv1.Deployment](trainerLogRotateConfigGetter),
			newWorkloadImageTransformer[appsv1.Deployment](trainerImageConfigGetter),
			newWorkloadResourceRequirementsTransformer[appsv1.Deployment](trainerResourceRequirementsConfigGetter),
			newWorkloadCommandParameterTransformer[appsv1.Deployment](trainerCommandParametersGetter),
		)
	case clusterdDeploymentName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadLogRotateTransformer[appsv1.Deployment](clusterdLogRotateConfigGetter),
			newWorkloadImageTransformer[appsv1.Deployment](clusterdImageConfigGetter),
			newWorkloadResourceRequirementsTransformer[appsv1.Deployment](clusterdResourceConfigGetter),
			newWorkloadCommandParameterTransformer[appsv1.Deployment](clusterDCommandParametersGetter),
		)
	case rsControllerDeploymentName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadLogRotateTransformer[appsv1.Deployment](rsControllerLogRotateConfigGetter),
			newWorkloadImageTransformer[appsv1.Deployment](rscontrollerImageConfigGetter),
			newWorkloadResourceRequirementsTransformer[appsv1.Deployment](rsControllerResourceConfigGetter),
			newWorkloadCommandParameterTransformer[appsv1.Deployment](rsControllerCommandParametersGetter),
		)
	case vcControllerDeploymentName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadImageTransformer[appsv1.Deployment](vcControllerImageConfigGetter),
			newWorkloadResourceRequirementsTransformer[appsv1.Deployment](vcControllerResourceRequirementsConfigGetter),
			newWorkloadCommandParameterTransformer[appsv1.Deployment](vcControllerCommandParametersGetter),
		)
	case vcSchedulerDeploymentName:
		hooks.transformers = append(hooks.transformers,
			newWorkloadImageTransformer[appsv1.Deployment](vcSchedulerImageConfigGetter),
			newWorkloadResourceRequirementsTransformer[appsv1.Deployment](vcSchedulerResourceRequirementsConfigGetter),
			newWorkloadCommandParameterTransformer[appsv1.Deployment](schedulerCommandParametersGetter),
		)
	}

	// Every Deployment ends with the hash transformer so unchanged specs can
	// be detected and skipped at apply time.
	hooks.transformers = append(hooks.transformers, workloadHashTransformer[appsv1.Deployment])

	return hooks
}

// newWorkloadPodTemplateSpecGetter returns an accessor for the pod template
// embedded in either a DaemonSet or a Deployment spec, selected once at
// construction time via a type switch on the zero value of T.
func newWorkloadPodTemplateSpecGetter[T appsv1.DaemonSet | appsv1.Deployment]() func(*T) *corev1.PodTemplateSpec {
	var zero T
	switch any(zero).(type) {
	case appsv1.DaemonSet:
		return func(obj *T) *corev1.PodTemplateSpec {
			ds := any(obj).(*appsv1.DaemonSet)
			return &ds.Spec.Template
		}
	case appsv1.Deployment:
		return func(obj *T) *corev1.PodTemplateSpec {
			dep := any(obj).(*appsv1.Deployment)
			return &dep.Spec.Template
		}
	default:
		// Unreachable: the constraint only admits the two cases above.
		return nil
	}
}

// Hash workload resource to avoid unnecessary update
func workloadHashTransformer[T appsv1.DaemonSet | appsv1.Deployment, PT clientObject[T]](
	ctx context.Context,
	r *NPUClusterPolicyReconciler,
	obj PT,
) error {
	hasher := fnv.New32a()
	spewer := spew.ConfigState{
		DisableMethods:          true,
		DisablePointerAddresses: true,
		SortKeys:                true,
		SpewKeys:                true,
	}
	spewer.Fprintf(hasher, "%#v", obj)
	anno := obj.GetAnnotations()
	if anno == nil {
		anno = make(map[string]string)
		obj.SetAnnotations(anno)
	}
	obj.GetAnnotations()[hashAnnotationKey] = hex.EncodeToString(hasher.Sum(nil))
	return nil
}

// newWorkloadLogRotateTransformer returns a transformer that pushes the
// component's log-rotation settings into the workload's containers through
// the well-known NPU_OPERATOR_* environment variables. Only env vars already
// declared in the manifest are updated; none are added.
func newWorkloadLogRotateTransformer[T appsv1.DaemonSet | appsv1.Deployment](
	logRotate func(*v1.NPUClusterPolicySpec) *v1.LogRotate,
) componentResourceTransformer[T] {
	podTemplateSpec := newWorkloadPodTemplateSpecGetter[T]()
	return func(ctx context.Context, r *NPUClusterPolicyReconciler, obj *T) error {
		conf := logRotate(&r.instance.Spec)
		podSpec := &podTemplateSpec(obj).Spec
		for ci := range podSpec.Containers {
			envVars := podSpec.Containers[ci].Env
			for ei := range envVars {
				envVar := &envVars[ei]
				switch envVar.Name {
				case logRotateLogLevelEnvName:
					envVar.Value = fmt.Sprintf("%d", conf.LogLevel.Value())
				case logRotateLogFileEnvName:
					envVar.Value = conf.LogFile
				case logRotateRotateEnvName:
					envVar.Value = fmt.Sprintf("%d", conf.Rotate)
				case logRotateMaxAgeEnvName:
					envVar.Value = fmt.Sprintf("%d", conf.MaxAge)
				case logRotateCompressEnvName:
					envVar.Value = fmt.Sprintf("%t", conf.Compress)
				}
			}
		}
		return nil
	}
}

// Per-component accessors returning the LogRotate section of the policy
// spec; plugged into newWorkloadLogRotateTransformer.

func trainerLogRotateConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.LogRotate {
	return &spec.Trainer.LogRotate
}

func devicePluginLogRotateConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.LogRotate {
	return &spec.DevicePlugin.LogRotate
}

func nodeDLogRotateConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.LogRotate {
	return &spec.NodeD.LogRotate
}

func exporterLogRotateConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.LogRotate {
	return &spec.Exporter.LogRotate
}

func driverLogRotateConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.LogRotate {
	return &spec.Driver.LogRotate
}

func clusterdLogRotateConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.LogRotate {
	return &spec.ClusterD.LogRotate
}

func rsControllerLogRotateConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.LogRotate {
	return &spec.RSController.LogRotate
}

// Per-component accessors returning the ResourceRequirements section of the
// policy spec; plugged into newWorkloadResourceRequirementsTransformer.

func trainerResourceRequirementsConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.ResourceRequirements {
	return &spec.Trainer.Resources
}

func vcSchedulerResourceRequirementsConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.ResourceRequirements {
	return &spec.VCScheduler.Resources
}

func vcControllerResourceRequirementsConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.ResourceRequirements {
	return &spec.VCController.Resources
}

func devicePluginResourceRequirementsConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.ResourceRequirements {
	return &spec.DevicePlugin.Resources
}

func nodeDResourceConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.ResourceRequirements {
	return &spec.NodeD.Resources
}

func exporterResourceRequirementsConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.ResourceRequirements {
	return &spec.Exporter.Resources
}

func clusterdResourceConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.ResourceRequirements {
	return &spec.ClusterD.Resources
}

func rsControllerResourceConfigGetter(spec *v1.NPUClusterPolicySpec) *v1.ResourceRequirements {
	return &spec.RSController.Resources
}

// Per-component accessors returning the Command section of the policy spec;
// plugged into newWorkloadCommandParameterTransformer.

func schedulerCommandParametersGetter(spec *v1.NPUClusterPolicySpec) *v1.CommandSpec {
	return &spec.VCScheduler.Command
}

func vcControllerCommandParametersGetter(spec *v1.NPUClusterPolicySpec) *v1.CommandSpec {
	return &spec.VCController.Command
}

func trainerCommandParametersGetter(spec *v1.NPUClusterPolicySpec) *v1.CommandSpec {
	return &spec.Trainer.Command
}

func devicePluginCommandParametersGetter(spec *v1.NPUClusterPolicySpec) *v1.CommandSpec {
	return &spec.DevicePlugin.Command
}

func nodeDCommandParametersGetter(spec *v1.NPUClusterPolicySpec) *v1.CommandSpec {
	return &spec.NodeD.Command
}

func exporterCommandParametersGetter(spec *v1.NPUClusterPolicySpec) *v1.CommandSpec {
	return &spec.Exporter.Command
}

func clusterDCommandParametersGetter(spec *v1.NPUClusterPolicySpec) *v1.CommandSpec {
	return &spec.ClusterD.Command
}

func rsControllerCommandParametersGetter(spec *v1.NPUClusterPolicySpec) *v1.CommandSpec {
	return &spec.RSController.Command
}

// devicePluginVolumesGetter returns the device plugin's volume configuration
// from the policy spec; plugged into newWorkloadVolumesTransformer.
func devicePluginVolumesGetter(spec *v1.NPUClusterPolicySpec) *v1.VolumesSpec {
	return &spec.DevicePlugin.VolumeSpec
}

// newWorkloadVolumesTransformer returns a transformer that merges the
// user-configured volumes and volume mounts into the workload's pod template.
// Merging is by name: an entry whose name matches an existing volume/mount
// replaces it in place; otherwise it is appended. Entries with an empty name
// are ignored. Volume mounts are applied to every container.
func newWorkloadVolumesTransformer[T appsv1.DaemonSet | appsv1.Deployment](
	volConfigGetter func(*v1.NPUClusterPolicySpec) *v1.VolumesSpec,
) componentResourceTransformer[T] {
	podTemplateSpec := newWorkloadPodTemplateSpecGetter[T]()

	return func(ctx context.Context, r *NPUClusterPolicyReconciler, obj *T) error {
		volConfig := volConfigGetter(&r.instance.Spec)
		// Nothing configured: leave the manifest untouched.
		if volConfig == nil || (len(volConfig.Volumes) == 0 && len(volConfig.VolumeMounts) == 0) {
			return nil
		}

		podTmpl := podTemplateSpec(obj)
		if len(volConfig.Volumes) > 0 {
			if podTmpl.Spec.Volumes == nil {
				podTmpl.Spec.Volumes = make([]corev1.Volume, 0)
			}

			// Index existing volumes by name so configured volumes can
			// replace them in place instead of duplicating.
			existingVolMap := make(map[string]int, len(podTmpl.Spec.Volumes))
			for idx, vol := range podTmpl.Spec.Volumes {
				existingVolMap[vol.Name] = idx
			}

			for _, newVol := range volConfig.Volumes {
				if newVol.Name == "" {
					continue
				}
				if idx, exists := existingVolMap[newVol.Name]; exists {
					podTmpl.Spec.Volumes[idx] = newVol
				} else {
					podTmpl.Spec.Volumes = append(podTmpl.Spec.Volumes, newVol)
				}
			}
		}

		if len(volConfig.VolumeMounts) > 0 {
			for i := range podTmpl.Spec.Containers {
				container := &podTmpl.Spec.Containers[i]

				if container.VolumeMounts == nil {
					container.VolumeMounts = make([]corev1.VolumeMount, 0)
				}

				// Same replace-or-append strategy, per container.
				existingMountMap := make(map[string]int, len(container.VolumeMounts))
				for idx, mount := range container.VolumeMounts {
					existingMountMap[mount.Name] = idx
				}

				for _, newMount := range volConfig.VolumeMounts {
					if newMount.Name == "" {
						continue
					}
					if idx, exists := existingMountMap[newMount.Name]; exists {
						container.VolumeMounts[idx] = newMount
					} else {
						container.VolumeMounts = append(container.VolumeMounts, newMount)
					}
				}
			}
		}

		return nil
	}
}

// newWorkloadCommandParameterTransformer builds a transformer that replaces
// placeholder container commands/args in a workload's pod template with the
// values configured in the CR spec. Only containers whose Command or Args is
// exactly the single placeholder value are rewritten.
func newWorkloadCommandParameterTransformer[T appsv1.DaemonSet | appsv1.Deployment](
	command func(*v1.NPUClusterPolicySpec) *v1.CommandSpec) componentResourceTransformer[T] {
	podTemplateSpec := newWorkloadPodTemplateSpecGetter[T]()
	return func(ctx context.Context, r *NPUClusterPolicyReconciler, obj *T) error {
		conf := command(&r.instance.Spec)
		if conf == nil {
			// No command configuration: keep the manifest defaults. The
			// original code would dereference a nil conf here; the sibling
			// resource-requirements transformer already guards this way.
			return nil
		}
		podTmpl := podTemplateSpec(obj)
		containers := podTmpl.Spec.Containers
		for i := range containers {
			c := &containers[i]
			if len(c.Command) == 1 && c.Command[0] == placeholderValue {
				c.Command = conf.Command
			}
			if len(c.Args) == 1 && c.Args[0] == placeholderValue {
				c.Args = conf.Args
			}
		}
		return nil
	}
}

// newWorkloadResourceRequirementsTransformer builds a transformer that merges
// the resource limits/requests configured in the CR spec into every container
// of a workload's pod template. Configured keys override same-named keys
// already present on a container; other keys are preserved.
func newWorkloadResourceRequirementsTransformer[T appsv1.DaemonSet | appsv1.Deployment](
	resources func(*v1.NPUClusterPolicySpec) *v1.ResourceRequirements) componentResourceTransformer[T] {
	podTemplateSpec := newWorkloadPodTemplateSpecGetter[T]()
	return func(ctx context.Context, r *NPUClusterPolicyReconciler, obj *T) error {
		conf := resources(&r.instance.Spec)
		if conf == nil {
			return nil
		}
		podTmpl := podTemplateSpec(obj)
		containers := podTmpl.Spec.Containers
		for i := range containers {
			c := &containers[i]
			// Only materialize the maps when there is something to merge;
			// the original allocated empty ResourceLists even when the spec
			// configured nothing, needlessly mutating the object.
			if len(conf.Limits) > 0 {
				if c.Resources.Limits == nil {
					c.Resources.Limits = make(corev1.ResourceList, len(conf.Limits))
				}
				maps.Copy(c.Resources.Limits, conf.Limits)
			}
			if len(conf.Requests) > 0 {
				if c.Resources.Requests == nil {
					c.Resources.Requests = make(corev1.ResourceList, len(conf.Requests))
				}
				maps.Copy(c.Resources.Requests, conf.Requests)
			}
		}
		return nil
	}
}

// newWorkloadImageTransformer builds a transformer that resolves placeholder
// image references in a workload's pod template against the images configured
// in the CR spec. conf[0] is the main-container image; conf[1:] map
// positionally onto the template's init containers. Pull secrets from every
// configured image are appended to the pod template.
func newWorkloadImageTransformer[T appsv1.DaemonSet | appsv1.Deployment](
	image func(*v1.NPUClusterPolicySpec) []*v1.ImageSpec,
) componentResourceTransformer[T] {
	// Returns the pod template.spec of the DaemonSet/Deployment.
	podTemplateSpec := newWorkloadPodTemplateSpecGetter[T]()
	return func(ctx context.Context, r *NPUClusterPolicyReconciler, obj *T) error {
		conf := image(&r.instance.Spec) // image addresses declared in the CR instance
		if len(conf) == 0 {
			// Guard: the original indexed conf[0] unconditionally and would
			// panic on an empty configuration.
			return errors.New("no image configuration provided")
		}
		podTmpl := podTemplateSpec(obj)

		// Collect pull secrets from all configured images.
		imagePullSecrets := podTmpl.Spec.ImagePullSecrets
		for _, c := range conf {
			for _, secret := range c.ImagePullSecrets {
				imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{Name: secret})
			}
		}
		podTmpl.Spec.ImagePullSecrets = imagePullSecrets

		// Resolve the main-container placeholder image from conf[0].
		mainImage := conf[0]
		mainImageName := mainImage.Name() // full image reference assembled from the CR
		containers := podTmpl.Spec.Containers
		for i := range containers {
			c := &containers[i]
			if c.Image != placeholderValue {
				continue
			}
			if len(mainImageName) == 0 {
				return fmt.Errorf("invalid image name: %v", *mainImage)
			}
			c.Image = mainImageName
			c.ImagePullPolicy = corev1.PullPolicy(mainImage.ImagePullPolicy)
		}

		// Resolve init-container placeholder images positionally. Bounds are
		// checked: the original indexed InitContainers[i] directly and would
		// panic when the spec configures more init images than the template
		// declares init containers.
		initImages := conf[1:]
		for i, initImage := range initImages {
			if i >= len(podTmpl.Spec.InitContainers) {
				return fmt.Errorf("init image config at index %d has no matching init container (template has %d)",
					i, len(podTmpl.Spec.InitContainers))
			}
			initContainer := &podTmpl.Spec.InitContainers[i]
			if initContainer.Image != placeholderValue {
				continue
			}
			name := initImage.Name()
			if len(name) == 0 {
				return fmt.Errorf("invalid init image name at index %d: %v", i, *initImage)
			}
			initContainer.Image = name
			initContainer.ImagePullPolicy = corev1.PullPolicy(initImage.ImagePullPolicy)
		}
		return nil
	}
}

// Per-component image configuration getters. Each returns the image specs for
// one component; the first element is the main-container image and any
// following elements are init-container images, matching the positional
// contract used by newWorkloadImageTransformer.

// runtimeImageConfigGetter returns the OCI runtime images.
func runtimeImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{
		&spec.OCIRuntime.Image,
		&spec.OCIRuntime.InitRuntimeImage,
		&spec.OCIRuntime.InitConfigImage,
	}
}

// vcSchedulerImageConfigGetter returns the volcano scheduler image.
func vcSchedulerImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.VCScheduler.Image}
}

// vcControllerImageConfigGetter returns the volcano controller image.
func vcControllerImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.VCController.Image}
}

// trainerImageConfigGetter returns the trainer main and init images.
func trainerImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.Trainer.Image, &spec.Trainer.InitImage}
}

// devicePluginImageConfigGetter returns the device-plugin main and init images.
func devicePluginImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.DevicePlugin.Image, &spec.DevicePlugin.InitImage}
}

// nodeDImageConfigGetter returns the NodeD image.
func nodeDImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.NodeD.Image}
}

// exporterImageConfigGetter returns the exporter image.
func exporterImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.Exporter.Image}
}

// driverImageConfigGetter returns the driver main and init images.
func driverImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.Driver.Image, &spec.Driver.InitImage}
}

// clusterdImageConfigGetter returns the clusterd image.
func clusterdImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.ClusterD.Image}
}

// rscontrollerImageConfigGetter returns the RSController image.
func rscontrollerImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.RSController.Image}
}

// mindioTFTImageConfigGetter returns the MindIO TFT image.
func mindioTFTImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.MindIOTFT.Image}
}

// mindioACPImageConfigGetter returns the MindIO ACP image.
func mindioACPImageConfigGetter(spec *v1.NPUClusterPolicySpec) []*v1.ImageSpec {
	return []*v1.ImageSpec{&spec.MindIOACP.Image}
}

// schedulerConfigMapGetter returns the volcano scheduler configuration text
// declared in the CR spec.
func schedulerConfigMapGetter(spec *v1.NPUClusterPolicySpec) string {
	return spec.VCScheduler.SchedulerConfigMap
}

// serviceAccountReconcileHooks customizes reconciliation of ServiceAccount
// resources by substituting the operator namespace for the placeholder value.
type serviceAccountReconcileHooks struct {
	defaultResourceReconcileHooks[*corev1.ServiceAccount]
}

// transform replaces a placeholder namespace with the operator's namespace,
// then applies the default transform.
func (s serviceAccountReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *corev1.ServiceAccount,
) error {
	if obj.Namespace == placeholderValue {
		obj.Namespace = r.namespace
	}
	return s.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// clusterRoleReconcileHooks customizes reconciliation of ClusterRole
// resources by substituting the operator namespace for the placeholder value.
// NOTE(review): ClusterRole is cluster-scoped, so the namespace field is
// normally unused — confirm the manifests actually set a placeholder here.
type clusterRoleReconcileHooks struct {
	defaultResourceReconcileHooks[*rbacv1.ClusterRole]
}

// transform replaces a placeholder namespace with the operator's namespace,
// then applies the default transform.
func (c clusterRoleReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *rbacv1.ClusterRole,
) error {
	if obj.Namespace == placeholderValue {
		obj.Namespace = r.namespace
	}
	return c.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// clusterRoleBindingReconcileHooks customizes reconciliation of
// ClusterRoleBinding resources by substituting the operator namespace into
// any subject whose namespace is the placeholder value.
type clusterRoleBindingReconcileHooks struct {
	defaultResourceReconcileHooks[*rbacv1.ClusterRoleBinding]
}

// transform rewrites placeholder subject namespaces, then applies the default
// transform.
func (c clusterRoleBindingReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *rbacv1.ClusterRoleBinding,
) error {
	for i := range obj.Subjects {
		subject := &obj.Subjects[i]
		if subject.Namespace == placeholderValue {
			subject.Namespace = r.namespace
		}
	}
	return c.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// serviceMoniterReconcileHooks reconciles ServiceMonitor resources using only
// the default transform behavior. (The "Moniter" typo in the type name is
// kept to avoid breaking references elsewhere in the package.)
type serviceMoniterReconcileHooks struct {
	defaultResourceReconcileHooks[*monitoringv1.ServiceMonitor]
}

// transform delegates directly to the default transform.
func (s serviceMoniterReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *monitoringv1.ServiceMonitor,
) error {
	return s.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// serviceReconcileHooks reconciles Service resources using only the default
// transform behavior.
type serviceReconcileHooks struct {
	defaultResourceReconcileHooks[*corev1.Service]
}

// transform delegates directly to the default transform.
func (s serviceReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *corev1.Service,
) error {
	return s.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// leaseReconcileHooks reconciles coordination Lease resources using only the
// default transform behavior.
type leaseReconcileHooks struct {
	defaultResourceReconcileHooks[*coordinationv1.Lease]
}

// transform delegates directly to the default transform.
func (l leaseReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *coordinationv1.Lease,
) error {
	return l.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// configMapReconcileHooks customizes reconciliation of ConfigMap resources;
// its transform method special-cases the volcano scheduler ConfigMap.
type configMapReconcileHooks struct {
	defaultResourceReconcileHooks[*corev1.ConfigMap]
}

// ObtainVolcanoVersion derives the volcano version from the VCScheduler image
// tag of a freshly fetched CR instance. The version is the portion of the tag
// after the last '-' (or the whole tag when it contains no '-').
func ObtainVolcanoVersion(ctx context.Context, r *NPUClusterPolicyReconciler) (string, error) {
	logger := log.FromContext(ctx)
	instance := &v1.NPUClusterPolicy{}
	// Re-fetch the CR so the tag reflects the latest applied spec rather than
	// the cached r.instance.
	if err := r.Get(ctx, client.ObjectKey{Name: r.instance.Name, Namespace: r.instance.Namespace}, instance); err != nil {
		return "", fmt.Errorf("get CR instance: %w", err)
	}
	tag := instance.Spec.VCScheduler.Image.Tag
	// strings.Split always yields at least one element, so the original
	// len(parts) > 0 check was dead code and indexing the last element is safe.
	parts := strings.Split(tag, "-")
	version := parts[len(parts)-1]
	logger.Info("volcano version", "version", version)
	return version, nil
}

// transform rewrites the volcano scheduler ConfigMap before the default
// transform runs: it injects the scheduler configuration declared in the CR
// spec and points the volcano-npu plugin entry at the build matching the
// operator node's CPU architecture and the deployed volcano version. Every
// other ConfigMap passes straight through to the default transform.
func (h configMapReconcileHooks) transform(ctx context.Context, r *NPUClusterPolicyReconciler,
	obj *corev1.ConfigMap) error {
	logger := log.FromContext(ctx)
	if obj.Name != volcanoSchedulerConfigMapName {
		return h.defaultResourceReconcileHooks.transform(ctx, r, obj)
	}

	// Overwrite volcano-scheduler.conf with the spec-provided configuration.
	updateConfig(r, obj, ctx)

	// The operator pod's node determines which plugin architecture to select.
	nodeName := os.Getenv("OPERATOR_NODE_NAME")
	if nodeName == "" {
		logger.Error(nil, "OPERATOR_NODE_NAME environment variable is not set")
		// errors.New: fmt.Errorf with no format verbs (staticcheck S1039).
		return errors.New("OPERATOR_NODE_NAME environment variable is not set")
	}

	node := &corev1.Node{}
	if err := r.Get(ctx, client.ObjectKey{Name: nodeName}, node); err != nil {
		logger.Error(err, "failed to get node", "node", nodeName)
		return fmt.Errorf("get node %s: %w", nodeName, err)
	}

	version, err := ObtainVolcanoVersion(ctx, r)
	if err != nil || version == "" {
		logger.Error(err, "failed to get volcano version")
		return fmt.Errorf("obtain volcano version: %w", err)
	}

	// Map the node architecture onto the plugin binary naming scheme.
	arch := node.Status.NodeInfo.Architecture
	var name string
	switch arch {
	case "amd64", "x86_64":
		name = fmt.Sprintf("volcano-npu_%s_linux-x86_64", version)
	case "arm64", "aarch64":
		name = fmt.Sprintf("volcano-npu_%s_linux-aarch64", version)
	default:
		logger.Error(nil, "unsupported architecture", "architecture", arch)
		return fmt.Errorf("unsupported architecture: %s", arch)
	}

	logger.Info("updating volcano scheduler config", "pluginName", name)

	// Rewrite any line mentioning volcano-npu to reference the selected build.
	data := obj.Data["volcano-scheduler.conf"]
	rawLines := strings.Split(data, "\n")
	lines := make([]string, 0, len(rawLines))
	for _, line := range rawLines {
		if strings.Contains(line, "volcano-npu") {
			line = fmt.Sprintf("  - name: %s", name)
		}
		lines = append(lines, line)
	}
	obj.Data["volcano-scheduler.conf"] = strings.Join(lines, "\n")
	logger.Info("successfully updated volcano scheduler configmap")

	return h.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// updateConfig overwrites the volcano-scheduler.conf entry of the ConfigMap
// with the scheduler configuration declared in the CR spec.
// NOTE: ctx should conventionally be the first parameter; the order is kept
// for call-site compatibility.
func updateConfig(r *NPUClusterPolicyReconciler, obj *corev1.ConfigMap, ctx context.Context) {
	logger := log.FromContext(ctx)
	conf := schedulerConfigMapGetter(&r.instance.Spec)
	if obj.Data == nil {
		// Guard against a manifest with no data section: writing to a nil
		// map would panic.
		obj.Data = make(map[string]string, 1)
	}
	obj.Data["volcano-scheduler.conf"] = conf
	logger.Info("updating volcano scheduler configmap", "volcano-scheduler.conf", conf)
}

// Volcano API resource hooks: pass-through transforms for the volcano CRD types.
// jobReconcileHooks reconciles volcano batch Job resources using only the
// default transform behavior.
type jobReconcileHooks struct {
	defaultResourceReconcileHooks[*v1alpha1.Job]
}

// transform delegates directly to the default transform.
func (j jobReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *v1alpha1.Job,
) error {
	return j.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// commandReconcileHooks reconciles volcano bus Command resources using only
// the default transform behavior.
type commandReconcileHooks struct {
	defaultResourceReconcileHooks[*busv1alpha1.Command]
}

// transform delegates directly to the default transform.
func (c commandReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *busv1alpha1.Command,
) error {
	return c.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// podGroupReconcileHooks reconciles volcano PodGroup resources using only the
// default transform behavior.
type podGroupReconcileHooks struct {
	defaultResourceReconcileHooks[*scheduling.PodGroup]
}

// transform delegates directly to the default transform.
func (p podGroupReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *scheduling.PodGroup,
) error {
	return p.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// queueReconcileHooks reconciles volcano Queue resources using only the
// default transform behavior.
type queueReconcileHooks struct {
	defaultResourceReconcileHooks[*scheduling.Queue]
}

// transform delegates directly to the default transform.
func (q queueReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *scheduling.Queue,
) error {
	return q.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// numatopologyReconcileHooks reconciles volcano Numatopology resources using
// only the default transform behavior.
type numatopologyReconcileHooks struct {
	defaultResourceReconcileHooks[*nodeinfov1alpha1.Numatopology]
}

// transform delegates directly to the default transform.
func (n numatopologyReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *nodeinfov1alpha1.Numatopology,
) error {
	return n.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// jobTemplateReconcileHooks reconciles volcano flow JobTemplate resources
// using only the default transform behavior.
type jobTemplateReconcileHooks struct {
	defaultResourceReconcileHooks[*flowv1alpha1.JobTemplate]
}

// transform delegates directly to the default transform.
func (j jobTemplateReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *flowv1alpha1.JobTemplate,
) error {
	return j.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// jobFlowReconcileHooks reconciles volcano flow JobFlow resources using only
// the default transform behavior.
type jobFlowReconcileHooks struct {
	defaultResourceReconcileHooks[*flowv1alpha1.JobFlow]
}

// transform delegates directly to the default transform.
func (j jobFlowReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *flowv1alpha1.JobFlow,
) error {
	return j.defaultResourceReconcileHooks.transform(ctx, r, obj)
}

// CRDReconcileHooks reconciles CustomResourceDefinition resources using only
// the default transform behavior.
type CRDReconcileHooks struct {
	defaultResourceReconcileHooks[*apiextensionsv1.CustomResourceDefinition]
}

// transform delegates directly to the default transform.
func (c CRDReconcileHooks) transform(
	ctx context.Context, r *NPUClusterPolicyReconciler, obj *apiextensionsv1.CustomResourceDefinition,
) error {
	return c.defaultResourceReconcileHooks.transform(ctx, r, obj)
}
