/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package noderesourcetopology

import (
	"context"

	"github.com/go-logr/logr"
	topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	fwk "k8s.io/kube-scheduler/framework"
	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	bm "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
	"sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/logging"
	"sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/nodeconfig"
	"sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/resourcerequests"
	"sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/stringify"
	"sigs.k8s.io/scheduler-plugins/pkg/util"
)

// PolicyHandler evaluates a pod against the zone list of a candidate node and
// yields a framework status.
// NOTE(review): the handlers in this file use the filterFn signature
// (logr.Logger, *v1.Pod, *filterInfo) *fwk.Status instead of this type;
// confirm whether PolicyHandler is still referenced elsewhere in the package.
type PolicyHandler func(pod *v1.Pod, zoneMap topologyv1alpha2.ZoneList) fwk.Status

// singleNUMAContainerLevelHandler verifies, container by container, that every
// resource request of the pod can be satisfied by a single NUMA zone of the
// candidate node (topology manager scope=container, policy=single-numa-node).
// It returns nil when the pod fits, an Unschedulable status when alignment is
// impossible, or an Error status on inconsistent internal accounting.
func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *fwk.Status {
	// Init containers run one at a time, before the app containers start:
	// https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#understanding-init-containers
	// hence each request is checked on its own, with no accumulation.
	for idx := range pod.Spec.InitContainers {
		initCnt := &pod.Spec.InitContainers[idx]
		kind := logging.GetInitContainerKind(initCnt)
		cntLog := lh.WithValues(logging.KeyContainer, initCnt.Name, logging.KeyContainerKind, kind)
		cntLog.V(6).Info("desired resources", stringify.ResourceListToLoggable(initCnt.Resources.Requests)...)

		if _, ok, reason := resourcesAvailableInAnyNUMANodes(cntLog, info, initCnt.Resources.Requests); !ok {
			msg := "cannot align " + kind + " container"
			// an unalignable init container makes the whole pod unalignable
			cntLog.V(2).Info(msg, "reason", reason)
			return fwk.NewStatus(fwk.Unschedulable, msg)
		}
	}

	for idx := range pod.Spec.Containers {
		cnt := &pod.Spec.Containers[idx]
		cntLog := lh.WithValues(logging.KeyContainer, cnt.Name, logging.KeyContainerKind, logging.KindContainerApp)
		cntLog.V(6).Info("container requests", stringify.ResourceListToLoggable(cnt.Resources.Requests)...)

		numaID, ok, reason := resourcesAvailableInAnyNUMANodes(cntLog, info, cnt.Resources.Requests)
		if !ok {
			// an unalignable app container makes the whole pod unalignable
			cntLog.V(2).Info("cannot align container", "reason", reason)
			return fwk.NewStatus(fwk.Unschedulable, "cannot align container")
		}

		// Reserve the request on the chosen NUMA zone so the remaining
		// containers do not double-count the same capacity.
		if err := subtractResourcesFromNUMANodeList(cntLog, info.numaNodes, numaID, info.qos, cnt.Resources.Requests); err != nil {
			// internal inconsistency in the accounting: should never happen
			return fwk.NewStatus(fwk.Error, "inconsistent resource accounting", err.Error())
		}
		cntLog.V(4).Info("container aligned", "numaCell", numaID)
	}
	return nil
}

// resourcesAvailableInAnyNUMANodes checks for sufficient resource.
// returns:
// - the NUMAID that would be selected by Kubelet,
// - a boolean which tells if the worker node can satisfy the request in any of its NUMA zones
// - the reason for reject.
// the reason for reject is significant only if the worker node is filtered out;
// the selected numaID is significant only if the worker node is filtered in.
// The function takes a `filterInfo` struct which must be filled with the `nodeInfo` provided by the scheduler framework,
// the NUMANodeList built using createNUMANodeList, the topology manager configuration from the NRT objects pertaining
// to the candidate node.
func resourcesAvailableInAnyNUMANodes(lh logr.Logger, info *filterInfo, resources v1.ResourceList) (int, bool, string) {
	// NOTE(review): used only as the failure-path placeholder return (see the
	// And()-empty branch below); the "highest NUMA ID" would actually be
	// MaxNUMANodes-1, but callers ignore numaID on reject — confirm the intent.
	numaID := info.topologyManager.MaxNUMANodes // highest NUMA ID
	bitmask := bm.NewEmptyBitMask()
	// set all bits, each bit is a NUMA node, if resources couldn't be aligned
	// on the NUMA node, bit should be unset
	bitmask.Fill()

	// node-level allocatable: the superset of anything a NUMA zone can offer
	nodeResources := util.ResourceList(info.node.GetAllocatable())

	for resource, quantity := range resources {
		clh := lh.WithValues("resource", resource)
		if quantity.IsZero() {
			// why bother? everything's fine from the perspective of this resource
			clh.V(4).Info("ignoring zero-qty resource request")
			continue
		}

		if _, ok := nodeResources[resource]; !ok {
			// some resources may not expose NUMA affinity (device plugins, extended resources), but all resources
			// must be reported at node level; thus, if they are not present at node level, we can safely assume
			// we don't have the resource at all.
			clh.V(2).Info("early verdict: cannot meet request")
			return -1, false, string(resource)
		}

		// for each requested resource, calculate which NUMA slots are good fits, and then AND with the aggregated bitmask, IOW unset appropriate bit if we can't align resources, or set it
		// obvious, bits which are not in the NUMA id's range would be unset
		hasNUMAAffinity := false
		resourceBitmask := bm.NewEmptyBitMask()
		for _, numaNode := range info.numaNodes {
			nlh := clh.WithValues("numaCell", numaNode.NUMAID)
			numaQuantity, ok := numaNode.Resources[resource]
			if !ok {
				// this NUMA zone does not report the resource at all
				nlh.V(6).Info("missing")
				continue
			}

			hasNUMAAffinity = true
			if !isResourceSetSuitable(info.qos, resource, quantity, numaQuantity) {
				nlh.V(6).Info("discarded", "quantity", quantity.String(), "numaQuantity", numaQuantity.String())
				continue
			}

			resourceBitmask.Add(numaNode.NUMAID)
			nlh.V(6).Info("feasible")
		}

		// non-native resources or ephemeral-storage may not expose NUMA affinity,
		// but since they are available at node level, this is fine
		if !hasNUMAAffinity && isHostLevelResource(resource) {
			clh.V(6).Info("resource available at host level (no NUMA affinity)")
			continue
		}

		bitmask.And(resourceBitmask)
		if bitmask.IsEmpty() {
			// no single NUMA zone can satisfy all of the resources seen so far
			lh.V(2).Info("early verdict: cannot find affinity")
			return numaID, false, string(resource)
		}
	}
	// according to TopologyManager, the preferred NUMA affinity, is the narrowest one.
	// https://github.com/kubernetes/kubernetes/blob/v1.24.0-rc.1/pkg/kubelet/cm/topologymanager/policy.go#L155
	// in single-numa-node policy all resources should be allocated from a single NUMA,
	// which means that the lowest NUMA ID (with available resources) is the one to be selected by Kubelet.
	// GetBits()[0] is safe here: the bitmask started full and every And() that
	// could have emptied it returned early above.
	numaID = bitmask.GetBits()[0]

	// at least one NUMA node is available
	// (always true at this point, for the same reason as above)
	ret := !bitmask.IsEmpty()
	lh.V(2).Info("final verdict", "suitable", ret, "numaCell", numaID)
	return numaID, ret, "generic"
}

// singleNUMAPodLevelHandler checks whether the aggregated (effective) resource
// request of the whole pod fits inside a single NUMA zone of the candidate
// node (topology manager scope=pod, policy=single-numa-node). It returns nil
// when the pod can be aligned, an Unschedulable status otherwise.
func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *fwk.Status {
	podRequests := util.GetPodEffectiveRequest(pod)
	lh.V(6).Info("pod desired resources", stringify.ResourceListToLoggable(podRequests)...)

	numaID, ok, reason := resourcesAvailableInAnyNUMANodes(lh, info, podRequests)
	if !ok {
		lh.V(2).Info("cannot align pod", "name", pod.Name, "reason", reason)
		return fwk.NewStatus(fwk.Unschedulable, "cannot align pod")
	}
	lh.V(4).Info("all container placed", "numaCell", numaID)
	return nil
}

// Filter implements the scheduler framework Filter extension point.
// Only the single-numa-node topology manager policy is supported: for any
// other policy the node is accepted unconditionally.
func (tm *TopologyMatch) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
	if nodeInfo.Node() == nil {
		return fwk.NewStatus(fwk.Error, "node not found")
	}
	podQOS := v1qos.GetPodQOS(pod)
	if podQOS == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
		// best-effort pods with only native resources never need NUMA alignment
		return nil
	}

	nodeName := nodeInfo.Node().Name

	lh := klog.FromContext(klog.NewContext(ctx, tm.logger)).WithValues(logging.KeyPod, klog.KObj(pod), logging.KeyPodUID, logging.PodUID(pod), logging.KeyNode, nodeName)

	lh.V(4).Info(logging.FlowBegin)
	defer lh.V(4).Info(logging.FlowEnd)

	nrtObj, cacheInfo := tm.nrtCache.GetCachedNRTCopy(ctx, nodeName, pod)
	lh = lh.WithValues(logging.KeyGeneration, cacheInfo.Generation)
	if !cacheInfo.Fresh {
		lh.V(2).Info("invalid topology data")
		return fwk.NewStatus(fwk.Unschedulable, "invalid node topology data")
	}
	if nrtObj == nil {
		// no NRT object for this node: nothing to filter on
		return nil
	}

	tmConf := nodeconfig.TopologyManagerFromNodeResourceTopology(lh, nrtObj)

	lh.V(4).Info("found nrt data", "object", stringify.NodeResourceTopologyResources(nrtObj), "conf", tmConf.String())

	handler, scope := filterHandlerFromTopologyManager(tmConf)
	if handler == nil {
		// policy/scope combination which requires no filtering
		return nil
	}

	numaCells := createNUMANodeList(lh, nrtObj.Zones)
	lh.V(4).Info("aligning resources", "scope", scope, "numaCells", len(numaCells))
	fi := filterInfo{
		nodeName:        nodeName,
		node:            nodeInfo,
		topologyManager: tmConf,
		numaNodes:       numaCells,
		qos:             podQOS,
	}
	status := handler(lh, pod, &fi)
	if status != nil {
		// the node rejected the pod: earlier optimistic reservations for this
		// node may now be stale, so let the cache know
		tm.nrtCache.NodeMaybeOverReserved(nodeName, pod)
	}
	return status
}

// filterHandlerFromTopologyManager maps a node's topology manager
// configuration to the matching filter handler and its scope label.
// It returns (nil, "") when no filtering is required — only the
// single-numa-node policy is handled.
func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) (filterFn, string) {
	if conf.Policy != kubeletconfig.SingleNumaNodeTopologyManagerPolicy {
		return nil, ""
	}
	switch conf.Scope {
	case kubeletconfig.PodTopologyManagerScope:
		return singleNUMAPodLevelHandler, "pod"
	case kubeletconfig.ContainerTopologyManagerScope:
		return singleNUMAContainerLevelHandler, "container"
	}
	return nil, "" // cannot happen
}
