package predicates

import (
	"fmt"

	nvml "volcano.sh/volcano/pkg/scheduler/plugins/util/nvidia"
)

// NodeGPUDevices represents GPU device list
type NodeGPUDevices struct {
	Devices []*Device `json:"devices"`
}

// Device represents a GPU device as reported by NVML, including all of its
// Point-to-Point link information.
// Device represents a GPU device as reported by NVML, including all of its
// Point-to-Point link information.
type Device struct {
	*nvml.Device
	// Index is this GPU's index (presumably the NVML device index — confirm
	// against the code that populates it).
	Index int
	// Links maps a peer GPU's Index to the set of P2P links between this
	// device and that peer. Link data is expected to be symmetric: for any
	// pair (a, b), len(a.Links[b.Index]) == len(b.Links[a.Index]).
	Links map[int][]P2PLink
}

// P2PLink represents a Point-to-Point link between two GPU devices. The link
// is between the Device struct this struct is embedded in and the GPU Device
// contained in the P2PLink struct itself.
// P2PLink represents a Point-to-Point link between two GPU devices. The link
// is between the Device struct this struct is embedded in and the GPU Device
// contained in the P2PLink struct itself.
type P2PLink struct {
	// Index identifies the peer GPU on the other end of this link.
	Index int
	// GPU  *Device // retained from upstream; the peer is referenced by Index instead
	// Type classifies the link (NVLINK count or position in the PCIe hierarchy).
	Type nvml.P2PLinkType
}

// Get the total score of a set of GPUs. The score is calculated as the sum of
// the scores calculated for each pair of GPUs in the set.
func calculateGPUSetScore(gpuSet []*Device) int {
	score := 0

	iterateGPUSets(gpuSet, 2, func(gpus []*Device) {
		score += calculateGPUPairScore(gpus[0], gpus[1])
	})

	return score
}

// Iterate through all GPU sets of size 'size', applying a callback function to them.
// This function is implemented using an iterative solution for efficiency.
// iterateGPUSets invokes callback once for every combination of 'size'
// devices drawn from 'devices', in lexicographic index order. It is a
// non-recursive backtracking enumerator, equivalent to 'size' nested loops:
//
//	for i := 0; i < n; i++
//	    for j := i+1; j < n; j++
//	        ...
//	            callback({devices[i], devices[j], ...})
//
// 'depth' tracks which logical loop we are in, 'cursors' holds each loop's
// index, and 'combo' is the working set handed to the callback whenever the
// innermost loop is reached. Note the same combo slice is reused across
// invocations, so callbacks must not retain it.
func iterateGPUSets(devices []*Device, size int, callback func([]*Device)) {
	// Nothing to enumerate for non-positive sizes or sets larger than the input.
	if size <= 0 || size > len(devices) {
		return
	}

	depth := 0
	cursors := make([]int, size)
	combo := make([]*Device, size)

	for {
		if cursors[depth] == len(devices) {
			// This level is exhausted; backtrack, or stop if we are at the top.
			if depth == 0 {
				return
			}
			depth--
			cursors[depth]++
			continue
		}

		combo[depth] = devices[cursors[depth]]

		if depth == size-1 {
			// Innermost loop: the combination is complete.
			callback(combo)
			cursors[depth]++
			continue
		}

		// Descend one level; the next cursor starts just past the current one.
		depth++
		cursors[depth] = cursors[depth-1] + 1
	}
}

// Calculate a "link" score for a pair of GPUs.
// The score is based on the "closeness" of the two GPUs in relation to one
// another in terms of the communication links they have with another, as well
// as the PCIe hierarchy they are in. GPUs connected by an NVLINK receive 100
// points for each link connecting them. GPUs in the PCIe hierarchy receive
// points relative to how close they are to one another.
// calculateGPUPairScore computes a "link" score for a pair of GPUs.
// The score is based on the "closeness" of the two GPUs in relation to one
// another in terms of the communication links they have with another, as well
// as the PCIe hierarchy they are in. GPUs connected by an NVLINK receive 100
// points for each link connecting them. GPUs in the PCIe hierarchy receive
// points relative to how close they are to one another.
//
// A nil GPU, or a GPU paired with itself, scores 0. The function panics if
// the recorded link counts between the two GPUs are asymmetric, since P2P
// links are expected to be bidirectional and an asymmetry indicates corrupt
// link data (a programmer error, not a recoverable condition).
func calculateGPUPairScore(gpu0 *Device, gpu1 *Device) int {
	if gpu0 == nil || gpu1 == nil {
		return 0
	}

	if gpu0 == gpu1 {
		return 0
	}

	if len(gpu0.Links[gpu1.Index]) != len(gpu1.Links[gpu0.Index]) {
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		err := fmt.Errorf("internal error in bestEffort GPU allocator: all P2PLinks between 2 GPUs should be bidirectional")
		panic(err)
	}

	score := 0

	// Each link contributes independently: NVLINK links score 100 per lane,
	// PCIe relationships score 10-60 by increasing proximity.
	for _, link := range gpu0.Links[gpu1.Index] {
		switch link.Type {
		case nvml.P2PLinkCrossCPU:
			score += 10
		case nvml.P2PLinkSameCPU:
			score += 20
		case nvml.P2PLinkHostBridge:
			score += 30
		case nvml.P2PLinkMultiSwitch:
			score += 40
		case nvml.P2PLinkSingleSwitch:
			score += 50
		case nvml.P2PLinkSameBoard:
			score += 60
		case nvml.SingleNVLINKLink:
			score += 100
		case nvml.TwoNVLINKLinks:
			score += 200
		case nvml.ThreeNVLINKLinks:
			score += 300
		case nvml.FourNVLINKLinks:
			score += 400
		case nvml.FiveNVLINKLinks:
			score += 500
		case nvml.SixNVLINKLinks:
			score += 600
		case nvml.SevenNVLINKLinks:
			score += 700
		case nvml.EightNVLINKLinks:
			score += 800
		case nvml.NineNVLINKLinks:
			score += 900
		case nvml.TenNVLINKLinks:
			score += 1000
		case nvml.ElevenNVLINKLinks:
			score += 1100
		case nvml.TwelveNVLINKLinks:
			score += 1200
		}
	}

	return score
}
