from enum import IntEnum
from copy import deepcopy
from math import log

class nodeClass(IntEnum):
    """Role of a node in the cluster graph (values order the tiers bottom-up)."""
    gpu = 1          # compute endpoint, single port towards its leaf
    leafswitch = 2   # first-tier switch, connects GPUs to spines
    spineswitch = 3  # second-tier switch, connects leaves together

class aclRule():
    """One forwarding (ACL) rule installed on a switch: traffic from node
    srcIdx to node destIdx leaves through outPort.

    outPort defaults to -1 ("not resolved yet") so rules can be created
    before route computation has picked the actual output port, as
    TaskGen.genRoutes() does in this file.
    """
    def __init__(self, srcIdx_: int, destIdx_: int, outPort_: int = -1) -> None:
        self.srcIdx: int = srcIdx_    # global index of the flow's source node
        self.destIdx: int = destIdx_  # global index of the flow's destination node
        self.outPort: int = outPort_  # -1 until the route is resolved
        # IP strings stay empty until assigned elsewhere.
        self.srcIP: str = ""
        self.destIP: str = ""

class networkNode():
    """A node in the cluster graph: a GPU or a leaf/spine switch.

    Hashing is based on the global nodeIdx so nodes can key dicts (e.g.
    TaskGen.commPair); equality additionally requires the same nodeClass,
    which is consistent with the hash (equal nodes share nodeIdx).
    """

    def __init__(self, nodeClass_, nodeIdx_) -> None:
        self.nodeClass: "nodeClass" = nodeClass_
        self.nodeIdx: int = nodeIdx_  # Global index
        self.connection: dict[int, "networkNode"] = {}  # {portIdx: peer node}
        self.idleConnection: dict[int, "networkNode"] = {}  # ports not yet used in the current stage
        # BUG FIX: the original annotation `list[tuple(tuple(int, int), int)]`
        # used parentheses, so `tuple(int, int)` was *called* when the attribute
        # annotation was evaluated and raised TypeError on every construction.
        # genRoutes() stores aclRule objects here, so annotate accordingly.
        self.aclRules: list["aclRule"] = []  # For switches
        # gen_hd_task() appends destination *nodes*, not ints.
        self.txFlows: list["networkNode"] = []  # For GPUs

    def __hash__(self):
        return hash(self.nodeIdx)

    def __eq__(self, other):
        if not isinstance(other, networkNode):
            return NotImplemented
        return self.nodeClass == other.nodeClass and self.nodeIdx == other.nodeIdx

    def clearUsage(self) -> None:
        """Reset idleConnection to the full port map.

        BUG FIX: the original deepcopy cloned every reachable peer node (the
        whole graph), so genRoutes() appended ACL rules onto throwaway copies.
        A shallow copy keeps references to the real peers while still letting
        entries be removed independently of self.connection.
        """
        self.idleConnection = dict(self.connection)

# routing_material_leaf = {leaf_switch_id: {"gpu_addr":[], "leaf_up_port_seq":[], "spine_port_seq":[(spine_switch_id, spine_port_seq)]}}
# routing_material_spine = {spine_switch_id: {"spine_port_seq":[], "leaf_port_seq":[(leaf_switch_id, leaf_port_seq)]}}
class ClusterGraph():
    """Two-tier (leaf/spine) CLOS cluster topology built from routing material.

    See the schema comments above for the layout of the two input dicts.
    Creates one networkNode per GPU, leaf switch and spine switch and wires
    their port->peer connection maps.
    """

    def __init__(self, routing_material_leaf : dict, routing_material_spine: dict) -> None:
        # Init the properties
        self.gpuNum = 0  # total GPU count, summed over all leaves
        self.gpuNodeList: list[networkNode] = [] # list[node]
        self.leafNodeList: list[networkNode] = [] # list[node]
        self.spineNodeList: list[networkNode] = [] # list[node]

        # Generate spine switches first so that initLeaf() can validate each
        # leaf's uplink count against the number of spines.
        # BUG FIX: dict has no .item(); iterate the keys directly.
        for spineIdx in routing_material_spine:
            self.spineNodeList.append(networkNode(nodeClass.spineswitch, spineIdx))

        # Construct the gpu, leaf networkNodes
        # BUG FIX: .item() -> .items()
        for leafIdx, connect in routing_material_leaf.items():
            self.gpuNum += len(connect["gpu_addr"])
            self.leafNodeList.append(networkNode(nodeClass.leafswitch, leafIdx))
            # Connect this leaf's uplink ports to the spine switches
            self.initLeaf(self.leafNodeList[-1], connect)
            for gpuIdx in connect["gpu_addr"]:
                self.gpuNodeList.append(networkNode(nodeClass.gpu, gpuIdx))
                # A GPU has a single port (0) towards its leaf switch
                self.gpuNodeList[-1].connection[0] = self.leafNodeList[-1]

        # Wire the spines created above down to the leaves.
        # BUG FIX: the original loop appended a *second* networkNode per spine
        # here, duplicating spineNodeList and leaving the first batch (the one
        # initLeaf connected the leaves to) without downlinks.
        for spineNode, connect in zip(self.spineNodeList, routing_material_spine.values()):
            self.initSpine(spineNode, connect)

    def initSpine(self, node: networkNode, connect: dict[str,list]):
        """Connect a spine switch to every leaf through its "spine_port_seq" ports."""
        port_num = len(connect["spine_port_seq"])
        # assert the network is clos: exactly one spine port per leaf
        assert(port_num == len(self.leafNodeList))
        for leaf in range(port_num):
            port = connect["spine_port_seq"][leaf]
            leaf_peer = self.leafNodeList[leaf]
            node.connection[port] = leaf_peer

    # TODO: Only upper port inited now (leaf->gpu downlinks are not recorded)
    def initLeaf(self, node: networkNode, connect: dict[str,list]):
        """Connect a leaf switch's uplink ports ("leaf_up_port_seq") to the spines."""
        upper_port_num = len(connect["leaf_up_port_seq"])
        # assert the network is clos: exactly one uplink per spine
        assert(upper_port_num == len(self.spineNodeList))
        for spine_up in range(upper_port_num):
            upper_port = connect["leaf_up_port_seq"][spine_up]
            upper_peer = self.spineNodeList[spine_up]
            node.connection[upper_port] = upper_peer

    def clearAllUsage(self) ->None:
        """Reset every node's idleConnection to its full connection map."""
        for gpu in self.gpuNodeList:
            gpu.clearUsage()
        for leaf in self.leafNodeList:
            leaf.clearUsage()
        for spine in self.spineNodeList:
            spine.clearUsage()

class TaskGen():
    """Generates halving-doubling (HD) traffic stages over a ClusterGraph."""

    def __init__(self, routing_material_leaf : dict, routing_material_spine: dict) -> None:
        self.graph = ClusterGraph(routing_material_leaf, routing_material_spine)
        # One dict per HD stage: {src GPU node: dest GPU node}
        self.commPair: list[dict] = []

    def _stagePairs(self, stage: int) -> dict:
        """Build the {src: dest} GPU pairing for one HD stage (pair distance
        2**stage) and record each flow on the source GPU's txFlows."""
        dist = pow(2, stage)
        groupSize = 2 * dist
        stageCommPair = {}
        # BUG FIX: range() needs an int; '/' produced a float (TypeError).
        for groupCnt in range(self.graph.gpuNum // groupSize):
            for pairCnt in range(dist):
                srcIdx = groupCnt * groupSize + pairCnt
                destIdx = srcIdx + dist
                srcNode = self.graph.gpuNodeList[srcIdx]
                destNode = self.graph.gpuNodeList[destIdx]
                stageCommPair[srcNode] = destNode
                srcNode.txFlows.append(destNode)
        return stageCommPair

    def gen_hd_task(self) -> list:
        """Populate self.commPair with the HD stages and return it.

        Assumes self.graph.gpuNum is a power of two. The first phase doubles
        the pair distance each stage; the second phase replays the same
        stages in reverse order.
        """
        numStages = int(log(self.graph.gpuNum, 2))
        for stage in range(numStages):
            self.commPair.append(self._stagePairs(stage))
        # BUG FIX: the original range(numStages, 0, -1) started with a no-op
        # stage (group larger than the cluster) and skipped stage 0.
        for stage in range(numStages - 1, -1, -1):
            self.commPair.append(self._stagePairs(stage))
        # BUG FIX: the signature promised a list but the original returned None.
        return self.commPair

    # Note: This function assume that the cluster is CLOS ONLY!
    def genRoutes(self) -> None:
        """Install per-stage ACL rules on the source leaf switches.

        NOTE(review): route computation is unfinished here — spine selection
        and the real output port are still TODO, so rules are created with the
        placeholder outPort -1.
        """
        for stageCommPair in self.commPair:
            self.graph.clearAllUsage()
            for srcNode, destNode in stageCommPair.items():
                # Use leafswitch to compute route directly
                srcLeaf = srcNode.idleConnection[0]
                destLeaf = destNode.idleConnection[0]
                # BUG FIX: aclRule takes (srcIdx, destIdx, outPort) ints; the
                # original passed only the two node objects (TypeError).
                srcLeaf.aclRules.append(aclRule(srcNode.nodeIdx, destNode.nodeIdx, -1))
                # TODO: choose an uplink towards destLeaf and set the real outPort.
                

class ClusterGlueClass():
    """Glue layer tying HD task generation and ACL computation together."""

    # BUG FIX: the original def lacked `self`, so any instance call would have
    # passed the instance as gpu_num. Declared as a staticmethod, which keeps
    # class-level calls (ClusterGlueClass.gen_acl(...)) working unchanged and
    # makes instance calls correct too.
    @staticmethod
    def gen_acl(gpu_num, routing_material_leaf, routing_material_spine):
        """Generate flows for HD, and compute ACL for each stage in HD.

        TODO: still a stub — wire up TaskGen.gen_hd_task() / genRoutes().
        """
        return
