import numpy as np
import psutil
import random
import subprocess as sp
import torch
import uuid
from typing import TypeAlias, List
from enum import Enum
from typing import Dict,Tuple
import ray
from dataclasses import dataclass
GB = 1 << 30
MB = 1 << 20


class Counter:
    """A monotonically increasing integer counter.

    Implements the iterator protocol, so a ``Counter`` works both with
    explicit ``next(counter)`` calls and directly in iteration contexts
    (``zip``, ``itertools.islice``, ...). Note that as an iterator it is
    infinite — never exhaust it with a bare ``for`` or ``list()``.
    """

    def __init__(self, start: int = 0) -> None:
        # Next value that will be handed out.
        self.counter = start

    def __iter__(self) -> "Counter":
        # Fix: the original defined __next__ without __iter__, so instances
        # could not be used in for-loops/zip/itertools despite being
        # conceptually iterators.
        return self

    def __next__(self) -> int:
        i = self.counter
        self.counter += 1
        return i

    def reset(self) -> None:
        """Restart counting from zero (regardless of the original start)."""
        self.counter = 0


def get_gpu_memory(gpu: int = 0) -> int:
    """Return the total memory of CUDA device ``gpu`` in bytes."""
    device_props = torch.cuda.get_device_properties(gpu)
    return device_props.total_memory


def get_gpu_memory_usage(gpu: int = 0) -> int:
    """Return the currently used memory of GPU ``gpu`` as reported by
    ``nvidia-smi`` (value is in MiB per nvidia-smi's csv output).

    Python equivalent of nvidia-smi, adapted from
    https://stackoverflow.com/a/67722676 and verified as being equivalent.

    Raises:
        RuntimeError: if the ``nvidia-smi`` invocation fails.
    """
    COMMAND = "nvidia-smi --query-gpu=memory.used --format=csv"

    try:
        # Keep the try body minimal: only the subprocess call can raise
        # CalledProcessError.
        raw_output = sp.check_output(COMMAND.split(), stderr=sp.STDOUT)
    except sp.CalledProcessError as e:
        # Chain the original exception so the subprocess failure context
        # is preserved for debugging.
        raise RuntimeError(
            "command '{}' return with error (code {}): {}".format(
                e.cmd, e.returncode, e.output
            )
        ) from e

    # Drop the trailing empty line from the final newline, then the CSV
    # header row ("memory.used [MiB]").
    memory_use_info = raw_output.decode("ascii").split("\n")[:-1][1:]

    # Each remaining row looks like "1234 MiB" — take the numeric field.
    return int(memory_use_info[gpu].split()[0])


def get_cpu_memory() -> int:
    """Return the total CPU (host) memory of this node in bytes."""
    vm = psutil.virtual_memory()
    return vm.total


def set_random_seed(seed: int) -> None:
    """Seed every RNG this project relies on: stdlib ``random``, NumPy,
    torch, and all CUDA devices when CUDA is available."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


def random_uuid() -> str:
    """Return a fresh random UUID as a 32-character lowercase hex string."""
    # uuid4().hex is already a str; no extra conversion needed.
    return uuid.uuid4().hex


cudaMemoryIpcHandle: TypeAlias = List[int]


class Stage(Enum):
    """The stage of a SingleStageLLMEngine.

    Stringifies to its bare value (e.g. ``"context"`` rather than
    ``"Stage.CONTEXT"``).
    """

    CONTEXT = "context"
    DECODING = "decoding"

    def __str__(self) -> str:
        # Render as the underlying value for logs / config keys.
        return self.value

def get_node_gpu_count() -> Dict[str, int]:
    """Return the number of GPUs on each alive node of the Ray cluster.

    NOTE(review): this function is shadowed by an identically named
    definition later in this file, so at import time the later definition
    wins and this one is dead code. The two versions also disagreed on the
    dict key (``NodeID`` here vs ``NodeManagerAddress`` below); this body has
    been aligned with the surviving definition so behavior is consistent
    whichever copy is eventually removed. Consolidate to one definition.
    """
    # Collect node info from the Ray cluster.
    node_gpu_count: Dict[str, int] = {}
    for node in ray.nodes():
        # Skip dead nodes — their resources are not schedulable.
        if not node["Alive"]:
            continue
        node_gpu_count[node["NodeManagerAddress"]] = int(
            node.get("Resources", {}).get("GPU", 0)
        )
    return node_gpu_count

# Logical ID -> physical ID
def logical_to_physical(logical_id: int) -> Tuple[str, int]:
    """Map a cluster-wide logical GPU id to ``(node_id, physical_id)``.

    Logical ids number all GPUs in the cluster consecutively, walking the
    nodes in the order returned by ``get_node_gpu_count()``; the physical id
    is the GPU's index within its node.

    Raises:
        ValueError: if ``logical_id`` does not correspond to any GPU.
    """
    # Fix: a negative id previously matched the first node with GPUs and
    # produced a nonsensical negative physical id.
    if logical_id < 0:
        raise ValueError("Invalid logical ID")
    node_gpu_count = get_node_gpu_count()

    gpu_count_so_far = 0
    for node_id, gpu_count in node_gpu_count.items():
        if logical_id < gpu_count_so_far + gpu_count:
            # The id falls inside this node; the offset is the local index.
            physical_id = logical_id - gpu_count_so_far
            return (node_id, physical_id)
        gpu_count_so_far += gpu_count
    raise ValueError("Invalid logical ID")

# Physical ID -> logical ID
def physical_to_logical(node_id: str, physical_id: int) -> int:
    """Map ``(node_id, physical_id)`` back to a cluster-wide logical GPU id.

    Inverse of ``logical_to_physical``: logical ids number all GPUs in the
    cluster consecutively in ``get_node_gpu_count()`` order.

    Raises:
        ValueError: if the node is unknown or the physical id is out of
            range for that node.
    """
    node_gpu_count = get_node_gpu_count()

    gpu_count_so_far = 0
    for candidate_node_id, gpu_count in node_gpu_count.items():
        if candidate_node_id == node_id:
            # Fix: previously an out-of-range physical_id was silently
            # mapped onto a logical id belonging to another node.
            if not 0 <= physical_id < gpu_count:
                raise ValueError("Invalid node ID or physical ID")
            return gpu_count_so_far + physical_id
        gpu_count_so_far += gpu_count
    raise ValueError("Invalid node ID or physical ID")

def get_node_gpu_count() -> Dict[str, int]:
    """Return GPUs per alive Ray node, keyed by node manager address.

    NOTE(review): this redefines — and therefore shadows — the
    ``get_node_gpu_count`` declared earlier in this file, which keyed its
    result by ``NodeID`` instead. Consolidate to a single definition.
    """
    nodes = ray.nodes()
    # Dead nodes are filtered out; missing 'GPU' resource counts as 0.
    return {node['NodeManagerAddress']: int(node['Resources'].get('GPU', 0)) for node in nodes if node['Alive']}

def get_logical_to_physical_map() -> Dict[int, Tuple[str, int]]:
    """Build the full logical-id -> ``(node_id, physical_id)`` mapping.

    Logical ids number every GPU in the cluster consecutively, following
    the node order returned by ``get_node_gpu_count()``; the physical id is
    the GPU's index within its node.
    """
    mapping: Dict[int, Tuple[str, int]] = {}
    next_logical_id = 0
    for node_id, gpu_count in get_node_gpu_count().items():
        for local_index in range(gpu_count):
            mapping[next_logical_id] = (node_id, local_index)
            next_logical_id += 1
    return mapping

@dataclass
class Prompt_struct:
    """Bookkeeping for a single prompt and its latency SLOs."""

    input_length: int    # number of prompt tokens
    output_length: int   # number of output tokens expected

    TTFT_SLO: float      # time-to-first-token SLO — presumably seconds, TODO confirm units
    TPOT_SLO: float      # time-per-output-token SLO — same units as TTFT_SLO
    current_length: int  # tokens generated so far

    def calculate_local_length(self, engine_TTFT: float, engine_TPOT: float) -> int:
        """Return a non-negative token length derived from the TPOT-SLO slack
        relative to the engine's timings.

        Computes ``((TPOT_SLO - engine_TPOT) * output_length) //
        (engine_TTFT - engine_TPOT)`` — the exact scheduling semantics of
        this quantity should be confirmed with the callers.
        """
        # No slack to trade against when per-token time is not better than
        # the first-token time (also guards against division by zero).
        if engine_TTFT <= engine_TPOT:
            return 0
        slack = (self.TPOT_SLO - engine_TPOT) * self.output_length
        # Fix: when the engine cannot meet the TPOT SLO the slack is
        # negative and the old formula returned a negative length; a token
        # count can never be negative, so clamp to 0.
        return max(0, int(slack // (engine_TTFT - engine_TPOT)))