import os
import signal
import subprocess
import threading
import time
import GPUtil
import psutil
import logging
from datetime import datetime
from threading import RLock
from typing import List, Deque, Dict, Optional, Any
from collections import deque
from GPUtil import GPU
from .models import DLTask
from .config import Config
from .utils import setup_logging

class TaskScheduler:
    """Schedules deep-learning tasks onto GPUs and CPU memory.

    Bookkeeping is done against an *allocated* ledger rather than live
    usage: two dicts track per-GPU total/allocated memory (MB) and two
    ints track CPU memory, so the scheduler can enforce its own headroom
    (``config.reserved_mem`` / ``config.reserved_cpu_mem``) instead of
    polling real usage on every pass.

    Thread-safety: all shared state is guarded by a single re-entrant
    lock; the scheduling loop, monitor threads and ``stop()`` all take it.
    """

    def __init__(self, config: Config):
        self.config = config
        self._lock = RLock()  # re-entrant: public methods call locked helpers
        self.pending_tasks: Deque[DLTask] = deque()  # tasks waiting for resources
        self.running_tasks: List[DLTask] = []        # tasks currently executing
        self.finished_tasks: List[DLTask] = []       # completed or permanently failed
        # Total / allocated memory ledgers in MB.  We charge our own ledger
        # instead of reading actual usage so reservations leave slack.
        self.gpu_total_mem: Dict[str, int] = {}
        self.gpu_allocated_mem: Dict[str, int] = {}
        # int() cast: these fields are annotated int but division yields float.
        self.cpu_total_mem: int = int(psutil.virtual_memory().total / (1024 ** 2))  # bytes -> MB
        self.cpu_allocated_mem: int = int(psutil.virtual_memory().used / (1024 ** 2))
        self._init_gpu_metadata()
        self._running = True

    def _init_gpu_metadata(self, available_devices=None):
        """Record total/used memory (MB) for every configured GPU device.

        Args:
            available_devices: iterable of ``"cuda:N"`` names to track;
                defaults to ``config.available_devices``.

        Raises:
            Exception: any GPUtil failure is logged, then re-raised.
        """
        try:
            gpus: list[GPU] = GPUtil.getGPUs()
            if available_devices is None:
                available_devices = self.config.available_devices
            for gpu in gpus:
                device = f"cuda:{gpu.id}"
                if device in available_devices:
                    self.gpu_total_mem[device] = gpu.memoryTotal    # MB
                    self.gpu_allocated_mem[device] = gpu.memoryUsed  # MB
        except Exception as e:
            logging.error(f"GPU初始化失败: {str(e)}")
            raise

    def add_task(self, task: DLTask):
        """Enqueue *task* for scheduling."""
        with self._lock:
            self.pending_tasks.append(task)
            logging.info(f"已添加任务 {task.task_id}")

    def _calculate_required_mem(self, task: DLTask, device: str) -> int:
        """Return the MB of GPU memory to charge *task* on *device*.

        No explicit request means "everything except the configured
        reserve"; an explicit request (``mem_per_gpu`` in GB) is capped at
        the card's total memory.
        """
        total = self.gpu_total_mem.get(device, 0)
        if task.mem_per_gpu is None:
            return max(0, total - self.config.reserved_mem)
        return min(int(task.mem_per_gpu * 1024), total)  # GB -> MB, capped at total

    def _try_allocate_resources(self, task: DLTask) -> bool:
        """Atomically reserve GPU and CPU memory for *task*.

        All-or-nothing: if the CPU allocation fails after GPUs were
        charged, the GPU charge is rolled back (the previous version
        leaked GPU memory on this path).

        Returns:
            True if every requested resource was reserved.
        """
        with self._lock:
            charged_devices: List[str] = []
            if task.num_gpu > 0:
                devices = self._try_allocate_gpus(task)
                if not devices:
                    logging.info(f"Task {task.task_id} failed: No enough GPU resources")
                    return False
                logging.info(f"Task {task.task_id} allocated GPU devices: {devices}")
                task.allocated_devices = devices
                for device in devices:
                    self.gpu_allocated_mem[device] += self._calculate_required_mem(task, device)
                charged_devices = devices

            if task.mem_cpu:
                if not self._try_allocate_cpus(task):
                    # Roll back the GPU charge so a CPU failure does not leak.
                    for device in charged_devices:
                        self.gpu_allocated_mem[device] -= self._calculate_required_mem(task, device)
                    task.allocated_devices = []
                    return False

            return True

    def _try_allocate_gpus(self, task: DLTask) -> Optional[List[str]]:
        """Pick GPU devices for *task*; returns the device list or None."""
        with self._lock:
            if task.num_gpu > len(self.config.available_devices):
                logging.error(f"请求GPU数超过可用设备")
                return None

            if task.require_devices:
                return self._allocate_specific_devices(task)

            return self._allocate_auto_devices(task)

    def _allocate_specific_devices(self, task: DLTask) -> Optional[List[str]]:
        """Try to honour the task's explicitly requested devices.

        Returns the selected device list, or None if any requested device
        is unknown, lacks memory, or fewer than ``num_gpu`` were given.
        """
        selected = []
        for device in task.require_devices[:task.num_gpu]:
            if device not in self.config.available_devices:
                logging.warning(f"设备 {device} 不可用")
                return None
            required = self._calculate_required_mem(task, device)
            available = self.gpu_total_mem[device] - self.gpu_allocated_mem[device]
            logging.info(f"设备 {device} 可用内存 {available}MB, 需求内存 {required}MB")
            if available < required:
                return None
            selected.append(device)
        # Bug fix: require_devices may list fewer devices than num_gpu;
        # a short allocation must count as failure, not partial success.
        if len(selected) < task.num_gpu:
            return None
        return selected

    def _allocate_auto_devices(self, task: DLTask) -> Optional[List[str]]:
        """Auto-select the ``num_gpu`` devices with the most free memory.

        Returns the device list, or None when not enough devices qualify.
        """
        candidates = []
        for device in self.config.available_devices:
            required = self._calculate_required_mem(task, device)
            available = self.gpu_total_mem[device] - self.gpu_allocated_mem[device]
            if available >= required:
                candidates.append((device, available))

        # Bug fix: the original returned candidates[:num_gpu] even when
        # fewer than num_gpu devices qualified, silently under-allocating.
        if len(candidates) < task.num_gpu:
            return None
        candidates.sort(key=lambda x: -x[1])  # most free memory first
        return [d[0] for d in candidates[:task.num_gpu]]

    def _try_allocate_cpus(self, task: DLTask) -> bool:
        """Charge ``task.mem_cpu`` MB against the CPU-memory ledger."""
        with self._lock:
            free_mem = self.cpu_total_mem - self.cpu_allocated_mem - self.config.reserved_cpu_mem
            logging.info(f"当前可用CPU内存: {free_mem}MB, 请求CPU内存: {task.mem_cpu}MB")
            if free_mem < task.mem_cpu:
                # Message fixed: this is a memory shortfall, the old text
                # wrongly complained about CPU core count.
                logging.error(f"请求CPU内存超过可用CPU内存")
                return False

            self.cpu_allocated_mem += task.mem_cpu
            return True

    def _execute_task(self, task: DLTask):
        """Spawn ``task.command`` as a subprocess and start a monitor thread.

        The child is detached into its own session (POSIX) or process
        group (Windows) so that ``stop()`` can signal the whole group
        without hitting the scheduler itself — the original relied on
        ``os.killpg`` but never detached the child, which would have
        signalled our own process group. On failure the task is marked
        FAILED, its reservation is released and it is moved to
        ``finished_tasks``.
        """
        try:
            if task.allocated_devices:
                task.env["CUDA_VISIBLE_DEVICES"] = ",".join(d.split(":")[-1] for d in task.allocated_devices)

            os.makedirs(self.config.log_dir, exist_ok=True)
            current_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
            log_path = os.path.join(self.config.log_dir, f"{current_time}_{task.task_id}.log")
            print(f"启动任务: {task.command} 日志路径 {log_path}")

            # Detach the child so stop() can target its group in isolation;
            # CREATE_NEW_PROCESS_GROUP is also required for CTRL_BREAK_EVENT.
            popen_extra: Dict[str, Any]
            if os.name == "nt":
                popen_extra = {"creationflags": subprocess.CREATE_NEW_PROCESS_GROUP}
            else:
                popen_extra = {"start_new_session": True}

            # Closing our copy of the log file is safe: the child keeps its
            # own inherited handle open.
            with open(log_path, "w") as log_file:
                log_file.write(task.metadata() + '\n')
                log_file.flush()  # header visible before the child writes
                proc = subprocess.Popen(
                    task.command,
                    shell=True,
                    env=task.env,
                    stdout=log_file,
                    stderr=log_file,
                    cwd=task.pwd,
                    text=True,
                    bufsize=0,
                    **popen_extra,
                )

            # Register the task BEFORE starting the monitor thread:
            # otherwise a fast-exiting child could reach
            # running_tasks.remove(task) before the task was ever added.
            with self._lock:
                task.process = proc
                task.status = "RUNNING"
                self.running_tasks.append(task)

            threading.Thread(
                target=self._monitor_task,
                args=(proc, task),
                daemon=True,
            ).start()

        except Exception as e:
            logging.error(f"任务启动失败: {str(e)}")
            task.status = "FAILED"
            task.last_failure = str(e)
            with self._lock:
                # Don't leak the reservation made by _try_allocate_resources.
                self._release_resources(task)
                self.finished_tasks.append(task)

    def _monitor_task(self, proc: subprocess.Popen, task: DLTask):
        """Wait for the child to exit, release resources and classify the task."""
        try:
            exit_code = proc.wait()
            with self._lock:
                self._release_resources(task)
                if task in self.running_tasks:  # defensive: avoid ValueError
                    self.running_tasks.remove(task)

                if exit_code == 0:
                    task.status = "COMPLETED"
                    self.finished_tasks.append(task)
                else:
                    task.status = "FAILED"
                    task.last_failure = f"Exit code {exit_code}"
                    self._handle_retry(task)

        except Exception as e:
            logging.error(f"任务监控异常: {str(e)}")

    def _release_resources(self, task: DLTask):
        """Return the task's GPU and CPU charges to the ledgers."""
        # `or []` guards against a task that never got devices assigned.
        for device in (task.allocated_devices or []):
            delta = self._calculate_required_mem(task, device)
            self.gpu_allocated_mem[device] -= delta
        if task.mem_cpu:
            self.cpu_allocated_mem -= task.mem_cpu

    def _handle_retry(self, task: DLTask):
        """Requeue a failed task, or retire it after ``max_retries`` attempts."""
        if task.retries < task.max_retries:
            task.retries += 1
            task.status = "PENDING"
            self.pending_tasks.append(task)
            logging.info(f"任务 {task.task_id} 进入重试 ({task.retries}/{task.max_retries})")
        else:
            logging.error(f"任务 {task.task_id} 超过最大重试次数")
            task.status = "FAILED"
            self.finished_tasks.append(task)

    def stop(self):
        """Stop the scheduling loop and ask every running child to terminate."""
        self._running = False
        with self._lock:
            for task in self.running_tasks:
                if task.process and task.process.poll() is None:
                    try:
                        if os.name == 'nt':  # Windows
                            # Graceful shutdown; works because the child was
                            # spawned with CREATE_NEW_PROCESS_GROUP.
                            os.kill(task.process.pid, signal.CTRL_BREAK_EVENT)
                        else:  # Unix/Linux
                            # SIGTERM the child's own session (the child was
                            # spawned with start_new_session=True, so this
                            # cannot hit the scheduler's process group).
                            os.killpg(os.getpgid(task.process.pid), signal.SIGTERM)
                    except ProcessLookupError:
                        pass  # process already gone

    def scheduling_loop(self):
        """Main loop: repeatedly try to start pending tasks until ``stop()``."""
        print(f"[INFO] Scheduler process id: {os.getpid()}")
        while self._running:
            try:
                with self._lock:
                    # Iterate a snapshot: pending_tasks mutates as we schedule.
                    pending = list(self.pending_tasks)
                    for task in pending:
                        logging.info(f"正在处理任务 {task.task_id}")
                        if self._try_allocate_resources(task):
                            logging.info(f"任务 {task.task_id} 分配资源成功")
                            self.pending_tasks.remove(task)
                            self._execute_task(task)
                time.sleep(self.config.check_interval)

            except Exception as e:
                logging.error(f"调度循环异常: {str(e)}")
                time.sleep(10)  # back off after an unexpected failure

