"""
这一个监听者，当每次实例化的时候，就会自动开启一个线程和队列，外部可以直接调用函数向队列里面写入数据，内部一个线程隔一段时间后就可以获取当前队列里面的数据，并且打印出来
并且要提供可以停止关闭的接口让外部可以主动停止，且可以获取当前的状态
"""
import queue
import threading
import time
from typing import Callable, Optional
from collections import deque
from enum import Enum, auto

import os

# LLM configuration constants.
LLM_MODEL = "gpt-4o-2024-08-06"
# SECURITY: never hard-code API keys in source control.  The key is read from
# the environment; a missing key leaves it as None, and consumers already
# treat "no key" as "LLM analysis unavailable".
LLM_API_KEY = os.getenv("OPENAI_API_KEY")
BASE_URL = "https://api.oaipro.com/v1"
TEMPERATURE = 0
MAX_TOKENS = -1  # NOTE(review): -1 is not a valid OpenAI max_tokens value; presumably means "unlimited" — confirm
"""
This module provides interfaces for different Large Language Models (LLMs).
It includes a base LLM class and specific implementations for different LLM providers.
"""

from abc import ABC, abstractmethod
from typing import Optional, Dict, Any
import os
import openai
from anthropic import Anthropic


class LLM(ABC):
    """Base class for Large Language Models."""

    def __init__(self):
        """
        Initialize the LLM.

        The base class holds no state of its own; subclasses are expected to
        set up provider-specific configuration (API key, model name, etc.).
        """

    @abstractmethod
    def ask_llm(self, query: str) -> str:
        """
        Send a query to the LLM and get the response.

        Args:
            query: The input query string

        Returns:
            The LLM's response as a string

        Raises:
            Exception: If there's an error in communicating with the LLM
        """
        pass

class LLM_GPT(LLM):
    """GPT-specific implementation of the LLM interface.

    Configuration (model, API key, endpoint, sampling parameters) is taken
    from the module-level constants LLM_MODEL, LLM_API_KEY, BASE_URL,
    TEMPERATURE and MAX_TOKENS.
    """

    def __init__(self):
        """Initialize the GPT wrapper from module-level configuration constants."""
        super().__init__()
        self.api_key = LLM_API_KEY
        # Legacy module-level assignment kept for any old-style callers; note
        # that the v1 openai.OpenAI() client does NOT read this attribute.
        openai.api_key = LLM_API_KEY
        self.model = LLM_MODEL
        self.temperature = TEMPERATURE
        self.max_tokens = MAX_TOKENS
        self.base_url = BASE_URL

    def ask_llm(self, query: str, system_prompt: Optional[str] = None) -> str:
        """
        Send a query to GPT and get the response.

        Args:
            query: The input query string
            system_prompt: Optional system prompt to set context

        Returns:
            GPT's response as a string

        Raises:
            Exception: If there's an error in communicating with GPT
        """
        try:
            # Pass the key explicitly: the v1 client only falls back to the
            # OPENAI_API_KEY environment variable, never to openai.api_key,
            # so omitting it here would break authentication.
            client = openai.OpenAI(api_key=self.api_key, base_url=self.base_url)

            messages = []
            if system_prompt is not None:
                messages.append({"role": "system", "content": system_prompt})
            messages.append({"role": "user", "content": query})

            request: Dict[str, Any] = {
                "model": self.model,
                "messages": messages,
                "temperature": self.temperature,
            }
            # The API rejects non-positive max_tokens (this file's default is
            # -1, apparently meaning "unlimited"); omit the field in that case.
            if self.max_tokens is not None and self.max_tokens > 0:
                request["max_tokens"] = self.max_tokens

            response = client.chat.completions.create(**request)
            return response.choices[0].message.content
        except Exception as e:
            raise Exception(f"Error in GPT query: {str(e)}")

class CmdMonitorStatus(Enum):
    """Lifecycle states of a command monitor."""
    INITIALIZED = "initialized"  # constructed; worker thread not yet started
    RUNNING = "running"  # worker thread is active
    STOPPED = "stopped"  # stopped normally via stop()
    FORCE_STOPPED = "force_stopped"  # stopped via stop(force=True)
    NOT_RUNNING = "not_running"  # thread finished without a force-stop
    ERROR = "error"  # the monitor loop raised an exception


class CmdMonitor:
    """Command monitor: watches the output produced while a command runs.

    Instantiating the class automatically starts a daemon thread which, every
    ``interval`` seconds, prints everything accumulated in an internal queue
    and asks an LLM whether execution should be stopped.  Callers push output
    via :meth:`add_content`, stop the monitor via :meth:`stop`, and query its
    state via :meth:`get_status`.
    """

    def __init__(
            self,
            interval: int = 30,
            max_queue_size: int = 100,
            logger: Optional[Callable] = None,
            command: Optional[str] = None  # the command being run, passed to the LLM as context
    ):
        """
        Initialize the command monitor and immediately start its worker thread.

        Args:
            interval: seconds between periodic queue dumps / LLM checks
            max_queue_size: maximum number of elements kept in the queue
            logger: logging callable; defaults to print
            command: the command being executed, used for LLM analysis
        """
        self.interval = interval
        self.max_queue_size = max_queue_size
        # Bounded deque: once full, the oldest entries are discarded automatically.
        self.content_queue = deque(maxlen=max_queue_size)
        self.stop_event = threading.Event()
        self.thread = None
        self.logger = logger or print
        self.command = command
        self.accumulated_content = ""
        self.status = CmdMonitorStatus.INITIALIZED
        self.force_stopped = False  # set once stop(force=True) has been called
        self.llm = LLM_GPT()
        # Start monitoring as soon as the object exists.
        self.start()

    def start(self):
        """Start the monitor thread; no-op if it is already running."""
        if self.thread is not None and self.thread.is_alive():
            return

        self.stop_event.clear()
        self.status = CmdMonitorStatus.RUNNING
        self.thread = threading.Thread(target=self._monitor_loop)
        self.thread.daemon = True  # do not keep the process alive on exit
        self.thread.start()

        return self

    def stop(self, force=False):
        """
        Stop the monitor thread.

        Args:
            force: whether this is a forced stop (recorded in the status)
        """
        if force:
            self.force_stopped = True
            self.status = CmdMonitorStatus.FORCE_STOPPED
        else:
            self.status = CmdMonitorStatus.STOPPED

        self.stop_event.set()

        # Never join the current thread with itself (stop() may be called
        # from inside the monitor loop).
        current_thread = threading.current_thread()
        if self.thread and self.thread.is_alive() and self.thread != current_thread:
            self.thread.join(timeout=2)

    def get_status(self) -> CmdMonitorStatus:
        """Return the monitor's current state."""
        if not self.thread or not self.thread.is_alive():
            if self.force_stopped:
                return CmdMonitorStatus.FORCE_STOPPED
            return CmdMonitorStatus.NOT_RUNNING
        return self.status

    def add_content(self, content: str):
        """
        Append a chunk of command output to the monitoring queue.

        Args:
            content: the text to add
        """
        # Ignore new content once the monitor has been stopped.
        if self.status != CmdMonitorStatus.FORCE_STOPPED and self.status != CmdMonitorStatus.STOPPED:
            # The bounded deque evicts the oldest element automatically.
            self.content_queue.append(content)
            # Keep a rolling accumulated copy as well, capped at 10k chars.
            self.accumulated_content += content
            if len(self.accumulated_content) > 10000:
                self.accumulated_content = self.accumulated_content[-10000:]

    def get_accumulated_content(self) -> str:
        """Return the accumulated (capped) output collected so far."""
        return self.accumulated_content

    def _monitor_loop(self):
        """Worker loop: periodically dump the queue and run the LLM check."""
        try:
            last_print_time = time.time()

            while not self.stop_event.is_set():
                current_time = time.time()

                # Every `interval` seconds, print everything currently queued.
                if current_time - last_print_time >= self.interval and self.content_queue:
                    combined_content = "".join(self.content_queue)
                    if combined_content:
                        self.logger(f"命令输出 (队列中的所有内容):\n{combined_content}")

                        # Ask the LLM whether execution looks broken.
                        should_stop = self._analyze_with_llm(combined_content)
                        if should_stop:
                            self.logger("LLM分析建议停止执行")
                            # Force-stop and record the state change.
                            self.stop(force=True)
                            break

                    last_print_time = current_time

                # Short sleep to avoid busy-waiting.
                time.sleep(0.1)

        except Exception as e:
            self.status = CmdMonitorStatus.ERROR
            self.logger(f"监听线程出错: {str(e)}")
        finally:
            if not self.force_stopped:
                self.status = CmdMonitorStatus.STOPPED

    def __del__(self):
        """Finalizer: make sure the worker thread is shut down."""
        try:
            self.stop()
        except Exception:
            # __del__ may run during interpreter shutdown, when module
            # globals are already gone; never let a finalizer raise.
            pass

    def _analyze_with_llm(self, content):
        """Ask the LLM to judge the command output; return True if it says STOP."""
        try:
            # Prefer the configured constant; without a key the analysis is
            # simply unavailable and we keep running.
            api_key = LLM_API_KEY
            if not api_key:
                self.logger("警告: 未找到API密钥，LLM分析将不可用")
                return False

            command_info = f"command: {self.command}" if self.command else "current command"

            prompt = f"""
            You are a machine learning training monitoring assistant, responsible for analyzing and determining the status of command-line executions.  
            Your task is to monitor the commands I run in the terminal and their output in real-time, helping me determine whether the program is running correctly and decide whether to stop or continue waiting.  
            
            ### What to Monitor:  
            1. **Executed Command**: `{command_info}`  
            2. **Program Output**: current time:`{content}`  
            
            ### Your Goal:  
            - **Normal Execution**: If the output shows the loss decreasing, accuracy increasing, and no abnormal warnings or errors, return "CONTINUE".  
            - **Potential Issues**: If the loss becomes NaN or inf, or if the model fails to converge for a long time and accuracy remains stagnant, return "STOP".  
            - **Error Termination**: If the output contains clear errors, such as out-of-memory issues, data loading failures, or tensor shape mismatches, immediately return "STOP".  
            - **Unresponsive for a Long Time**: If the program outputs nothing useful for a long period, gets stuck in an epoch, or repeatedly prints the same log, return "STOP".  
            
            ### Output Format:  
            - **If running normally: return `CONTINUE`  
            - **If an anomaly is detected: return `STOP`  
            
            ### Important:  
            - **Only output `CONTINUE` or `STOP`, nothing else.**  
            - **Please note that if there is no output content that can be used for judgment, continue to wait.
            """

            # Delegate to the configured LLM wrapper.  (A previous version
            # imported litellm here without using it, which silently disabled
            # analysis whenever litellm was not installed.)
            response = self.llm.ask_llm(prompt)

            response_text = response.strip()

            self.logger(f"LLM分析结果: {response_text}")

            if "STOP" in response_text:
                return True  # stop requested
            return False  # keep running
        except Exception as e:
            self.logger(f"LLM分析出错: {str(e)}")
            # On any analysis failure, default to continuing execution.
            return False
