from typing import Dict, Any, Optional, List, Generator
import sys
import time
import traceback
from langchain_community.llms import Ollama
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import os
import re

# 配置导入路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.logger import logger
from config import OLLAMA_BASE_URL, LLM_MODEL, ENABLE_STREAMING

class StreamingCallbackHandler(StreamingStdOutCallbackHandler):
    """Callback handler that streams tokens to stdout with progress stats.

    Tracks token count and elapsed time so it can report generation speed
    (tokens/second) at the first token, every 100 tokens, and on
    completion or error.
    """

    def __init__(self, agent_name: str):
        """
        Args:
            agent_name: label printed before the streamed output.
        """
        super().__init__()
        self.agent_name = agent_name
        self.tokens: List[str] = []      # every token received, in order
        self.start_time = time.time()    # set at handler creation, not at first token
        self.token_count = 0
        self.last_update_time = time.time()

    def _tokens_per_second(self) -> float:
        """Current generation speed; 0.0 while no time has elapsed (avoids ZeroDivisionError)."""
        elapsed = time.time() - self.start_time
        return self.token_count / elapsed if elapsed > 0 else 0.0

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        """Handle a new token: print only the new token, never repeat prior output."""
        self.tokens.append(token)
        self.token_count += 1

        tokens_per_second = self._tokens_per_second()

        # Print the prefix line only once, at the very first token.
        if self.token_count == 1:
            print(f"\n🤖 {self.agent_name} 生成中 [速度: {tokens_per_second:.1f} t/s]: ", end="")

        # Periodic progress line every 100 tokens.
        if self.token_count % 100 == 0:
            print(f"\n[已生成 {self.token_count} tokens, 速度: {tokens_per_second:.1f} t/s]")

        # Write only the new token; do not clear or re-emit earlier content.
        sys.stdout.write(token)
        sys.stdout.flush()

    def on_llm_end(self, response, **kwargs) -> None:
        """Print a completion summary with total tokens, elapsed time, and speed."""
        elapsed = time.time() - self.start_time
        # Guard against division by zero when generation finishes instantly
        # (the original divided by `elapsed` unconditionally here).
        speed = self.token_count / elapsed if elapsed > 0 else 0.0
        sys.stdout.write("\n\n")
        sys.stdout.write(f"✓ 生成完成! 共 {self.token_count} tokens, 用时 {elapsed:.2f}s (速度: {speed:.1f} t/s)\n")
        sys.stdout.flush()

    def on_llm_error(self, error: Exception, **kwargs) -> None:
        """Print an error summary including how far generation got."""
        elapsed = time.time() - self.start_time
        sys.stdout.write("\n\n")
        sys.stdout.write(f"❌ 生成出错! 已生成 {self.token_count} tokens, 用时 {elapsed:.2f}s\n")
        sys.stdout.write(f"错误信息: {str(error)}\n")
        sys.stdout.flush()

class BaseAgent:
    """Base class for all agents: wraps LLM creation, streaming output, and logging."""

    def __init__(self, agent_name: str):
        """
        Args:
            agent_name: human-readable name used in console output and logs.

        Raises:
            Exception: re-raised if either LLM instance fails to initialize.
        """
        self.agent_name = agent_name
        print(f"初始化 {agent_name} Agent...")

        # Create both a standard (blocking) LLM and a streaming LLM.
        try:
            self.llm = self._create_llm(streaming=False)
            self.streaming_llm = self._create_llm(streaming=True)
            print(f"✓ {agent_name} 模型初始化成功")
        except Exception as e:
            print(f"❌ {agent_name} 模型初始化失败: {str(e)}")
            raise

    def _create_llm(self, streaming: bool = False):
        """Create an Ollama LLM instance, optionally with streaming callbacks.

        Streaming is only enabled when both the `streaming` argument and the
        ENABLE_STREAMING config flag are true.
        """
        callbacks = None
        if streaming and ENABLE_STREAMING:  # config flag can globally disable streaming
            callbacks = [StreamingCallbackHandler(self.agent_name)]

        try:
            stream_status = "启用" if (streaming and ENABLE_STREAMING) else "禁用"
            print(f"创建 {self.agent_name} 的 LLM 实例 (模型: {LLM_MODEL}, 流式: {stream_status})...")

            llm = Ollama(
                base_url=OLLAMA_BASE_URL,
                model=LLM_MODEL,
                callbacks=callbacks,
                temperature=0.1,
                num_predict=32000,
                timeout=300
            )
            return llm
        except Exception as e:
            print(f"⚠️ 创建 {self.agent_name} 的 LLM 实例失败: {str(e)}")
            raise

    def run_with_streaming(self,
                     prompt_template,
                     inputs: Dict[str, Any],
                     step: str,
                     file_path: Optional[str] = None,
                     timeout: int = 300) -> str:
        """Format the prompt, run it with streaming output, and return the
        result with the model's <think>...</think> sections removed.

        Falls back to the non-streaming LLM if the streaming run fails.

        Args:
            prompt_template: object exposing ``.format(**inputs)``.
            inputs: values substituted into the template.
            step: pipeline step name (used for logging only).
            file_path: optional file this step relates to (logging only).
            timeout: seconds applied to the lazily-created complete-output LLM.

        Returns:
            Filtered model output, or a human-readable error string.
            This method never raises to the caller.
        """
        # Format the prompt; report missing/invalid template parameters as a string.
        try:
            prompt = prompt_template.format(**inputs)
        except KeyError as e:
            error_msg = f"❌ 提示词格式化错误: 缺少参数 {str(e)}"
            print(error_msg)
            return f"错误: {error_msg}"
        except Exception as e:
            error_msg = f"❌ 提示词格式化错误: {str(e)}"
            print(error_msg)
            return f"错误: {error_msg}"

        print(f"\n{'='*30}")
        print(f"开始 {self.agent_name} 在 '{step}' 步骤的生成")
        print(f"📂 文件: {file_path or '无'}")
        print(f"⏱️ 超时设置: {timeout}秒")
        print(f"{'='*30}\n")

        try:
            # Lazily create an LLM dedicated to complete (non-streaming) output.
            # NOTE(review): self.complete_llm is never used in this method —
            # presumably kept for external callers; confirm before removing.
            # (Redundant function-scope imports of Ollama/config were removed;
            # the module-level imports already provide these names.)
            if not hasattr(self, 'complete_llm'):
                self.complete_llm = Ollama(
                    base_url=OLLAMA_BASE_URL,
                    model=LLM_MODEL,
                    temperature=0.1,  # low randomness for consistency
                    timeout=timeout,
                    stop=["\n\nHuman:", "\n\nAI:"]  # avoid generating extra dialogue turns
                )
                print(f"已创建用于完整输出的LLM实例")

            # Run the model; streaming tokens go to stdout via the callback handler.
            start_time = time.time()
            raw_output = self.streaming_llm(prompt)

            # Strip the model's thinking sections from the final result.
            filtered_output = self.filter_thinking(raw_output)
            print("过滤后的AGENT输出:", filtered_output, "!!")

            # Log the raw (unfiltered) interaction for traceability.
            logger.log_model_interaction(
                agent_name=self.agent_name,
                inputs=inputs,
                prompt=prompt,
                output=raw_output,  # log the raw output, not the filtered one
                step=step,
                file_path=file_path
            )

            print(f"✅ {self.agent_name} 生成成功，总时间: {time.time() - start_time:.2f}秒")
            return filtered_output  # return the filtered output

        except KeyboardInterrupt:
            print(f"\n⚠️ 用户中断了 {self.agent_name} 的生成")
            return f"[生成被用户中断]"
        except Exception as e:
            error_msg = f"❌ {self.agent_name} 生成失败: {str(e)}"
            print(error_msg)
            print(traceback.format_exc())
            print("尝试使用非流式模式重试...")

            try:
                # Fall back to the non-streaming LLM.
                raw_output = self.llm(prompt)
                filtered_output = self.filter_thinking(raw_output)
                print(f"✅ 非流式模式生成成功")

                # Log the fallback interaction; now passes prompt= as well,
                # consistent with the streaming path above.
                logger.log_model_interaction(
                    agent_name=f"{self.agent_name}(非流式)",
                    inputs=inputs,
                    prompt=prompt,
                    output=raw_output,
                    step=step,
                    file_path=file_path
                )

                return filtered_output
            except Exception as e2:
                error_msg = f"❌ 重试失败: {str(e2)}"
                print(error_msg)
                print(traceback.format_exc())
                return f"生成失败: {str(e2)}"

    def filter_thinking(self, text: str) -> str:
        """Remove the model's thinking sections and collapse excess blank lines.

        Args:
            text: raw model output.

        Returns:
            The text with all ``<think>...</think>`` spans deleted and any run
            of three or more newlines collapsed to a single blank line.
        """
        # Patterns for thinking sections the model may emit.
        patterns = [
            r'<think>.*?</think>',  # <think>...</think> (comment previously mislabeled this as <thinking>)
        ]

        filtered_text = text
        for pattern in patterns:
            # DOTALL so the thinking block may span multiple lines.
            filtered_text = re.sub(pattern, '', filtered_text, flags=re.DOTALL)

        # Collapse runs of 3+ newlines down to a single blank line.
        filtered_text = re.sub(r'\n{3,}', '\n\n', filtered_text)

        return filtered_text

    

    

