#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
大模型聊天对话客户端
基于硅基流动API的聊天对话功能
"""

import os
import json
import asyncio
import aiohttp
import logging
from typing import List, Dict, Any, Optional, AsyncGenerator, Callable
from datetime import datetime
from dotenv import load_dotenv

# Load environment variables from a .env file, if present.
load_dotenv()

# Configure module-wide logging.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class ChatConfig:
    """Chat configuration loaded from environment variables.

    Reads CHAT_NAME (optional display name), CHAT_API_KEY, CHAT_MODEL and
    CHAT_URL.  The last three are required; a missing or empty value raises
    ValueError so misconfiguration fails fast at startup instead of on the
    first API call.
    """

    def __init__(self):
        self.load_config()

    def load_config(self):
        """Load chat settings from the environment.

        Raises:
            ValueError: if CHAT_API_KEY, CHAT_MODEL or CHAT_URL is unset or empty.
        """
        self.name = os.getenv('CHAT_NAME')      # optional, display only
        self.api_key = os.getenv('CHAT_API_KEY')
        self.model = os.getenv('CHAT_MODEL')
        self.url = os.getenv('CHAT_URL')

        # BUG FIX: the previous version caught its own ValueError and
        # re-raised it wrapped in a bare Exception, losing the exception
        # type and duplicating the message.  Raise the ValueError directly;
        # callers that catch Exception still catch it.
        if not self.api_key:
            raise ValueError("CHAT_API_KEY 环境变量未设置")
        if not self.model:
            raise ValueError("CHAT_MODEL 环境变量未设置")
        if not self.url:
            raise ValueError("CHAT_URL 环境变量未设置")
        


class ChatMessage:
    """A single chat message: a role, its text content, and a timestamp."""

    # Display labels for known roles; unknown roles fall back to the raw name.
    _ROLE_LABELS = {
        "user": "👤 用户",
        "assistant": "🤖 助手",
        "system": "⚙️ 系统",
    }

    def __init__(self, role: str, content: str, timestamp: Optional[datetime] = None):
        self.role = role  # expected: 'user', 'assistant' or 'system'
        self.content = content
        self.timestamp = datetime.now() if timestamp is None else timestamp

    def to_dict(self) -> Dict[str, str]:
        """Return the message in the wire format expected by the API."""
        return {"role": self.role, "content": self.content}

    def __str__(self) -> str:
        label = self._ROLE_LABELS.get(self.role, self.role)
        return f"[{self.timestamp:%H:%M:%S}] {label}: {self.content}"

class ChatSession:
    """Ordered message history for one conversation, trimmed to a size cap."""

    def __init__(self, max_history: int = 20):
        self.messages: List[ChatMessage] = []
        self.max_history = max_history

    def add_message(self, role: str, content: str) -> ChatMessage:
        """Append a new message and return it.

        When the history grows past ``max_history``, system messages are
        always kept and only the oldest user/assistant messages are dropped.
        """
        new_msg = ChatMessage(role, content)
        self.messages.append(new_msg)

        if len(self.messages) > self.max_history:
            system = [m for m in self.messages if m.role == "system"]
            others = [m for m in self.messages if m.role != "system"]

            room = self.max_history - len(system)
            if len(others) > room:
                others = others[-room:]

            self.messages = system + others

        return new_msg

    def get_api_messages(self) -> List[Dict[str, str]]:
        """Return every message in API wire format."""
        return [m.to_dict() for m in self.messages]

    def clear(self):
        """Remove every message, including system prompts."""
        del self.messages[:]

    def get_last_messages(self, count: int = 5) -> List[ChatMessage]:
        """Return the newest *count* messages; a non-positive count means all."""
        if count > 0:
            return self.messages[-count:]
        return self.messages

class LLMChatClient:
    """Async client for an OpenAI-compatible chat-completions HTTP API.

    Owns a ChatSession for conversation history and an aiohttp session for
    transport.  Must be used as an async context manager so the HTTP session
    is opened and closed correctly::

        async with LLMChatClient() as client:
            reply = await client.send_message("你好")

    Supports blocking completion (``send_message``), streaming completion
    (``send_message_stream``), and cooperative cancellation of an in-flight
    stream (``cancel_stream``).
    """

    def __init__(self, config: Optional[ChatConfig] = None):
        self.config = config or ChatConfig()
        self.session = ChatSession()
        self.http_session: Optional[aiohttp.ClientSession] = None

        # Cooperative cancellation state for streaming generation.
        self._cancel_stream = False
        self._current_stream_task = None

        # Install the default system prompt.
        self._load_default_prompt()

    def _load_default_prompt(self):
        """Load the default system prompt from prompts/voice_chat_prompt.txt.

        Falls back to a built-in prompt when the file is missing or any read
        error occurs, so a system prompt is always present.
        NOTE(review): path resolution assumes this module sits two directory
        levels below the project root — confirm if the file is relocated.
        """
        try:
            current_dir = os.path.dirname(os.path.abspath(__file__))
            project_root = os.path.dirname(os.path.dirname(current_dir))
            prompt_file = os.path.join(project_root, "prompts", "voice_chat_prompt.txt")

            if os.path.exists(prompt_file):
                with open(prompt_file, 'r', encoding='utf-8') as f:
                    prompt_content = f.read().strip()
                self.set_system_prompt(prompt_content)
                logger.info(f"已加载自定义提示词: {prompt_file}")
            else:
                # File missing: use the built-in default prompt.
                self.set_system_prompt("你是一个有用的AI助手，请用中文回答问题。")
                logger.warning(f"提示词文件不存在: {prompt_file}，使用默认提示")

        except Exception as e:
            logger.error(f"加载提示词失败: {e}")
            # Best-effort fallback: still guarantee a system prompt exists.
            self.set_system_prompt("你是一个有用的AI助手，请用中文回答问题。")

    async def __aenter__(self):
        self.http_session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        if self.http_session and not self.http_session.closed:
            await self.http_session.close()

    def set_system_prompt(self, prompt: str):
        """Replace any existing system message(s) with *prompt* at position 0."""
        self.session.messages = [msg for msg in self.session.messages if msg.role != "system"]
        system_msg = ChatMessage("system", prompt)
        self.session.messages.insert(0, system_msg)

    async def send_message(self, message: str) -> str:
        """Send *message* and return the assistant's complete reply.

        The user message and the assistant reply are both recorded in the
        session so they become part of the context for later turns.

        Raises:
            RuntimeError: if called outside ``async with``.
            Exception: on non-200 responses or malformed API payloads.
        """
        if not self.http_session:
            raise RuntimeError("请在async with语句中使用客户端")

        self.session.add_message("user", message)

        try:
            headers = {
                "Authorization": f"Bearer {self.config.api_key}",
                "Content-Type": "application/json"
            }

            payload = {
                "model": self.config.model,
                "messages": self.session.get_api_messages(),
                "temperature": 0.7,
                "max_tokens": 2000,
                "stream": False
            }

            logger.debug(f"发送请求到: {self.config.url}")
            logger.debug(f"消息数量: {len(self.session.get_api_messages())}")

            async with self.http_session.post(
                self.config.url,
                headers=headers,
                json=payload,
                timeout=aiohttp.ClientTimeout(total=30)
            ) as response:

                if response.status != 200:
                    error_text = await response.text()
                    raise Exception(f"API请求失败: {response.status} - {error_text}")

                result = await response.json()

                # Expect an OpenAI-style body with a non-empty "choices" list.
                if "choices" not in result or not result["choices"]:
                    raise Exception(f"API响应格式错误: {result}")

                assistant_message = result["choices"][0]["message"]["content"]

                # Record the reply so it is part of future context.
                self.session.add_message("assistant", assistant_message)

                logger.info(f"收到回复，长度: {len(assistant_message)}字符")
                return assistant_message

        except Exception as e:
            logger.error(f"发送消息失败: {e}")
            raise

    async def cancel_stream(self):
        """Request cancellation of any in-flight streaming generation."""
        self._cancel_stream = True
        if self._current_stream_task and not self._current_stream_task.done():
            self._current_stream_task.cancel()

    def reset_cancel_flag(self):
        """Reset cancellation state; called before starting a new stream."""
        self._cancel_stream = False
        self._current_stream_task = None

    async def send_message_stream(self, message: str) -> AsyncGenerator[str, None]:
        """Send *message* and yield the reply incrementally as it streams in.

        The complete reply is appended to the session only when streaming
        finishes without being cancelled; a cancelled stream stores nothing.

        Raises:
            RuntimeError: if called outside ``async with``.
            asyncio.CancelledError: if ``cancel_stream`` was requested.
            Exception: on non-200 responses.
        """
        if not self.http_session:
            raise RuntimeError("请在async with语句中使用客户端")

        self.reset_cancel_flag()

        self.session.add_message("user", message)

        try:
            headers = {
                "Authorization": f"Bearer {self.config.api_key}",
                "Content-Type": "application/json"
            }

            payload = {
                "model": self.config.model,
                "messages": self.session.get_api_messages(),
                "temperature": 0.7,
                "max_tokens": 2000,
                "stream": True
            }

            logger.debug(f"发送流式请求到: {self.config.url}")

            full_response = ""

            async with self.http_session.post(
                self.config.url,
                headers=headers,
                json=payload,
                timeout=aiohttp.ClientTimeout(total=60)
            ) as response:

                if response.status != 200:
                    error_text = await response.text()
                    raise Exception(f"API请求失败: {response.status} - {error_text}")

                # Server-sent events: one "data: {...}" JSON chunk per line.
                async for line in response.content:
                    # Bail out promptly if cancellation was requested.
                    if self._cancel_stream:
                        logger.info("流式生成已被取消")
                        raise asyncio.CancelledError("流式生成已被取消")

                    line = line.decode('utf-8').strip()

                    if line.startswith('data: '):
                        data = line[6:]  # strip the 'data: ' prefix

                        if data == '[DONE]':
                            break

                        try:
                            chunk = json.loads(data)

                            if "choices" in chunk and chunk["choices"]:
                                delta = chunk["choices"][0].get("delta", {})

                                if "content" in delta:
                                    content = delta["content"]
                                    full_response += content

                                    # Check again so we stop mid-chunk too.
                                    if self._cancel_stream:
                                        logger.info("流式生成已被取消")
                                        raise asyncio.CancelledError("流式生成已被取消")

                                    yield content

                        except json.JSONDecodeError:
                            # Ignore keep-alives / non-JSON lines.
                            continue

            # Final cancellation check before committing the reply.
            if self._cancel_stream:
                logger.info("流式生成已被取消")
                raise asyncio.CancelledError("流式生成已被取消")

            # Persist the complete assistant reply into the history.
            if full_response:
                self.session.add_message("assistant", full_response)

        except asyncio.CancelledError:
            # Cancelled: deliberately do not store a partial reply.
            logger.info("流式生成被取消，不保存不完整的回复")
            raise
        except Exception as e:
            logger.error(f"流式发送消息失败: {e}")
            raise

    def get_conversation_history(self, count: int = 10) -> List[ChatMessage]:
        """Return the most recent *count* messages from the session."""
        return self.session.get_last_messages(count)

    def clear_conversation(self):
        """Clear the conversation while preserving any system prompt(s)."""
        system_messages = [msg for msg in self.session.messages if msg.role == "system"]
        self.session.clear()
        self.session.messages.extend(system_messages)

    def export_conversation(self, filename: Optional[str] = None) -> str:
        """Write the conversation to a UTF-8 text file and return its name.

        A timestamped default filename is generated when none is given.

        Raises:
            OSError: if the file cannot be written (propagated after logging).
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"chat_history_{timestamp}.txt"

        try:
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(f"聊天记录导出时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"模型: {self.config.model}\n")
                f.write("=" * 50 + "\n\n")

                for msg in self.session.messages:
                    f.write(str(msg) + "\n\n")

            # BUG FIX: previously logged the literal placeholder "(unknown)"
            # instead of the actual exported filename.
            logger.info(f"对话记录已导出到: {filename}")
            return filename

        except Exception as e:
            logger.error(f"导出对话记录失败: {e}")
            raise

class ChatApp:
    """Interactive console controller for the chat client.

    Reads user input in a loop, dispatches slash commands locally, and
    relays everything else to the LLM client.  Replies stream token-by-token
    by default; the ``/stream`` command toggles non-streaming mode.
    """

    def __init__(self):
        self.client: Optional[LLMChatClient] = None
        self.is_running = False
        # BUG FIX: /stream was advertised in the help text but had no
        # handler.  True = render replies incrementally (default, matches
        # previous behavior); False = print the full reply at once.
        self.use_stream = True

    def print_welcome(self):
        """Print the startup banner and basic usage hints."""
        print("\n" + "=" * 60)
        print("🤖 大模型聊天对话工具")
        print("=" * 60)
        print("💡 使用说明:")
        print("  - 直接输入消息开始对话")
        print("  - 输入 '/help' 查看命令帮助")
        print("  - 输入 '/quit' 或 '/exit' 退出程序")
        print("  - 按 Ctrl+C 也可以退出")
        print("=" * 60 + "\n")

    def print_help(self):
        """Print the slash-command reference."""
        print("\n📋 可用命令:")
        print("  /help          - 显示此帮助信息")
        print("  /clear         - 清空对话历史")
        print("  /history [n]   - 显示最近n条对话记录（默认5条）")
        print("  /export        - 导出对话记录到文件")
        print("  /system <text> - 设置系统提示")
        print("  /stream        - 切换流式/非流式模式")
        print("  /quit, /exit   - 退出程序")
        print()

    async def handle_command(self, command: str) -> bool:
        """Dispatch one slash command; return False when the app should exit."""
        parts = command.strip().split()
        cmd = parts[0].lower()

        if cmd in ['/quit', '/exit']:
            return False

        elif cmd == '/help':
            self.print_help()

        elif cmd == '/clear':
            if self.client:
                self.client.clear_conversation()
                print("✅ 对话历史已清空")

        elif cmd == '/history':
            if self.client:
                count = 5
                if len(parts) > 1:
                    try:
                        count = int(parts[1])
                    except ValueError:
                        print("❌ 无效的数字")
                        return True

                history = self.client.get_conversation_history(count)
                if history:
                    print(f"\n📜 最近{len(history)}条对话记录:")
                    for msg in history:
                        # System prompts are internal; don't display them.
                        if msg.role != "system":
                            print(msg)
                    print()
                else:
                    print("📭 暂无对话记录")

        elif cmd == '/export':
            if self.client:
                try:
                    filename = self.client.export_conversation()
                    # BUG FIX: previously printed the literal placeholder
                    # "(unknown)" instead of the exported file's name.
                    print(f"✅ 对话记录已导出到: {filename}")
                except Exception as e:
                    print(f"❌ 导出失败: {e}")

        elif cmd == '/system':
            if len(parts) > 1:
                system_prompt = ' '.join(parts[1:])
                if self.client:
                    self.client.set_system_prompt(system_prompt)
                    print(f"✅ 系统提示已设置: {system_prompt[:50]}...")
            else:
                print("❌ 请提供系统提示内容")

        elif cmd == '/stream':
            # BUG FIX: implement the documented streaming-mode toggle.
            self.use_stream = not self.use_stream
            mode = "流式" if self.use_stream else "非流式"
            print(f"✅ 已切换到{mode}模式")

        else:
            print(f"❌ 未知命令: {cmd}，输入 /help 查看帮助")

        return True

    async def run(self):
        """Main REPL loop: connect the client and process input until exit."""
        self.print_welcome()

        try:
            async with LLMChatClient() as client:
                self.client = client
                self.is_running = True

                print(f"🔗 已连接到: {client.config.name} ({client.config.model})\n")

                while self.is_running:
                    try:
                        user_input = input("👤 您: ").strip()

                        if not user_input:
                            continue

                        # Slash commands are handled locally, never sent to the model.
                        if user_input.startswith('/'):
                            should_continue = await self.handle_command(user_input)
                            if not should_continue:
                                break
                            continue

                        print("🤖 助手: ", end="", flush=True)

                        try:
                            if self.use_stream:
                                # Stream tokens as they arrive.
                                async for chunk in client.send_message_stream(user_input):
                                    print(chunk, end="", flush=True)
                                print()  # final newline after the reply
                            else:
                                print(await client.send_message(user_input))

                        except Exception as e:
                            print(f"\n❌ 发送消息失败: {e}")

                    except KeyboardInterrupt:
                        print("\n\n👋 用户中断，正在退出...")
                        break
                    except EOFError:
                        print("\n\n👋 输入结束，正在退出...")
                        break

        except Exception as e:
            print(f"❌ 应用启动失败: {e}")
        finally:
            self.is_running = False
            print("\n🔚 聊天会话已结束")

async def main():
    """Program entry point: create the chat application and run it."""
    await ChatApp().run()
    
# Run the async entry point; Ctrl+C exits with a clean message instead of a traceback.
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\n程序已退出")