#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
基于星火大模型X1的文本转回答服务
支持流式和非流式响应，对话历史管理
"""

import json
import requests
import logging
from typing import List, Dict, Optional, Generator
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class LLMService:
    """Chat service backed by the iFlytek Spark X1 model.

    Wraps an HTTP chat-completions endpoint and manages the running
    conversation history. Supports both non-streaming (`chat_non_stream`)
    and streaming (`chat_stream`) requests, unified behind `chat()`.
    """

    def __init__(self, api_key: str, base_url: str, model: str = "x1"):
        self.api_key = api_key      # sent verbatim as the Authorization header
        self.base_url = base_url    # full URL of the chat endpoint
        self.model = model
        self.conversation_history: List[Dict[str, str]] = []
        # Character budget for the whole history; oldest messages are
        # evicted once the total exceeds this.
        self.max_history_length = 11000

    def _get_headers(self) -> Dict[str, str]:
        """Build the HTTP headers sent with every request."""
        return {
            "Authorization": self.api_key,
            "Content-Type": "application/json"
        }

    def _check_conversation_length(self):
        """Evict the oldest messages while history exceeds the budget.

        Always keeps at least two messages (roughly one user/assistant
        round), even if they alone exceed the budget.
        """
        while self._get_conversation_length() > self.max_history_length:
            if len(self.conversation_history) > 2:
                del self.conversation_history[0]
            else:
                break

    def _get_conversation_length(self) -> int:
        """Return the total character count across all stored messages."""
        return sum(len(m.get("content", "")) for m in self.conversation_history)

    def add_message(self, role: str, content: str):
        """Append a message to the history, then trim if over budget."""
        self.conversation_history.append({"role": role, "content": content})
        self._check_conversation_length()
        logger.info(f"添加消息: {role} - {content[:50]}...")

    def get_conversation_history(self) -> List[Dict[str, str]]:
        """Return a shallow copy of the conversation history."""
        return self.conversation_history.copy()

    def clear_conversation_history(self):
        """Drop all stored messages."""
        self.conversation_history = []
        logger.info("对话历史已清空")

    def _build_request_body(self, user_message: str, stream: bool = False,
                          temperature: float = 1.2, top_p: float = 0.95,
                          max_tokens: int = 32768, enable_web_search: bool = False) -> Dict:
        """Record the user message and build the JSON request body.

        NOTE: the user message enters the history *before* the request is
        sent; the chat methods roll it back on transport failure so a retry
        does not duplicate it.
        """
        self.add_message("user", user_message)

        body = {
            "model": self.model,
            "user": "user_123456",
            "messages": self.conversation_history,
            "stream": stream,
            "temperature": temperature,
            "top_p": top_p,
            "max_tokens": max_tokens
        }

        # Attach the web-search tool only when explicitly requested.
        if enable_web_search:
            body["tools"] = [
                {
                    "type": "web_search",
                    "web_search": {
                        "enable": True,
                        "search_mode": "deep"
                    }
                }
            ]

        return body

    def _rollback_last_user_message(self):
        """Remove the just-added user message after a failed request.

        Keeps the history retry-safe: without this, a retry would record the
        same user message twice.
        """
        if self.conversation_history and self.conversation_history[-1].get("role") == "user":
            self.conversation_history.pop()

    def chat_non_stream(self, user_message: str, **kwargs) -> Dict:
        """Send a non-streaming chat request.

        Returns a dict with keys ``content``, ``usage`` and ``sid``.
        Raises Exception on transport errors, API errors, or a malformed
        response body.
        """
        logger.info(f"开始非流式对话: {user_message[:50]}...")

        body = self._build_request_body(user_message, stream=False, **kwargs)

        try:
            response = requests.post(
                url=self.base_url,
                headers=self._get_headers(),
                json=body,
                timeout=60
            )
            response.raise_for_status()

            result = response.json()
            logger.info(f"非流式响应: {json.dumps(result, ensure_ascii=False)}")

            # BUGFIX: default to 0 so a success payload that omits "code"
            # is not misread as an error (None != 0 always raised before).
            if result.get("code", 0) != 0:
                error_msg = result.get("message", "未知错误")
                raise Exception(f"API错误: {error_msg}")

            # Extract the assistant reply and fold it into the history.
            if "choices" in result and len(result["choices"]) > 0:
                assistant_message = result["choices"][0]["message"]["content"]
                self.add_message("assistant", assistant_message)
                return {
                    "content": assistant_message,
                    "usage": result.get("usage", {}),
                    "sid": result.get("sid", "")
                }
            else:
                raise Exception("响应格式错误：未找到choices字段")

        except requests.exceptions.RequestException as e:
            self._rollback_last_user_message()  # keep history retry-safe
            raise Exception(f"请求失败: {str(e)}")
        except json.JSONDecodeError as e:
            raise Exception(f"响应解析失败: {str(e)}")

    def chat_stream(self, user_message: str, **kwargs) -> Generator[str, None, None]:
        """Stream a chat response.

        Yields reasoning chunks prefixed with "[思考] ", then a single
        "\\n[回复] " marker followed by raw answer chunks. The assembled
        answer (without markers) is appended to the history at the end.
        """
        logger.info(f"开始流式对话: {user_message[:50]}...")

        body = self._build_request_body(user_message, stream=True, **kwargs)

        try:
            response = requests.post(
                url=self.base_url,
                headers=self._get_headers(),
                json=body,
                stream=True,
                timeout=60
            )
            response.raise_for_status()

            full_response = ""
            is_first_content = True

            for line in response.iter_lines():
                if line and b'[DONE]' not in line:
                    try:
                        # SSE lines look like "data: {...}". BUGFIX: the
                        # original matched the 5-char "data:" prefix but
                        # sliced a fixed 6 bytes, eating the first JSON byte
                        # whenever no space followed the colon.
                        data_str = line.decode('utf-8')
                        if data_str.startswith('data:'):
                            data_str = data_str[5:].lstrip()

                        chunk = json.loads(data_str)

                        # BUGFIX: default to 0 — a chunk without "code" is
                        # not an error (None != 0 always raised before).
                        if chunk.get("code", 0) != 0:
                            error_msg = chunk.get("message", "未知错误")
                            raise Exception(f"API错误: {error_msg}")

                        if "choices" in chunk and len(chunk["choices"]) > 0:
                            delta = chunk["choices"][0].get("delta", {})

                            # Chain-of-thought content, display-only.
                            if "reasoning_content" in delta and delta["reasoning_content"]:
                                reasoning_content = delta["reasoning_content"]
                                yield f"[思考] {reasoning_content}"

                            # Final answer content; marker emitted once.
                            if "content" in delta and delta["content"]:
                                content = delta["content"]
                                if is_first_content:
                                    yield "\n[回复] "
                                    is_first_content = False
                                yield content
                                full_response += content

                    except json.JSONDecodeError as e:
                        # Tolerate malformed/partial SSE lines (e.g. keepalives).
                        logger.warning(f"解析流式数据失败: {e}")
                        continue

            # Persist the assembled answer in the conversation history.
            if full_response:
                self.add_message("assistant", full_response)

        except requests.exceptions.RequestException as e:
            self._rollback_last_user_message()  # keep history retry-safe
            raise Exception(f"流式请求失败: {str(e)}")

    def chat(self, user_message: str, stream: bool = False, **kwargs) -> str:
        """Unified entry point: print and return the assistant's answer."""
        if stream:
            reply_marker = "\n[回复] "
            full_response = ""
            for chunk in self.chat_stream(user_message, **kwargs):
                print(chunk, end="", flush=True)
                if chunk.startswith("[思考]"):
                    continue  # reasoning is displayed but not returned
                # BUGFIX: the original compared against "[回复] " (which
                # never matched the yielded "\n[回复] " chunk) and sliced
                # the wrong character count, so the marker leaked into the
                # returned text.
                if chunk.startswith(reply_marker):
                    chunk = chunk[len(reply_marker):]
                full_response += chunk
            return full_response
        else:
            result = self.chat_non_stream(user_message, **kwargs)
            print(result["content"])
            return result["content"]


def main():
    """Interactive console driver for the Spark X1 chat service."""
    # Pull endpoint credentials from the local config file.
    with open('config.json', 'r', encoding='utf-8') as f:
        config = json.load(f)

    llm_config = config['llm']
    llm_service = LLMService(
        api_key=llm_config['api_key'],
        base_url=llm_config['base_url'],
        model=llm_config['model']
    )

    print("=== 星火大模型X1对话测试 ===")
    print("输入 'quit' 退出，输入 'clear' 清空历史")
    print("输入 'stream:你的问题' 使用流式模式")
    print("输入 'web:你的问题' 启用网络搜索")
    print()

    while True:
        try:
            user_input = input("你: ").strip()
            lowered = user_input.lower()

            # Built-in commands first.
            if lowered == 'quit':
                break
            if lowered == 'clear':
                llm_service.clear_conversation_history()
                print("对话历史已清空")
                continue
            if not user_input:
                continue

            # Mode prefixes — "stream:" and "web:" are mutually exclusive,
            # with "stream:" taking precedence.
            stream_mode = user_input.startswith('stream:')
            web_search = (not stream_mode) and user_input.startswith('web:')
            if stream_mode:
                user_input = user_input[len('stream:'):].strip()
            elif web_search:
                user_input = user_input[len('web:'):].strip()

            print("星火: ", end="")
            llm_service.chat(user_input, stream=stream_mode,
                             enable_web_search=web_search)
            print("\n")

        except KeyboardInterrupt:
            print("\n\n程序被用户中断")
            break
        except Exception as e:
            print(f"\n错误: {e}")
            continue


# Run the interactive test loop only when executed as a script.
if __name__ == "__main__":
    main()
