"""
API工具模块，用于与智谱AI GLM-4.6 API交互
"""
import requests
import json
import hashlib
import time
from typing import Dict, List, Optional, Any


class GLMAPIClient:
    """Client for the Zhipu AI GLM-4.6 chat-completions API with a small TTL cache."""

    def __init__(self, api_key: str, base_url: str = "https://open.bigmodel.cn/api/paas/v4/chat/completions", timeout: int = 60):
        """
        Initialize the API client.

        Args:
            api_key: API key, sent as a Bearer token.
            base_url: Full URL of the chat-completions endpoint.
            timeout: Request timeout in seconds.
        """
        self.api_key = api_key
        self.base_url = base_url
        self.timeout = timeout
        self.headers = {
            'Authorization': f'Bearer {api_key}',
            'Content-Type': 'application/json'
        }
        # Simple in-memory response cache to avoid duplicate requests.
        # Maps cache key -> (response_dict, insertion_timestamp).
        self._cache: Dict[str, Any] = {}
        self._cache_ttl = 300  # cache entries expire after 5 minutes

    def _get_cache_key(
        self,
        messages: List[Dict[str, str]],
        temperature: float,
        max_tokens: int,
        model: str = "glm-4.6",
    ) -> str:
        """
        Build a deterministic cache key for a request.

        Args:
            messages: Chat message list.
            temperature: Sampling temperature.
            max_tokens: Maximum number of tokens to generate.
            model: Model name. Included in the key so that requests to
                different models never share a cache entry (previously the
                model was omitted and distinct models collided).

        Returns:
            Hex digest string identifying the request.
        """
        content = json.dumps({
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens
        }, sort_keys=True)
        # md5 is acceptable here: the digest is only a cache identifier,
        # not a security-sensitive hash.
        return hashlib.md5(content.encode()).hexdigest()

    def _get_from_cache(self, cache_key: str) -> Optional[Dict[str, Any]]:
        """
        Look up a cached response, evicting it if expired.

        Args:
            cache_key: Key produced by :meth:`_get_cache_key`.

        Returns:
            The cached response dict, or None on a miss or expired entry.
        """
        if cache_key in self._cache:
            cached_data, timestamp = self._cache[cache_key]
            # Entry is valid only while younger than the TTL.
            if time.time() - timestamp < self._cache_ttl:
                return cached_data
            # Expired: evict eagerly so the dict does not grow unbounded.
            del self._cache[cache_key]
        return None

    def _save_to_cache(self, cache_key: str, response: Dict[str, Any]) -> None:
        """
        Store a response in the cache, stamped with the current time.

        Args:
            cache_key: Key produced by :meth:`_get_cache_key`.
            response: Parsed API response to cache.
        """
        self._cache[cache_key] = (response, time.time())

    def chat_completion(
        self,
        messages: List[Dict[str, str]],
        model: str = "glm-4.6",
        temperature: float = 1.0,
        max_tokens: int = 1000,
        stream: bool = False
    ) -> Dict[str, Any]:
        """
        Send a chat-completion request.

        Args:
            messages: Message list, e.g. [{"role": "system", "content": "..."}, ...].
            model: Model name to use.
            temperature: Sampling temperature controlling randomness.
            max_tokens: Maximum number of tokens to generate.
            stream: Whether to request a streaming response. Streamed
                requests bypass the cache entirely, since the cached value
                would not be a plain JSON payload.

        Returns:
            Parsed JSON response from the API (also for non-200 statuses,
            which the API reports as a JSON error object).
        """
        cache_key = self._get_cache_key(messages, temperature, max_tokens, model)
        if not stream:
            cached_response = self._get_from_cache(cache_key)
            # `is not None` so an empty-dict response still counts as a hit.
            if cached_response is not None:
                return cached_response

        data = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "stream": stream
        }

        response = requests.post(
            self.base_url,
            headers=self.headers,
            json=data,  # let requests serialize and set the charset correctly
            timeout=self.timeout
        )

        # Log failures but still return the parsed body, matching the
        # original best-effort behavior callers may rely on.
        if response.status_code != 200:
            print(f"API请求失败，状态码: {response.status_code}")
            print(f"响应内容: {response.text}")

        response_data = response.json()

        # Only cache successful, non-streamed responses; caching error
        # payloads would pin a failure for the whole TTL window.
        if response.status_code == 200 and not stream:
            self._save_to_cache(cache_key, response_data)

        return response_data

    def simple_chat(self, prompt: str, system_prompt: Optional[str] = None) -> str:
        """
        Minimal one-shot chat helper.

        Args:
            prompt: User prompt.
            system_prompt: Optional system prompt prepended to the conversation.

        Returns:
            The model's reply text, or a fallback message when no reply
            content is available.
        """
        messages = []

        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})

        messages.append({"role": "user", "content": prompt})

        response = self.chat_completion(messages)

        if "choices" in response and len(response["choices"]) > 0:
            message = response["choices"][0]["message"]
            # Prefer the `content` field; fall back to `reasoning_content`
            # when the model returned only reasoning text.
            if message.get("content"):
                return message["content"]
            elif message.get("reasoning_content"):
                return message["reasoning_content"]

        # No choices — surface any error object the API returned.
        if "error" in response:
            print(f"API返回错误: {response['error']}")

        return "无法获取回复"