import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry


class BaichuanAPI:
    """Client for the Baichuan chat-completions HTTP API.

    Each request goes through a retry-enabled ``requests.Session``. On an
    HTTP error, :meth:`chat` automatically falls back to the next model in
    ``supported_models`` until one succeeds or the list is exhausted.
    """

    def __init__(self, api_key):
        """
        Args:
            api_key: Bearer token sent in the ``Authorization`` header.
        """
        self.api_key = api_key
        self.base_url = "https://api.baichuan-ai.com/v1/chat/completions"
        # Supported models, kept consistent with the published billing info.
        # Order matters: it is the fallback order used by chat().
        self.supported_models = [
            "Baichuan4-Turbo",
            "Baichuan4-Air",
            "Baichuan4",
            "Baichuan3-Turbo",
            "Baichuan3-Turbo-128k",
            "Baichuan2-Turbo",
            "Baichuan2-53B"
        ]

        # Context-window size per model (informational, returned to callers).
        self.model_context_lengths = {
            "Baichuan4-Turbo": "32k",
            "Baichuan4-Air": "32k",
            "Baichuan4": "32k",
            "Baichuan3-Turbo": "32k",
            "Baichuan3-Turbo-128k": "128k",
            "Baichuan2-Turbo": "32k",
            "Baichuan2-53B": "32k"
        }

    def create_session(self):
        """Create a request session with a retry strategy attached.

        Returns:
            A ``requests.Session`` that retries up to 3 times (with
            exponential backoff) on 429/5xx responses for https URLs.
        """
        session = requests.Session()
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504]
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        session.mount("https://", adapter)
        return session

    def chat(self, query, model_index=0, stream=False):
        """Call the Baichuan chat API.

        Args:
            query: User query content.
            model_index: Index into ``supported_models``; defaults to the
                first model. On HTTP failure the next index is tried.
            stream: Whether to request a streaming response.

        Returns:
            A dict with ``success`` and, on success, either ``results``
            (non-streaming) or a ``stream`` line iterator, plus
            ``model_used``. On failure, a ``message`` describing the error.
        """
        # Validate the index before allocating any network resources
        # (previously a session was created — and leaked — even on bad input).
        if model_index < 0 or model_index >= len(self.supported_models):
            return {
                "success": False,
                "message": f"无效的模型索引: {model_index}，有效范围是0到{len(self.supported_models) - 1}"
            }

        selected_model = self.supported_models[model_index]
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}"
        }
        payload = {
            "model": selected_model,
            "messages": [{"role": "user", "content": query}],
            "stream": stream
        }

        session = self.create_session()
        # The session must outlive this call only when we hand a live
        # streaming iterator back to the caller; otherwise close it so
        # pooled connections are released (fixes a per-call session leak).
        keep_session_open = False
        try:
            response = session.post(
                self.base_url,
                headers=headers,
                json=payload,
                timeout=40,
                stream=stream
            )

            response.raise_for_status()

            # Streaming: the caller consumes iter_lines(), which reads from
            # the open connection, so the session is intentionally not closed.
            if stream:
                keep_session_open = True
                return {
                    "success": True,
                    "stream": response.iter_lines(),
                    "model_used": selected_model
                }

            # Regular (fully buffered) response.
            result = response.json()
            return {
                "success": True,
                "results": self.format_results(result),
                "model_used": selected_model,
                "context_length": self.model_context_lengths[selected_model]
            }

        except requests.exceptions.HTTPError as e:
            # Fall back to the next model, if any remain.
            if model_index + 1 < len(self.supported_models):
                print(f"模型 {selected_model} 调用失败，尝试下一个模型...")
                return self.chat(query, model_index + 1, stream)
            return {
                "success": False,
                "message": f"HTTP错误: {str(e)}。所有可用模型均尝试失败。"
            }
        except Exception as e:
            return {
                "success": False,
                "message": f"请求失败: {str(e)}"
            }
        finally:
            if not keep_session_open:
                session.close()

    def search(self, query, model_index=0):
        """Search helper built on top of :meth:`chat`.

        Args:
            query: Search keywords.
            model_index: Index into ``supported_models``.

        Returns:
            The :meth:`chat` result dict for a search-style prompt.
        """
        search_prompt = (f"请帮我搜索以下内容并提供准确信息: {query}\n"
                         "请以清晰的结构呈现结果，包括关键信息和来源（如有）。")
        return self.chat(search_prompt, model_index)

    def format_results(self, raw_result):
        """Normalize the raw API payload into a list of result dicts.

        Args:
            raw_result: Decoded JSON response from the API; expected to
                contain a ``choices`` list (missing/empty yields ``[]``).

        Returns:
            A list of dicts with ``id``, ``title``, ``content`` and
            ``finish_reason`` keys, one per choice.
        """
        formatted = []
        # .get() covers both a missing key and an empty list — identical
        # behavior to the previous explicit membership/length check.
        for i, choice in enumerate(raw_result.get("choices", [])):
            formatted.append({
                "id": i + 1,
                "title": f"回应 #{i + 1}",
                "content": choice["message"]["content"],
                "finish_reason": choice.get("finish_reason")
            })
        return formatted

    def get_model_info(self, model_index=None):
        """Look up model metadata.

        Args:
            model_index: Index into ``supported_models``; ``None`` returns
                info for every model.

        Returns:
            A single ``{"name", "context_length"}`` dict, a list of such
            dicts when ``model_index`` is ``None``, or ``None`` for an
            out-of-range index.
        """
        if model_index is not None:
            if 0 <= model_index < len(self.supported_models):
                model_name = self.supported_models[model_index]
                return {
                    "name": model_name,
                    "context_length": self.model_context_lengths[model_name]
                }
            return None

        return [
            {
                "name": model,
                "context_length": self.model_context_lengths[model]
            }
            for model in self.supported_models
        ]
