"""
SiliconFlow API 封装

提供对 SiliconFlow 服务的访问接口，支持OpenAI和Anthropic风格的API。
"""

import json
import time
from typing import Any, Dict, List, Optional, Iterator

# 尝试导入requests，如果失败则提供错误信息
try:
    import requests
    REQUESTS_AVAILABLE = True
except ImportError:
    REQUESTS_AVAILABLE = False

from .base import AIModel, ModelFactory
from ..utils.config import Config


class SiliconFlowModel(AIModel):
    """SiliconFlow model backend.

    Talks to a SiliconFlow-compatible HTTP endpoint using either the
    OpenAI chat-completions API style or the Anthropic messages API
    style, selected via the ``models.siliconflow.api_style`` config key.
    """

    def __init__(self, config: Config) -> None:
        """Read connection settings from *config* and prepare request state.

        Args:
            config: Project configuration object providing API key,
                base URL, model name and API style for "siliconflow".

        Raises:
            ValueError: If no API key is configured.
        """
        super().__init__(config)

        api_key = config.get_api_key("siliconflow")
        base_url = config.get_base_url("siliconflow")
        model = config.get_model("siliconflow")
        api_style = config.get("models.siliconflow.api_style", "openai")

        if not api_key:
            raise ValueError("SiliconFlow API密钥未设置")

        self.api_key = api_key
        # Normalize once here so URL joining below never yields "//".
        self.base_url = base_url.rstrip('/')
        self.model = model
        self.api_style = api_style  # "openai" or "anthropic"

        # Default sampling parameters; callers may override via **kwargs.
        self.default_params = {
            "temperature": 0.7,
            "max_tokens": 2048,
            "top_p": 1.0,
            "frequency_penalty": 0.0,
            "presence_penalty": 0.0,
        }

        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _require_requests() -> None:
        """Fail fast with an actionable message when requests is missing."""
        if not REQUESTS_AVAILABLE:
            raise RuntimeError("缺少requests依赖，请运行: pip install requests")

    @staticmethod
    def _translate_http_error(e) -> Exception:
        """Map a ``requests`` HTTPError to the exception the caller should raise.

        Returns (does not raise) the mapped exception so call sites can
        ``raise`` it inside their own ``except`` clause.
        """
        status = e.response.status_code
        if status == 401:
            return ValueError("SiliconFlow API密钥无效")
        if status == 429:
            return RuntimeError("API请求频率超限，请稍后重试")
        if status == 400:
            # Surface the server-provided message when the body is JSON.
            try:
                error_detail = e.response.json()
                error_msg = error_detail.get('error', {}).get('message', str(e))
            except Exception:
                error_msg = str(e)
            return RuntimeError(f"SiliconFlow API请求参数错误: {error_msg}")
        return RuntimeError(f"SiliconFlow API错误: {e}")

    def _openai_payload(self, prompt: str, params: Dict[str, Any],
                        stream: bool) -> Dict[str, Any]:
        """Build an OpenAI chat-completions request body."""
        return {
            "model": self.model,
            "messages": [
                {"role": "user", "content": prompt}
            ],
            "stream": stream,
            **params,
        }

    def _anthropic_payload(self, prompt: str, params: Dict[str, Any],
                           stream: bool) -> Dict[str, Any]:
        """Build an Anthropic messages request body.

        Only forwards the parameters the Anthropic style supports
        (max_tokens / temperature / top_p); other sampling params are
        deliberately dropped.
        """
        payload = {
            "model": self.model,
            "max_tokens": params.get("max_tokens", 2048),
            "temperature": params.get("temperature", 0.7),
            "top_p": params.get("top_p", 1.0),
            "messages": [
                {"role": "user", "content": prompt}
            ],
        }
        if stream:
            # Non-streaming requests omit the key entirely, matching the
            # original request shape.
            payload["stream"] = True
        return payload

    @staticmethod
    def _iter_sse_json(response) -> Iterator[Dict[str, Any]]:
        """Yield each JSON event from a server-sent-events response.

        Skips keep-alive blanks and non-``data:`` lines, stops at the
        ``[DONE]`` sentinel, and silently ignores malformed fragments.
        """
        for raw in response.iter_lines():
            if not raw:
                continue
            line = raw.decode('utf-8')
            if not line.startswith('data: '):
                continue
            data_str = line[6:]  # strip the 'data: ' prefix
            if data_str.strip() == '[DONE]':
                break
            try:
                yield json.loads(data_str)
            except json.JSONDecodeError:
                continue

    # ------------------------------------------------------------------
    # Public generation API
    # ------------------------------------------------------------------

    def generate(self, prompt: str, stream: bool = False, **kwargs) -> str:
        """Generate a complete response for *prompt*.

        Args:
            prompt: Input prompt text.
            stream: When True, use the streaming endpoint internally but
                still return the fully assembled response.
            **kwargs: Overrides for the default sampling parameters.

        Returns:
            The generated response text.

        Raises:
            ValueError: If the API key is rejected (HTTP 401).
            RuntimeError: On connection, timeout, HTTP or other failures.
        """
        self._require_requests()

        try:
            params = {**self.default_params, **kwargs}

            if self.api_style == "openai":
                if stream:
                    return self._generate_openai_style_stream_complete(prompt, params)
                return self._generate_openai_style(prompt, params)
            elif self.api_style == "anthropic":
                if stream:
                    return self._generate_anthropic_style_stream_complete(prompt, params)
                return self._generate_anthropic_style(prompt, params)
            else:
                raise ValueError(f"不支持的API风格: {self.api_style}")

        except requests.exceptions.ConnectionError:
            raise RuntimeError(f"无法连接到SiliconFlow服务: {self.base_url}")
        except requests.exceptions.Timeout:
            raise RuntimeError("请求超时，请检查网络连接")
        except requests.exceptions.HTTPError as e:
            raise self._translate_http_error(e)
        except Exception as e:
            raise RuntimeError(f"请求失败: {e}")

    def generate_stream(self, prompt: str, **kwargs) -> Iterator[str]:
        """Stream a response, yielding text fragments as they arrive.

        Args:
            prompt: Input prompt text.
            **kwargs: Overrides for the default sampling parameters.

        Yields:
            Response text fragments.

        Raises:
            ValueError: If the API key is rejected (HTTP 401).
            RuntimeError: On connection, timeout, HTTP or other failures.
        """
        self._require_requests()

        try:
            params = {**self.default_params, **kwargs}

            if self.api_style == "openai":
                yield from self._generate_openai_style_stream(prompt, params)
            elif self.api_style == "anthropic":
                yield from self._generate_anthropic_style_stream(prompt, params)
            else:
                raise ValueError(f"不支持的API风格: {self.api_style}")

        except requests.exceptions.ConnectionError:
            raise RuntimeError(f"无法连接到SiliconFlow服务: {self.base_url}")
        except requests.exceptions.Timeout:
            raise RuntimeError("请求超时，请检查网络连接")
        except requests.exceptions.HTTPError as e:
            raise self._translate_http_error(e)
        except Exception as e:
            raise RuntimeError(f"请求失败: {e}")

    # ------------------------------------------------------------------
    # Style-specific request implementations
    # ------------------------------------------------------------------

    def _generate_openai_style(self, prompt: str, params: Dict[str, Any]) -> str:
        """Call the OpenAI-style chat endpoint and return the full reply."""
        response = requests.post(
            f"{self.base_url}/v1/chat/completions",
            headers=self.headers,
            json=self._openai_payload(prompt, params, stream=False),
            timeout=60,
        )
        response.raise_for_status()

        result = response.json()
        choices = result.get("choices")
        if choices:
            # Guard against an explicit null content from the server.
            return choices[0]["message"]["content"] or ""
        return "未收到有效响应"

    def _generate_anthropic_style(self, prompt: str, params: Dict[str, Any]) -> str:
        """Call the Anthropic-style messages endpoint and return the full reply."""
        response = requests.post(
            f"{self.base_url}/v1/messages",
            headers=self.headers,
            json=self._anthropic_payload(prompt, params, stream=False),
            timeout=60,
        )
        response.raise_for_status()

        result = response.json()
        blocks = result.get("content")
        if blocks:
            return blocks[0]["text"] or ""
        return "未收到有效响应"

    def _generate_openai_style_stream(self, prompt: str,
                                      params: Dict[str, Any]) -> Iterator[str]:
        """Stream reply fragments from the OpenAI-style chat endpoint."""
        response = requests.post(
            f"{self.base_url}/v1/chat/completions",
            headers=self.headers,
            json=self._openai_payload(prompt, params, stream=True),
            timeout=60,
            stream=True,
        )
        response.raise_for_status()

        for event in self._iter_sse_json(response):
            choices = event.get("choices")
            if choices:
                content = choices[0].get("delta", {}).get("content", "")
                if content:
                    yield content

    def _generate_anthropic_style_stream(self, prompt: str,
                                         params: Dict[str, Any]) -> Iterator[str]:
        """Stream reply fragments from the Anthropic-style messages endpoint."""
        response = requests.post(
            f"{self.base_url}/v1/messages",
            headers=self.headers,
            json=self._anthropic_payload(prompt, params, stream=True),
            timeout=60,
            stream=True,
        )
        response.raise_for_status()

        for event in self._iter_sse_json(response):
            # Anthropic streams typed events; only deltas carry text.
            if event.get("type") == "content_block_delta":
                text = event.get("delta", {}).get("text", "")
                if text:
                    yield text

    def _generate_openai_style_stream_complete(self, prompt: str,
                                               params: Dict[str, Any]) -> str:
        """Consume the OpenAI-style stream and return the assembled response."""
        return "".join(self._generate_openai_style_stream(prompt, params)) or "未收到有效响应"

    def _generate_anthropic_style_stream_complete(self, prompt: str,
                                                  params: Dict[str, Any]) -> str:
        """Consume the Anthropic-style stream and return the assembled response."""
        return "".join(self._generate_anthropic_style_stream(prompt, params)) or "未收到有效响应"

    # ------------------------------------------------------------------
    # Auxiliary service endpoints
    # ------------------------------------------------------------------

    def is_available(self) -> bool:
        """Return True if the service answers a minimal test request.

        Returns:
            Whether the model backend is usable right now.
        """
        if not REQUESTS_AVAILABLE:
            return False

        try:
            # Cheapest possible round trip: request a single token.
            self.generate("test", max_tokens=1)
            return True
        except Exception:
            return False

    def get_available_models(self) -> List[str]:
        """Return the model IDs advertised by the service.

        Returns:
            List of model IDs; empty on any failure (best effort).
        """
        if not REQUESTS_AVAILABLE:
            return []

        try:
            response = requests.get(
                f"{self.base_url}/v1/models",
                headers=self.headers,
                timeout=10,
            )
            response.raise_for_status()

            result = response.json()
            return [entry["id"] for entry in result.get("data", [])]
        except Exception:
            # Listing models is optional functionality; never raise.
            return []

    def create_embeddings(self, text: str) -> List[float]:
        """Create an embedding vector for *text*.

        Args:
            text: Input text.

        Returns:
            The embedding vector.

        Raises:
            RuntimeError: If the request fails or returns no embedding.
        """
        self._require_requests()

        try:
            response = requests.post(
                f"{self.base_url}/v1/embeddings",
                headers=self.headers,
                json={
                    "model": "text-embedding-ada-002",  # default embedding model
                    "input": text,
                },
                timeout=60,
            )
            response.raise_for_status()

            result = response.json()
            data = result.get("data")
            if data:
                return data[0]["embedding"]
            raise RuntimeError("未收到有效的嵌入向量")

        except Exception as e:
            raise RuntimeError(f"创建嵌入向量失败: {e}")

    def rerank_documents(self, query: str, documents: List[str],
                         top_n: int = 3) -> List[Dict[str, Any]]:
        """Rerank *documents* by relevance to *query*.

        Args:
            query: Query text.
            documents: Candidate documents.
            top_n: Number of top results to return.

        Returns:
            The ranked result entries as returned by the service.

        Raises:
            RuntimeError: If the request fails.
        """
        self._require_requests()

        try:
            response = requests.post(
                f"{self.base_url}/v1/rerank",
                headers=self.headers,
                json={
                    "model": "rerank-english-v2.0",  # default rerank model
                    "query": query,
                    "documents": documents,
                    "top_n": top_n,
                },
                timeout=60,
            )
            response.raise_for_status()

            result = response.json()
            return result.get("results", [])

        except Exception as e:
            raise RuntimeError(f"文档重排序失败: {e}")


# Register this backend with the model factory under the "siliconflow" key
# so it can be instantiated by name from configuration.
ModelFactory.register("siliconflow", SiliconFlowModel)
