import json
import re
import time
from typing import Any, Dict, Iterator, List, Optional, Union

import requests

from config import OPENAI_CONFIG
from llm.model_interface import ModelInterface, BaseEmbeddingMixin
from utils.model_logger import ModelLogger

class OpenAIClient(ModelInterface, BaseEmbeddingMixin):
    """Client for an OpenAI-compatible HTTP API.

    Supports legacy text completion (``generate``), chat completion
    (``chat`` — always consumed upstream as a server-sent-event stream),
    embeddings, and model listing.  Every completed or failed call is
    recorded exactly once through :class:`ModelLogger`.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from *config* (falls back to ``OPENAI_CONFIG``).

        Raises:
            ValueError: if no ``api_key`` is present in the config.
        """
        ModelInterface.__init__(self)
        BaseEmbeddingMixin.__init__(self)
        self.config = config or OPENAI_CONFIG
        self.base_url = self.config['base_url']
        self.default_model = self.config['default_model']
        self.embedding_model = self.config['embedding_model']
        self.timeout = self.config.get('timeout', 60)
        self.api_key = self.config.get('api_key')
        self.max_tokens = self.config.get('max_tokens', 2048)
        self.temperature = self.config.get('temperature', 0.7)
        self.top_p = self.config.get('top_p', 0.9)
        if not self.api_key:
            raise ValueError("OpenAI API key is required")
        self.logger = ModelLogger()

    def _get_headers(self) -> Dict[str, str]:
        """Build the HTTP headers (bearer auth + JSON content type)."""
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

    def _prepare_payload(self,
                        model: Optional[str] = None,
                        max_tokens: Optional[int] = None,
                        temperature: Optional[float] = None,
                        top_p: Optional[float] = None,
                        stream: bool = False,
                        **kwargs) -> Dict[str, Any]:
        """Assemble the request body shared by all completion endpoints.

        A per-call argument takes precedence over the configured instance
        default; a sampling key is omitted only when both are ``None``.
        Extra keyword arguments are passed through verbatim.
        """
        payload: Dict[str, Any] = {
            "model": model or self.default_model,
            "stream": stream
        }

        # Explicit argument wins; otherwise fall back to the instance default.
        for key, explicit, default in (
            ("max_tokens", max_tokens, self.max_tokens),
            ("temperature", temperature, self.temperature),
            ("top_p", top_p, self.top_p),
        ):
            chosen = explicit if explicit is not None else default
            if chosen is not None:
                payload[key] = chosen

        payload.update(kwargs)
        return payload

    def generate(self,
                prompt: str,
                model: Optional[str] = None,
                max_tokens: Optional[int] = None,
                temperature: Optional[float] = None,
                top_p: Optional[float] = None,
                stream: bool = False,
                **kwargs) -> Union[str, Iterator[str]]:
        """Generate a completion for *prompt* via ``/completions``.

        Returns:
            The full completion text when ``stream`` is False, otherwise
            an iterator yielding text chunks as they arrive.

        Raises:
            Exception: wrapping any underlying HTTP/parse error.
        """
        url = f"{self.base_url}/completions"
        payload = self._prepare_payload(
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=stream,
            **kwargs
        )
        payload["prompt"] = prompt

        if stream:
            # The generator performs its own logging over its lifetime.
            return self._stream_generate(url, payload)

        start_time = time.time()
        try:
            response = requests.post(
                url,
                json=payload,
                headers=self._get_headers(),
                timeout=self.timeout
            )
            response.raise_for_status()
            response_text = response.json()['choices'][0]['text']

            self.logger.log_model_call(
                call_type='generate',
                request=payload,
                response=response_text,
                duration=time.time() - start_time
            )
            return response_text
        except Exception as e:
            self.logger.log_model_call(
                call_type='generate',
                request=payload,
                error=str(e),
                duration=time.time() - start_time
            )
            raise Exception(f"OpenAI API 调用失败: {str(e)}") from e

    def _stream_generate(self, url: str, payload: Dict[str, Any]) -> Iterator[str]:
        """Yield completion text chunks from a server-sent-event response.

        Logs exactly one record per call: an error record if the stream
        fails, otherwise the accumulated response once the stream ends
        (the previous version logged twice on failure).
        """
        start_time = time.time()
        full_response = ""
        failed = False

        try:
            response = requests.post(
                url,
                json=payload,
                headers=self._get_headers(),
                stream=True,
                timeout=self.timeout
            )
            response.raise_for_status()

            for raw_line in response.iter_lines():
                if not raw_line:
                    continue

                # SSE frames are prefixed with "data: "; the terminal
                # frame is the literal "[DONE]" sentinel.
                line = raw_line.decode('utf-8')
                if line.startswith("data: "):
                    line = line[6:]
                if line == "[DONE]":
                    break

                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    # Keep-alive / partial frames are not JSON — skip them.
                    continue

                choices = data.get('choices') or []
                if choices:
                    text = choices[0].get('text', '')
                    if text:
                        full_response += text
                        yield text

        except Exception as e:
            failed = True
            self.logger.log_model_call(
                call_type='generate',
                request=payload,
                error=str(e),
                duration=time.time() - start_time
            )
            raise Exception(f"OpenAI 流式生成失败: {str(e)}") from e

        finally:
            # Success-path log only; the error path already logged above.
            if not failed:
                self.logger.log_model_call(
                    call_type='generate',
                    request=payload,
                    response=full_response,
                    duration=time.time() - start_time
                )

    def _clean_deepseek_output(self, text: str) -> str:
        """Strip deepseek reasoning blocks (``<think>...</think>``) from *text*.

        Also removes any leading whitespace left behind by the stripped tags.
        """
        text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
        return text.lstrip()

    def chat(self,
            messages: List[Dict[str, str]],
            model: Optional[str] = None,
            max_tokens: Optional[int] = None,
            temperature: Optional[float] = None,
            top_p: Optional[float] = None,
            stream: bool = False,
            **kwargs) -> Union[str, Iterator[str]]:
        """Run a chat completion via ``/chat/completions``.

        The upstream request always uses streaming; when the caller did
        not ask for a stream, the chunks are drained into one string and
        deepseek ``<think>`` blocks are stripped from it.

        Returns:
            The full response text when ``stream`` is False, otherwise an
            iterator of content chunks (the previous version ignored
            ``stream`` and always returned a string, contradicting the
            declared return type).

        Raises:
            Exception: wrapping any underlying HTTP/parse error.
        """
        url = f"{self.base_url}/chat/completions"
        payload = self._prepare_payload(
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,  # upstream request is always streamed
            **kwargs
        )
        payload["messages"] = messages

        if stream:
            # Hand the live generator to the caller; it logs itself.
            return self._stream_chat(url, payload)

        start_time = time.time()
        try:
            response_text = "".join(self._stream_chat(url, payload, log=False))
            response_text = self._clean_deepseek_output(response_text)

            self.logger.log_model_call(
                call_type='chat',
                request=payload,
                response=response_text,
                duration=time.time() - start_time
            )
            return response_text

        except Exception as e:
            self.logger.log_model_call(
                call_type='chat',
                request=payload,
                error=str(e),
                duration=time.time() - start_time
            )
            raise Exception(f"OpenAI API 调用失败: {str(e)}") from e

    def _stream_chat(self, url: str, payload: Dict[str, Any], log: bool = True) -> Iterator[str]:
        """Yield chat content chunks from a server-sent-event response.

        For deepseek models, chunks that contain a complete
        ``<think>...</think>`` pair are cleaned in place, while chunks
        holding only an opening or closing tag are dropped entirely.

        When *log* is True, exactly one record is emitted per call: an
        error record on failure, otherwise the accumulated response
        (the previous version logged twice on failure).
        """
        start_time = time.time()
        full_response = ""
        failed = False

        try:
            response = requests.post(
                url,
                json=payload,
                headers=self._get_headers(),
                stream=True,
                timeout=self.timeout
            )
            response.raise_for_status()

            for raw_line in response.iter_lines():
                if not raw_line:
                    continue

                # SSE frames are prefixed with "data: "; the terminal
                # frame is the literal "[DONE]" sentinel.
                line = raw_line.decode('utf-8')
                if line.startswith("data: "):
                    line = line[6:]
                if line == "[DONE]":
                    break

                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    # Keep-alive / partial frames are not JSON — skip them.
                    continue

                choices = data.get('choices') or []
                if not choices:
                    continue
                content = choices[0].get('delta', {}).get('content')
                if not content:
                    continue

                # Filter deepseek reasoning tags on the fly: clean chunks
                # that carry a complete tag pair, drop chunks that carry a
                # dangling opening/closing tag.
                if "deepseek" in payload.get("model", "").lower():
                    if "<think>" in content and "</think>" in content:
                        content = self._clean_deepseek_output(content)
                    elif "<think>" in content or "</think>" in content:
                        continue

                full_response += content
                yield content

        except Exception as e:
            failed = True
            if log:
                self.logger.log_model_call(
                    call_type='chat',
                    request=payload,
                    error=str(e),
                    duration=time.time() - start_time
                )
            raise Exception(f"OpenAI 流式对话失败: {str(e)}") from e

        finally:
            # Success-path log only; the error path already logged above.
            if log and not failed:
                self.logger.log_model_call(
                    call_type='chat',
                    request=payload,
                    response=full_response,
                    duration=time.time() - start_time
                )

    def _get_remote_embeddings(self,
                      text: Union[str, List[str]],
                      model: Optional[str] = None) -> List[List[float]]:
        """Fetch embedding vectors for *text* via ``/embeddings``.

        A single string is treated as a one-element batch; one vector is
        returned per input string.

        Raises:
            Exception: wrapping any underlying HTTP/parse error.
        """
        model = model or self.embedding_model
        url = f"{self.base_url}/embeddings"

        if isinstance(text, str):
            text = [text]

        payload = {
            "model": model,
            "input": text
        }

        try:
            response = requests.post(
                url,
                json=payload,
                headers=self._get_headers(),
                timeout=self.timeout
            )
            response.raise_for_status()
            result = response.json()
            return [item['embedding'] for item in result['data']]
        except Exception as e:
            raise Exception(f"OpenAI API 调用失败: {str(e)}") from e

    def _fetch_remote_models(self) -> List[str]:
        """Return the IDs of the models available on the remote service.

        OpenAI model IDs already embed the version (e.g.
        ``gpt-3.5-turbo-0125``).  On any failure a hard-coded versioned
        fallback list is returned instead of raising.
        """
        try:
            url = f"{self.base_url}/models"
            response = requests.get(
                url,
                headers=self._get_headers(),
                timeout=self.timeout
            )
            response.raise_for_status()
            all_models = response.json().get('data', [])
            return [model['id'] for model in all_models]
        except Exception as e:
            print(f"Error fetching OpenAI models: {str(e)}")
            # Versioned default list used as a best-effort fallback.
            return [
                'gpt-3.5-turbo-0125',
                'gpt-4-0125-preview',
                'text-embedding-3-small'
            ]