import requests
from typing import Dict, Any, Optional, List, Union, Iterator
import json
from config import VLLM_CONFIG
from llm.model_interface import ModelInterface, BaseEmbeddingMixin

class VLLMClient(ModelInterface, BaseEmbeddingMixin):
    """Client for a vLLM server exposing the OpenAI-compatible HTTP API.

    Provides text completion (``generate``), chat completion (``chat``),
    embeddings (``_get_remote_embeddings``) and model listing
    (``get_available_models``), with optional server-sent-event (SSE)
    streaming for the two generation endpoints.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None, api_key: Optional[str] = None):
        """Initialize the client.

        Args:
            config: Connection settings; falls back to ``VLLM_CONFIG``.
                Must provide 'base_url', 'default_model' and
                'embedding_model'; 'timeout' defaults to 60 seconds.
            api_key: Accepted for signature compatibility with other
                backends; not used by this client.
        """
        ModelInterface.__init__(self)
        BaseEmbeddingMixin.__init__(self)
        self.config = config or VLLM_CONFIG
        self.base_url = self.config['base_url']
        self.default_model = self.config['default_model']
        self.embedding_model = self.config['embedding_model']
        self.timeout = self.config.get('timeout', 60)

    def _prepare_payload(self, 
                        model: Optional[str] = None,
                        max_tokens: Optional[int] = None,
                        temperature: Optional[float] = None,
                        top_p: Optional[float] = None,
                        stream: bool = False,
                        **kwargs) -> Dict[str, Any]:
        """Build the request payload shared by the completion endpoints.

        Only explicitly supplied sampling parameters are included, so the
        server-side defaults apply otherwise.
        """
        payload: Dict[str, Any] = {
            "model": model or self.default_model,
            "stream": stream,
        }
        # BUGFIX: compare against None instead of truthiness so that valid
        # zero values (e.g. temperature=0.0 for greedy decoding, top_p=0,
        # max_tokens=0) are no longer silently dropped from the request.
        if max_tokens is not None:
            payload["max_tokens"] = max_tokens
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p
        # Forward any extra sampling/engine options verbatim.
        payload.update(kwargs)
        return payload

    @staticmethod
    def _iter_sse_json(response) -> Iterator[Dict[str, Any]]:
        """Parse a streaming response's server-sent-event lines into JSON.

        The OpenAI-compatible API emits lines of the form ``data: {...}``
        terminated by ``data: [DONE]``. Empty or unparseable lines are
        skipped; bare JSON lines (no ``data:`` prefix) are also accepted
        for robustness against non-SSE servers.
        """
        for raw in response.iter_lines():
            if not raw:
                continue
            line = raw.decode("utf-8", errors="replace").strip()
            # BUGFIX: the previous implementation fed the raw SSE line
            # (including the "data: " prefix) straight to json.loads, which
            # always raised JSONDecodeError — streaming yielded nothing.
            if line.startswith("data:"):
                line = line[5:].strip()
            if line == "[DONE]":
                break
            try:
                yield json.loads(line)
            except json.JSONDecodeError:
                continue

    def generate(self, 
                prompt: str, 
                model: Optional[str] = None,
                max_tokens: Optional[int] = None,
                temperature: Optional[float] = None,
                top_p: Optional[float] = None,
                stream: bool = False,
                **kwargs) -> Union[str, Iterator[str]]:
        """Generate a completion for ``prompt`` via ``/v1/completions``.

        Returns:
            The full generated text, or an iterator of text chunks when
            ``stream=True``.

        Raises:
            Exception: wrapping the underlying HTTP or parsing error.
        """
        url = f"{self.base_url}/v1/completions"
        payload = self._prepare_payload(
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=stream,
            **kwargs
        )
        payload["prompt"] = prompt

        if stream:
            # Generator creation is lazy and never raises; errors surface
            # on iteration, so no try/except is needed here.
            return self._stream_generate(url, payload)
        try:
            response = requests.post(url, json=payload, timeout=self.timeout)
            response.raise_for_status()
            result = response.json()
            return result['choices'][0]['text']
        except Exception as e:
            raise Exception(f"VLLM API 调用失败: {str(e)}") from e

    def _stream_generate(self, url: str, payload: Dict[str, Any]) -> Iterator[str]:
        """Yield text chunks from a streaming completion request."""
        try:
            # Context manager ensures the HTTP connection is released even
            # if the consumer abandons the generator mid-stream.
            with requests.post(url, json=payload, stream=True, timeout=self.timeout) as response:
                response.raise_for_status()
                for data in self._iter_sse_json(response):
                    choices = data.get('choices') or []
                    if choices:
                        text = choices[0].get('text', '')
                        if text:
                            yield text
        except Exception as e:
            raise Exception(f"VLLM 流式生成失败: {str(e)}") from e

    def chat(self, 
            messages: List[Dict[str, str]],
            model: Optional[str] = None,
            max_tokens: Optional[int] = None,
            temperature: Optional[float] = None,
            top_p: Optional[float] = None,
            stream: bool = False,
            **kwargs) -> Union[str, Iterator[str]]:
        """Run a chat completion via ``/v1/chat/completions``.

        Args:
            messages: OpenAI-style message dicts (``role``/``content``).

        Returns:
            The assistant message content, or an iterator of content
            deltas when ``stream=True``.

        Raises:
            Exception: wrapping the underlying HTTP or parsing error.
        """
        url = f"{self.base_url}/v1/chat/completions"
        payload = self._prepare_payload(
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=stream,
            **kwargs
        )
        payload["messages"] = messages

        if stream:
            # Lazy generator; see generate() for rationale.
            return self._stream_chat(url, payload)
        try:
            response = requests.post(url, json=payload, timeout=self.timeout)
            response.raise_for_status()
            result = response.json()
            return result['choices'][0]['message']['content']
        except Exception as e:
            raise Exception(f"VLLM API 调用失败: {str(e)}") from e

    def _stream_chat(self, url: str, payload: Dict[str, Any]) -> Iterator[str]:
        """Yield content deltas from a streaming chat request."""
        try:
            with requests.post(url, json=payload, stream=True, timeout=self.timeout) as response:
                response.raise_for_status()
                for data in self._iter_sse_json(response):
                    choices = data.get('choices') or []
                    if choices:
                        # Chat streams carry incremental text in the
                        # 'delta' object rather than 'text'.
                        content = choices[0].get('delta', {}).get('content')
                        if content:
                            yield content
        except Exception as e:
            raise Exception(f"VLLM 流式对话失败: {str(e)}") from e

    def _get_remote_embeddings(self, 
                      text: Union[str, List[str]], 
                      model: Optional[str] = None) -> List[List[float]]:
        """Fetch embedding vectors via ``/v1/embeddings``.

        Args:
            text: A single string or a list of strings to embed.
            model: Embedding model name; defaults to the configured one.

        Returns:
            One embedding vector per input string, in input order.

        Raises:
            Exception: wrapping the underlying HTTP or parsing error.
        """
        model = model or self.embedding_model
        url = f"{self.base_url}/v1/embeddings"

        # Normalize to the list form the endpoint expects.
        if isinstance(text, str):
            text = [text]

        payload = {
            "model": model,
            "input": text
        }

        try:
            response = requests.post(url, json=payload, timeout=self.timeout)
            response.raise_for_status()
            result = response.json()
            # Order by the API-provided 'index' so results always align
            # with the input list, even if the server reorders items.
            items = sorted(result['data'], key=lambda item: item.get('index', 0))
            return [item['embedding'] for item in items]
        except Exception as e:
            raise Exception(f"VLLM API 调用失败: {str(e)}") from e

    def get_available_models(self) -> List[str]:
        """List model IDs served by the vLLM instance via ``/v1/models``.

        IDs without a ``:`` version tag are replaced by the configured
        default model name (assumed to carry version information). On any
        failure, falls back to the configured default model.
        """
        try:
            url = f"{self.base_url}/v1/models"
            response = requests.get(url, timeout=self.timeout)
            response.raise_for_status()
            models = response.json().get('data', [])
            model_names = []
            for model in models:
                model_id = model['id']
                # Substitute the configured default when the served ID
                # carries no version tag (vLLM IDs usually include one).
                if ':' not in model_id:
                    model_id = self.default_model
                model_names.append(model_id)
            return model_names
        except Exception as e:
            print(f"Error fetching VLLM models: {str(e)}")
            # Endpoint unsupported or unreachable: fall back to the
            # configured default model (expected to include a version).
            return [self.default_model]