"""vLLM LLM Provider实现"""

import os
import time
from typing import Dict, Any, Optional, Generator, List
import numpy as np
import logging
import requests
import json
from backend.llm.base import BaseLLMProvider, LLMProviderFactory

# Module-level logger for this provider; child of the 'llm' logger hierarchy,
# so handlers/levels configured on 'llm' apply here too.
logger = logging.getLogger('llm.vllm')


class VLLMProvider(BaseLLMProvider):
    """vLLM LLM provider.

    Talks to a running vLLM server over its OpenAI-compatible HTTP API:
    ``/v1/models`` for a connectivity check, ``/v1/completions`` for text
    generation (plain and streamed SSE), and optionally ``/v1/embeddings``
    when an embedding model is configured.
    """

    def __init__(self, config: Dict[str, Any]):
        """Initialize the vLLM provider.

        Args:
            config: Configuration dict. Recognized keys:
                base_url: vLLM server URL (falls back to the VLLM_BASE_URL
                    environment variable, then ``http://localhost:8000``).
                model: Completion model name (default ``facebook/opt-125m``,
                    a lightweight model).
                max_tokens / temperature / top_p: Default sampling params.
                embedding_model: Optional embedding model name. When empty,
                    embedding calls fall back to random vectors.
                embedding_dimension: Dimension of the fallback random vectors.
                timeout: HTTP request timeout in seconds.
        """
        super().__init__(config)
        self.base_url = config.get('base_url', os.getenv('VLLM_BASE_URL', 'http://localhost:8000'))
        self.model_name = config.get('model', 'facebook/opt-125m')  # lightweight default model
        self.max_tokens = config.get('max_tokens', 4096)
        self.temperature = config.get('temperature', 0.7)
        self.top_p = config.get('top_p', 0.95)

        # Embedding configuration (vLLM itself is generation-focused; an
        # embedding endpoint is only used when explicitly configured).
        self.embedding_model_name = config.get('embedding_model', '')
        self.embedding_dimension = config.get('embedding_dimension', 768)

        # HTTP request timeout in seconds, applied to every request below.
        self.request_timeout = config.get('timeout', 60)
        # NOTE(review): generate()/generate_stream() read self.model before
        # load_model() runs — presumably BaseLLMProvider.__init__ initializes
        # it (e.g. to None). TODO confirm against the base class.

    def _completion_params(self, prompt: str, stream: bool, **kwargs) -> Dict[str, Any]:
        """Build the JSON payload for a /v1/completions request.

        Per-call kwargs override the instance-level sampling defaults.
        """
        return {
            "model": self.model_name,
            "prompt": prompt,
            "max_tokens": kwargs.get("max_tokens", self.max_tokens),
            "temperature": kwargs.get("temperature", self.temperature),
            "top_p": kwargs.get("top_p", self.top_p),
            "stream": stream,
        }

    def load_model(self) -> bool:
        """Check that the vLLM service is reachable.

        vLLM exposes no dedicated health-check endpoint, so listing models
        doubles as the connectivity probe. On success ``self.model`` is set
        to the configured model name; on a connection error it is cleared.

        Returns:
            True when the service answered HTTP 200, False otherwise.
        """
        try:
            models_url = f"{self.base_url}/v1/models"
            response = requests.get(models_url, timeout=self.request_timeout)
        except Exception as e:
            logger.error("连接vLLM服务失败: %s", str(e))
            self.model = None
            return False

        if response.status_code == 200:
            self.model = self.model_name
            logger.info("成功连接到vLLM服务，模型: %s", self.model_name)
            return True
        logger.error("vLLM服务检查失败，状态码: %s", response.status_code)
        return False

    def generate(self, prompt: str, **kwargs) -> str:
        """Generate a text completion for *prompt*.

        Args:
            prompt: Input prompt.
            **kwargs: Optional per-call overrides (max_tokens, temperature,
                top_p).

        Returns:
            The generated text, or an error string (prefixed "错误:") when
            the service is unreachable or the response is unusable.
        """
        # Lazily (re)connect if the service was never checked or went away.
        if not self.model:
            if not self.load_model():
                return "错误: vLLM服务未连接"

        try:
            url = f"{self.base_url}/v1/completions"
            response = requests.post(
                url,
                json=self._completion_params(prompt, stream=False, **kwargs),
                timeout=self.request_timeout,
            )

            if response.status_code != 200:
                logger.error("vLLM生成请求失败，状态码: %s, 响应: %s",
                             response.status_code, response.text)
                return f"错误: 请求失败，状态码: {response.status_code}"

            data = response.json()
            choices = data.get('choices') or []
            if choices:
                return choices[0].get('text', '错误: 未获得有效响应')
        except Exception as e:
            # Boundary catch: network errors, JSON decode errors, etc. are
            # all reported to the caller as an error string, never raised.
            logger.error("vLLM生成失败: %s", str(e))
            return f"错误: {str(e)}"

        # HTTP 200 but no usable choices in the payload.
        return "错误: 未获得有效响应"

    def generate_stream(self, prompt: str, **kwargs) -> Generator[str, None, None]:
        """Stream a text completion for *prompt*.

        Args:
            prompt: Input prompt.
            **kwargs: Optional per-call overrides (max_tokens, temperature,
                top_p).

        Yields:
            Text fragments as they arrive; a single error string (prefixed
            "错误:") on failure.
        """
        if not self.model:
            if not self.load_model():
                yield "错误: vLLM服务未连接"
                return

        try:
            url = f"{self.base_url}/v1/completions"
            with requests.post(
                url,
                json=self._completion_params(prompt, stream=True, **kwargs),
                stream=True,
                timeout=self.request_timeout,
            ) as response:

                if response.status_code != 200:
                    logger.error("vLLM流式生成请求失败，状态码: %s", response.status_code)
                    yield f"错误: 请求失败，状态码: {response.status_code}"
                    return

                # The streamed body is server-sent events: each non-empty
                # line is "data: <json>" and the stream ends with "[DONE]".
                for line in response.iter_lines():
                    if not line:
                        continue
                    line_str = line.decode('utf-8')
                    if line_str.startswith('data: '):
                        line_str = line_str[6:]

                    if line_str == '[DONE]':
                        break

                    try:
                        chunk_data = json.loads(line_str)
                    except json.JSONDecodeError:
                        # Skip malformed frames rather than aborting the stream.
                        logger.warning("无法解析vLLM响应行: %s", line_str)
                        continue

                    choices = chunk_data.get('choices') or []
                    if choices:
                        text = choices[0].get('text', '')
                        if text:
                            yield text
        except Exception as e:
            logger.error("vLLM流式生成失败: %s", str(e))
            yield f"错误: {str(e)}"

    def generate_embedding(self, text: str) -> np.ndarray:
        """Generate an embedding for *text*.

        vLLM primarily serves completions; embeddings are only requested
        when an embedding model was configured. On any failure — or when no
        embedding model is configured — a random vector of
        ``embedding_dimension`` is returned so callers always get an array.

        Args:
            text: Input text.

        Returns:
            A 1-D numpy array (real embedding, or random fallback).
        """
        if self.embedding_model_name:
            try:
                params = {
                    "model": self.embedding_model_name,
                    "input": [text],
                }
                url = f"{self.base_url}/v1/embeddings"
                response = requests.post(url, json=params, timeout=self.request_timeout)

                if response.status_code == 200:
                    items = response.json().get('data') or []
                    if items:
                        embedding = items[0].get('embedding', [])
                        if embedding:
                            return np.array(embedding)
                else:
                    logger.error("vLLM嵌入生成请求失败，状态码: %s", response.status_code)
            except Exception as e:
                logger.error("vLLM嵌入生成失败: %s", str(e))

        # Make the fallback visible in logs instead of silently returning noise.
        logger.debug("No embedding model configured or request failed; "
                     "returning a random %d-dim vector", self.embedding_dimension)
        return np.random.rand(self.embedding_dimension)

    def generate_embeddings(self, texts: List[str]) -> List[np.ndarray]:
        """Generate embeddings for a batch of texts.

        Args:
            texts: Input texts.

        Returns:
            One 1-D numpy array per input text; random fallback vectors when
            no embedding model is configured or the request fails.
        """
        if self.embedding_model_name:
            try:
                params = {
                    "model": self.embedding_model_name,
                    "input": texts,
                }
                url = f"{self.base_url}/v1/embeddings"
                response = requests.post(url, json=params, timeout=self.request_timeout)

                if response.status_code == 200:
                    data = response.json()
                    if 'data' in data:
                        return [np.array(item.get('embedding', [])) for item in data['data']]
                else:
                    logger.error("vLLM批量嵌入生成请求失败，状态码: %s", response.status_code)
            except Exception as e:
                logger.error("vLLM批量嵌入生成失败: %s", str(e))

        logger.debug("No embedding model configured or request failed; "
                     "returning %d random %d-dim vectors",
                     len(texts), self.embedding_dimension)
        return [np.random.rand(self.embedding_dimension) for _ in texts]

    def close(self):
        """Release resources.

        vLLM is accessed over stateless HTTP calls, so there is nothing to
        tear down beyond clearing the cached model marker.
        """
        self.model = None


# Register this provider with the factory under the 'vllm' key so it can be
# instantiated by name (import-time side effect).
LLMProviderFactory.register_provider('vllm', VLLMProvider)