"""
VLLM提供商
支持访问本地运行的VLLM服务
"""
import json
import os
import time
import logging
from typing import Dict, List, Optional, Any, Generator
import requests

from ..provider import LocalProvider

# Module-level logger for this provider (child of the "llm" logger hierarchy)
logger = logging.getLogger("llm.vllm")


class VLLMProvider(LocalProvider):
    """Provider for a locally running VLLM server.

    Talks to VLLM's OpenAI-compatible HTTP API (``/v1/models`` and
    ``/v1/chat/completions``) at the host given in the config.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the VLLM provider.

        Args:
            config: Configuration dict. Recognized keys:
                ``model`` (default "llama3"), ``temperature`` (default 0.1),
                ``max_tokens`` (default 4096), ``host`` (default
                "http://localhost:8000"), ``gpu_memory`` (GB cap, optional),
                ``timeout`` (per-request timeout in seconds, default 120).
        """
        super().__init__(config)
        self.model = config.get("model", "llama3")  # default model id
        self.temperature = config.get("temperature", 0.1)
        self.max_tokens = config.get("max_tokens", 4096)
        self.host = config.get("host", "http://localhost:8000")  # VLLM server URL
        # GPU memory limit (GB); not used in this class — presumably consumed
        # by the launcher/base class. TODO confirm.
        self.gpu_memory = config.get("gpu_memory", None)
        # Request timeout in seconds — without it a stalled server would hang
        # the caller forever.
        self.timeout = config.get("timeout", 120)

        # Probe the service up front so misconfiguration is logged early.
        self._check_vllm_service()

    def _check_vllm_service(self) -> None:
        """Probe the VLLM service and log available models (best-effort).

        Never raises: an unreachable service only produces a warning, since
        it may come up after this provider is constructed.
        """
        try:
            # Short timeout: this is only a construction-time health probe.
            response = requests.get(f"{self.host}/v1/models", timeout=5)
            if response.status_code == 200:
                models = response.json().get("data", [])
                # str() guards against entries with a missing "id" (None).
                model_ids = [str(model.get("id")) for model in models]
                logger.info(f"VLLM服务可用，可用模型: {', '.join(model_ids)}")
            else:
                logger.warning(f"VLLM服务响应异常: {response.status_code}")
        except Exception as e:
            # Deliberately non-fatal (see docstring).
            logger.warning(f"无法连接到VLLM服务: {str(e)}")

    def _build_payload(self, messages: List[Dict[str, str]], stream: bool) -> Dict[str, Any]:
        """Build the JSON body for a chat-completions request.

        Args:
            messages: OpenAI-style message list ({"role", "content"} dicts).
            stream: Whether to request a streaming (SSE) response.

        Returns:
            Request payload dict for ``/v1/chat/completions``.
        """
        return {
            "model": self.model,
            "messages": messages,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "stream": stream,
        }

    def complete(self, messages: List[Dict[str, str]], stream: bool = False) -> str:
        """
        Get an LLM completion.

        Args:
            messages: Message list.
            stream: If True, consume the streaming endpoint internally and
                return the concatenated text.

        Returns:
            The completion text.
        """
        if stream:
            # Drain the streaming generator directly; no intermediate list.
            return "".join(self.complete_stream(messages))

        # Non-streaming path goes through the base class's retry wrapper.
        return self._call_with_retry(self._complete_internal, messages)

    def _complete_internal(self, messages: List[Dict[str, str]]) -> str:
        """
        Internal non-streaming completion.

        Args:
            messages: Message list.

        Returns:
            The completion text.

        Raises:
            Exception: On a non-200 response or any transport error.
        """
        start_time = time.time()

        try:
            response = requests.post(
                f"{self.host}/v1/chat/completions",
                json=self._build_payload(messages, stream=False),
                timeout=self.timeout,
            )

            if response.status_code != 200:
                raise Exception(f"API请求失败: {response.status_code} {response.text}")

            result_json = response.json()
            result = result_json["choices"][0]["message"]["content"]

            # Log token usage when the server reports it.
            if "usage" in result_json:
                prompt_tokens = result_json["usage"]["prompt_tokens"]
                completion_tokens = result_json["usage"]["completion_tokens"]
                logger.info(f"输入: {prompt_tokens} tokens, 输出: {completion_tokens} tokens")

            elapsed = time.time() - start_time
            logger.info(f"VLLM请求完成，耗时: {elapsed:.2f}秒，模型: {self.model}")

            return result
        except Exception as e:
            elapsed = time.time() - start_time
            logger.error(f"VLLM请求失败，耗时: {elapsed:.2f}秒，错误: {str(e)}")
            raise

    def complete_stream(self, messages: List[Dict[str, str]]) -> Generator[str, None, None]:
        """
        Get a streaming LLM completion.

        Yields content deltas parsed from the server's SSE
        (``data: {...}``) lines until the ``[DONE]`` sentinel.

        Args:
            messages: Message list.

        Returns:
            Generator of text chunks.

        Raises:
            Exception: On a non-200 response or any transport error.
        """
        start_time = time.time()
        chunks_count = 0

        try:
            response = requests.post(
                f"{self.host}/v1/chat/completions",
                json=self._build_payload(messages, stream=True),
                stream=True,
                timeout=self.timeout,
            )

            if response.status_code != 200:
                raise Exception(f"API请求失败: {response.status_code} {response.text}")

            for line in response.iter_lines():
                if not line:
                    continue
                if not line.startswith(b"data: "):
                    continue
                line = line[6:]  # strip the "data: " SSE prefix
                if line.strip() == b"[DONE]":
                    break
                try:
                    chunk = json.loads(line)
                    if "choices" in chunk and len(chunk["choices"]) > 0:
                        delta = chunk["choices"][0]["delta"].get("content", "")
                        if delta:
                            chunks_count += 1
                            yield delta
                except json.JSONDecodeError:
                    # Tolerate malformed keep-alive / partial lines.
                    pass

            elapsed = time.time() - start_time
            logger.info(f"VLLM流式请求完成，耗时: {elapsed:.2f}秒，块数: {chunks_count}，模型: {self.model}")
        except Exception as e:
            elapsed = time.time() - start_time
            logger.error(f"VLLM流式请求失败，耗时: {elapsed:.2f}秒，错误: {str(e)}")
            raise

    def calculate_tokens(self, response: Dict[str, Any]) -> tuple[int, int]:
        """
        Compute input and output token counts.

        Args:
            response: Response payload from the server.

        Returns:
            Tuple of (prompt tokens, completion tokens); falls back to the
            base class's estimate when the response carries no ``usage``.
        """
        if "usage" in response:
            usage = response["usage"]
            return usage.get("prompt_tokens", 0), usage.get("completion_tokens", 0)

        return super().calculate_tokens(response)
