# -*- coding: utf-8 -*-
# Author   : ZhangQing
# Time     : 2025-08-15
# File     : llm_service.py
# Project  : codebuddy_craft
# Desc     : LLM服务

from typing import Dict, List, Optional, Any
import json
import asyncio
import httpx
from datetime import datetime

from backend.utils.logger import get_logger

# Module-level logger named after this module's import path.
logger = get_logger(__name__)
from backend.core.settings import get_settings

# Application settings object; supplies the LLM_* configuration values
# read by LLMService.__init__ below.
settings = get_settings()

class LLMService:
    """
    Service wrapper around an OpenAI-compatible LLM HTTP API.

    Exposes plain text completion, chat completion, and a code-completion
    convenience method. All requests share a common retry-on-failure path
    (``_post_with_retries``); transport and API errors never raise — they
    are reported as ``{"error": True, ...}`` dictionaries.
    """

    def __init__(self):
        """
        Load connection and retry configuration from application settings.
        """
        self.api_key = settings.LLM_API_KEY
        # Base URL for the API, e.g. "https://host/v1" — endpoint paths
        # below are appended directly, so no trailing slash is expected.
        self.api_base = settings.LLM_API_BASE
        self.model = settings.LLM_MODEL
        self.timeout = settings.LLM_TIMEOUT          # seconds, passed to httpx
        self.max_retries = settings.LLM_MAX_RETRIES  # total attempts, not extra retries
        self.retry_delay = settings.LLM_RETRY_DELAY  # seconds to sleep between attempts

    def _auth_headers(self) -> Dict[str, str]:
        """Build the JSON content-type + bearer-token headers for every request."""
        return {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}"
        }

    async def _post_with_retries(
        self,
        endpoint: str,
        payload: Dict[str, Any],
        stream: bool
    ) -> Any:
        """
        POST *payload* to ``self.api_base + endpoint``, retrying on failure.

        Args:
            endpoint: API path starting with "/", e.g. "/completions".
            payload: JSON-serializable request body.
            stream: If True, return the raw httpx response object instead of
                parsing it.

        Returns:
            The parsed JSON body on HTTP 200, the raw response when *stream*
            is True, or an ``{"error": True, "message": ..., "details": ...}``
            dict once all ``self.max_retries`` attempts are exhausted.
        """
        for attempt in range(self.max_retries):
            try:
                async with httpx.AsyncClient(timeout=self.timeout) as client:
                    response = await client.post(
                        f"{self.api_base}{endpoint}",
                        headers=self._auth_headers(),
                        json=payload
                    )

                    if stream:
                        # NOTE(review): the AsyncClient is closed when this
                        # context exits, and client.post() has already read the
                        # full body — so nothing actually streams here. True
                        # incremental streaming needs client.stream() with a
                        # caller-managed client lifetime; preserved as-is to
                        # keep the existing contract. TODO confirm with callers.
                        return response  # 返回响应对象以便流式处理

                    # May raise on non-JSON error bodies (e.g. an HTML 502
                    # page); that lands in the except branch and is retried.
                    response_data = response.json()
                    if response.status_code == 200:
                        return response_data

                    logger.error(f"LLM API error: {response.status_code} - {response_data}")
                    if attempt < self.max_retries - 1:
                        await asyncio.sleep(self.retry_delay)
                    else:
                        return {
                            "error": True,
                            "message": f"API error: {response.status_code}",
                            "details": response_data
                        }
            except Exception as e:
                # Covers connection failures, timeouts, and JSON decode errors.
                logger.error(f"LLM request error: {str(e)}")
                if attempt < self.max_retries - 1:
                    await asyncio.sleep(self.retry_delay)
                else:
                    return {
                        "error": True,
                        "message": f"Request error: {str(e)}",
                        "details": None
                    }
        # Reached only when max_retries <= 0. The original implementation
        # implicitly returned None here, breaking the Dict return contract;
        # report it as an explicit error instead.
        return {
            "error": True,
            "message": "No request attempts were made (max_retries <= 0)",
            "details": None
        }

    async def generate_completion(
        self,
        prompt: str,
        max_tokens: int = 1000,
        temperature: float = 0.7,
        stop: Optional[List[str]] = None,
        stream: bool = False
    ) -> Dict[str, Any]:
        """
        Generate a text completion for *prompt*.

        Args:
            prompt: The text to complete.
            max_tokens: Upper bound on generated tokens.
            temperature: Sampling temperature (higher = more random).
            stop: Optional stop sequences; omitted from the payload when
                empty/None.
            stream: If True, the raw httpx response is returned.

        Returns:
            The API's JSON response, or an ``{"error": True, ...}`` dict on
            failure.
        """
        payload: Dict[str, Any] = {
            "model": self.model,
            "prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "stream": stream
        }
        if stop:
            payload["stop"] = stop
        return await self._post_with_retries("/completions", payload, stream)

    async def generate_chat_completion(
        self,
        messages: List[Dict[str, str]],
        max_tokens: int = 1000,
        temperature: float = 0.7,
        stream: bool = False
    ) -> Dict[str, Any]:
        """
        Generate a chat completion for a message history.

        Args:
            messages: Chat messages, each a dict (presumably with "role" and
                "content" keys per the OpenAI chat format — TODO confirm
                against callers).
            max_tokens: Upper bound on generated tokens.
            temperature: Sampling temperature.
            stream: If True, the raw httpx response is returned.

        Returns:
            The API's JSON response, or an ``{"error": True, ...}`` dict on
            failure.
        """
        payload: Dict[str, Any] = {
            "model": self.model,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "stream": stream
        }
        return await self._post_with_retries("/chat/completions", payload, stream)

    async def process_code_completion(
        self,
        code_context: str,
        language: str,
        max_tokens: int = 500,
        temperature: float = 0.3
    ) -> Dict[str, Any]:
        """
        Generate a code completion for *code_context*.

        Wraps :meth:`generate_completion` with a fixed prompt template and
        defaults tuned for code (shorter output, lower temperature).

        Args:
            code_context: Source code preceding the insertion point.
            language: Programming language name embedded in the prompt.
            max_tokens: Upper bound on generated tokens.
            temperature: Sampling temperature.

        Returns:
            The API's JSON response, or an ``{"error": True, ...}`` dict on
            failure.
        """
        prompt = f"Language: {language}\nContext:\n{code_context}\nCompletion:"
        return await self.generate_completion(
            prompt=prompt,
            max_tokens=max_tokens,
            temperature=temperature
        )