# -*- coding: utf-8 -*-
"""
LLM本地接口调用封装模块
提供与本地LLM服务交互的功能
"""

import json
import time
import requests
from typing import Dict, Any, List, Optional
from common.logger import get_logger
from common.config import config
from common.utils import safe_json_dumps, retry_on_exception

# Module-level logger shared by everything defined in this module.
logger = get_logger(__name__)


class LLMClient:
    """Client for a local, OpenAI-compatible LLM chat-completions service.

    Connection settings (URL, model, sampling parameters, timeout) are read
    from the ``llm`` section of the shared application config. All network
    failures are logged and surfaced as ``None`` / empty results rather than
    raised to callers.
    """

    def __init__(self) -> None:
        """Load settings from the ``llm`` config section and probe the service."""
        self.llm_config: Dict[str, Any] = config.get('llm', {})
        self.api_url: str = self.llm_config.get('api_url', 'http://localhost:8000/v1/chat/completions')
        self.model: str = self.llm_config.get('model', 'qwen2.5-7b-instruct')
        self.max_tokens: int = self.llm_config.get('max_tokens', 2048)
        self.temperature: float = self.llm_config.get('temperature', 0.1)
        self.timeout: int = self.llm_config.get('timeout', 30)

        # Probe once at construction time; a failure is logged but does not
        # prevent the client from being created.
        self._test_connection()

    def _models_url(self) -> str:
        """Return the service's ``/models`` endpoint derived from ``api_url``."""
        return self.api_url.replace('/chat/completions', '/models')

    def _test_connection(self) -> None:
        """Probe the LLM service's ``/models`` endpoint and log the outcome.

        Never raises: connection errors are caught and logged as errors.
        """
        try:
            response = requests.get(self._models_url(), timeout=5)
            if response.status_code == 200:
                logger.info(f"成功连接到LLM服务: {self.api_url}")
            else:
                logger.warning(f"LLM服务连接异常，状态码: {response.status_code}")
        except Exception as e:
            logger.error(f"LLM服务连接失败: {e}")

    @retry_on_exception(max_retries=3, delay=1.0)
    def chat_completion(self, messages: List[Dict[str, str]], **kwargs) -> Optional[Dict[str, Any]]:
        """Call the chat-completions endpoint once.

        Args:
            messages: Chat messages in OpenAI format
                (``[{'role': ..., 'content': ...}, ...]``).
            **kwargs: Optional per-call overrides: ``model``, ``max_tokens``,
                ``temperature``, ``top_p``, ``frequency_penalty``,
                ``presence_penalty``.

        Returns:
            The parsed JSON response dict on success, ``None`` on any failure.

        NOTE(review): every exception is caught here and converted to
        ``None``, so the ``retry_on_exception`` decorator never observes a
        failure and therefore never retries — confirm whether retries are
        intended and, if so, let request exceptions propagate instead.
        """
        try:
            payload = {
                'model': kwargs.get('model', self.model),
                'messages': messages,
                'max_tokens': kwargs.get('max_tokens', self.max_tokens),
                'temperature': kwargs.get('temperature', self.temperature),
                'stream': False
            }

            # Forward optional sampling parameters only when supplied.
            for opt in ('top_p', 'frequency_penalty', 'presence_penalty'):
                if opt in kwargs:
                    payload[opt] = kwargs[opt]

            response = requests.post(
                self.api_url,
                json=payload,
                timeout=self.timeout,
                headers={'Content-Type': 'application/json'}
            )

            if response.status_code == 200:
                result = response.json()
                logger.debug(f"LLM响应成功: {result.get('usage', {})}")
                return result

            logger.error(f"LLM请求失败，状态码: {response.status_code}, 响应: {response.text}")
            return None

        except requests.exceptions.Timeout:
            logger.error(f"LLM请求超时: {self.timeout}秒")
            return None
        except requests.exceptions.RequestException as e:
            logger.error(f"LLM请求异常: {e}")
            return None
        except Exception as e:
            logger.error(f"LLM调用失败: {e}")
            return None

    def analyze_log(self, log_message: str, prompt_template: Optional[str] = None) -> Optional[str]:
        """Analyze a single log message with the LLM.

        Args:
            log_message: Raw log text to analyze.
            prompt_template: Prompt template; falls back to the default
                template when omitted or empty.

        Returns:
            The model's answer text, or ``None`` if the request failed or
            the response contained no choices.
        """
        if not prompt_template:
            prompt_template = self._get_default_prompt()

        messages = [
            {
                'role': 'system',
                'content': '你是一个专业的日志分析专家，能够分析日志中的问题、异常和重要信息。'
            },
            {
                'role': 'user',
                'content': f"{prompt_template}\n\n日志内容：{log_message}"
            }
        ]

        result = self.chat_completion(messages)
        if result and result.get('choices'):
            return result['choices'][0]['message']['content']
        return None

    def analyze_logs_batch(self, logs: List[Dict[str, Any]], prompt_template: Optional[str] = None) -> List[Dict[str, Any]]:
        """Analyze a batch of logs, one LLM call per entry.

        Args:
            logs: Log records; each is expected to carry ``log_id`` and
                ``message`` keys.
            prompt_template: Prompt template; falls back to the default.

        Returns:
            One result dict per non-empty log message. Entries whose
            analysis raised carry ``analysis_result=None`` plus an
            ``error`` field instead of ``model_used``.
        """
        if not prompt_template:
            prompt_template = self._get_default_prompt()

        results: List[Dict[str, Any]] = []

        for log in logs:
            try:
                log_message = log.get('message', '')
                # Records with no message text are skipped entirely.
                if not log_message:
                    continue

                analysis_result = self.analyze_log(log_message, prompt_template)

                results.append({
                    'log_id': log.get('log_id'),
                    'original_message': log_message,
                    'analysis_result': analysis_result,
                    'analysis_timestamp': time.time(),
                    'model_used': self.model
                })

                # Throttle to avoid overwhelming the local service.
                time.sleep(0.1)

            except Exception as e:
                logger.error(f"分析日志失败: {e}")
                results.append({
                    'log_id': log.get('log_id'),
                    'original_message': log.get('message', ''),
                    'analysis_result': None,
                    'error': str(e),
                    'analysis_timestamp': time.time()
                })

        return results

    def _get_default_prompt(self) -> str:
        """Return the default (Chinese) log-analysis prompt template."""
        return """
请分析以下日志内容，并提供以下信息：

1. 日志级别和严重程度
2. 可能的问题或异常
3. 错误原因分析
4. 建议的解决方案
5. 是否需要立即关注

请用中文回答，格式要清晰易读。
"""

    def get_models(self) -> List[str]:
        """Fetch the model ids available on the service.

        Returns:
            Model id strings, or an empty list on any failure.
        """
        try:
            response = requests.get(self._models_url(), timeout=5)

            if response.status_code == 200:
                data = response.json()
                return [model['id'] for model in data.get('data', [])]

            logger.error(f"获取模型列表失败，状态码: {response.status_code}")
            return []

        except Exception as e:
            logger.error(f"获取模型列表异常: {e}")
            return []

    def test_connection(self) -> bool:
        """Return True iff the service's ``/models`` endpoint answers 200."""
        try:
            response = requests.get(self._models_url(), timeout=5)
            return response.status_code == 200
        except Exception:
            return False

    def get_usage_stats(self) -> Dict[str, Any]:
        """Return the current client configuration plus live connection status."""
        return {
            'api_url': self.api_url,
            'model': self.model,
            'max_tokens': self.max_tokens,
            'temperature': self.temperature,
            'timeout': self.timeout,
            'connection_status': self.test_connection()
        }

    def update_config(self, **kwargs) -> None:
        """Update client settings in place.

        Args:
            **kwargs: Any of ``api_url``, ``model``, ``max_tokens``,
                ``temperature``, ``timeout``. Unknown keys are ignored
                (but still appear in the logged update).
        """
        for key in ('api_url', 'model', 'max_tokens', 'temperature', 'timeout'):
            if key in kwargs:
                setattr(self, key, kwargs[key])

        logger.info(f"LLM配置已更新: {kwargs}")