#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LLM服务类，用于与MindCare OpenAI服务交互
"""

import asyncio
import json
import logging
import os
import socket
import time
import traceback
from typing import Any, Dict, List, Optional, Union
from urllib.parse import urlparse

from dotenv import load_dotenv
from openai import OpenAI

from config.settings import Settings

load_dotenv()
logger = logging.getLogger("AI-MindCare-System-LLM")
import inspect


class LLMService:
    """LLM service class handling interaction with the MindCare OpenAI-compatible API."""

    def __init__(self, settings: Optional[Settings] = None):
        """
        Initialize the LLM service.

        Args:
            settings: System configuration; a default ``Settings()`` is
                created when omitted.
        """
        if settings is None:
            settings = Settings()

        self.settings = settings
        self.llm_config = settings.llm_config

        # Build the client eagerly so configuration errors surface at startup
        # rather than on the first request.
        self.client = self._initialize_mindcare_client()

    def _initialize_mindcare_client(self) -> OpenAI:
        """
        Initialize the OpenAI client from MINDCARE_* environment variables.

        Returns:
            A configured OpenAI client instance.

        Raises:
            Exception: Any error raised while constructing the client is
                logged and re-raised.
        """
        try:
            api_key = os.getenv("MINDCARE_OPENAI_API_KEY")
            api_version = os.getenv("MINDCARE_OPENAI_API_VERSION")
            endpoint = os.getenv("MINDCARE_OPENAI_ENDPOINT")

            # Ensure the endpoint carries an http/https scheme; default to https.
            if endpoint and not endpoint.startswith(("http://", "https://")):
                logger.warning(
                    f"MINDCARE OpenAI endpoint URL '{endpoint}' 缺少协议前缀，自动添加'https://'"
                )
                endpoint = "https://" + endpoint
                self.llm_config["endpoint"] = endpoint

            # Log initialization details without exposing the full API key.
            logger.info(f"正在初始化Mindcare OpenAI客户端，API版本：{api_version}")
            logger.info(f"端点URL：{endpoint}")
            if api_key:
                masked_key = (api_key[:4] + "***" + api_key[-4:]
                              if len(api_key) > 8 else "***")
                logger.info(f"API密钥（部分掩码）：{masked_key}")
            else:
                logger.warning("未提供API密钥")

            client = OpenAI(
                api_key=api_key,
                base_url=endpoint,
            )
            logger.info("OpenAI客户端初始化成功")
            return client
        except Exception as e:
            logger.error(f"OpenAI客户端初始化失败: {str(e)}")
            logger.error(f"错误类型: {type(e).__name__}")
            logger.error(f"错误详情: {traceback.format_exc()}")
            raise

    def _check_network(self, endpoint: str) -> None:
        """
        Best-effort DNS/TCP reachability probe of *endpoint* for diagnostics.

        Logs the resolved IP and TCP connect result; never raises, so a
        failed probe cannot fail the actual API request.

        Args:
            endpoint: Endpoint URL to probe; ignored if empty.
        """
        if not endpoint:
            return
        try:
            parsed_url = urlparse(endpoint)
            # Use .hostname, not .netloc: .netloc keeps any ":port" suffix,
            # which would break gethostbyname/connect_ex below.
            hostname = parsed_url.hostname
            ip = socket.gethostbyname(hostname)
            logger.info(f"成功解析主机名 {hostname} 到 IP：{ip}")

            # Honor an explicit port in the URL; fall back to HTTPS default.
            port = parsed_url.port or 443
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                sock.settimeout(5)
                result = sock.connect_ex((hostname, port))
            if result == 0:
                logger.info(f"成功连接到 {hostname}:{port}")
            else:
                logger.warning(f"无法连接到 {hostname}:{port}，错误码：{result}")
        except Exception as network_error:
            logger.warning(f"网络检查失败: {str(network_error)}")

    def generate_response(self,
                          prompt: str,
                          system_message: str = "你是一个专业的AI心理助手，提供准确、专业的咨询建议。",
                          max_tokens: int = 800,
                          temperature: float = 0.7) -> str:
        """
        Generate an LLM chat completion synchronously.

        Args:
            prompt: User prompt text.
            system_message: System-role message steering the assistant.
            max_tokens: Maximum number of tokens to generate.
            temperature: Sampling temperature (creativity).

        Returns:
            The generated response text on success; on any failure, a Chinese
            apology string embedding the error message — this method
            deliberately never raises.
        """
        # Start the clock before anything else so the except handler always
        # has a valid baseline (previously it could report -1 via a fragile
        # 'start_time' in locals() check).
        start_time = time.time()
        try:
            deployment_name = os.getenv("MINDCARE_OPENAI_DEPLOYMENT_NAME")

            logger.info(
                f"正在调用LLM API，模型：{deployment_name}，最大令牌：{max_tokens}，温度：{temperature}"
            )
            logger.info(
                f"系统消息：{system_message[:50]}{'...' if len(system_message) > 50 else ''}"
            )
            logger.info(
                f"提示词（前50个字符）：{prompt[:50]}{'...' if len(prompt) > 50 else ''}"
            )

            # Diagnostic-only connectivity probe; cannot fail the request.
            self._check_network(self.llm_config.get("endpoint", ""))

            # Actual API call.
            response = self.client.chat.completions.create(
                model=deployment_name,
                messages=[{
                    "role": "system",
                    "content": system_message
                }, {
                    "role": "user",
                    "content": prompt
                }],
                max_tokens=max_tokens,
                temperature=temperature)

            elapsed_time = time.time() - start_time
            logger.info(f"LLM API调用成功，耗时：{elapsed_time:.2f}秒")
            # message.content may be None per the API schema; normalize to ""
            # so the slice below and callers always receive a str.
            content = response.choices[0].message.content or ""
            logger.info(
                f"响应内容（前50个字符）：{content[:50]}{'...' if len(content) > 50 else ''}"
            )
            return content
        except Exception as e:
            elapsed_time = time.time() - start_time
            logger.error(f"LLM响应生成失败: {str(e)}，耗时：{elapsed_time:.2f}秒")
            logger.error(f"错误类型: {type(e).__name__}")

            # Log richer details when the SDK attached an HTTP response.
            if hasattr(e, 'response'):
                try:
                    logger.error(
                        f"API错误响应: {e.response.text if hasattr(e.response, 'text') else e.response}"
                    )
                    logger.error(
                        f"API错误状态码: {e.response.status_code if hasattr(e.response, 'status_code') else 'Unknown'}"
                    )
                except Exception:
                    # Diagnostics only — never mask the original error.
                    pass

            logger.error(f"错误详情: {traceback.format_exc()}")

            # Extra hints for likely network problems ("Timeout" is covered
            # by the case-insensitive check).
            if "Connection" in str(e) or "timeout" in str(e).lower():
                logger.error("检测到网络连接问题，请检查您的网络连接和防火墙设置")
                if "proxy" in str(e).lower():
                    logger.error("可能与代理设置有关，请检查您的代理配置")

            return f"很抱歉，我无法处理您的请求: {str(e)}"

    async def generate_response_async(
            self,
            prompt: str,
            system_message: str = "你是一个专业的AI心理助手，提供准确、专业的咨询建议。",
            max_tokens: int = 800,
            temperature: float = 0.7) -> str:
        """
        Asynchronously generate an LLM response.

        Runs the blocking ``generate_response`` in the default executor so
        the event loop is not blocked by the HTTP call.

        Args:
            prompt: User prompt text.
            system_message: System-role message steering the assistant.
            max_tokens: Maximum number of tokens to generate.
            temperature: Sampling temperature (creativity).

        Returns:
            The generated response text (same contract as generate_response).
        """
        # get_running_loop() is the supported API inside a coroutine;
        # get_event_loop() here has been deprecated since Python 3.10.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(
            None, lambda: self.generate_response(prompt=prompt,
                                                 system_message=system_message,
                                                 max_tokens=max_tokens,
                                                 temperature=temperature))
