"""
AI模型集成模块
使用策略模式和工厂模式设计
"""
import asyncio
import json
import logging
import os
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional

from dotenv import load_dotenv
from openai import OpenAI

from config import (
    DEFAULT_MODEL,
    DASHSCOPE_BASE_URL,
    SYSTEM_PROMPT_CODE_QUALITY,
    SYSTEM_PROMPT_REFACTORING,
    SYSTEM_PROMPT_CODE_SMELLS,
    SYSTEM_PROMPT_TEST_GENERATION,
)
from utils import parse_json_response

load_dotenv()

logger = logging.getLogger(__name__)


class LLMClient(ABC):
    """LLM客户端抽象基类（适配器模式）"""
    
    @abstractmethod
    async def generate(self, messages: List[Dict[str, str]], model: str) -> str:
        """生成响应"""
        pass


class DashScopeClient(LLMClient):
    """DashScope客户端实现"""
    
    def __init__(self):
        self.client = OpenAI(
            api_key=os.getenv("DASHSCOPE_API_KEY", default="sk-xxx"),
            base_url=DASHSCOPE_BASE_URL,
        )
    
    async def generate(self, messages: List[Dict[str, str]], model: str = DEFAULT_MODEL) -> str:
        """
        调用AI模型生成响应
        
        Args:
            messages: 消息列表
            model: 模型名称
        
        Returns:
            AI生成的文本响应
        """
        try:
            completion = self.client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=0.3,  # 降低温度以获得更稳定的输出
                extra_body={"enable_thinking": False},
            )
            result = completion.choices[0].message.content
            logger.info(f"AI响应生成成功，模型: {model}")
            return result
        except Exception as e:
            logger.error(f"AI调用失败: {str(e)}")
            raise


class LLMClientFactory:
    """LLM客户端工厂（工厂模式）"""
    
    @staticmethod
    def create_client(client_type: str = "dashscope") -> LLMClient:
        """
        创建LLM客户端
        
        Args:
            client_type: 客户端类型
        
        Returns:
            LLM客户端实例
        """
        if client_type == "dashscope":
            return DashScopeClient()
        else:
            raise ValueError(f"不支持的客户端类型: {client_type}")


class LLMService:
    """LLM服务类（封装AI调用逻辑）"""
    
    def __init__(self, client: Optional[LLMClient] = None):
        self.client = client or LLMClientFactory.create_client()
    
    async def analyze_code_quality(
        self,
        code: str,
        language: str,
        context: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        分析代码质量
        
        Args:
            code: 代码内容
            language: 编程语言
            context: 代码上下文
        
        Returns:
            质量分析结果字典
        """
        user_content = f"编程语言: {language}\n"
        if context:
            user_content += f"代码上下文: {context}\n"
        user_content += f"\n代码内容:\n```{language}\n{code}\n```"
        
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT_CODE_QUALITY},
            {"role": "user", "content": user_content}
        ]
        
        response = await self.client.generate(messages)
        return parse_json_response(response)
    
    async def suggest_refactoring(
        self,
        code: str,
        language: str,
        refactoring_type: str = "general"
    ) -> Dict[str, Any]:
        """
        提供重构建议
        
        Args:
            code: 代码内容
            language: 编程语言
            refactoring_type: 重构类型
        
        Returns:
            重构建议字典
        """
        user_content = f"编程语言: {language}\n"
        user_content += f"重构类型: {refactoring_type}\n"
        user_content += f"\n代码内容:\n```{language}\n{code}\n```"
        
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT_REFACTORING},
            {"role": "user", "content": user_content}
        ]
        
        response = await self.client.generate(messages)
        return parse_json_response(response)
    
    async def detect_code_smells(
        self,
        code: str,
        language: str
    ) -> Dict[str, Any]:
        """
        检测代码坏味道
        
        Args:
            code: 代码内容
            language: 编程语言
        
        Returns:
            坏味道检测结果字典
        """
        user_content = f"编程语言: {language}\n"
        user_content += f"\n代码内容:\n```{language}\n{code}\n```"
        
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT_CODE_SMELLS},
            {"role": "user", "content": user_content}
        ]
        
        response = await self.client.generate(messages)
        return parse_json_response(response)
    
    async def generate_test_cases(
        self,
        code: str,
        language: str,
        test_framework: str = "default"
    ) -> Dict[str, Any]:
        """
        生成测试用例建议
        
        Args:
            code: 代码内容
            language: 编程语言
            test_framework: 测试框架
        
        Returns:
            测试用例建议字典
        """
        user_content = f"编程语言: {language}\n"
        user_content += f"测试框架: {test_framework}\n"
        user_content += f"\n代码内容:\n```{language}\n{code}\n```"
        
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT_TEST_GENERATION},
            {"role": "user", "content": user_content}
        ]
        
        response = await self.client.generate(messages)
        return parse_json_response(response)

