from typing import List, Dict, Optional, Any
import json
from pydantic import BaseModel
from common.log import log_adapter, log_adapter_chat_io
import re
from .llms import get_llm

class Messages(BaseModel):
    """Schema for one chat message; used by Adapter.message_to_list to
    validate items of a message list before sending them to the model."""
    # Speaker of the message — presumably "user" / "system" / "assistant",
    # as those are the roles Adapter builds; pydantic does not restrict it here.
    role: str
    # The message text.
    content: str

class Adapter:
    """Unified adapter for calling different LLM services.

    Provides sync/async chat, JSON extraction + pydantic validation of
    model output, and a retry loop that feeds parse errors back to the
    model so it can repair its own JSON.
    """

    def __init__(self):
        """Initialize the adapter (stateless; nothing to set up)."""

    def get_llm(self, llm_name_agreement="openai", model: str = None, api_key: str = None, base_url: str = None):
        """Build an LLM instance via the module-level factory.

        Args:
            llm_name_agreement: provider/protocol name (default "openai")
            model: model identifier, forwarded to the factory
            api_key: provider credential, forwarded to the factory
            base_url: endpoint override, forwarded to the factory

        Returns:
            BaseLLM: a concrete LLM instance
        """
        return get_llm(llm_name_agreement=llm_name_agreement, model=model, api_key=api_key, base_url=base_url)

    def chat(self, llm: Any, messages: Any, **kwargs) -> Optional[str]:
        """Synchronously call the model.

        Args:
            llm: LLM instance exposing ``predict`` (the original annotated
                this as ``str``, but an instance is what is actually used)
            messages: a bare string, or a list of {"role", "content"} dicts

        Returns:
            str: the model's reply text, or None on any error
        """
        try:
            normalized = self.message_to_list(role="user", message=messages)
            return llm.predict(messages=normalized, **kwargs)
        except Exception as e:
            log_adapter.error(f"调用模型出错: {str(e)}")
            return None

    async def achat(self, llm: Any, messages: Any, **kwargs) -> Optional[str]:
        """Asynchronously call the model.

        Args:
            llm: LLM instance exposing ``apredict``
            messages: a bare string, or a list of {"role", "content"} dicts

        Returns:
            str: the model's reply text, or None on any error
        """
        try:
            normalized = self.message_to_list(role="user", message=messages)
            return await llm.apredict(messages=normalized, **kwargs)
        except Exception as e:
            log_adapter.error(f"调用模型出错: {str(e)}")
            return None

    def response_match_json(self, response: str, output_basemodel: Any) -> Dict:
        """Extract a JSON payload from a model response and validate it.

        Tries a fenced ```json ...``` block first, then falls back to the
        outermost {...} span.

        Args:
            response: raw model output, possibly wrapping JSON in markdown
            output_basemodel: pydantic BaseModel subclass used to validate

        Returns:
            dict: the validated payload (``model_dump()`` of the model)

        Raises:
            ValueError: on missing JSON, parse failure, or validation failure
        """
        try:
            fenced = re.search(r'```json\s*([\s\S]*?)\s*```', response)
            if fenced:
                json_str = fenced.group(1).strip()
            else:
                braced = re.search(r'\{[\s\S]*\}', response)
                if braced is None:
                    # Re-wrapped below by the enclosing except, matching the
                    # original control flow.
                    raise ValueError("未找到有效的JSON格式")
                json_str = braced.group(0).strip()

            # Strip raw control whitespace. Literal newlines/tabs inside JSON
            # strings are invalid JSON anyway, so this only aids parsing of
            # sloppily formatted model output.
            json_str = json_str.replace('\n', '').replace('\r', '').replace('\t', '').strip()

            json_data = json.loads(json_str)
            return output_basemodel.model_validate(json_data).model_dump()

        except Exception as e:
            log_adapter.error(f"JSON解析失败: {str(e)}")
            raise ValueError(f"JSON解析失败: {str(e)}")

    @log_adapter_chat_io
    def chat_as_json(self, llm: Any, messages: Any, output_basemodel: Any, **kwargs) -> Optional[Dict]:
        """Chat synchronously and return a validated JSON result.

        On a parse/validation failure the error text is fed back to the
        model via ``retry_chat_as_json``.

        Args:
            llm: LLM instance exposing ``predict``
            messages: a bare string, or a list of {"role", "content"} dicts
            output_basemodel: pydantic BaseModel subclass for validation

        Returns:
            dict: validated JSON payload, or None on error
        """
        try:
            normalized = self.message_to_list(role="user", message=messages)
            response = llm.predict(messages=normalized, **kwargs)
            try:
                return self.response_match_json(response, output_basemodel)
            except Exception as e:
                # BUG FIX: was passed as ``output_model=`` which is not a
                # parameter of retry_chat_as_json — every retry raised
                # TypeError and the method silently returned None.
                return self.retry_chat_as_json(
                    llm=llm,
                    content_messages=normalized,
                    system_messages=response,
                    fix_messages=str(e),
                    output_basemodel=output_basemodel,
                    **kwargs,
                )
        except Exception as e:
            log_adapter.error(f"调用模型出错: {str(e)}")
            return None

    @log_adapter_chat_io
    async def achat_as_json(self, llm: Any, messages: Any, output_basemodel: Any, **kwargs) -> Optional[Dict]:
        """Chat asynchronously and return a validated JSON result.

        Args:
            llm: LLM instance exposing ``apredict``
            messages: a bare string, or a list of {"role", "content"} dicts
            output_basemodel: pydantic BaseModel subclass for validation

        Returns:
            dict: validated JSON payload, or None on error
        """
        try:
            normalized = self.message_to_list(role="user", message=messages)
            response = await llm.apredict(messages=normalized, **kwargs)
            try:
                return self.response_match_json(response, output_basemodel)
            except Exception as e:
                # BUG FIX: keyword was ``output_model=`` (TypeError). Also use
                # the async retry so the event loop is not blocked by a
                # synchronous ``predict`` call.
                return await self.aretry_chat_as_json(
                    llm=llm,
                    content_messages=normalized,
                    system_messages=response,
                    fix_messages=str(e),
                    output_basemodel=output_basemodel,
                    **kwargs,
                )
        except Exception as e:
            log_adapter.error(f"调用模型出错: {str(e)}")
            return None

    def retry_chat_as_json(self, llm: Any, content_messages: Any, system_messages: str, fix_messages: str, output_basemodel: Any, max_retry: int = 3, **kwargs) -> Optional[Dict]:
        """Retry a JSON chat, feeding the parse error back to the model.

        Args:
            llm: LLM instance exposing ``predict``
            content_messages: the original user messages
            system_messages: the model's previous (unparseable) reply
            fix_messages: the parse/validation error text to show the model
            output_basemodel: pydantic BaseModel subclass for validation
            max_retry: maximum number of repair attempts

        Returns:
            dict: validated JSON payload, or None if all retries fail
        """
        for _ in range(max_retry):
            retry_messages = self._build_retry_messages(content_messages, system_messages, fix_messages)
            response = llm.predict(messages=retry_messages, **kwargs)
            try:
                return self.response_match_json(response, output_basemodel)
            except Exception as e:
                # Show the model the latest error on the next attempt.
                fix_messages = str(e)
        log_adapter.error(f"JSON解析失败: 重试{max_retry}次后仍未获得有效JSON")
        return None

    async def aretry_chat_as_json(self, llm: Any, content_messages: Any, system_messages: str, fix_messages: str, output_basemodel: Any, max_retry: int = 3, **kwargs) -> Optional[Dict]:
        """Async counterpart of ``retry_chat_as_json`` (uses ``apredict``)."""
        for _ in range(max_retry):
            retry_messages = self._build_retry_messages(content_messages, system_messages, fix_messages)
            response = await llm.apredict(messages=retry_messages, **kwargs)
            try:
                return self.response_match_json(response, output_basemodel)
            except Exception as e:
                fix_messages = str(e)
        log_adapter.error(f"JSON解析失败: 重试{max_retry}次后仍未获得有效JSON")
        return None

    def _build_retry_messages(self, content_messages: Any, system_messages: str, fix_messages: str) -> List[Dict[str, str]]:
        """Assemble the retry prompt: original content, prior reply, error."""
        return (
            self.message_to_list(role="user", message=content_messages)
            + self.message_to_list(role="system", message=system_messages)
            + self.message_to_list(role="user", message=fix_messages)
        )

    def message_to_list(self, role: str = "user", message: Any = None) -> List[Dict[str, str]]:
        """Normalize a message into a list of role/content dicts.

        Args:
            role: role assigned when *message* is a bare string
            message: a string, or a list of dicts validated against Messages

        Returns:
            list[dict]: [{"role": ..., "content": ...}, ...]

        Raises:
            ValueError: if list items fail validation, or the type is neither
                str nor list
        """
        if isinstance(message, str):
            return [{"role": role, "content": message}]
        if isinstance(message, list):
            try:
                return [Messages.model_validate(msg).model_dump() for msg in message]
            except Exception as e:
                raise ValueError(f"消息格式验证失败: {e}")
        raise ValueError(f"Invalid message type: {type(message)}")
