from abc import ABC, abstractmethod
import inspect
from typing import List, Optional, Dict, Any
from utils.ApiUtils import ApiClient

class LLMProcessor(ABC):
    """
    Base class for LLM processing tasks.

    Provides infrastructure for calling an LLM with a prompt generated from the
    implementing class's docstring (used as a ``str.format`` template) and its
    public instance attributes (used as the template variables).
    """

    def __init__(self):
        """Initialize the LLM processor with an API client and an empty cache."""
        self.api_client = ApiClient()
        # Reserved for subclasses that want to memoize results; unused here.
        self._cache = {}

    def _get_class_docstring(self) -> str:
        """Return the (dedented) docstring of the implementing class, or ""."""
        return inspect.getdoc(self.__class__) or ""

    def _get_class_variables(self) -> Dict[str, Any]:
        """
        Return the public instance attributes used as prompt template variables.

        Attributes with a leading underscore and the API client itself are
        excluded.
        """
        return {
            key: value for key, value in self.__dict__.items()
            if not key.startswith('_') and key != 'api_client'
        }

    def get_prompt(self) -> str:
        """
        Generate the prompt for the LLM. Override this method in subclasses.

        The default implementation uses the class docstring as a template and
        formats it with the public instance attributes.

        Returns:
            str: The formatted prompt.

        Raises:
            ValueError: If the template references a variable that is not set
                on the instance.
        """
        template = self._get_class_docstring()
        variables = self._get_class_variables()
        try:
            return template.format(**variables)
        except KeyError as e:
            # Chain the KeyError so the missing key remains visible in tracebacks.
            raise ValueError(f"Missing required variable in prompt template: {e}") from e

    @abstractmethod
    def extract(self, content: str) -> tuple[bool, List[Any]]:
        """
        Parse the raw LLM response into a structured result.

        Args:
            content: The raw LLM response text to process.

        Returns:
            tuple[bool, List[Any]]: A success flag and the extracted items.
        """

    def run(self, system_message: Optional[str] = None,
            history_messages: Optional[List[Dict[str, str]]] = None) -> tuple[bool, List[Any]]:
        """
        Execute the LLM call with the generated prompt and extract the result.

        Args:
            system_message: Optional system message to control LLM behavior.
            history_messages: Optional conversation history as role/content dicts.

        Returns:
            tuple[bool, List[Any]]: The result of ``extract()`` applied to the
            raw LLM response.
        """
        prompt = self.get_prompt()
        raw = self.api_client.get_completion(
            prompt=prompt,
            # Fixed: default previously ended with a full-width period ("。").
            system_message=system_message or "You are a helpful assistant.",
            history_messages=history_messages
        )
        return self.extract(raw)
    
