from root_agent.llm import LLMInterface

class GMemoryLLMWrapper:
    """Adapter exposing an ``LLMInterface`` through the call conventions
    GMemory expects: ``generate_content``, ``select_model``, and a
    chat-style ``__call__`` that accepts a message list.
    """

    def __init__(self, llm_interface: "LLMInterface"):
        # String annotation keeps the class importable even when the
        # project import is unavailable (lazy/forward reference).
        self.llm_interface = llm_interface

    def generate_content(self, prompt: str, task_type: str = "general") -> dict:
        """Forward ``prompt`` to the wrapped interface.

        Returns a dict containing only the ``output_content`` field of the
        underlying response.
        """
        response_data = self.llm_interface.generate_content(prompt, task_type)
        return {"output_content": response_data["output_content"]}

    def select_model(self, task_type: str = "general") -> str:
        """Delegate model selection to the wrapped interface."""
        return self.llm_interface.select_model(task_type)

    def __call__(self, messages: list, temperature: float = 0.1, max_tokens: int = 512, stop_strs: list = None, num_comps: int = 1, task_type: str = "general", top_p: float = 1.0, top_k: int = 0) -> dict:
        """Chat-style entry point: flatten ``messages`` into one prompt and
        generate a completion.

        ``messages`` may contain dicts (``{'role': ..., 'content': ...}``)
        or objects exposing ``.role`` / ``.content`` attributes.  The
        sampling parameters (``temperature``, ``max_tokens``, ``stop_strs``,
        ``num_comps``, ``top_p``, ``top_k``) are accepted for caller
        compatibility but are not forwarded by this wrapper.
        """
        parts = []
        for msg in messages:
            # BUG FIX: the original used attribute access (msg.role) even
            # though the documented message format is a dict, raising
            # AttributeError for dict messages.  Support both shapes.
            if isinstance(msg, dict):
                role, content = msg["role"], msg["content"]
            else:
                role, content = msg.role, msg.content
            parts.append(f"{role}: {content}\n")
        formatted_prompt = "".join(parts)
        # Delegate so the response shape stays consistent with
        # generate_content.
        return self.generate_content(formatted_prompt, task_type)