import importlib.resources as pkg_resources
import json
from typing import Optional
import os
from jinja2 import Environment
from json_repair import repair_json
from research_agent.core.config import Config
from research_agent.core.general_llm import LLM
from pyaml_env import parse_config
from pathlib import Path
import json_repair



class TopicJudger:
    """Judge the type of a research topic using an LLM.

    Loads the LLM configuration and the ``judge_topic_type`` Jinja prompt
    template at construction time, then renders system/user chat prompts
    and asks the model to classify a given topic.

    Attributes:
        llm: An instance of the LLM class used for generating completions.
        language: Output language rendered into the user prompt
            (empty string by default; set via ``set_language``).
        judge_topic_type_prompt_template: Jinja template used to render
            both the system and user prompts.
    """

    def __init__(self):
        """Initialize the TopicJudger.

        Reads the YAML config pointed to by ``Config.YAML_CONFIG``,
        builds the LLM client, and loads the prompt template from the
        package-local ``prompts/judge_topic_type.jinja`` file.

        Raises:
            RuntimeError: If the prompt template file cannot be read.
        """
        absolute_path = os.path.abspath(Config.YAML_CONFIG)
        configs = parse_config(absolute_path)
        self.language = ""
        self.llm = LLM(config=configs[Config.DEFAULT_MODEL])
        try:
            base_path = Path(__file__).parent / "prompts"
            judge_topic_type_prompt_file = base_path / "judge_topic_type.jinja"
            with open(judge_topic_type_prompt_file, "r", encoding="utf-8") as f:
                self.judge_topic_type_prompt_template = Environment().from_string(f.read())
        except (FileNotFoundError, IOError) as e:
            # Chain the original exception so the root cause is preserved.
            raise RuntimeError(f"Failed to load prompt template: {str(e)}") from e

    def set_language(self, language: str) -> None:
        """Set the output language rendered into the user prompt."""
        self.language = language

    def get_language(self) -> str:
        """Return the currently configured output language."""
        return self.language

    def _prepare_prompts(self, topic: str) -> list:
        """Render the system and user messages for the judge prompt.

        Args:
            topic (str): The research topic to classify.

        Returns:
            list: Chat messages in the OpenAI format — a system message
            followed by a user message carrying the topic and language.
        """
        system_prompt = self.judge_topic_type_prompt_template.render(role="system")
        user_prompt = self.judge_topic_type_prompt_template.render(
            role="user",
            topic=topic,
            language=self.language,
        )
        return [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

    async def judge_topic_type(self, topic) -> dict:
        """Classify the topic type by querying the LLM.

        Args:
            topic (str): The topic of the research survey.

        Returns:
            dict: Parsed JSON from the model response containing the
            topic type and related info; minor JSON formatting errors
            in the response are repaired by ``json_repair``.

        Raises:
            RuntimeError: If the LLM completion fails.
        """
        prompt_messages = self._prepare_prompts(topic=topic)
        response = await self.llm.completion(prompt_messages)
        # json_repair tolerates slightly malformed JSON emitted by the model.
        return json_repair.loads(response)
