import json
import re
from loguru import logger

class TaskValueAssessor:
    """Estimates the value and cost of a user-requested task via an LLM call.

    The assessor shares the root agent's LLM client and records token/cost
    usage back onto the root agent's ``llm_usage_metrics`` dict.
    """

    def __init__(self, root_agent_instance):
        # The root agent owns both the shared LLM client and the mutable
        # usage-metrics dict that assess_task() updates after each call.
        self.root_agent = root_agent_instance
        self.llm = root_agent_instance.llm

    def assess_task(self, command: str) -> dict:
        """
        Assesses the potential value and cost of a task using an LLM.

        Args:
            command: The user's command.

        Returns:
            A dictionary containing the assessment, e.g.:
            {
                "value": "high" | "medium" | "low",
                "cost": "high" | "medium" | "low",
                "reasoning": "Brief explanation of the assessment."
            }
            If the LLM response cannot be parsed (malformed JSON or missing
            response keys), a default medium/medium assessment is returned
            instead of raising.
        """
        prompt = f"""
        As a senior project manager, you must assess the following user request to determine its value and cost.

        User Request: "{command}"

        Consider the following criteria:
        - **Value**: Does this task seem to contribute significantly to a larger goal? Is it a critical bug fix, a core feature development, or a simple query? High-value tasks are often complex and have a clear, important objective. Low-value tasks might be trivial, vague, or for testing purposes.
        - **Cost**: How many resources (time, LLM tokens, tool usage) will this task likely consume? Refactoring a large module is high-cost. Listing files is low-cost.

        Respond with a single, compact JSON object with three keys: "value", "cost", and "reasoning".
        - "value": Must be one of "high", "medium", or "low".
        - "cost": Must be one of "high", "medium", or "low".
        - "reasoning": A brief, one-sentence explanation for your assessment.

        Example 1:
        User Request: "Refactor the entire authentication module to use a new library."
        Response: {{"value": "high", "cost": "high", "reasoning": "Refactoring a core module is a complex but valuable task for system improvement."}}

        Example 2:
        User Request: "What is your name?"
        Response: {{"value": "low", "cost": "low", "reasoning": "This is a simple conversational query with no operational value."}}

        Example 3:
        User Request: "test write a file"
        Response: {{"value": "low", "cost": "low", "reasoning": "This appears to be a simple test of a single function."}}
        """
        # Pre-initialize so the except handler below can safely log it even
        # when the failure happens before (or inside) the LLM response read —
        # otherwise a KeyError on response_data would trigger a NameError here.
        response_text = ""
        try:
            response_data = self.llm.generate_content(prompt, task_type="assessment")
            response_text = response_data["output_content"]

            # Log LLM usage on the shared root-agent metrics.
            self.root_agent.llm_usage_metrics["total_input_tokens"] += response_data["input_tokens"]
            self.root_agent.llm_usage_metrics["total_output_tokens"] += response_data["output_tokens"]
            self.root_agent.llm_usage_metrics["total_cost"] += response_data["cost"]
            self.root_agent.llm_usage_metrics["task_llm_metrics"].append({
                "task_type": "assessment",
                "input_tokens": response_data["input_tokens"],
                "output_tokens": response_data["output_tokens"],
                "cost": response_data["cost"]
            })

            # Extract the JSON object from the response.
            # BUG FIX: the previous pattern r'\{{.*\}}' matched literal DOUBLE
            # braces ("{{...}}") — leftover f-string escaping — so a normal
            # JSON response ("{...}") never matched and every call fell
            # through to the fallback. Single escaped braces are correct here.
            json_match = re.search(r'\{.*\}', response_text, re.DOTALL)
            if json_match:
                json_string = json_match.group(0)
                assessment = json.loads(json_string)
                logger.info(f"Task Assessment for '{command}': Value={assessment.get('value')}, Cost={assessment.get('cost')}")
                return assessment
            else:
                raise json.JSONDecodeError("No JSON object found in the response.", response_text, 0)

        except (json.JSONDecodeError, KeyError) as e:
            logger.error(f"Failed to parse task assessment from LLM response: {e}. Response: {response_text}")
            # Fallback to a default assessment
            return {"value": "medium", "cost": "medium", "reasoning": "Could not reliably assess the task."}
