import os
import json
import autogen
from dotenv import load_dotenv
from typing import Dict, Any

from src.agent.planning_rag import PlanningRAG

# Load environment variables from .env file
load_dotenv()

def get_llm_config() -> Dict[str, Any]:
    """
    Build the autogen LLM configuration from environment variables.

    Reads:
        OPENAI_API_KEY  (required)
        OPENAI_API_BASE (required)
        MODEL_NAME      (optional, defaults to "deepseek-chat")

    Returns:
        A dict with a single-entry ``config_list`` plus a fixed low
        ``temperature`` (0.1) for more deterministic planning output.

    Raises:
        ValueError: if a required variable is unset or empty; the message
            names exactly the variable(s) that are missing.
    """
    api_key = os.getenv("OPENAI_API_KEY")
    api_base = os.getenv("OPENAI_API_BASE")
    model_name = os.getenv("MODEL_NAME", "deepseek-chat")

    # Report precisely which variables are missing instead of a generic
    # "set both" message, so misconfiguration is diagnosable at a glance.
    missing = [
        name
        for name, value in (("OPENAI_API_KEY", api_key), ("OPENAI_API_BASE", api_base))
        if not value
    ]
    if missing:
        raise ValueError(f"Please set {' and '.join(missing)} in your .env file.")

    return {
        "config_list": [{
            'model': model_name,
            'api_key': api_key,
            'base_url': api_base,
        }],
        "temperature": 0.1,
    }


class PlanningAgent:
    """
    An agent that uses a multi-agent team to generate a detailed project plan
    from a user requirement.

    The team is run as an autogen group chat with four assistants:
      * Requirement_Analyst -- breaks the raw requirement into features.
      * Planner             -- turns the features into high-level tasks.
      * Technical_Expert    -- consults the RAG knowledge base on demand.
      * JSON_Formatter      -- serializes the final plan as a strict JSON object.
    A non-interactive UserProxyAgent drives the chat and terminates it when
    the formatter emits the 'FINAL_JSON_OUTPUT' sentinel.
    """
    def __init__(self):
        # RAG backend backing the Technical_Expert's `retrieve_info` tool.
        self.rag = PlanningRAG()
        self.llm_config = get_llm_config()

        self.analyst_agent = autogen.AssistantAgent(
            name="Requirement_Analyst",
            system_message="You are a requirement analyst. Your job is to take a user's request, clarify it, and break it down into a list of key features and components. Focus on 'what' needs to be built, not 'how'. Pass the structured features to the Planner.",
            llm_config=self.llm_config,
        )

        self.planner_agent = autogen.AssistantAgent(
            name="Planner",
            system_message="You are a senior project planner. You receive a list of features from the Analyst. Your task is to create a high-level project plan. Break down the features into main tasks. For each task, define a title and a clear description. You can ask the Technical_Expert for advice on implementation details.",
            llm_config=self.llm_config,
        )

        self.expert_agent = autogen.AssistantAgent(
            name="Technical_Expert",
            system_message="You are a technical expert. You have access to a knowledge base to answer questions about libraries, implementation details, and testing strategies. Use your `retrieve_info` tool for this. Respond with concise, practical advice.",
            llm_config=self.llm_config,
        )
        # NOTE(review): register_function wires up only the execution side of
        # the tool; whether the LLM is actually offered a `retrieve_info` tool
        # schema depends on the autogen version -- confirm the expert can call it.
        self.expert_agent.register_function(
            function_map={"retrieve_info": self.rag.retrieve}
        )

        # This agent's prompt is crucial for getting the correct JSON format.
        # It includes an example of the desired output structure.
        formatter_system_message = f"""
You are a JSON formatting agent. Your ONLY job is to convert the final project plan into a valid JSON object.
Your output MUST be a single JSON object and nothing else. Do not add any text, explanations, or code blocks like ```json.
The JSON must follow this exact structure:
{{
    "tasks": [
        {{
            "id": <int>,
            "title": "<str>",
            "description": "<str>",
            "details": "<str: detailed technical implementation steps>",
            "testStrategy": "<str>",
            "priority": "<'high'|'medium'|'low'>",
            "dependencies": [<int: list of task ids>],
            "status": "pending",
            "subtasks": []
        }}
    ]
}}
Ensure all fields are present for each task. The 'subtasks' field should be an empty list for now.
The 'id' for each task must be a unique integer, starting from 1.
Dependencies should be an array of integers referencing the 'id' of other tasks.
"""
        self.formatter_agent = autogen.AssistantAgent(
            name="JSON_Formatter",
            system_message=formatter_system_message.strip(),
            llm_config=self.llm_config,
        )

        self.user_proxy = autogen.UserProxyAgent(
            name="User_Proxy",
            human_input_mode="NEVER",
            max_consecutive_auto_reply=10,
            # `content` can be present but None (e.g. pure tool-call messages),
            # and `"..." in None` raises TypeError -- coalesce to "" first.
            is_termination_msg=lambda x: "FINAL_JSON_OUTPUT" in (x.get("content") or ""),
            code_execution_config=False,
        )

    def generate_plan(self, requirement: str) -> Dict[str, Any]:
        """
        Generates a project plan in tasks.json format.

        Args:
            requirement (str): The user's project requirement.

        Returns:
            A dictionary representing the tasks.json structure, or a dict
            with an "error" key if no valid JSON plan could be produced.
        """
        groupchat = autogen.GroupChat(
            agents=[self.user_proxy, self.analyst_agent, self.planner_agent, self.expert_agent, self.formatter_agent],
            messages=[],
            max_round=15
        )
        manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=self.llm_config)

        # The initial message that kicks off the process.
        # It asks the agents to collaborate and explicitly states the final step.
        initial_prompt = f"""
        Please generate a complete project plan for the following requirement: '{requirement}'.

        Follow these steps:
        1.  The Requirement_Analyst will break down the requirement into key features.
        2.  The Planner will create high-level tasks based on these features, consulting the Technical_Expert for implementation and testing ideas for the 'details' and 'testStrategy' fields.
        3.  The JSON_Formatter will take the final plan and format it.
        4.  Finally, after the JSON is complete, the JSON_Formatter should output the text 'FINAL_JSON_OUTPUT' on a new line.
        """

        self.user_proxy.initiate_chat(
            manager,
            message=initial_prompt,
        )

        return self._extract_plan_json(groupchat.messages)

    @staticmethod
    def _extract_plan_json(messages) -> Dict[str, Any]:
        """
        Extract the formatter's JSON object from a group-chat transcript.

        Scans the messages newest-first for a JSON_Formatter message that
        contains a JSON object, slices from the first '{' to the last '}',
        and parses it. On a decode failure, returns an error dict carrying
        the raw content for debugging.
        """
        for msg in reversed(messages):
            # `name` may be absent and `content` may be None on some
            # autogen message shapes -- guard both before inspecting.
            content = msg.get("content") or ""
            if msg.get("name") == "JSON_Formatter" and "{" in content:
                start = content.find('{')
                end = content.rfind('}')
                if end <= start:
                    # Opening brace with no matching close -- keep looking.
                    continue
                json_str = content[start:end + 1]
                try:
                    return json.loads(json_str)
                except json.JSONDecodeError as e:
                    print(f"Error decoding JSON from formatter agent: {e}")
                    print(f"Content was:\n{content}")
                    return {"error": "Failed to generate valid JSON plan.", "content": content}

        return {"error": "Failed to generate plan."}


if __name__ == '__main__':
    # Fail fast with an explicit check: `assert` would be silently stripped
    # when Python runs with -O, letting the agent start misconfigured.
    if not os.path.exists(".env"):
        raise SystemExit("Please create a .env file with your OpenAI API key and base URL.")
    print("Initializing Planning Agent...")
    agent = PlanningAgent()

    # Example requirement
    user_requirement = "Build a command-line tool using Python that converts a given PDF file into a Markdown file."
    print(f"\nGenerating plan for: '{user_requirement}'")

    # This can take a while depending on the model's response time
    plan = agent.generate_plan(user_requirement)

    print("\n--- Generated Plan (tasks.json format) ---")
    print(json.dumps(plan, indent=4, ensure_ascii=False))