import json
import os
import re
import threading
import xml.etree.ElementTree as ET
from loguru import logger
import uuid

from memory_systems.GMemory.mas.memory.mas_memory.GMemory import GMemory
from memory_systems.GMemory.mas.memory.common import MASMessage, StateChain
from memory_systems.GMemory.mas.utils import EmbeddingFunc, get_model_type
from memory_systems.vector_db import ZillizVectorDB as VectorDB
from memory_systems.experience_metadata_db import ExperienceMetadataDB

class MemoryManager:
    """Long-term memory facade for the root agent.

    Persists finished task executions to two independently-toggled backends
    and retrieves them for future planning:

    - GMemory (``root_agent.use_gmemory``): stores the full trajectory of a
      task as a ``MASMessage`` carrying a ``StateChain`` of
      action/observation/reward steps plus the raw plan/results in
      ``extra_fields``.
    - Vector memory (``root_agent.use_vector_memory``): stores an
      LLM-generated XML "experience summary" in the semantic (Zilliz) store,
      with success/cost/duration metadata kept in ``ExperienceMetadataDB``
      under the same UUID key.

    Writes are fire-and-forget on a background thread; retrieval is
    synchronous.
    """

    # Similarity score below which a retrieved summary is discarded.
    _SUMMARY_SCORE_THRESHOLD = 0.45
    # Number of candidate summaries requested from the vector DB.
    _SUMMARY_TOP_K = 7

    def __init__(self, root_agent_instance):
        """Bind to the owning agent and its already-constructed backends."""
        self.root_agent = root_agent_instance
        self.gmemory = root_agent_instance.gmemory
        self.semantic_memory = root_agent_instance.semantic_memory
        self.llm = root_agent_instance.llm
        self.experience_metadata_db = ExperienceMetadataDB()

    def add_memory(self, user_goal: str, plan: list[dict], results: list, success: bool, userid: str, openid: str, total_duration: float = 0.0, total_cost: float = 0.0):
        """Persist a finished task asynchronously.

        Returns immediately; the writes (including an LLM summarization call)
        happen on a non-daemon background thread so the caller is not blocked
        by LLM/DB latency.  ``results`` elements may be plain strings or
        dicts with ``tool_name``/``output`` keys.
        """
        memory_thread = threading.Thread(
            target=self._add_memory_async,
            args=(user_goal, plan, results, success, userid, openid, total_duration, total_cost),
        )
        memory_thread.start()

    @staticmethod
    def _format_results(results: list) -> list[str]:
        """Render each execution result (dict or plain value) as text."""
        formatted = []
        for res in results:
            if isinstance(res, dict):
                formatted.append(f"Tool: {res.get('tool_name', 'Unknown')}\nOutput: {res.get('output', 'No Output')}")
            else:
                formatted.append(str(res))
        return formatted

    def _add_memory_async(self, user_goal: str, plan: list[dict], results: list, success: bool, userid: str, openid: str, total_duration: float, total_cost: float):
        """Background worker for :meth:`add_memory`.

        NOTE(review): ``userid``/``openid`` are currently unused here; kept
        for interface stability with callers.
        """
        if self.root_agent.use_gmemory:
            # Replay the plan as a chain of (action, observation, reward)
            # states.  Reward is a coarse task-level label (+1/-1), not a
            # per-step signal.
            state_chain = StateChain()
            reward = 1 if success else -1
            for i, step in enumerate(plan):
                action = step.get('tool_name', 'Unknown Tool')
                observation = results[i] if i < len(results) else "Step not executed"
                state_chain.move_state(action=action, observation=observation, reward=reward)

            # Human-readable execution trace stored alongside the raw
            # plan/results in extra_fields.  Uses "\n" throughout (the old
            # code mixed "\n" and os.linesep in the same string).
            plan_lines = ["Plan:"]
            for i, step in enumerate(plan):
                plan_lines.append(f"  Step {i+1}: Objective: {step.get('objective')}, Tool: {step.get('tool_name')}, Params: {step.get('params')}, Expected Outcome: {step.get('expect')}, Dependencies: {step.get('dependencies')}, Estimated Time: {step.get('time')}, Failure Policy: {step.get('failure_policy')}")
            detailed_execution_info = "\n".join(plan_lines) + "\n\nExecution Results:\n" + "\n".join(self._format_results(results))

            mas_message = MASMessage(
                task_main=user_goal,
                task_description=user_goal,  # Simplified task_description for GMemory
                chain_of_states=state_chain,
                label=success,
                extra_fields={
                    'plan': plan,
                    'results': results,
                    'detailed_execution_info': detailed_execution_info,
                    'total_duration': total_duration,
                    'total_cost': total_cost,
                },
            )
            self.gmemory.add_memory(mas_message)
            logger.info(f"Added memory for task: {user_goal}, success: {success}")

        if self.root_agent.use_vector_memory:
            # Distill the run into an XML experience summary and index it.
            experience_summary = self._generate_experience_summary(user_goal, plan, results, success, total_duration, total_cost)
            # UUID links the vector row to its metadata record.
            summary_id = str(uuid.uuid4())
            self.semantic_memory.upsert_text_zilliz(
                Id=summary_id,
                textall0=[experience_summary],  # upsert_text_zilliz expects a list of texts
                Source="experience_summary",
                Author="SAIA"
            )
            self.experience_metadata_db.upsert_experience_metadata(summary_id, success, total_cost, total_duration)

    def _generate_experience_summary(self, user_goal: str, plan: list[dict], results: list, success: bool, total_duration: float, total_cost: float) -> str:
        """Ask the LLM for an XML experience summary of the run.

        Returns validated XML text; on parse failure a minimal, well-formed
        fallback summary is returned instead.  LLM token/cost usage is
        accumulated onto ``root_agent.llm_usage_metrics`` as a side effect.
        """
        plan_str = json.dumps(plan, indent=2)
        results_str = "\n".join(self._format_results(results))

        prompt = f"""Based on the following task execution, generate an experience summary in XML format.
Follow the <experience_summary> template provided. Focus on key highlights and lessons learned.

User Goal: {user_goal}

Plan Executed:
```json
{plan_str}
```

Execution Results:
```
{results_str}
```

Task Success: {'True' if success else 'False'}
Total Duration: {total_duration:.2f} seconds
Total LLM Cost: ${total_cost:.4f}

<experience_summary>
  <objective>Briefly describe the task objective.</objective>
  <total_duration_seconds>{total_duration:.2f}</total_duration_seconds>
  <total_llm_cost>${total_cost:.4f}</total_llm_cost>
  <key_highlights tech="comma, separated, keywords" process="comma, separated, process, steps">
    Summarize the key aspects of the task execution, including what was done, any notable challenges, and how they were addressed.
  </key_highlights>
  <lessons_learned>
    What insights or lessons were learned from this task? What worked well, what didn't, and why?
  </lessons_learned>
</experience_summary>
"""
        response_data = self.llm.generate_content(prompt, task_type="summarization")
        llm_summary_response = response_data["output_content"]

        # Book-keep LLM usage on the shared metrics object.
        metrics = self.root_agent.llm_usage_metrics
        metrics["total_input_tokens"] += response_data["input_tokens"]
        metrics["total_output_tokens"] += response_data["output_tokens"]
        metrics["total_cost"] += response_data["cost"]
        metrics["task_llm_metrics"].append({
            "task_type": "summarization",
            "input_tokens": response_data["input_tokens"],
            "output_tokens": response_data["output_tokens"],
            "cost": response_data["cost"]
        })

        # Models often wrap the XML in a ```xml fenced block; unwrap if so.
        xml_match = re.search(r'```xml\n(.*?)```', llm_summary_response, re.DOTALL)
        xml_string = xml_match.group(1).strip() if xml_match else llm_summary_response.strip()

        try:
            ET.fromstring(xml_string)  # validate well-formedness only
            return xml_string
        except ET.ParseError as e:
            # loguru idiom: logger.exception attaches the traceback
            # (exc_info= is a stdlib-logging kwarg loguru ignores).
            logger.exception(f"Error parsing LLM generated experience summary XML: {e}")
            # Build the fallback with ElementTree so the goal / error text
            # are XML-escaped (the old f-string could itself emit invalid
            # XML when user_goal contained '<' or '&').
            root = ET.Element("experience_summary")
            ET.SubElement(root, "objective").text = user_goal
            ET.SubElement(root, "key_highlights").text = f"Error generating detailed summary: {e}"
            ET.SubElement(root, "lessons_learned").text = "Review logs for details."
            return ET.tostring(root, encoding="unicode")

    @staticmethod
    def _extract_plans_and_results(cases) -> tuple[list, list]:
        """Pull the stored raw plan/results out of each case's extra_fields."""
        plans, results = [], []
        for case in cases:
            if case.extra_fields and 'plan' in case.extra_fields and 'results' in case.extra_fields:
                plans.append(case.extra_fields['plan'])
                results.append(case.extra_fields['results'])
        return plans, results

    def retrieve_memory(self, user_goal: str) -> dict:
        """Fetch memories relevant to ``user_goal`` from both backends.

        Returns a dict with keys ``successful_cases``, ``failed_cases``,
        ``insights``, ``semantic_summaries``, ``successful_plans``,
        ``successful_results``, ``failed_plans``, ``failed_results``.
        Each backend failure degrades to empty lists rather than raising.
        """
        if not self.root_agent.use_gmemory:
            # NOTE(review): this also skips semantic-summary retrieval even
            # when use_vector_memory is on, although writes are gated
            # independently — confirm this asymmetry is intended.
            return {
                "successful_cases": [],
                "failed_cases": [],
                "insights": [],
                "semantic_summaries": [],
                "successful_plans": [],
                "successful_results": [],
                "failed_plans": [],
                "failed_results": []
            }

        # Semantic search over stored trajectories in GMemory.
        logger.debug(f"Retrieving memory from GMemory for query: {user_goal}")
        successful_cases, failed_cases, insights = [], [], []
        try:
            successful_cases, failed_cases, insights = self.gmemory.retrieve_memory(query_task=user_goal)
            logger.info(f"Retrieved memories from GMemory for task: {user_goal}")
        except Exception as e:
            logger.warning(f"Error retrieving memories from GMemory: {e}. This might be expected if the memory is empty or the node does not exist yet. Proceeding without GMemory insights.")

        # Relevant experience summaries from the vector store, each joined
        # with its success/cost/duration metadata.
        semantic_summaries = []
        try:
            semantic_results = self.semantic_memory.query_vectordb_zilliz(user_goal, "", "experience_summary", "SAIA", self._SUMMARY_TOP_K)
            for res in semantic_results[0]:
                if res["score"] > self._SUMMARY_SCORE_THRESHOLD:
                    metadata = self.experience_metadata_db.get_experience_metadata(res["source_id"]) or {}
                    semantic_summaries.append({
                        "summary": res["text"],
                        "metadata": metadata
                    })
            logger.info(f"Retrieved {len(semantic_summaries)} semantic summaries for task: {user_goal}")
        except Exception as e:
            logger.warning(f"Error retrieving semantic summaries from vector DB: {e}. Proceeding without semantic summaries.")

        successful_plans, successful_results = self._extract_plans_and_results(successful_cases)
        failed_plans, failed_results = self._extract_plans_and_results(failed_cases)

        return {
            "successful_cases": successful_cases,
            "failed_cases": failed_cases,
            "insights": insights,
            "semantic_summaries": semantic_summaries,
            "successful_plans": successful_plans,
            "successful_results": successful_results,
            "failed_plans": failed_plans,
            "failed_results": failed_results
        }

    def _get_projected_insights(self, raw_insights: list[str], role: str, task_traj: str = None) -> list[str]:
        """Projects raw insights for a specific role using GMemory.

        Falls back to the unprojected insights if projection fails.
        """
        if not raw_insights:
            return []
        try:
            projected_insights = self.gmemory.project_insights(
                raw_insights=raw_insights,
                role=role,
                task_traj=task_traj
            )
            logger.info(f"Projected insights for role '{role}': {projected_insights}")
            return projected_insights
        except Exception:
            # loguru idiom for logging with traceback.
            logger.exception(f"Error projecting insights for role '{role}'")
            return raw_insights  # Fallback to raw insights on error