from core.prompt_templates import PROMPT_TEMPLATES, MEMORY_PROMPT
from openai import OpenAI
import os
import json
from datetime import datetime
from dotenv import load_dotenv
import re

# Load credentials from a local .env file so the API key is never hard-coded.
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")  # May be None; auth then fails on the first API call.
client = OpenAI(api_key=api_key)

# Create a directory for session memories if it doesn't exist
MEMORY_DIR = "session_memories"
os.makedirs(MEMORY_DIR, exist_ok=True)

class PromptSession:
    """A chat session with persistent, file-backed long-term memory.

    Wraps an OpenAI chat conversation built from a task template.  Supports
    explicit memory commands from the user (remember / show / forget /
    clear), automatic extraction of memorable facts after each model reply,
    and summarization of old turns to keep token usage bounded.  Memory
    survives across runs via a JSON file in ``MEMORY_DIR`` keyed by
    session id.
    """

    # Summarize older turns once the conversation grows past this many messages.
    SUMMARIZE_THRESHOLD = 15
    # Number of trailing messages (3 user/assistant exchanges) kept verbatim
    # when summarizing, and scanned when extracting memory.
    RECENT_WINDOW = 6

    def __init__(self, task="general_assistant", session_id=None):
        """Create a session.

        Args:
            task: Key into PROMPT_TEMPLATES; unknown keys fall back to the
                "general_assistant" template.
            session_id: Identifier for the on-disk memory file.  A
                timestamp-based id is generated when omitted.
        """
        instruction = PROMPT_TEMPLATES.get(task, PROMPT_TEMPLATES["general_assistant"])
        self.task = task
        self.session_id = session_id or f"session_{datetime.now().strftime('%Y%m%d%H%M%S')}"
        self.messages = [{"role": "system", "content": instruction}]
        self.memory = self._load_memory()

        # Surface previously stored facts to the model via the system prompt.
        if self.memory:
            self.messages[0]["content"] += f"\n\n{self._format_memory_for_prompt()}"

    def add_user_input(self, user_input: str):
        """Record a user message, or execute it if it is a memory command.

        Returns:
            The command's reply text when *user_input* is a memory command
            (in that case the message is NOT appended to the conversation),
            otherwise None after appending the message.
        """
        command_reply = self._check_memory_command(user_input)
        if command_reply:
            return command_reply

        self.messages.append({"role": "user", "content": user_input})
        return None

    def get_response(self, model: str):
        """Send the conversation to *model* and return the assistant's reply.

        Side effects: appends the reply to the history, updates the
        extracted memory, and persists memory to disk.
        """
        # Summarize old turns to preserve context while reducing token usage.
        if len(self.messages) > self.SUMMARIZE_THRESHOLD:
            self._summarize_conversation(model)

        response = client.chat.completions.create(
            model=model,
            messages=self.messages
        )
        reply = response.choices[0].message.content
        self.messages.append({"role": "assistant", "content": reply})

        # Capture important information from this exchange and persist it.
        self._update_memory(model)
        self._save_memory()

        return reply

    def _check_memory_command(self, text):
        """Handle a memory command; return its reply, or None if not a command."""
        remember_prefix = "!记住 "
        forget_prefix = "!忘记 "

        if text.lower().startswith(remember_prefix):
            fact = text[len(remember_prefix):].strip()
            self._add_to_memory(fact)
            return f"我已记住：{fact}"

        elif text.lower() == "!显示记忆":
            if not self.memory:
                return "我目前没有存储任何记忆。"
            memory_text = "以下是我记住的内容：\n\n"
            for i, fact in enumerate(self.memory, 1):
                memory_text += f"{i}. {fact}\n"
            return memory_text

        elif text.lower().startswith(forget_prefix):
            try:
                # Commands are 1-based for the user; convert to a list index.
                index = int(text[len(forget_prefix):].strip()) - 1
                if 0 <= index < len(self.memory):
                    forgotten = self.memory.pop(index)
                    self._save_memory()
                    return f"我已忘记：{forgotten}"
                else:
                    return "索引超出范围，无法删除该记忆。"
            except ValueError:
                return "请提供有效的记忆索引编号。"

        elif text.lower() == "!清空记忆":
            self.memory = []
            self._save_memory()
            return "已清空所有记忆。"

        return None  # Not a memory command

    def _summarize_conversation(self, model):
        """Collapse older turns into a single model-written summary message.

        Keeps the system message and the most recent RECENT_WINDOW messages
        verbatim; everything in between is replaced by one summary message.
        On API failure the old turns are simply dropped (truncation).
        """
        recent_messages = self.messages[-self.RECENT_WINDOW:]

        # Everything between the system message and the recent window.
        to_summarize = self.messages[1:-self.RECENT_WINDOW]
        if not to_summarize:
            return  # Nothing old enough to summarize.

        summarize_prompt = "请将以下对话内容总结为简洁的要点，保留关键信息："
        for msg in to_summarize:
            role = "用户" if msg["role"] == "user" else "助手"
            summarize_prompt += f"\n{role}: {msg['content']}"

        try:
            summary_response = client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": summarize_prompt}]
            )
            summary = summary_response.choices[0].message.content

            self.messages = [
                self.messages[0],  # Keep the original system message.
                {"role": "system", "content": f"前面对话的摘要：\n{summary}"},
                *recent_messages
            ]
        except Exception as e:
            # Best effort: if summarization fails, fall back to truncation.
            print(f"Error during summarization: {e}")
            self.messages = [self.messages[0], *recent_messages]

    def _add_to_memory(self, fact):
        """Add *fact* to memory (skipping blanks/duplicates) and persist it.

        Persisting here closes a loss window: previously a `!记住` fact was
        only written to disk after the next model response.
        """
        if fact and fact not in self.memory:
            self.memory.append(fact)
        self._save_memory()

    def _update_memory(self, model):
        """Ask the model to extract memorable facts from the recent turns."""
        # Wait until there are at least 3 full exchanges (system + 6 messages).
        if len(self.messages) < self.RECENT_WINDOW + 1:
            return

        recent_messages = self.messages[-self.RECENT_WINDOW:]

        memory_prompt = MEMORY_PROMPT
        for msg in recent_messages:
            memory_prompt += f"\n{msg['role']}: {msg['content']}"

        try:
            memory_response = client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": memory_prompt}]
            )
            memory_extraction = memory_response.choices[0].message.content

            # The model answers "NONE" when there is nothing worth keeping;
            # otherwise split on numbered items, bullets, commas or newlines.
            if "NONE" not in memory_extraction.upper():
                facts = re.split(r'\d+\.\s|\-\s|,\s*|\n\s*', memory_extraction)
                for fact in (f.strip() for f in facts):
                    if fact and fact not in self.memory:
                        self.memory.append(fact)
        except Exception as e:
            # Extraction is best effort; never fail the main reply over it.
            print(f"Error during memory extraction: {e}")

    def _format_memory_for_prompt(self):
        """Return stored facts as a numbered list for the system prompt."""
        if not self.memory:
            return ""

        memory_text = "当前会话记忆（重要信息）：\n"
        for i, fact in enumerate(self.memory, 1):
            memory_text += f"{i}. {fact}\n"
        return memory_text

    def _save_memory(self):
        """Persist memory to MEMORY_DIR/<session_id>.json (best effort)."""
        if not self.session_id:
            return

        memory_file = os.path.join(MEMORY_DIR, f"{self.session_id}.json")
        memory_data = {
            "task": self.task,
            "memory": self.memory,
            "updated_at": datetime.now().isoformat()
        }

        try:
            with open(memory_file, 'w', encoding='utf-8') as f:
                json.dump(memory_data, f, ensure_ascii=False, indent=2)
        except Exception as e:
            # Deliberately broad: a failed save must not crash the session.
            print(f"Error saving memory: {e}")

    def _load_memory(self):
        """Load this session's memory from disk; return [] when unavailable."""
        if not self.session_id:
            return []

        memory_file = os.path.join(MEMORY_DIR, f"{self.session_id}.json")
        if not os.path.exists(memory_file):
            return []

        try:
            with open(memory_file, 'r', encoding='utf-8') as f:
                memory_data = json.load(f)
                return memory_data.get("memory", [])
        except Exception as e:
            # Deliberately broad: corrupt/unreadable files degrade to no memory.
            print(f"Error loading memory: {e}")
            return []