import json
import os
import re
import uuid
from contextlib import asynccontextmanager
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union

import plotly
import streamlit as st
from gradio import ChatMessage
from jinja2 import Template
from langchain.chat_models.base import init_chat_model
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from langchain_core.messages import AIMessage, BaseMessage, ToolMessage
from langchain_core.runnables import RunnableConfig
from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI
from langgraph.graph.state import CompiledStateGraph


def random_uuid():
    """Return a freshly generated random UUID (version 4) as a string."""
    return f"{uuid.uuid4()}"

# Default system prompt handed to the agent: defines its role, the
# step-by-step tool-selection instructions, and the required output format
# (concise answer plus URL sources). NOTE: this is runtime prompt text sent
# to the LLM — edit with care.
SYSTEM_PROMPT = """<ROLE>
You are a smart agent with an ability to use tools. 
You will be given a question and you will use the tools to answer the question.
Pick the most relevant tool to answer the question. 
If you are failed to answer the question, try different tools to get context.
Your answer should be very polite and professional.
</ROLE>

----

<INSTRUCTIONS>
Step 1: Analyze the question
- Analyze user's question and final goal.
- If the user's question is consist of multiple sub-questions, split them into smaller sub-questions.

Step 2: Pick the most relevant tool
- Pick the most relevant tool to answer the question.
- If you are failed to answer the question, try different tools to get context.

Step 3: Answer the question
- Answer the question in the same language as the question.
- Your answer should be very polite and professional.

Step 4: Provide the source of the answer(if applicable)
- If you've used the tool, provide the source of the answer.
- Valid sources are either a website(URL) or a document(PDF, etc).

Guidelines:
- If you've used the tool, your answer should be based on the tool's output(tool's output is more important than your own knowledge).
- If you've used the tool, and the source is valid URL, provide the source(URL) of the answer.
- Skip providing the source if the source is not URL.
- Answer in the same language as the question.
- Answer should be concise and to the point.
- Avoid response your output with any other information than the answer and the source. 
</INSTRUCTIONS>

----

<OUTPUT_FORMAT>
(concise answer to the question)

**Source**(if applicable)
- (source1: valid URL)
- (source2: valid URL)
- ...
</OUTPUT_FORMAT>
"""






class HistoryMessage:
    """In-memory chat transcript plus Streamlit rendering helpers.

    Turns are stored as ``{"role": ..., "content": ...}`` dicts: user turns
    carry a plain string, assistant turns carry a list of message parts.
    """

    def __init__(self):
        self.messages = []

    def add_user_message(self, content: str):
        """Append one user turn to the transcript."""
        self.messages.append({"role": "user", "content": content})

    def add_ai_message(self, content: list[dict]):
        """Append one assistant turn; *content* is a list of message parts."""
        self.messages.append({"role": "assistant", "content": content})

    def clear(self):
        """Discard the entire transcript."""
        self.messages.clear()

    def _render_ai_message(self, messages: list):
        # One container per assistant part so each renders independently.
        for part in messages:
            holder = st.container()
            render_ai_message(part, holder, decode_thinking_tag)

    def render(self):
        """Replay the stored transcript into the Streamlit chat UI."""
        for entry in self.messages:
            role = entry["role"]
            if role == "user":
                st.chat_message("user", avatar="🧑‍💻").write(entry["content"])
            elif role == "assistant":
                with st.chat_message("assistant", avatar="🤖"):
                    self._render_ai_message(entry["content"])
            else:
                raise ValueError("not implemented")

# Paths of the two JSON settings files:
#   CONFIG_FILE_PATH  - MCP server definitions (command/args/transport per tool)
#   CONFIG_MODEL_PATH - LLM model configurations (provider + constructor kwargs)
CONFIG_FILE_PATH = "config_mcp.json"
CONFIG_MODEL_PATH = "config_model.json"

def load_model_from_json():
    """Load all LLM model configurations from CONFIG_MODEL_PATH.

    Creates the file with a default (empty ollama) entry if it does not
    exist; on any read/write error the default is returned and the error
    is surfaced via st.error.

    Returns:
        dict: mapping of model name -> model config.
    """
    default_config = {
        "ollama": {
            "model": "",
            "provider": "ollama"
        }
    }
    try:
        if os.path.exists(CONFIG_MODEL_PATH):
            with open(CONFIG_MODEL_PATH, "r", encoding="utf-8") as f:
                return json.load(f)
        # Create file with default settings if it doesn't exist.
        # BUG FIX: previously this called save_config_to_json(), which writes
        # to CONFIG_FILE_PATH (the MCP config file) — the model defaults must
        # be written to CONFIG_MODEL_PATH instead.
        with open(CONFIG_MODEL_PATH, "w", encoding="utf-8") as f:
            json.dump(default_config, f, indent=2, ensure_ascii=False)
        return default_config
    except Exception as e:
        st.error(f"Error loading settings file: {str(e)}")
        return default_config

# Function to load settings from JSON file
def load_mcp_config_from_json():
    """Read the MCP server settings from CONFIG_FILE_PATH.

    If the file is missing it is created with a single default entry
    (a stdio time server) and the default is returned. On any error the
    default is returned and the error is shown via st.error.

    Returns:
        dict: tool name -> MCP server launch spec.
    """
    default_config = {
        "get_current_time": {
            "command": "python",
            "args": ["./mcp_server_time.py"],
            "transport": "stdio"
        }
    }

    try:
        if not os.path.exists(CONFIG_FILE_PATH):
            # First run: persist the defaults so the user can edit them later.
            save_config_to_json(default_config)
            return default_config
        with open(CONFIG_FILE_PATH, "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception as e:
        st.error(f"Error loading settings file: {str(e)}")
        return default_config

# Function to save settings to JSON file
def save_config_to_json(config):
    """Write *config* to CONFIG_FILE_PATH as pretty-printed UTF-8 JSON.

    Args:
        config (dict): settings to persist.

    Returns:
        bool: True on success, False if writing failed (error shown via
        st.error).
    """
    try:
        with open(CONFIG_FILE_PATH, "w", encoding="utf-8") as f:
            json.dump(config, f, indent=2, ensure_ascii=False)
    except Exception as e:
        st.error(f"Error saving settings file: {str(e)}")
        return False
    return True

def load_llm_model(model_config):
    """Instantiate a chat model from a model-config dict.

    The ``"provider"`` key selects the backend; every other key is passed
    through unchanged as constructor kwargs.
    """
    provider = model_config["provider"]
    # Everything except the provider selector goes to the constructor.
    kwargs = {k: v for k, v in model_config.items() if k != "provider"}
    if provider == "ollama":
        return ChatOllama(**kwargs)
    if provider == "openai":
        return ChatOpenAI(**kwargs)
    # Unknown provider: defer to LangChain's generic factory.
    return init_chat_model(model=model_config["model"], model_provider=provider)

def retain_only_last_messages(messages):
    """Return deep copies of *messages* with every ToolMessage body
    collapsed to the placeholder string "success".

    Tool outputs can be very large; replacing them keeps the copied
    history compact while preserving the conversation structure. The
    input messages are never mutated (copies are modified).

    Args:
        messages: iterable of LangChain message objects.

    Returns:
        list: copied messages, tool contents replaced.
    """
    result = []
    for message in messages:
        copied = deepcopy(message)
        if isinstance(copied, ToolMessage):
            copied.content = "success"
        # FIX: removed leftover debug print() of every non-tool message.
        result.append(copied)
    return result

def decode_content_with_tag(content, tag):
    """Split *content* into segments inside and outside ``<tag>...</tag>``.

    Yields dicts in document order: ``{"content": text, "tag": tag}`` for
    text wrapped in the tag pair, ``{"content": text, "tag": "normal"}``
    for everything else. Segment text is whitespace-stripped.
    """
    # The pattern is written once with the literal word "think" and then
    # specialized to the requested tag name.
    template = r"(<think>(.*?)</think>)|([^<]+|<(?!/?think>))"
    regex = re.compile(template.replace("think", tag), re.DOTALL)
    for match in regex.finditer(content):
        if match.group(1):  # group 1: a complete <tag>...</tag> pair
            yield {"content": match.group(2).strip(), "tag": tag}
        elif match.group(3):  # group 3: plain text outside any tag pair
            yield {"content": match.group(3).strip(), "tag": "normal"}

def decode_thinking_tag(content, place_holder=None):
    """Split *content* on <think>/<tool_start>/<tool_end> tags and render
    each segment into a Streamlit container.

    Args:
        content: raw assistant text possibly containing tag pairs.
        place_holder: Streamlit container to render into; a fresh
            st.container() is created when None.
    """
    tags = ["think", "tool_start", "tool_end"]
    # Start with the whole text as one "normal" segment, then run one pass
    # per tag: each pass re-splits only the still-"normal" segments, so
    # already-tagged segments survive later passes unchanged.
    content = [{"content": content, "tag": "normal"}]
    res = []
    for tag in tags:
        for cont in content:
            if cont["tag"] == "normal":
                res += list(decode_content_with_tag(cont["content"], tag))
            else:
                res.append(cont)
        content = res
        res = []

    if place_holder is None:
        place_holder = st.container()

    # Render each segment with a widget matching its tag; tagged segments
    # go into collapsed expanders so long tool output stays out of the way.
    with place_holder:
        for cont in content:
            if cont['tag'] == "normal":
                st.markdown(cont["content"])
            elif cont['tag'] == "think":
                st.expander("Think", expanded=False, icon="🤔").markdown(cont["content"])
            elif cont['tag'] == "tool_start":
                st.expander("Tool Call", expanded=False).markdown(cont["content"])
            elif cont['tag'] == "tool_end":
                st.expander("Tool Result", expanded=False).markdown(cont["content"])
            else:  # unreachable with the tag set above; kept as a fallback
                st.code(cont["content"])

def trimmed_message(state):
    """Pre-model hook: shrink older tool outputs in the message history.

    Scans the history from newest to oldest and replaces the content of
    every ToolMessage except the most recent one with "success" (the
    messages are mutated in place). Returns the trimmed list under the
    ``llm_input_messages`` key, or an empty dict for histories of at most
    one message.
    """
    history = state["messages"]
    if len(history) <= 1:
        return {}

    seen_latest_tool = False
    for msg in reversed(history):
        if not isinstance(msg, ToolMessage):
            continue
        if seen_latest_tool:
            # Older tool result: drop its (potentially huge) payload.
            msg.content = "success"
        seen_latest_tool = True
    # You can return updated messages either under `llm_input_messages` or
    # `messages` key (see the note below)
    return {"llm_input_messages": history}