#!/usr/bin/env python3
import torch
from outlines import models
from outlines import generate
import streamlit as st
from graphviz import Digraph

from transformers import AutoTokenizer

from src.agent_parser import AgentParser
from src.prompt import convert2template, prompt_template
from src.tool_functions import tool_registry
from src.schema import generate_schema
import time
import json

# Avatar emoji used when rendering tool-output messages in the chat UI.
tool_avatar = "🛠️"
# Automatically generate schema with registered tools (in src/tool_functions.py)
# The schema constrains the model's JSON output via outlines' generate.json.
schema = generate_schema(tool_registry)

def reset_conversation():
    """Wipe both chat transcripts stored in the Streamlit session.

    `messages` drives what the UI re-renders; `history` is the LLM context.
    """
    for key in ("messages", "history"):
        st.session_state[key] = []


@st.cache_resource
def get_model(_args):
    """Load and cache the model, JSON-constrained generator, and tokenizer.

    The leading underscore on `_args` tells Streamlit's cache not to hash
    the argument. Returns a (generator, tokenizer) pair.
    """
    torch_dtype = getattr(torch, _args.dtype)
    model = models.transformers(
        _args.model,
        device=_args.device,
        model_kwargs={"torch_dtype": torch_dtype},
    )
    # Constrain decoding so every completion is valid JSON matching `schema`.
    json_generator = generate.json(model, schema)
    tokenizer = AutoTokenizer.from_pretrained(_args.model)
    return json_generator, tokenizer

def use_tool(tool, tool_input):
    """Invoke the registered tool named `tool` with keyword args `tool_input`.

    On success, returns whatever the tool returns. On any failure (unknown
    tool name or an exception raised by the tool itself), the error is shown
    in the UI, recorded in both session transcripts, and None is returned.
    """
    try:
        return tool_registry.tools[tool](**tool_input)
    except Exception as exc:
        error_message = f"Error using tool '{tool}': {str(exc)}"
        st.error(error_message, icon="🚨")
        # The model sees the failure as a "tool" turn; the UI replays it as "error".
        st.session_state.history.append({"role": "tool", "content": error_message})
        st.session_state.messages.append({"role": "error", "content": error_message})

def tool_chain(generator, prompt_with_history=None, context_len=20, max_tokens=200):
    """Run the generate -> call-tool loop until the model stops requesting tools.

    Args:
        generator: outlines JSON-constrained generator returning a response dict
            with keys "response", "think_step_by_step", and "tool_call".
        prompt_with_history: zero-arg callable returning the chat-templated
            history string (see main()).
        context_len: kept for interface compatibility; the history window is
            applied inside `prompt_with_history`, not here.
        max_tokens: generation budget per model call.

    Returns:
        The final model response dict (the first one whose "tool_call" is empty).
    """
    # Iterative loop instead of self-recursion: a long chain of tool rounds
    # can no longer exhaust the Python recursion limit.
    while True:
        print(st.session_state)
        input_prompt = prompt_template(prompt_with_history=prompt_with_history())

        print(f'\n\n input prompt: {input_prompt} \n\n')
        response = generator(input_prompt, max_tokens=max_tokens)
        print(f'\n\nresponse: {response}\n\n')

        # json.dumps already emits double-quoted JSON; the previous
        # .replace("'", '"') corrupted apostrophes inside content strings
        # (e.g. "it's" -> 'it"s'), feeding broken pseudo-JSON back to the model.
        st.session_state.history.append({"role": "assistant", "content": json.dumps(response, indent=4)})
        st.session_state.messages.append({"role": "assistant", "content": response})
        with st.expander("Raw Output", expanded=False):
            st.write(response)

        with st.chat_message('assistant'):
            st.markdown(f"💬 {response['response']}")
            st.markdown(f"> <p style='color:gray;'>🤔 {response['think_step_by_step']}</p>", unsafe_allow_html=True)

            # No tool requests -> this is the final answer.
            if response["tool_call"] == []:
                return response

            st.info(f"**️Calling Tool:** {response['tool_call'][0]['tool']}, **Input:** {response['tool_call'][0]['tool_input']}", icon="🔦")

            for tool_call in response['tool_call']:
                with st.spinner("Calling Tool..."):
                    time.sleep(0.5)
                    tool_output = use_tool(tool_call["tool"], tool_call["tool_input"])

                # use_tool returns None only on failure; `is not None` keeps
                # falsy-but-valid results (0, "", []) in the transcript, which
                # a plain truthiness check used to drop silently.
                if tool_output is not None:
                    st.session_state.messages.append({"role": "tool", "content": tool_output})
                    st.session_state.history.append({"role": "tool", "content": tool_output})
                    st.success(f'**Result**:\n{tool_output}', icon="🛠")
        # Loop again so the model can react to the tool output.

def main():
    """Streamlit entry point: sidebar config, cached model load, transcript replay, agent loop."""
    # get and plot arguments
    parser = AgentParser()
    args = parser.parse()
    parser.spec()

    st.title("🌏 LLM4Net Agent")
    st.caption("🚀 A network agent with tools")

    with st.sidebar:
        st.button('Reset Chat', on_click=reset_conversation, use_container_width=True)
        col1, col2 = st.columns([2, 1])
        with col1:
            ip = st.text_input("🌏 SDN Controller Address", "192.168.4.35")
        with col2:
            port = st.number_input("Port", value=8080)
        # Stored in session_state so tool functions can read the controller
        # endpoint and timeout — presumably consumed by src.tool_functions;
        # TODO confirm the keys match what the tools read.
        st.session_state["address"] = f"{ip}:{port}"
        timeout = st.number_input("Timeout bond (s)", value=5)
        st.session_state["timeout"] = timeout

        context_len = st.slider("Context", value=20, min_value=2, max_value=50)
        max_tokens = st.slider("Max Tokens", value=1000, min_value=200, max_value=2000, step=50)

    # load model -> llm (cached across Streamlit reruns by st.cache_resource)
    generator, tokenizer = get_model(args)


    # Initialize both transcripts on first run of the session.
    if "messages" not in st.session_state:
        st.session_state["messages"] = []
    if "history" not in st.session_state:
        st.session_state["history"] = []
    # prompt_with_history = lambda: convert2template(st.session_state['history'][-context_len:])
    # Late-bound: each call re-reads the latest history window (last
    # `context_len` turns) and renders it with the tokenizer's chat template
    # as an untokenized string.
    prompt_with_history = lambda: tokenizer.apply_chat_template(st.session_state['history'][-context_len:], tokenize=False)

    # show everything in the session_state
    # NOTE(review): the graphviz/fig/table/tool/error branches below reuse
    # `agent_message` from the most recent "assistant" iteration so their
    # output lands inside that assistant bubble. If such a message ever
    # precedes any assistant message, this raises NameError — confirm the
    # ordering invariant holds for all writers of st.session_state.messages.
    for msg in st.session_state.messages:
        if msg["role"] == "user":
            st.chat_message(msg["role"]).write(msg["content"])

        elif msg["role"] == "assistant":
            response = msg['content']
            # Display the response in a chat message format
            with st.expander("Raw Output", expanded=False):
                st.write(response)

            agent_message = st.chat_message("assistant")
            with agent_message:
                st.write(f"💬 {response['response']}")
                st.markdown(f"> <p style='color:gray;'>🤔 {response['think_step_by_step']}</p>", unsafe_allow_html=True)
                # Only the first tool call is echoed here; tool_chain executes all of them.
                if not msg['content']["tool_call"] == []:
                    # st.markdown(f"🛠 **️Calling Tool:** {msg['content']['tool_call'][0]['tool']}, **Input:** {msg['content']['tool_call'][0]['tool_input']}")
                    st.info(f"**️Calling Tool:** {response['tool_call'][0]['tool']}, **Input:** {response['tool_call'][0]['tool_input']}", icon="🔦")

        elif msg["role"] == "graphviz":
            dot = msg["content"]
            with agent_message:
                st.graphviz_chart(dot.source, use_container_width=False)

        elif msg["role"] == "fig":
            fig = msg["content"]
            with agent_message:
                st.pyplot(fig)

        elif msg["role"] == "table":
            df = msg["content"]
            with agent_message:
                st.table(df)

        elif msg["role"] == "tool":
            tool_output = msg["content"]
            with agent_message:
                st.success(f'**Result**:\n{tool_output}', icon="🛠")

        elif msg["role"] == "error":
            error_msg = msg["content"]
            with agent_message:
              st.error(error_msg, icon="🚨")
    # if user type input, do the following function
    if prompt := st.chat_input():

        # show user prompt in web
        st.chat_message("user").write(prompt)
        # store user outputs
        st.session_state.history.append({"role": "user", "content": prompt})
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Run the full generate/tool loop; returns only when the model emits
        # a response with no tool calls.
        with st.spinner("..."):
            response = tool_chain(generator, prompt_with_history, context_len=context_len, max_tokens=max_tokens)

        print(st.session_state)

if __name__=="__main__":
    main()
