#!/usr/bin/env python3

import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import streamlit as st
from math import *
import json

from outlines import models
from outlines import generate
import outlines
# add a sidebar
# with st.sidebar:
#     st.markdown("## LLaMA3 LLM")
#     "some link"
# Page header: title and caption shown at the top of the Streamlit app.
st.title("💬 LLM4Net Chatbot")
st.caption("🚀 A streamlit chatbot built for using tools")

# Local filesystem path (or HF name) of the instruct model loaded in get_model().
# NOTE(review): hard-coded absolute path — adjust for your environment.
# model_path = "/home/wangxianda/model_zoo/llama_3.2_3b_instruct"
model_path = "/home/chenjiaxuan/Github/Meta-Llama-3-8B-Instruct"

# # format parser of json
# def parser(input):
#     # pattern = r'```json\n(.*?)```'
#     pattern = r'\{.*?\}'
#     match = re.search(pattern, input, re.DOTALL)
#     if not match:
#         raise ValueError("Couldn't parse the output.")

#     response_data = json.loads(match.group(0))
#     return response_data

# static loading of weights
# static loading of weights
@st.cache_resource
def get_model(model_path):
    """Build a JSON-constrained generator for the model at `model_path`.

    Cached by st.cache_resource so the (multi-GB) weights load once per
    Streamlit server process instead of on every script rerun.

    Returns an outlines generator; calling it yields a dict matching the
    tool-call schema below ({"tool": ..., "tool_input": ...}).
    """

    # (earlier experiment with gpt2 and explicit sampling options, kept for reference)
    # model = models.transformers(
    #     "gpt2",               # Model name
    #     max_length=100,       # Generate up to 100 tokens
    #     temperature=0.7,      # Moderate randomness
    #     top_p=0.9,            # Nucleus sampling
    #     repetition_penalty=1.2,  # Penalize repetition
    #     stop=["\n"],          # Stop on newline character
    #     device="cuda"         # Use GPU
    # )
    # NOTE(review): device="auto" — presumably forwarded to transformers'
    # device placement; confirm against the installed outlines version.
    model = models.transformers(model_path,
                                device="auto",
                                model_kwargs={"torch_dtype": torch.float16}
                                )

    # JSON Schema that constrains decoding: the output must be an object with
    # a "tool" name (one of Chat/Calculator/Ping) and a string "tool_input".
    schema = """
    {
    "title": "Tool",
    "type": "object",
    "properties": {
        "tool": {
        "type": "string",
        "enum": ["Chat", "Calculator", "Ping"]
        },
        "tool_input": {
        "type": "string"
        }
    },
    "required": ["tool", "tool_input"]
    }
    """

    # generate.json compiles the schema into a constrained generator whose
    # calls return parsed objects rather than raw text.
    generator = generate.json(model, schema)

    # tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True)

    return generator

# Prompt template for the agent. NOTE: under @outlines.prompt the function
# DOCSTRING is a Jinja2 template; calling the function returns the rendered
# prompt string. `prompt` and `examples` are currently unused by the template
# (see the commented-out sections at the bottom) but kept in the signature for
# interface compatibility. `history=[]` is a mutable default, but the template
# never mutates it, so it is harmless here.
@outlines.prompt
def prompt_template(instructions, examples, prompt, history=[], tool="null"):
    """
    <|begin_of_text|><|start_header_id|>system<|end_header_id|>
    {{ instructions }}

    Examples
    --------

    User: Hey how are you today?
    Assistant: ```json
    {"tool": "Chat",
    "tool_input": "I'm good thanks, how are you?"}
    ```
    User: I'm great, what is the square root of 4?
    Assistant: ```json
    {"tool": "Calculator",
    "tool_input": "sqrt(4)"}
    ```
    Tool: 2.0
    Assistant: ```json
    {"tool": "Chat",
    "tool_input": "It looks like the answer is 2!"}
    ```
    User: Thanks could you tell me what 4 to the power of 2 is?
    Assistant: ```json
    {"tool": "Calculator",
    "tool_input": "4**2"}
    ```
    Tool: 16.0
    Assistant: ```json
    {"tool": "Chat",
    "tool_input": "It looks like the answer is 16!"}```

    User: Thanks, could you tell me what's the delay from 192.168.0.1 to 192.168.0.8?
    Assistant: ```json
    {"tool": "Ping",
    "tool_input": "a ping b"}
    ```
    Tool: 153ms
    Assistant: ```json
    {"tool": "Chat",
    "tool_input": "It looks like the delay is 153ms! Not good but usable."}```

    Latest Conversation
    --------
    <|eot_id|>

    {{ history }}
    <|start_header_id|>tool<|end_header_id|>:
    {{ tool }}
    <|eot_id|><|start_header_id|>assistant<|end_header_id|>:
    """
    # BUGFIX: the few-shot examples above previously used "action"/"action_input"
    # keys, but the JSON schema passed to generate.json and the `instructions`
    # text both mandate "tool"/"tool_input" — the constrained decoder can only
    # ever emit the latter, so the examples demonstrated an impossible format.

    # <|start_header_id|>user<|end_header_id|>:
    # {{ prompt }}
    # {% for example in examples %}
    # User: {{ example.user }}
    # Tool: {{ example.tool }}
    # Assistant: {{ example.assistant }}

    # {% endfor %}

# System instructions injected into the prompt template: describes the three
# tools and the required {"tool": ..., "tool_input": ...} JSON response format.
instructions = """
    You are an assistant that has access to the following set of tools.
    Here are the names and descriptions for each tool:

- "Calculator": Useful for when you need to answer questions about math.
  - To use the calculator tool, Assistant should write like so:
    ```json
    {"tool": "Calculator",
     "tool_input": "4+4"}
    ```

- "Chat": Useful for when Answer is complete.
  - To use the chat tool, Assistant should write like so:
    ```json
    {"tool": "Chat",
     "tool_input": "It looks like you need some help, let me help you!"}
    ```

- "Ping": Check network status, especially delay from A to B.
  - To use the chat tool, Assistant should write like so:
    ```json
    {"tool": "Ping",
     "tool_input": "h1 ping h2"}
    ```

    Given the user input, return the name and input of the tool to use.
    Return your response as a JSON blob with 'tool' and 'tool_input' keys.
    You can divide the task into multiple subtasks and thought and solve them
    step by step.
"""


# (earlier, more verbose variant of the instructions, kept for reference)
# instructions = """Assistant is a expert JSON builder designed to assist with a wide range of tasks.

# Assistant is able to respond to the User and use Tools using JSON strings that contain "tool" and "tool_input" parameters.

# All of Assistant's communication is performed using this JSON format.

# Assistant can also use tools by responding to the user with tool use instructions in the same "tool" and "tool_input" JSON format. Tools available to Assistant are:

# - "Calculator": Useful for when you need to answer questions about math.
#   - To use the calculator tool, Assistant should write like so:
#     ```json
#     {"tool": "Calculator",
#      "tool_input": "4+4"}
#     ```

# - "Chat": Useful for when no tool is avaliable.
#   - To use the chat tool, Assistant should write like so:
#     ```json
#     {"tool": "Chat",
#      "tool_input": "It looks like you need some help, let me help you!"}
#     ```

# - "Ping": Useful for Checking network status, especially delay from A to B.
#   - To use the chat tool, Assistant should write like so:
#     ```json
#     {"tool": "Ping",
#      "tool_input": "h1 ping h2"}
#     ```
# """

# Few-shot examples. NOTE(review): these are passed to prompt_template, but the
# template's {% for example in examples %} loop is commented out, so they are
# currently unused at runtime.
# NOTE(review): the 2nd and 3rd dicts repeat the "tool" and "assistant" keys;
# a Python dict literal keeps only the LAST occurrence of a duplicated key, so
# the earlier intermediate turns are silently discarded — each example was
# likely meant to be a list of turns instead of a single dict.
examples = [
    {"user": "Hey how are you today?", "tool": "null","assistant": {"tool": "Chat", "tool_input": "I'm good thanks, how are you?"}},
    {"user": "I'm great, what is the square root of 4?", "tool": "null", "assistant":{"tool": "Calculator", "tool_input": "sqrt(4)"}, "tool": "2.0", "assistant": {"tool": "Chat", "tool_input": "It looks like the answer is 2!"}},
    {"user": "Hey how is the internet status between s1 to h3 ?", "assistant": {"tool": "Ping", "tool_input": "s1 ping h3"}, "tool": "53ms", "assistant": {"tool": "Chat", "tool_input": "It looks like the delay is 53ms, good for wireless net!"}},
]

# load model (cached via st.cache_resource, so reruns reuse the same generator)
generator = get_model(model_path)

def convert2template(conversation):
    """Render chat turns as LLaMA-3 header-tagged prompt text.

    Each turn is a dict with 'role' and 'content'; turns whose role is not
    user/assistant/tool are skipped. Content is interpolated via str(), so
    dict contents (e.g. stored tool-call responses) render as their repr.
    """
    rendered = []
    for turn in conversation:
        role = turn['role']
        if role in ('user', 'assistant', 'tool'):
            rendered.append(
                f"<|start_header_id|>{role}<|end_header_id|>\n{turn['content']}<|eot_id|>\n"
            )
    return "".join(rendered)

# the tool function
def use_tool(tool, tool_input):
    """Execute the tool chosen by the model and return its result.

    - "Ping": fake network tool — returns a random latency string like "153ms".
    - "Calculator": evaluates `tool_input` as a math expression and returns
      the numeric result.

    Raises Exception for any unknown tool name.
    """
    import math
    import random

    # fake net tool
    if tool == "Ping":
        return f'{random.randint(1,1000)}ms'
    elif tool == "Calculator":
        # SECURITY: tool_input is LLM-generated text. Previously this was a
        # bare eval() with full module globals and builtins — an injection
        # risk. Restrict the namespace to math functions plus a few safe
        # numeric builtins (sqrt(4), 4**2, abs(-3), ... still work).
        safe_names = {
            name: getattr(math, name)
            for name in dir(math)
            if not name.startswith("_")
        }
        safe_names.update({"abs": abs, "round": round, "min": min,
                           "max": max, "pow": pow})
        return eval(tool_input, {"__builtins__": {}}, safe_names)
    else:
        raise Exception("Unknown tool: " + tool)

def tool_chain(instructions, examples, prompt, first_call=True, context_len=20):
    """Run one agent step and recurse until the model answers with "Chat".

    Args:
        instructions: system instructions injected into the prompt template.
        examples: few-shot examples (currently unused by the template).
        prompt: the user's message on the first call; on recursive calls, an
            already-rendered prompt string.
        first_call: True when invoked on a fresh user message.
        context_len: number of recent history turns included in the prompt.

    Returns the final "tool_input" string of the model's "Chat" response.

    Side effects: appends assistant/tool turns to st.session_state.messages
    and writes them to the Streamlit chat UI.
    """
    if first_call:
        # Build the full prompt from the recent conversation history.
        input_prompt = prompt_template(instructions, examples, prompt, history=convert2template(st.session_state['messages'][-context_len:]))
    else:
        # Recursive calls pass in a fully-rendered prompt already.
        input_prompt = prompt

    # Constrained generation: response is a dict matching the tool schema.
    response = generator(input_prompt, max_tokens=200)

    st.session_state.messages.append({"role": "assistant", "content": response})
    st.chat_message("assistant").write(response)

    # "Chat" means the model has produced its final answer — stop recursing.
    if response["tool"] == "Chat":
        return response["tool_input"]

    # Otherwise run the requested tool and feed its output back to the model.
    tool_output = use_tool(response["tool"], response["tool_input"])

    st.chat_message("Tool").write(tool_output)

    # Build the next prompt BEFORE appending the tool turn: the tool output
    # reaches the model via the `tool` template slot, not via the history.
    next_prompt = prompt_template(instructions, examples, prompt, history=convert2template(st.session_state['messages'][-context_len:]), tool=tool_output)
    st.session_state.messages.append({"role": "tool", "content": tool_output})
    print(f'\n next prompt: {next_prompt} \n')
    # BUGFIX: propagate context_len — the recursive call previously omitted it
    # and silently fell back to the default of 20 regardless of the caller's
    # value.
    # NOTE(review): there is no depth limit; a model that never answers "Chat"
    # will recurse until Python's recursion limit — consider a max_steps guard.
    return tool_chain(instructions, examples, next_prompt, first_call=False, context_len=context_len)


# Initialize the chat history on the first run of this session.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay the stored conversation so it survives Streamlit script reruns.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

# Handle a newly typed user message.
if prompt := st.chat_input():

    # show user prompt in web
    st.chat_message("user").write(prompt)
    # store user input
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Number of recent history turns included in the prompt.
    # BUGFIX: this was `args.context_len`, but `args` is never defined in this
    # script, so the first user message raised NameError. Use the same value
    # as tool_chain's default.
    context_len = 20

    # Rendered only for the debug print below; tool_chain rebuilds the prompt
    # itself from the session history.
    prompt_in_template = prompt_template(instructions, examples, prompt, history=convert2template(st.session_state['messages'][-context_len:]))
    print(f"\n\nPrompt_in_template: {prompt_in_template}\n\n")

    # Run the agent loop (may call tools and recurse before answering).
    response = tool_chain(instructions, examples, prompt, first_call=True, context_len=context_len)

    # show the final assistant answer in web
    st.chat_message("assistant").write(response)
    print(st.session_state)