#!/usr/bin/env python3

import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import streamlit as st
from math import *
# add a sidebar
# with st.sidebar:
#     st.markdown("## LLaMA3 LLM")
#     "some link"
# make a title
st.title("💬 Net LLaMA3 Chatbot")
st.caption("🚀 A streamlit chatbot built for using tools")

# path or name to the model
model = "/home/wangxianda/model_zoo/llama_3.2_3b_instruct"
# model = "/home/chenjiaxuan/Github/Meta-Llama-3-8B-Instruct"

import json
import re

# format parser of json
def parser(input):
    # pattern = r'```json\n(.*?)```'
    pattern = r'\{.*?\}'
    match = re.search(pattern, input, re.DOTALL)
    if not match:
        raise ValueError("Couldn't parse the output.")

    parsed_data = json.loads(match.group(0))
    return parsed_data

# static loading of weights
@st.cache_resource
def get_model(model):

    llama_pipeline = transformers.pipeline(
        "text-generation",  # LLM task
        model=model,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True)

    return tokenizer, llama_pipeline

# system prompt for using tools
system_message = """Assistant is a expert JSON builder designed to assist with a wide range of tasks.

Assistant is able to respond to the User and use tools using JSON strings that contain "action" and "action_input" parameters.

All of Assistant's communication is performed using this JSON format.

Assistant can also use tools by responding to the user with tool use instructions in the same "action" and "action_input" JSON format. Tools available to Assistant are:

- "Calculator": Useful for when you need to answer questions about math.
  - To use the calculator tool, Assistant should write like so:
    ```json
    {"action": "Calculator",
     "action_input": "4+4"}
    ```

- "Chat": Useful for when no tool is avaliable.
  - To use the chat tool, Assistant should write like so:
    ```json
    {"action": "Chat",
     "action_input": "It looks like you need some help, let me help you!"}
    ```

- "Ping": Useful for Checking network status, especially delay from A to B.
  - To use the chat tool, Assistant should write like so:
    ```json
    {"action": "Ping",
     "action_input": "a ping b"}
    ```

Here are some previous conversations between the Assistant, Tool and User:

User: Hey how are you today?
Assistant: ```json
{"action": "Chat",
 "action_input": "I'm good thanks, how are you?"}
```
User: I'm great, what is the square root of 4?
Assistant: ```json
{"action": "Calculator",
 "action_input": "sqrt(4)"}
```
Tool: 2.0
Assistant: ```json
{"action": "Chat",
 "action_input": "It looks like the answer is 2!"}
```
User: Thanks could you tell me what 4 to the power of 2 is?
Assistant: ```json
{"action": "Calculator",
 "action_input": "4**2"}
```
Tool: 16.0
Assistant: ```json
{"action": "Chat",
 "action_input": "It looks like the answer is 16!"}```

User: Thanks, could you tell me what's the delay from 192.168.0.1 to 192.168.0.8?
Assistant: ```json
{"action": "Ping",
 "action_input": "192.168.0.1 ping 192.168.0.8"}
```
Tool: 153ms
Assistant: ```json
{"action": "Chat",
 "action_input": "It looks like the delay is 153ms! Not good but usable."}```

Let's think step by step. Respone one json at a time, and make sure response in json code block ```json```

Here is the latest conversation between Assistant and User."""

# format prompting and add history
def prompt_template(user_input, history=None, tool_response=False, context_len=20):

    if history is None:
        history = []

    history = history[-context_len:]

    messages = []
    messages.append({
            "role": "system",
            "content": system_message,
        })

    for message in history:
        messages.append(message)

    messages.append({
            "role": "system",
            "content": system_message,
        })

    if tool_response:
        messages.append({"role": "tool", "content": user_input})
    else:
        messages.append({"role": "user", "content": user_input})

    # messages = [{
    #         "role": "system",
    #         "content": system_message,
    #     },
    #     {"role": "user", "content": {user_input}},
    # ]
    return messages

# load model and tokenizer
tokenizer, llama_pipeline = get_model(model)

def get_llama_response(prompt: str) -> None:
    """
    Generate a response from the Llama model.

    Parameters:
        prompt (str): The user's input/question for the model.

    Returns:
        model's response.
    """
    sequences = llama_pipeline(
        prompt,
        do_sample=True,
        top_k=10,
        temperature=0.7,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
        max_length=8192,
    )
    raw = sequences[0]['generated_text']

    return raw

# the tool function
def use_tool(tool,tool_input):

    # fake net tool
    if tool == "Ping":
        return '1000ms'
    elif tool == "Calculator":
        return eval(tool_input)
    else:
        raise Exception("Unknown tool: " + tool)

def inference(input, first_call=True, history=None):
    if first_call:
        input_prompt = prompt_template(input, history=history)
    else:
        input_prompt = input
    # make the agent call
    history = get_llama_response(input_prompt)
    # parse the output if possible
    print('here: ', history)
    response = history[-1]['content']

    st.chat_message("tool").write(response)

    parsed = parser(response)
    # check if the output is our final answer or if it is a tool
    if parsed["action"] == "Chat":
        return parsed["action_input"]
    # if not try to use the tool
    tool_output = use_tool(parsed["action"], parsed["action_input"])

    # add message to the agent
    next_prompt = prompt_template(f"Response: {tool_output}", history=history, tool_response=True)
    # f"{input_prompt}\n{response}\nResponse: {tool_output}"
    print('next prompt:', next_prompt)
    # recursively call the agent with the output of the tool
    return inference(next_prompt, False)

if "messages" not in st.session_state:
    st.session_state["messages"] = []

# show everything in the session_state
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

# if user type input, do the following function
if prompt := st.chat_input():

    # store user outputs
    st.session_state.messages.append({"role": "user", "content": prompt})
    # show user prompt in web
    st.chat_message("user").write(prompt)

    # build input
    response = inference(input=prompt, history=st.session_state['messages'])
# , history=st.session_state["messages"])

    # store assistant outputs
    # st.session_state.messages.append({"role": "user", "content": prompt})
    st.session_state.messages.append({"role": "assistant", "content": response})
    # show assistant output in web
    st.chat_message("assistant").write(response)
    print(st.session_state)
