import torch
import json
import requests
import hashlib
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.generation.utils import GenerationConfig
import streamlit.components.v1 as components
from config import *
from PIL import Image


# Avatar images for each model's chat bubbles; the *_path constants come
# from config's star import (L9).
bc_logo = Image.open(bc_logo_path)
niogpt_logo = Image.open(niogpt_logo_path)
chatglm_logo = Image.open(chatglm_logo_path)

# Page chrome: must be the first Streamlit call of the script run.
st.set_page_config(
        page_title="ChatALL",
        page_icon="🧊",
        menu_items={
            'About': 'Muti-LLM used for B4 AI team'})
st.title("ChatALL|一键Chat多模型")
st.write("如在体验过程中发现bug问题，欢迎反馈至shilin.zhuang，感谢您的使用！")
st.divider()



@st.cache_resource
def init_model(model_path):
    """Load a causal-LM checkpoint plus its tokenizer, cached per session.

    ``st.cache_resource`` ensures the (large) model is loaded once and
    reused across Streamlit reruns.

    Args:
        model_path: local path or hub id of the checkpoint.

    Returns:
        A ``(model, tokenizer)`` pair ready for inference.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        model_path,
        use_fast=False,
        trust_remote_code=True,
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True,
    )
    # Attach the checkpoint's own generation defaults (temperature, etc.).
    model.generation_config = GenerationConfig.from_pretrained(model_path)
    return model, tokenizer


def clear_chat_history():
    """Drop every model's conversation history from the session state.

    Uses ``pop(key, None)`` instead of ``del`` so clearing is safe even
    when a model was never selected this session and its history key was
    therefore never created (a plain ``del`` would raise).
    """
    for key in ("messages", "nio_messages", "glm_messages"):
        st.session_state.pop(key, None)


def init_chat_history(container):
    """Render the Baichuan greeting and any saved Baichuan history.

    Ensures ``st.session_state.messages`` exists and returns it so the
    caller can append new turns.
    """
    with container.chat_message("assistant", avatar=bc_logo):
        # Use st.markdown here: an explicit parent call such as
        # container.markdown() escapes the active `with` block and would
        # render the text outside the chat bubble.
        st.markdown("您好，我是BaiChuan-13B，很高兴为您服务")

    if "messages" in st.session_state:
        # Replay prior turns; the user keeps the default avatar.
        for message in st.session_state.messages:
            avatar = None if message["role"] == "user" else bc_logo
            with container.chat_message(message["role"], avatar=avatar):
                st.markdown(message["content"])
    else:
        st.session_state.messages = []

    return st.session_state.messages


def init_nio_chat_history(container):
    """Render the NIO-GPT greeting and any saved NIO-GPT history.

    Ensures ``st.session_state.nio_messages`` exists and returns it so
    the caller can append new turns.
    """
    with container.chat_message("assistant", avatar=niogpt_logo):
        # Use st.markdown here: an explicit parent call such as
        # container.markdown() escapes the active `with` block and would
        # render the text outside the chat bubble.
        st.markdown("您好，我是NIO-GPT-3.5，很高兴为您服务")

    if "nio_messages" in st.session_state:
        # Replay prior turns; the user keeps the default avatar.
        for nio_message in st.session_state.nio_messages:
            avatar = None if nio_message["role"] == "user" else niogpt_logo
            with container.chat_message(nio_message["role"], avatar=avatar):
                st.markdown(nio_message["content"])
    else:
        st.session_state.nio_messages = []

    return st.session_state.nio_messages


def init_glm_chat_history(container):
    """Render the ChatGLM greeting and any saved ChatGLM history.

    Ensures ``st.session_state.glm_messages`` exists and returns it so
    the caller can append new turns.
    """
    with container.chat_message("assistant", avatar=chatglm_logo):
        # Use st.markdown here: an explicit parent call such as
        # container.markdown() escapes the active `with` block and would
        # render the text outside the chat bubble.
        st.markdown("您好，我是Chat-GLM-6B，很高兴为您服务")

    if "glm_messages" in st.session_state:
        # Replay prior turns; the user keeps the default avatar.
        for glm_message in st.session_state.glm_messages:
            avatar = None if glm_message["role"] == "user" else chatglm_logo
            with container.chat_message(glm_message["role"], avatar=avatar):
                st.markdown(glm_message["content"])
    else:
        st.session_state.glm_messages = []

    return st.session_state.glm_messages


def generateSign(method, path, params, appSecret):
    """Build the MD5 request signature expected by the NIO-GPT gateway.

    The string to sign is ``METHOD + path + "?" + "k1=v1&k2=v2..."``
    with parameters sorted by key (any existing ``sign`` key excluded),
    followed by the app secret.  When there are no signable parameters
    the ``?`` is omitted, matching the original trailing-character
    stripping behavior.

    Args:
        method: HTTP verb, e.g. ``"POST"``.
        path: request path component.
        params: request parameters; a pre-existing ``'sign'`` entry is ignored.
        appSecret: shared secret appended before hashing.

    Returns:
        Lowercase hex MD5 digest of the signing string.
    """
    query = "&".join(
        f"{key}={params[key]}" for key in sorted(params) if key != "sign"
    )
    # Security: never log/print the signing string -- it embeds appSecret.
    if query:
        signStr = f"{method}{path}?{query}{appSecret}"
    else:
        signStr = f"{method}{path}{appSecret}"
    # MD5 is mandated by the gateway's signing scheme (request integrity,
    # not secrecy).
    return hashlib.md5(signStr.encode()).hexdigest()


def baichuan_model_inference(container, bc_model, bc_tokenizer, prompt, messages):
    """Stream a Baichuan reply for ``prompt`` into ``container``.

    Appends the user turn and the final assistant turn to ``messages``
    (mutated in place; also logged to stdout).
    """
    messages.append({"role": "user", "content": prompt})
    print(f"[user] {prompt}", flush=True)
    # Guard: keep `response` defined even if the stream yields nothing,
    # otherwise the append below would raise NameError.
    response = ""
    with container.chat_message("assistant", avatar=bc_logo):
        # st.empty (not container.empty) so the streaming placeholder
        # lives inside the chat bubble rather than after it.
        placeholder = st.empty()
        for response in bc_model.chat(bc_tokenizer, messages, stream=True):
            placeholder.markdown(response)
            # Keep Apple-silicon memory bounded while streaming.
            if torch.backends.mps.is_available():
                torch.mps.empty_cache()
    messages.append({"role": "assistant", "content": response})
    print(json.dumps(messages, ensure_ascii=False), flush=True)


def re_parse_request(input_task):
    """Retry the gateway call, rotating through the fallback user ids.

    Mutates the module-level ``headers`` (from config) by overwriting
    ``X-USER-ID`` for each attempt.  Returns the first successful JSON
    response, or ``False`` when every id fails.
    """
    for candidate_id in user_id_list:
        headers["X-USER-ID"] = candidate_id
        response = requests.post(url=f'{target_url}', params=payload, json=input_task, headers=headers)
        body = response.json()
        if body["result_code"] == 'success':
            return body
    return False


def niogpt_model_inference(container, prompt, nio_messages):
    """Query the NIO-GPT HTTP gateway and render the reply in ``container``.

    Appends the user turn and the assistant turn (or a fallback error
    message on any failure) to ``nio_messages``, which is mutated in
    place and logged to stdout.  ``tmp``, ``payload``, ``path``,
    ``app_secret``, ``target_url`` and ``headers`` come from config's
    star import.
    """
    with container.chat_message("assistant", avatar=niogpt_logo):
        # st.empty (not container.empty) keeps the placeholder inside
        # the chat bubble rather than after it.
        placeholder = st.empty()
        nio_messages.append({"role": "user", "content": prompt})
        try:
            input_task = {"message": prompt}
            tmp["jsonBody"] = json.dumps(input_task)
            payload["sign"] = generateSign("POST", path, tmp, app_secret)
            res = requests.post(url=f'{target_url}', params=payload, json=input_task, headers=headers)
            res_json = res.json()
            if res_json['result_code'] == 'fail':
                # One round of retries over the fallback user ids; may
                # return False, in which case the subscript below raises
                # and we fall into the except branch.
                res_json = re_parse_request(input_task)
            nio_message = res_json['data']['content']
            placeholder.markdown(nio_message)
            nio_messages.append({"role": "assistant", "content": nio_message})
        except Exception as e:
            # Log the cause instead of swallowing it silently, and show
            # the fallback text to the user (previously the bubble was
            # left blank on failure).
            print(f"[niogpt] request failed: {e}", flush=True)
            fallback = "你查询的内容因为一些原因意外丢失了"
            placeholder.markdown(fallback)
            nio_messages.append({"role": "assistant", "content": fallback})
    print(json.dumps(nio_messages, ensure_ascii=False), flush=True)


def main():
    """Render the multi-model chat page and dispatch one prompt per rerun."""
    options = st.sidebar.multiselect(
            'Which model do you wanna to chat?',
            ['百川-13B', 'NIO-GPT', 'ChatGLM-6B'],
            ['百川-13B', 'NIO-GPT']
            )
    st.write('You selected:', options)
    nio_container, baichuan_container, chatglm_container = [st.container() for _ in range(3)]

    if '百川-13B' in options:
        # Load the 13B checkpoint only when it is actually selected;
        # loading it unconditionally wastes GPU memory for users who
        # never pick it (st.cache_resource still dedupes across reruns).
        bc_model, bc_tokenizer = init_model(bc_model_path)
        messages = init_chat_history(baichuan_container)
    if 'NIO-GPT' in options:
        nio_messages = init_nio_chat_history(nio_container)
    if 'ChatGLM-6B' in options:
        glm_messages = init_glm_chat_history(chatglm_container)

    if prompt := st.chat_input("Shift + Enter 换行, Enter 发送"):
        with st.chat_message("user"):
            st.markdown(prompt)
        if '百川-13B' in options:
            baichuan_model_inference(baichuan_container, bc_model, bc_tokenizer, prompt, messages)
        if 'NIO-GPT' in options:
            niogpt_model_inference(nio_container, prompt, nio_messages)
        if 'ChatGLM-6B' in options:
            # TODO: ChatGLM inference is not implemented yet.
            # chatglm_model_inference(chatglm_container, prompt, glm_messages)
            pass
        st.button("清空对话", on_click=clear_chat_history)


# Script entry point (normally launched via `streamlit run <this file>`).
if __name__ == "__main__":
    main()

