import streamlit as st
import torch
from LeNet import LeNet
from PIL import Image
from torchvision import transforms as T

# --- Label mapping: class index -> human-readable digit name ------------------
# Built programmatically; identical to spelling out the ten entries by hand.
mnist10_labels = {digit: f"数字{digit}" for digit in range(10)}

# --- Model creation, weight loading, inference mode ---------------------------
model = LeNet()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
try:
    # map_location lets CPU-only machines load a GPU-trained checkpoint.
    state_dict = torch.load("./LeNet.pth", map_location=device)
    model.load_state_dict(state_dict)
    st.sidebar.success("模型加载成功")
except (OSError, RuntimeError) as e:
    # OSError: checkpoint missing/unreadable; RuntimeError: state-dict mismatch.
    # A bare `except:` here also swallowed SystemExit/KeyboardInterrupt.
    st.sidebar.error("模型加载失败，无法进行评估测试")
    # st.stop() ends this Streamlit script run cleanly instead of exit(),
    # which would kill the whole server process.
    st.stop()
model.eval()  # disable dropout / freeze batch-norm statistics for inference


# --- Model prediction ---------------------------------------------------------
# Preprocessing pipeline built once at import time (it is loop-invariant):
# LeNet-5 expects a 1 x 32 x 32 tensor scaled to [0, 1].
_transform = T.Compose([
    T.Resize((32, 32), antialias=True),
    T.Grayscale(),
    T.ToTensor(),
])


def predict(img_path):
    """Classify one image with the globally loaded model.

    Args:
        img_path: file path or file-like object accepted by ``PIL.Image.open``
            (Streamlit's ``UploadedFile`` works too).

    Returns:
        int: predicted class index (argmax over the model's output logits).
    """
    # Convert to RGB first so palette ("P") or RGBA uploads do not break
    # T.Grayscale; for RGB/L inputs this is value-identical to the original.
    # The context manager also closes the underlying file handle.
    with Image.open(img_path) as raw:
        img = raw.convert("RGB")
    x = _transform(img).unsqueeze(0).to(device)  # add batch dimension
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        logits = predict_model_forward(x) if False else model(x)
    return logits.argmax(dim=-1).item()


# --- Front-end UI -------------------------------------------------------------
def main():
    """Render the chat-style UI and classify uploaded images.

    State: chat history lives in ``st.session_state.history`` as dicts with
    keys ``role`` ("user"/"assistant"), ``type`` ("text"/"image"), ``content``.
    """
    # Sidebar: instructions, model selector, history reset.
    with st.sidebar:
        st.title("使用说明")
        # NOTE(review): only LeNet-5 is actually loaded; this radio choice is
        # not wired to a model switch yet — confirm intended behavior.
        choice_model = st.radio('请选择模型', ['LeNet-5', 'AlexNet', 'GoogLeNet'])
        if st.button("清空历史信息"):
            st.session_state.history = []

    # Main page header.
    st.title("基于CNN的图像识别系统")
    st.write("请上传一张图片进行识别，支持多轮交互")

    # Session state survives Streamlit's script reruns; initialize once.
    if 'history' not in st.session_state:
        st.session_state.history = []

    # Replay previous messages before handling the new input.
    for msg in st.session_state.history:
        with st.chat_message(msg["role"]):
            if msg["type"] == "text":
                st.markdown(msg["content"])
            elif msg["type"] == "image":
                st.image(msg["content"], width=100)

    # Walrus operator: assign and truth-test the chat input in one expression.
    if prompt := st.chat_input("说一些内容/上传图片/文件等", accept_file='multiple', file_type=['jpg', 'png']):
        files = prompt["files"]
        # Echo the user's message (text and/or first uploaded image).
        with st.chat_message("user"):
            if prompt.text:
                st.markdown(prompt.text)
                st.session_state.history.append({"role": "user", "content": prompt.text, "type": "text"})
            if files:
                st.image(files[0], width=100)
                st.session_state.history.append({"role": "user", "content": files[0], "type": "image"})

        # Only run the classifier when an image was actually uploaded.
        # Previously a text-only message hit prompt["files"][0] and surfaced
        # "发生错误：list index out of range" to the user.
        if files:
            try:
                pre_class_idx = predict(files[0])
                final_label = mnist10_labels[pre_class_idx]
                with st.chat_message("assistant"):
                    st.markdown(final_label)
                st.session_state.history.append({"role": "assistant", "content": final_label, "type": "text"})
            except Exception as e:
                # Surface the failure in-chat rather than crashing the app.
                with st.chat_message("assistant"):
                    st.markdown(f"发生错误：{str(e)}")
                st.session_state.history.append({"role": "assistant", "content": f"发生错误：{str(e)}", "type": "text"})


# --- Program entry point: run the UI only when executed as a script -----------
if __name__ == "__main__":
    main()
