import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig
import torch
import time



# Path to the local Qwen-1.8B-Chat checkpoint (relative to this script).
model_id = "../models/Qwen-1_8B-Chat"

# Pick the device: use the GPU when one is actually available, otherwise
# fall back to CPU. (Previously "cuda" was hard-coded, which crashes on
# CPU-only machines despite the comment claiming a check was done.)
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the tokenizer. NOTE: `device_map` is not a tokenizer argument
# (tokenizers run on CPU), so it is not passed here.
tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    revision='master',
    trust_remote_code=True,
)

# Load the model onto the selected device and switch to inference mode.
model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map=device,  # place the model on the detected device
        trust_remote_code=True
    ).eval()

# Set the page title shown at the top of the Streamlit app.
st.title("chat2money智金问答")
st.text("beta 1.0 2024/12/25 09:00")

# Text input box for the user's question; its value is read by the
# "发送" (send) button handler below.
user_input = st.text_input("投资不决问智金：")



# "发送" (send) button: on click, run the user's question through the
# model with the analyst system persona and display the answer.
if st.button("发送"):
    prompt = user_input.strip()
    if not prompt:
        # Guard against an empty question instead of sending a blank prompt.
        st.warning("请输入问题后再发送")
    else:
        # System persona for the model. Previously this was built into a
        # `messages` list that was never used, so the persona was silently
        # dropped; Qwen's remote-code chat() takes it via the `system` kwarg.
        system_prompt = "你是一名资深的投资分析师."

        # Single-turn chat (no prior history). The returned history is not
        # kept because the UI does not maintain a conversation yet.
        response, _ = model.chat(tokenizer, prompt, history=None, system=system_prompt)

        # Show the model's answer.
        st.write(f"AI: {response}")
    

