import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import streamlit as st
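
# Seed PyTorch's RNG so any sampling-based generation is reproducible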
torch.random.manual_seed(0)
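
# Load Phi-3-mini-4k-instruct and its tokenizer; device_map="cpu" keeps the
# model on the CPU and torch_dtype="auto" uses the checkpoint's native precision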
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct",
    device_map="cpu",
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
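
# Wrap the model and tokenizer in a text-generation pipeline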
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
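
# Greedy decoding: temperature is ignored when do_sample=False, and
# return_full_text=False returns only the newly generated reply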
generation_args = {
    "max_new_tokens": 500,
    "return_full_text": False,
    "temperature": 0.0,
    "do_sample": False,
}
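
# Page header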
st.title("💬 Chatbot")
st.caption("🚀 A Streamlit chatbot powered by Microsoft Phi-3-mini")
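
# Initialize the chat history on first load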
if "messages" not in st.session_state:
    st.session_state["messages"] = []
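
# Replay the stored conversation; Streamlit reruns the whole script on each interaction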
for message in st.session_state.messages:
    st.chat_message(message["role"]).write(message["content"])
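
# Handle a new user turn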
if prompt := st.chat_input():
    st.chat_message("user").write(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})
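
    # Pass the full chat history to the pipeline; recent transformers versions
    # accept a list of {"role", "content"} dicts and apply the model's chat template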
    messages = st.session_state.messages
    output = pipe(messages, **generation_args)
    msg = output[0]["generated_text"]
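
    # Show the assistant's reply and persist it in the session history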
    st.chat_message("assistant").write(msg)
    st.session_state.messages.append({"role": "assistant", "content": msg})