# chattest / app.py
import numpy as np
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import os
import sys
from dotenv import load_dotenv, dotenv_values

# Load environment variables (e.g. an API token) from a local .env file
load_dotenv()
# Create supported models
model_links = {
    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
}
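# Further models could be added here as "display name": "hub repo id" pairs;
# the sidebar dropdown below is built from the keys of this dict.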
# Random dog images for error message
random_dog = [
    "0f476473-2d8b-415e-b944-483768418a95.jpg",
    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
    "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
    "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
    "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
    "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
    "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
    "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
    "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
    "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
    "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
    "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
    "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg",
]

def reset_conversation():
    '''
    Resets Conversation
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
    return None

# Define the available models
models = [key for key in model_links.keys()]

# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)

# Create a temperature slider
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))

# Add reset button to clear conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button

# Create model description
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
if "prevoption" not in st.sessionstate:
st.sessionstate.prevoption = selectedmodel
if st.sessionstate.prevoption != selectedmodel:
st.sessionstate.messages = []
# st.write(f"Changed to {selectedmodel}")
st.sessionstate.prevoption = selectedmodel
resetconversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]

st.subheader(f'TypeGPT.net - {selected_model}')
st.title(f'ChatBot Using {selected_model}')

# Set a default model
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        try:
            # Code before the fix (OpenAI)
            # stream = client.chat.completions.create(
            #     model=model_links[selected_model],
            #     messages=[
            #         {"role": m["role"], "content": m["content"]}
            #         for m in st.session_state.messages
            #     ],
            #     temperature=temp_values,  # 0.5,
            #     stream=True,
            #     max_tokens=3000,
            # )
            # Code after the fix (gradio & InferenceClient)
            import gradio as gr
            from huggingface_hub import InferenceClient

            """
            For more information on `huggingface_hub` Inference API support, please
            check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
            """

            client = InferenceClient(repo_id)
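            # Assumption (not in the original code): gated models such as
            # Meta-Llama-3 may require authentication, e.g.
            # InferenceClient(repo_id, token=os.environ.get("HF_TOKEN")).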
            def respond(
                message,
                history: list[tuple[str, str]],
                system_message,
                max_tokens,
                temperature,
                top_p,
            ):
                # Flatten the (user, assistant) history into the chat format
                # expected by the Inference API
                messages = [{"role": "system", "content": system_message}]
                for val in history:
                    if val[0]:
                        messages.append({"role": "user", "content": val[0]})
                    if val[1]:
                        messages.append({"role": "assistant", "content": val[1]})
                messages.append({"role": "user", "content": message})

                response = ""
                # `chunk`, not `message`, to avoid shadowing the argument above
                for chunk in client.chat_completion(
                    messages,
                    max_tokens=max_tokens,
                    stream=True,
                    temperature=temperature,
                    top_p=top_p,
                ):
                    token = chunk.choices[0].delta.content
                    response += token or ""  # the final chunk may carry no content
                    yield response
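            # Hypothetical stand-alone use of the generator (for illustration
            # only, not executed here):
            #   for partial in respond("Hello", [], "You are a friendly Chatbot.",
            #                          512, 0.7, 0.95):
            #       print(partial)  # prints the response as it grows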
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
respond,
additionalinputs=[
gr.extbox(
value="You are a friendly Chatbot.", label="System message"
),
gr.Slider(
minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"
),
gr.Slider(
minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="temperature"
),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.95,
step=0.05,
label="op-p (nucleus sampling)",
),
],
)
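            # Note: this Gradio interface is never served from inside the
            # Streamlit app (a stand-alone Gradio script would start it with
            # demo.launch()); the respond() generator is called directly below.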
response = ""
for message in demo(
prompt,
st.sessionstate.messages[1:],
"You are a friendly Chatbot.",
512,
0.7,
0.95,
):
response += message
        except Exception as e:
            # st.empty()
            response = "πŸ˜΅β€πŸ’« Looks like someone unplugged something!\
                \nEither the model space is being updated or something is down.\
                \n\
                \nTry again later.\
                \n\
                \nHere's a random pic of a 🐢:"
            st.write(response)
            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
            st.write("This was the error message:")
            st.write(e)
    st.session_state.messages.append({"role": "assistant", "content": response})
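# Run locally with: streamlit run app.py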