File size: 5,901 Bytes
9f54a3b dab5cc9 9f54a3b e8f079f 9f54a3b 60eb20c 67a2453 60eb20c 8270bde 0ca86ba 9f54a3b 002b092 142827c b8e69b4 2e90884 6e0c914 76b65c9 6e0c914 b8e69b4 9f54a3b 60eb20c 142827c 8e38e68 142827c 0ca86ba 142827c 60eb20c 9f54a3b eabc41f d135f7b eabc41f 9f54a3b 60eb20c 9f54a3b be9d4b9 9f54a3b b8dcb86 f5989f3 c6fd6e4 7c2f8bf c6fd6e4 9f54a3b 717f43f 0441833 0cafb36 f5989f3 8745ce1 0441833 f9207df f5989f3 f9207df 8745ce1 f9207df 0441833 9f54a3b 3bb7e5d 9f54a3b 60eb20c 9f54a3b b8e69b4 9f54a3b b8e69b4 1e6fc39 0cafb36 ca83e2e 350279c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 |
import streamlit as st
from openai import OpenAI
import os
import sys
from dotenv import load_dotenv, dotenv_values
load_dotenv()  # pull HUGGINGFACEHUB_API_TOKEN (and any other vars) from a local .env file
# initialize the client
# OpenAI-compatible client pointed at the Hugging Face serverless Inference API;
# all chat completions below go through this single module-level client.
client = OpenAI(
base_url="https://api-inference.huggingface.co/v1",
api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN') # Replace with your token
)
# Create supported models
# Maps the display name shown in the sidebar dropdown to the Hugging Face
# repo id passed as `model` to the chat-completions endpoint.
model_links = {
"Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"Mistral-Nemo-Instruct-2407": "mistralai/Mistral-Nemo-Instruct-2407",
"Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
"Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
"Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
"Mistral-Small-Instruct-2409": "mistralai/Mistral-Small-Instruct-2409",
}
#Random dog images for error message
# Image filenames (presumably hosted at random.dog) intended for a friendly
# error screen. NOTE(review): nothing in the active code references this list
# — the error path that would have used it is commented out — so it is
# currently dead data; confirm before removing.
random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
"1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
"526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
"1326984c-39b0-492c-a773-f120d747a7e2.jpg",
"42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
"8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
"ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
"027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
"08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
"0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
"0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
"6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
"bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
def reset_conversation():
    """Wipe the chat transcript stored in Streamlit session state.

    Clears both the ``conversation`` and ``messages`` lists so the next
    rerun starts from an empty history. Used as an ``on_click`` callback
    for the sidebar "Reset Chat" button.
    """
    for key in ("conversation", "messages"):
        st.session_state[key] = []
    return None
def get_assistant_aswer(st_model, st_messages, st_temp_value, st_max_tokens):
    """Request a streamed chat completion and return the full reply text.

    Args:
        st_model: Hugging Face repo id to use as the model (a value from
            ``model_links``).
        st_messages: chat history as a list of ``{"role", "content"}`` dicts.
        st_temp_value: sampling temperature (0.0-1.0 from the sidebar slider).
        st_max_tokens: maximum tokens to generate.

    Returns:
        The concatenated assistant reply, or a short apology string if the
        API call fails for any reason (best-effort fallback so the Streamlit
        rerun shows a friendly message instead of a traceback).
    """
    parts = []
    try:
        stream = client.chat.completions.create(
            model=st_model,
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st_messages
            ],
            temperature=st_temp_value,
            stream=True,
            max_tokens=st_max_tokens,
        )
        for chunk in stream:
            # Some stream chunks carry delta.content=None (e.g. the final
            # chunk); the old `response + content` would raise TypeError
            # on those, so skip empty deltas explicitly.
            content = chunk.choices[0].delta.content
            if content:
                parts.append(content)
    except Exception:
        # Broad by design: any network/API failure degrades to a canned
        # message rather than crashing the UI (restores the intent of the
        # previously commented-out handler).
        return "😵💫 Looks like someone unplugged something!"
    # join() avoids the quadratic cost of repeated string concatenation.
    return "".join(parts)
# Define the available models
# Display names only; the repo id is resolved via model_links below.
models =[key for key in model_links.keys()]
# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)
# Create a temperature slider
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
# Create a max_token slider
max_token_value = st.sidebar.slider('Select a max_token value', 1000, 9000, (5000))
#Add reset button to clear conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
# Create model description
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
# st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
# Remember the previously selected model across reruns so we can detect a
# switch in the dropdown.
if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model
# When the user picks a different model, drop the old conversation: mixing
# histories across models is avoided by resetting state.
if st.session_state.prev_option != selected_model:
    st.session_state.messages = []
    # st.write(f"Changed to {selected_model}")
    st.session_state.prev_option = selected_model
    reset_conversation()
#Pull in the model we want to use
repo_id = model_links[selected_model]
st.subheader(f'{selected_model}')
# # st.title(f'ChatBot Using {selected_model}')
# Set a default model
# NOTE(review): this stores the repo id under the *display name* key in
# session_state; nothing visible reads it back — confirm it is still needed.
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# --- "retry" button support -------------------------------------------------
# The button callback only sets a flag; the actual work happens on the next
# script rerun, where the full message history and widgets are in scope.
if "retry" not in st.session_state:
    st.session_state.retry= False
def retry_click():
    # on_click callback: mark that the last answer should be regenerated.
    st.session_state.retry= True
if st.session_state.retry:
    # Drop the last stored message and regenerate a reply from the remaining
    # history. NOTE(review): this assumes the last message is the assistant's
    # answer — confirm the flow can never leave a user message last here.
    lastmessage = st.session_state.messages.pop()
    st.toast("popped msg: " + lastmessage["content"] + " // model: " + model_links[selected_model])
    response = get_assistant_aswer(model_links[selected_model], st.session_state.messages, temp_values, max_token_value)
    st.session_state.messages.append({"role": "assistant", "content": response})
    st.session_state.retry= False
    st.rerun()
# --- "remove" button support ------------------------------------------------
# Same flag-then-rerun pattern as the retry button: the callback only sets a
# flag, and the deferred handler below removes the last exchange.
if "remove" not in st.session_state:
    st.session_state.remove= False
def remove_click():
    # on_click callback: mark the last exchange for removal on the next rerun.
    st.session_state.remove= True
if st.session_state.remove:
    # Remove the last assistant reply and its user prompt. The pops are
    # guarded because the button is shown whenever at least ONE message
    # exists: with a single stored message the original unconditional
    # double pop() raised IndexError and crashed the rerun.
    if st.session_state.messages:
        lastmessage = st.session_state.messages.pop()
        if st.session_state.messages:
            st.session_state.messages.pop()  # drop the matching user prompt too
        st.toast("popped msg: " + lastmessage["content"] + " // model: " + model_links[selected_model])
    st.session_state.remove= False
    st.rerun()
# Accept user input
# chat_input returns None until the user submits, so this whole section runs
# only when a new prompt arrives on this rerun.
if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display assistant response in chat message container
    # (full history, including the new prompt, is sent to the model)
    response = get_assistant_aswer(model_links[selected_model], st.session_state.messages, temp_values, max_token_value)
    with st.chat_message("assistant"):
        st.write(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
# Offer retry/remove controls only once at least one message exists.
if len(st.session_state.messages)>0:
    col1, col2 = st.columns(2)
    col1.button("retry", key="retryButton", on_click=retry_click)
    col2.button("remove", key="removeButton", on_click=remove_click)
|