import os

import streamlit as st
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables (e.g. the Hugging Face token) from a local .env file
load_dotenv()
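# Example .env entry (the variable name matches the os.environ.get call below;
# the token value here is a placeholder, not a real credential):
#   HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxxxxxxxxxx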
# Initialize the OpenAI client against Hugging Face's OpenAI-compatible endpoint
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),  # Hugging Face token from the environment
)
# Supported models: display name -> Hugging Face repository id
model_links = {
    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Aya-23-35B": "CohereForAI/aya-23-35B",
    "Mistral-Nemo-Instruct-2407": "mistralai/Mistral-Nemo-Instruct-2407",
    "Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Mistral-Small-Instruct-2409": "mistralai/Mistral-Small-Instruct-2409",
    "EuroLLM-9B-Instruct": "utter-project/EuroLLM-9B-Instruct",
    "EuroLLM-9B": "utter-project/EuroLLM-9B",
    "Athene-V2-Chat": "Nexusflow/Athene-V2-Chat",
}
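# NOTE: this assumes every repository above is reachable through the Hugging Face
# Inference API; a model that is not served there will fail at request time,
# which the error handling in ask_assistant_stream() reports in the UI.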
def reset_conversation():
    """Clear the chat history."""
    st.session_state.messages = []
def ask_assistant_stream(st_model, st_messages, st_temp_value, st_max_tokens):
    """Request a streamed chat completion.

    Returns {"stream": <iterator>} on success or {"error": <message>} on failure.
    """
    response = {}
    try:
        stream = client.chat.completions.create(
            model=st_model,
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st_messages
            ],
            temperature=st_temp_value,
            stream=True,
            max_tokens=st_max_tokens,
        )
        response["stream"] = stream
    except Exception as e:
        # Keep the error instead of silently swallowing it, so the UI can report it
        response["error"] = str(e)
    return response
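# Minimal usage sketch for the helper above (values are illustrative only):
#   result = ask_assistant_stream(
#       "mistralai/Mistral-7B-Instruct-v0.2",
#       [{"role": "user", "content": "Hello"}],
#       0.5,
#       1000,
#   )
#   if "stream" in result:
#       ...  # e.g. hand result["stream"] to st.write_stream()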
# Sidebar: dropdown for model selection
models = list(model_links.keys())
selected_model = st.sidebar.selectbox("Select Model", models)

# Temperature slider
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

# max_tokens slider
max_token_value = st.sidebar.slider('Select a max_token value', 1000, 9000, 5000)

# Reset button to clear the conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation)

# Model description
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
# Modal dialog for editing an existing message (st.dialog requires a recent Streamlit release)
@st.dialog("Edit Message")
def edit_message(position):
    """Edit the message at 1-based history position `position`."""
    return_text = st.text_area("message:", value=st.session_state.messages[position - 1]["content"])
    if st.button("Save"):
        st.session_state.messages[position - 1]["content"] = return_text
        st.rerun()
    # For user messages, also offer to discard the rest of the conversation
    # and immediately re-ask the model with the edited prompt
    if st.session_state.messages[position - 1]["role"] == "user":
        if st.button("Save & Retry"):
            st.session_state.messages[position - 1]["content"] = return_text
            del st.session_state.messages[position:]
            st.session_state.instant_request = True
            st.rerun()
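# NOTE: widget interactions inside an st.dialog rerun only the dialog function;
# the explicit st.rerun() calls above close the dialog and rerun the whole
# script so the edited history is redrawn in the main chat view.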
def remove_message(position):
    """Remove the assistant message at 1-based `position` together with the user message preceding it."""
    st.toast(f"Removing messages {position - 1} and {position}")
    del st.session_state.messages[position - 2:position]
def ask_assistant_write_stream():
    """Stream the assistant's answer into the chat container and append it to the history."""
    assistant = ask_assistant_stream(
        model_links[selected_model], st.session_state.messages, temp_values, max_token_value
    )
    pos = len(st.session_state.messages) + 1  # 1-based position the new message will occupy
    if "stream" in assistant:
        with st.chat_message("assistant"):
            col1, col2 = st.columns([9, 1])
            response = col1.write_stream(assistant["stream"])
            col2.button("", icon=":material/edit:", key=f"button_edit_message_{pos}", args=[pos], on_click=edit_message)
            col2.button("", icon=":material/delete:", key=f"button_remove_message_{pos}", args=[pos], on_click=remove_message)
    else:
        with st.chat_message("assistant"):
            col1, col2 = st.columns([9, 1])
            # The request failed: show the captured error. Note st.write() returns None,
            # so the displayed text must be kept in a variable for the history.
            response = assistant.get("error", "Failure!")
            col1.write(response)
            col2.button("", icon=":material/delete:", key=f"button_remove_message_{pos}", args=[pos], on_click=remove_message)
    st.session_state.messages.append({"role": "assistant", "content": response})
st.subheader(selected_model)

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Replay the chat history on each rerun, with edit/delete buttons per message
for pos, message in enumerate(st.session_state.messages, start=1):
    with st.chat_message(message["role"]):
        col1, col2 = st.columns([9, 1])
        col1.markdown(message["content"])
        col2.button("", icon=":material/edit:", key=f"button_edit_message_{pos}", args=[pos], on_click=edit_message)
        if message["role"] == "assistant":
            col2.button("", icon=":material/delete:", key=f"button_remove_message_{pos}", args=[pos], on_click=remove_message)
if "instant_request" not in st.session_state:
st.session_state.instant_request = False
if st.session_state.instant_request:
ask_assistant_write_stream()
st.session_state.instant_request = False
# Accept user input
if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
    # Display the user message and append it to the chat history
    pos = len(st.session_state.messages) + 1
    with st.chat_message("user"):
        col1, col2 = st.columns([9, 1])
        col1.markdown(prompt)
        col2.button("", icon=":material/edit:", key=f"button_edit_message_{pos}", args=[pos], on_click=edit_message)
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Generate and display the assistant response
    ask_assistant_write_stream()