Spaces:
Sleeping
Sleeping
File size: 4,536 Bytes
f3f1044 9f54a3b 1e8b2e8 9f54a3b 877a721 9f54a3b fadd816 9f54a3b 1e8b2e8 fadd816 1e8b2e8 fadd816 1e8b2e8 fadd816 c0d899e 142827c fadd816 877a721 fadd816 9f54a3b fadd816 f3f1044 fadd816 9f54a3b 1e8b2e8 9f54a3b 1e8b2e8 f3f1044 fadd816 1e8b2e8 fadd816 1e8b2e8 fadd816 1e8b2e8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 |
""" Simple Chatbot
@author: Nigel Gebodh
@email: nigel.gebodh@gmail.com
"""
import streamlit as st
from openai import OpenAI
import os
import requests
import json
# Module-level aggregate of the most recent streamed reply.  Kept (and still
# updated below) for backward compatibility with any code that reads it.
entire_assistant_response = ""


def get_streamed_response(message, history, model):
    """Stream a chat completion from the Together API.

    Args:
        message: The new user message to send.
        history: Iterable of (user_text, assistant_text) pairs of prior turns.
        model: Together model identifier, e.g. "google/gemma-7b-it".

    Yields:
        str: The cumulative assistant response after each streamed chunk.

    Raises:
        requests.HTTPError: If the API returns a non-2xx status.
    """
    global entire_assistant_response
    entire_assistant_response = ""  # Reset the aggregate for this request.

    # Rebuild the prior conversation in the API's message format, then add
    # the new user message.
    all_message = []
    for human, assistant in history:
        all_message.append({"role": "user", "content": human})
        all_message.append({"role": "assistant", "content": assistant})
    all_message.append({"role": "user", "content": message})

    url = "https://api.together.xyz/v1/chat/completions"
    payload = {
        "model": model,
        "temperature": 1.05,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1,
        "n": 1,
        "messages": all_message,
        "stream_tokens": True,
    }

    TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {TOGETHER_API_KEY}",
    }

    # BUG FIX: the streamed connection was never closed; the `with` block
    # releases it even if the caller abandons the generator mid-stream.
    # A timeout is added so a stalled server cannot hang the app forever.
    with requests.post(url, json=payload, headers=headers, stream=True,
                       timeout=60) as response:
        response.raise_for_status()  # Ensure HTTP request was successful
        for line in response.iter_lines():
            if not line:
                continue  # Skip SSE keep-alive blank lines.
            decoded_line = line.decode('utf-8')
            # The API signals completion with a literal "[DONE]" event.
            # BUG FIX: stop without re-yielding the full text here — the old
            # extra yield made consumers see the final response twice.
            if decoded_line == "data: [DONE]":
                break
            # Strip only the leading SSE "data: " prefix before parsing
            # (replace() would also mangle the string inside the payload).
            if decoded_line.startswith("data: "):
                decoded_line = decoded_line[len("data: "):]
            try:
                chunk_data = json.loads(decoded_line)
                content = chunk_data['choices'][0]['delta']['content']
            except json.JSONDecodeError:
                print(f"Invalid JSON received: {decoded_line}")
                continue
            except KeyError as e:
                print(f"KeyError encountered: {e}")
                continue
            entire_assistant_response += content  # Aggregate content
            yield entire_assistant_response
# Initialize Streamlit app
st.title("Simple Chatbot")

# Initialize chat history in session state on first run.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Map display names to Together-hosted model identifiers.
models = {
    "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
    "Gemma-7B": "google/gemma-7b-it",
    "Gemma-2B": "google/gemma-2b-it",
    "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
    "BibleLearnerAI": "NousResearch/Nous-Hermes-2-Yi-34B",
}

# Allow user to select a model
selected_model = st.sidebar.selectbox("Select Model", list(models.keys()))

# Sidebar description and help links.
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
st.sidebar.markdown("\nRun into issues? Try the [back-up](https://huggingface.co/spaces/ngebodh/SimpleChatbot-Backup).")

# Reset the conversation whenever the user switches models.
if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
    st.session_state.messages = []
    st.session_state.prev_option = selected_model

# Pull in the model we want to use
repo_id = models[selected_model]
st.subheader(f'AI - {selected_model}')

# Accept user input
if prompt := st.text_input(f"Hi I'm {selected_model}, ask me a question"):
    with st.spinner("AI is typing..."):
        # BUG FIX: history must be a list of (user, assistant) pairs.  The
        # old code passed a list containing a single generator, which
        # get_streamed_response could not unpack ("for human, assistant in
        # history" raised a TypeError on any second turn).
        past = st.session_state.messages
        history = [
            (past[i]["content"], past[i + 1]["content"])
            for i in range(0, len(past) - 1, 2)
            if past[i]["role"] == "user" and past[i + 1]["role"] == "assistant"
        ]
        st.session_state.messages.append({"role": "user", "content": prompt})
        # BUG FIX: consume the stream but keep only the final cumulative
        # text.  The old loop appended every partial yield as its own
        # assistant message, flooding the history with duplicates.
        final_response = ""
        for partial in get_streamed_response(prompt, history, repo_id):
            final_response = partial
        st.session_state.messages.append(
            {"role": "assistant", "content": final_response}
        )

# Display chat history
for message in st.session_state.messages:
    if message["role"] == "user":
        st.text_input("You:", value=message["content"], disabled=True)
    else:
        st.text_input(f"{selected_model}:", value=message["content"], disabled=True)
|