ogegadavis254 committed on
Commit
2506825
1 Parent(s): f7ca69c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -68
app.py CHANGED
@@ -1,40 +1,80 @@
1
- """ Simple Chatbot
2
- @author: Nigel Gebodh
3
- @email: nigel.gebodh@gmail.com
4
- """
5
-
6
  import streamlit as st
7
- import os
8
  import requests
9
  import json
 
 
10
 
11
- entire_assistant_response = ""
12
 
13
- def get_streamed_response(message, history, model):
14
- all_message = []
 
 
 
 
 
15
 
16
- if not history: # If history is empty
17
- all_message.append({"role": "user", "content": ""})
18
- history = [("", "")] # Add dummy values to prevent unpacking error
 
 
 
 
 
19
 
20
- for human, assistant in history:
21
- all_message.append({"role": "user", "content": human})
22
- all_message.append({"role": "assistant", "content": assistant})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
- global entire_assistant_response
25
- entire_assistant_response = "" # Reset the entire assistant response
 
 
26
 
27
- all_message.append({"role": "user", "content": message})
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  url = "https://api.together.xyz/v1/chat/completions"
30
  payload = {
31
- "model": model,
32
  "temperature": 1.05,
33
  "top_p": 0.9,
34
  "top_k": 50,
35
  "repetition_penalty": 1,
36
  "n": 1,
37
- "messages": all_message,
38
  "stream_tokens": True,
39
  }
40
 
@@ -73,62 +113,46 @@ def get_streamed_response(message, history, model):
73
  print(f"KeyError encountered: {e}")
74
  continue
75
 
76
- print(entire_assistant_response)
77
- all_message.append({"role": "assistant", "content": entire_assistant_response})
78
-
79
-
80
- # Initialize Streamlit app
81
- st.title("Simple Chatbot")
82
 
83
- # Initialize session state if not present
84
- if "messages" not in st.session_state:
85
- st.session_state.messages = []
86
-
87
- # Define available models
88
- models = {
89
- "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
90
- "Gemma-7B": "google/gemma-7b-it",
91
- "Gemma-2B": "google/gemma-2b-it",
92
- "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
93
- "BibleLearnerAI": "NousResearch/Nous-Hermes-2-Yi-34B"
94
- }
95
-
96
- # Allow user to select a model
97
- selected_model = st.sidebar.selectbox("Select Model", list(models.keys()))
98
-
99
- # Create model description
100
  st.sidebar.write(f"You're now chatting with **{selected_model}**")
 
 
101
  st.sidebar.markdown("*Generated content may be inaccurate or false.*")
102
  st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
103
  st.sidebar.markdown("\nRun into issues? Try the [back-up](https://huggingface.co/spaces/ngebodh/SimpleChatbot-Backup).")
104
 
105
- if "prev_option" not in st.session_state:
106
- st.session_state.prev_option = selected_model
107
-
108
- if st.session_state.prev_option != selected_model:
109
  st.session_state.messages = []
110
- st.session_state.prev_option = selected_model
111
 
112
- #Pull in the model we want to use
113
- repo_id = models[selected_model]
114
-
115
- st.subheader(f'AI - {selected_model}')
116
 
117
  # Accept user input
118
- if prompt := st.text_input(f"Hi I'm {selected_model}, ask me a question"):
119
-
120
- # Display user message
121
- with st.spinner("AI is typing..."):
122
- st.session_state.messages.append({"role": "user", "content": prompt})
123
-
124
- # Call selected model to get response
125
- response_stream = get_streamed_response(prompt, [(m["content"] for m in st.session_state.messages[:-1])], repo_id)
126
- for response in response_stream:
127
- st.session_state.messages.append({"role": "assistant", "content": response})
128
-
129
- # Display chat history
130
- for message in st.session_state.messages:
131
- if message["role"] == "user":
132
- st.text_input("You:", value=message["content"], disabled=True)
133
  else:
134
- st.text_input(f"{selected_model}:", value=message["content"], disabled=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
 
2
  import requests
3
  import json
4
+ import os
5
+ from dotenv import load_dotenv
6
 
7
+ load_dotenv()
8
 
9
def reset_conversation():
    """Reset the chat: drop both the conversation log and the message history.

    Wired to the sidebar 'Reset Chat' button via ``on_click``, so the next
    Streamlit rerun starts from an empty transcript.
    """
    # Both session-state lists are emptied so nothing from the previous
    # exchange leaks into the next one.
    for key in ("conversation", "messages"):
        st.session_state[key] = []
    return None
16
 
17
# Hugging Face repo ids for every model offered in the sidebar, keyed by
# the display name shown to the user. Insertion order is preserved by
# dict(zip(...)), which matters: the selectbox lists models in this order.
_MODEL_NAMES = (
    "Mistral",
    "Gemma-7B",
    "Gemma-2B",
    "Zephyr-7B-β",
    "Nous-Hermes-2-Yi-34B",
)
_MODEL_REPOS = (
    "mistralai/Mistral-7B-Instruct-v0.2",
    "google/gemma-7b-it",
    "google/gemma-2b-it",
    "HuggingFaceH4/zephyr-7b-beta",
    "NousResearch/Nous-Hermes-2-Yi-34B",
)
model_links = dict(zip(_MODEL_NAMES, _MODEL_REPOS))
25
 
26
# Sidebar blurb and logo URL for each model, keyed by the same display
# names used in `model_links`. Both Gemma variants share one logo image.
_GEMMA_LOGO = 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'

model_info = {
    "Mistral": {
        'description': "The Mistral model is a Large Language Model (LLM) developed by Mistral AI.",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp',
    },
    "Gemma-7B": {
        'description': "The Gemma-7B model is a Large Language Model (LLM) developed by Google with 7 billion parameters.",
        'logo': _GEMMA_LOGO,
    },
    "Gemma-2B": {
        'description': "The Gemma-2B model is a Large Language Model (LLM) developed by Google with 2 billion parameters.",
        'logo': _GEMMA_LOGO,
    },
    "Zephyr-7B-β": {
        'description': "The Zephyr-7B-β model is a Large Language Model (LLM) developed by HuggingFace.",
        'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png',
    },
    "Nous-Hermes-2-Yi-34B": {
        # NOTE(review): this logo URL looks like a placeholder — confirm a real asset.
        'description': "The Nous Hermes model is a Large Language Model (LLM) developed by Nous Research with 34 billion parameters.",
        'logo': 'https://example.com/nous_hermes_logo.png',
    },
}
49
 
50
# Function to interact with Hugging Face models
def interact_with_huggingface_model(messages, model):
    """Placeholder for Hugging Face inference — not implemented yet.

    Parameters
    ----------
    messages : list
        The chat history accumulated in ``st.session_state.messages``.
    model : str
        Hugging Face repo id (e.g. ``"google/gemma-7b-it"``).

    Returns
    -------
    list
        Always an empty list for now. Returning an empty iterable (instead
        of the implicit ``None`` the old ``pass`` produced) lets callers
        that iterate the result as a token stream run without crashing.
    """
    # TODO: call the Hugging Face Inference API here and yield streamed tokens.
    return []
54
 
55
+ # Function to interact with the Together API model
56
+ def interact_with_together_api(messages):
57
+ all_messages = []
58
+
59
+ if not messages: # If history is empty
60
+ all_messages.append({"role": "user", "content": ""})
61
+ history = [("", "")] # Add dummy values to prevent unpacking error
62
+
63
+ for human, assistant in messages:
64
+ all_messages.append({"role": "user", "content": human})
65
+ all_messages.append({"role": "assistant", "content": assistant})
66
+
67
+ all_messages.append({"role": "user", "content": messages[-1][1]})
68
 
69
  url = "https://api.together.xyz/v1/chat/completions"
70
  payload = {
71
+ "model": "NousResearch/Nous-Hermes-2-Yi-34B",
72
  "temperature": 1.05,
73
  "top_p": 0.9,
74
  "top_k": 50,
75
  "repetition_penalty": 1,
76
  "n": 1,
77
+ "messages": all_messages,
78
  "stream_tokens": True,
79
  }
80
 
 
113
  print(f"KeyError encountered: {e}")
114
  continue
115
 
116
# Sidebar controls: model picker, sampling temperature, and a reset button.
selected_model = st.sidebar.selectbox("Select Model", list(model_links))
temperature = st.sidebar.slider('Select Temperature', 0.0, 1.0, 0.5)
st.sidebar.button('Reset Chat', on_click=reset_conversation)
# NOTE(review): `temperature` is collected here but never passed to the
# API calls below — confirm whether it should feed the request payload.
 
 
120
 
121
# Display model description and logo, then the standing caveats/links.
info = model_info[selected_model]
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(info['description'])
st.sidebar.image(info['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
st.sidebar.markdown("\nRun into issues? Try the [back-up](https://huggingface.co/spaces/ngebodh/SimpleChatbot-Backup).")
128
 
129
# Create the per-session message history on the first run of a session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation so it survives Streamlit reruns.
# Each entry is expected to be a {"role": ..., "content": ...} mapping.
for entry in st.session_state.messages:
    role, text = entry["role"], entry["content"]
    with st.chat_message(role):
        st.markdown(text)
137
 
138
# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):
    # Echo the user's message in the chat area.
    with st.chat_message("user"):
        st.markdown(prompt)
    # BUGFIX: store a role/content dict, not a ("user", prompt) tuple —
    # the replay loop above indexes entries with ["role"]/["content"],
    # so tuples crashed on the very next rerun.
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Route the conversation to the selected backend.
    if selected_model == "Nous-Hermes-2-Yi-34B":
        stream = interact_with_together_api(st.session_state.messages)
    else:
        # BUGFIX: the old code discarded this call's result, leaving
        # `stream` unbound (NameError) for every non-Together model.
        stream = interact_with_huggingface_model(
            st.session_state.messages, model_links[selected_model]
        )

    # Render the streamed response. Each chunk is assumed to be the
    # latest token string from the stream — TODO confirm chunk shape
    # against interact_with_together_api.
    with st.chat_message("assistant"):
        response = ""
        for chunk in stream or []:  # tolerate a None/empty backend result
            response = chunk
        st.markdown(response)
        st.session_state.messages.append({"role": "assistant", "content": response})