yashsarnaik23 committed
Commit f5b4803
1 Parent(s): d6ccd37

Update app.py

Files changed (1):
  1. app.py +193 -193
app.py CHANGED
@@ -1,194 +1,194 @@
 """ Simple Chatbot
 @author: YASH SARNAIK
 """
 import numpy as np
 import streamlit as st
 from openai import OpenAI
 import os
 import sys
 import base64
 from dotenv import load_dotenv, dotenv_values
 load_dotenv()


 # Initialize the client
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1",
-    api_key=os.environ.get('llama')  # "hf_xxx" # Replace with your token
+    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # "hf_xxx" # Replace with your token
 )


 # Create supported models
 model_links = {
     "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct",
     "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
     "Gemma-7B": "google/gemma-1.1-7b-it",
     "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
 }

 # Pull info about the model to display
 model_info = {
     "Mistral-7B": {
         'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
         'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
     "Gemma-7B": {
         'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nIt was created by [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) and has over **7 billion parameters.** \n""",
         'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
     "Zephyr-7B": {
         'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nFrom Hugging Face: \n\
         Zephyr is a series of language models that are trained to act as helpful assistants. \
         [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1) \
         is the third model in the series, and is a fine-tuned version of google/gemma-7b \
         that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
         'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
     "Zephyr-7B-β": {
         'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nFrom Hugging Face: \n\
         Zephyr is a series of language models that are trained to act as helpful assistants. \
         [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) \
         is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
         that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
         'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
     "Meta-Llama-3-8B": {
         'description': """The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nIt was created by the [**Meta AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
         'logo': 'Llama_logo.png'},
 }

 # Random dog images for error message
 random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
               "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
               "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
               "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
               "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
               "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
               "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
               "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
               "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
               "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
               "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
               "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
               "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]


 def reset_conversation():
     '''
     Resets Conversation
     '''
     st.session_state.conversation = []
     st.session_state.messages = []
     return None

 # Define the available models
 models = list(model_links.keys())

 # Create the sidebar with the dropdown for model selection
 selected_model = st.sidebar.selectbox("Select Model", models)

 # Create a temperature slider
 temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

 # Add reset button to clear conversation
 st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button

 # Create model description
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown(model_info[selected_model]['description'])
 st.sidebar.image(model_info[selected_model]['logo'])
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")

 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model

 if st.session_state.prev_option != selected_model:
     st.session_state.messages = []
     # st.write(f"Changed to {selected_model}")
     st.session_state.prev_option = selected_model
     reset_conversation()

 # Pull in the model we want to use
 repo_id = model_links[selected_model]

 st.markdown(f'<p style="font-weight:bold; text-align:center; font-size:48px;"> {selected_model}</p>', unsafe_allow_html=True)
 # st.title(f'ChatBot Using {selected_model}')

 # Set a default model
 if selected_model not in st.session_state:
     st.session_state[selected_model] = model_links[selected_model]

 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []

 # Display chat messages from history on app rerun
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])

 # Accept user input
 if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):

     # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})

     # Display assistant response in chat message container
     with st.chat_message("assistant"):

         try:
             stream = client.chat.completions.create(
                 model=model_links[selected_model],
                 messages=[
                     {"role": m["role"], "content": m["content"]}
                     for m in st.session_state.messages
                 ],
                 temperature=temp_values,  # 0.5,
                 stream=True,
                 max_tokens=3000,
             )

             response = st.write_stream(stream)

         except Exception as e:
             # st.empty()
             response = ("😵‍💫 Looks like someone unplugged something!"
                         "\nEither the model space is being updated or something is down."
                         "\n\nTry again later."
                         "\n\nHere's a random pic of a 🐶:")
             st.write(response)
             random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
             st.image(random_dog_pick)
             st.write("This was the error message:")
             st.write(e)

     st.session_state.messages.append({"role": "assistant", "content": response})
 
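The whole change is that one line: the OpenAI-compatible client now reads its Hugging Face token from the HUGGINGFACEHUB_API_TOKEN environment variable instead of a secret named 'llama'. As a minimal sketch (the make_client helper below is hypothetical, not part of the committed file), the same lookup can fail fast with a clear message when the secret is missing, rather than constructing a client with no usable key:

import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # picks up a local .env during development

def make_client() -> OpenAI:
    # Hypothetical fail-fast wrapper: same base_url and secret name as app.py,
    # but raises immediately if the token was never configured (e.g. the
    # Space secret or local .env entry is missing).
    token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
    if not token:
        raise RuntimeError(
            "HUGGINGFACEHUB_API_TOKEN is not set; add it as a Space secret "
            "or to a local .env file."
        )
    return OpenAI(base_url="https://api-inference.huggingface.co/v1", api_key=token)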