ngebodh committed
Commit 002b092 • 1 Parent(s): 2398106

Updated model links, added new model

Added new Llama 3 model. Updated Gemma links.

Files changed (1):
  1. app.py +59 -17
app.py CHANGED
@@ -3,7 +3,7 @@
 @email: nigel.gebodh@gmail.com
 
 """
-
+import numpy as np
 import streamlit as st
 from openai import OpenAI
 import os
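Note on this hunk: the new numpy import exists only to draw a random index for the fallback dog image in the error handler added further down. If one wanted to avoid the dependency, the standard library would do the same job — a hypothetical simplification, not what this commit does:

import random

# Placeholder; app.py defines the full random_dog list later in the file.
random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg"]

# random.choice picks one element uniformly from a non-empty sequence,
# equivalent to indexing with np.random.randint(len(random_dog)).
random_dog_pick = 'https://random.dog/' + random.choice(random_dog)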
@@ -26,9 +26,10 @@ client = OpenAI(
 
 #Create supported models
 model_links ={
-    "Mistral":"mistralai/Mistral-7B-Instruct-v0.2",
-    "Gemma-7B":"google/gemma-7b-it",
-    "Gemma-2B":"google/gemma-2b-it",
+    "Meta-Llama-3-8B":"meta-llama/Meta-Llama-3-8B-Instruct",
+    "Mistral-7B":"mistralai/Mistral-7B-Instruct-v0.2",
+    "Gemma-7B":"google/gemma-1.1-7b-it",
+    "Gemma-2B":"google/gemma-1.1-2b-it",
     "Zephyr-7B-β":"HuggingFaceH4/zephyr-7b-beta",
 
 }
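For orientation: the keys of model_links are the display names offered in the app's model selector, and the values are Hugging Face repo ids passed to the OpenAI-compatible client as the model argument. A minimal sketch of how a selection resolves to a request — the base_url and HF_TOKEN wiring is an assumption here, since the client construction sits outside this diff:

import os
from openai import OpenAI

# Assumed setup against Hugging Face's OpenAI-compatible inference endpoint;
# app.py's actual client construction is not shown in this diff.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("HF_TOKEN", ""),
)

model_links = {"Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct"}
selected_model = "Meta-Llama-3-8B"         # display name chosen in the UI

reply = client.chat.completions.create(
    model=model_links[selected_model],     # repo id, not the display name
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=64,
)
print(reply.choices[0].message.content)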
@@ -63,9 +64,30 @@ model_info ={
     is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
     that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
     'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
-
+    "Meta-Llama-3-8B":
+    {'description':"""The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+    \nIt was created by the [**Meta AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
+    'logo':'Llama_logo.png'},
 }
 
+
+#Random dog images for error message
+random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
+              "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
+              "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
+              "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
+              "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
+              "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
+              "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
+              "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
+              "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
+              "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
+              "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
+              "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
+              "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
+
+
 def reset_conversation():
     '''
     Resets Conversation
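The new model_info entry mirrors the shape of the existing ones: a markdown description plus a logo. Note that 'Llama_logo.png' is a local path rather than a URL, so the file has to be shipped with the Space. The diff doesn't show where these fields are rendered; a hedged sketch of the usual Streamlit pattern:

import streamlit as st

# Hypothetical model-card rendering; the field names match the diff,
# but the sidebar layout is an assumption.
info = model_info[selected_model]
st.sidebar.image(info['logo'])            # accepts a local path or a URL
st.sidebar.markdown(info['description'])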
@@ -148,16 +170,36 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
 
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
-        stream = client.chat.completions.create(
-            model=model_links[selected_model],
-            messages=[
-                {"role": m["role"], "content": m["content"]}
-                for m in st.session_state.messages
-            ],
-            temperature=temp_values,#0.5,
-            stream=True,
-            max_tokens=3000,
-        )
-
-        response = st.write_stream(stream)
+
+        try:
+            stream = client.chat.completions.create(
+                model=model_links[selected_model],
+                messages=[
+                    {"role": m["role"], "content": m["content"]}
+                    for m in st.session_state.messages
+                ],
+                temperature=temp_values,#0.5,
+                stream=True,
+                max_tokens=3000,
+            )
+
+            response = st.write_stream(stream)
+
+        except Exception as e:
+            # st.empty()
+            response = "😵‍💫 Looks like someone unplugged something!\
+            \n Either the model space is being updated or something is down.\
+            \n\
+            \n Try again later. \
+            \n\
+            \n Here's a random pic of a 🐶:"
+            st.write(response)
+            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
+            st.image(random_dog_pick)
+            st.write("This was the error message:")
+            st.write(e)
+
     st.session_state.messages.append({"role": "assistant", "content": response})
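The reworked response block wraps the streaming call in try/except, so an unavailable backend now degrades into a friendly message, a random dog picture, and the raw error, instead of an uncaught stack trace. A self-contained sketch of the same pattern outside Streamlit, again assuming the OpenAI-compatible Hugging Face endpoint (URL and token are placeholders):

from openai import OpenAI

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",  # assumed endpoint
    api_key="hf_xxx",                                    # placeholder token
)

def stream_reply(model_id, messages, temperature=0.5):
    """Stream a chat completion, falling back to an apology on failure."""
    try:
        stream = client.chat.completions.create(
            model=model_id,
            messages=messages,
            temperature=temperature,
            stream=True,
            max_tokens=3000,
        )
        # Streamed chunks carry incremental text in choices[0].delta.content;
        # it can be None for role-only deltas, hence the `or ""`.
        return "".join(chunk.choices[0].delta.content or "" for chunk in stream)
    except Exception as e:
        return f"Model backend unavailable ({e}); try again later."

Catching bare Exception keeps the app alive through any failure mode; a narrower catch such as openai.APIError would let rate limits be reported differently from outages, at the cost of missing unexpected errors.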
 