GaneshK committed on
Commit
2066897
1 Parent(s): 0e05c7e

Update app.py: replace the Gradio + InferenceClient chat UI with a Streamlit chat app that calls the models through Hugging Face's OpenAI-compatible Inference API endpoint.

Files changed (1)
  1. app.py +217 -47
app.py CHANGED
@@ -1,51 +1,221 @@
- from huggingface_hub import InferenceClient
- import gradio as gr
- client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
-
- def generate(
-     prompt, history, temperature=0.2, max_new_tokens=3000, top_p=0.95, repetition_penalty=1.0,
- ):
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-
-     generate_kwargs = dict(
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         do_sample=True,
-         seed=42,
-     )
-
-     formatted_prompt = format_prompt(prompt, history)
-
-     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     output = ""
-
-     for response in stream:
-         output += response.token.text
-         yield output
-     return output
-
-
- mychatbot = gr.Chatbot(
-     avatar_images=["./user.png", "./bot.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
-
- demo = gr.ChatInterface(fn=generate,
-                         chatbot=mychatbot,
-                         title="Mistral-Chat",
-                         retry_btn=None,
-                         undo_btn=None
- )
-
- demo.queue().launch(show_api=False)
 
+
+ import streamlit as st
+ from openai import OpenAI
+ import os
+ from dotenv import load_dotenv
+ load_dotenv()
+
+
+ # Initialize the OpenAI-compatible client pointed at the Hugging Face Inference API
+ client = OpenAI(
+     base_url="https://api-inference.huggingface.co/v1",
+     api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # "hf_xxx" -- replace with your token
+ )
+
+
+ # Supported models
+ model_links = {
+     "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
+     "Mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+     # "Gemma-7B": "google/gemma-7b-it",
+     # "Gemma-2B": "google/gemma-2b-it",
+     # "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
+ }
+
+ # Info about each model, displayed in the sidebar (keys must match model_links)
+ model_info = {
+     "Mistral-7B": {
+         'description': """The Mistral 7B model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+             \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.**\n""",
+         'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
+     "Mixtral-8x7B": {
+         'description': """The Mixtral 8x7B model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+             \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-8x7b/) team and is based on a **Mixture-of-Experts (MoE)** architecture.\n""",
+         'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
+     # "Gemma-7B": {
+     #     'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+     #         \nIt was created by [**Google's AI team**](https://blog.google/technology/developers/gemma-open-models/) and has over **7 billion parameters.**\n""",
+     #     'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
+     # "Gemma-2B": {
+     #     'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+     #         \nIt was created by [**Google's AI team**](https://blog.google/technology/developers/gemma-open-models/) and has over **2 billion parameters.**\n""",
+     #     'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
+     # "Zephyr-7B": {
+     #     'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+     #         \nFrom Huggingface: Zephyr is a series of language models that are trained to act as helpful assistants. \
+     #         [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1) \
+     #         is the third model in the series, and is a fine-tuned version of google/gemma-7b \
+     #         that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO).\n""",
+     #     'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
+     # "Zephyr-7B-β": {
+     #     'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+     #         \nFrom Huggingface: Zephyr is a series of language models that are trained to act as helpful assistants. \
+     #         [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) \
+     #         is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
+     #         that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO).\n""",
+     #     'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
+ }
+
+
+ def reset_conversation():
+     '''Resets the conversation.'''
+     st.session_state.conversation = []
+     st.session_state.messages = []
+     return None
+
+
+ # Define the available models
+ models = list(model_links.keys())
+
+ # Sidebar dropdown for model selection
+ selected_model = st.sidebar.selectbox("Select Model", models)
+
+ # Temperature slider
+ temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
+
+ # Reset button to clear the conversation
+ st.sidebar.button('Reset Chat', on_click=reset_conversation)
+
+ # Model description
+ st.sidebar.write(f"You're now chatting with **{selected_model}**")
+ st.sidebar.markdown(model_info[selected_model]['description'])
+ st.sidebar.image(model_info[selected_model]['logo'])
+ # st.sidebar.markdown("*Generated content may be inaccurate or false.*")
+ # st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
+ # st.sidebar.markdown("\nRun into issues? Try the [back-up](https://huggingface.co/spaces/ngebodh/SimpleChatbot-Backup).")
+
+
+ # Clear the chat history whenever the selected model changes
+ if "prev_option" not in st.session_state:
+     st.session_state.prev_option = selected_model
+
+ if st.session_state.prev_option != selected_model:
+     st.session_state.messages = []
+     # st.write(f"Changed to {selected_model}")
+     st.session_state.prev_option = selected_model
+     reset_conversation()
+
+
+ # Pull in the model we want to use
+ repo_id = model_links[selected_model]
+
+ st.subheader(f'AI - {selected_model}')
+ # st.title(f'ChatBot Using {selected_model}')
+
+ # Set a default model
+ if selected_model not in st.session_state:
+     st.session_state[selected_model] = model_links[selected_model]
+
+ # Initialize chat history
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Display chat messages from history on app rerun
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+
+ # Accept user input
+ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
+
+     # Display user message in chat message container
+     with st.chat_message("user"):
+         st.markdown(prompt)
+     # Add user message to chat history
+     st.session_state.messages.append({"role": "user", "content": prompt})
+
+     # Display assistant response in chat message container
+     with st.chat_message("assistant"):
+         stream = client.chat.completions.create(
+             model=model_links[selected_model],
+             messages=[
+                 {"role": m["role"], "content": m["content"]}
+                 for m in st.session_state.messages
+             ],
+             temperature=temp_values,  # 0.5,
+             stream=True,
+             max_tokens=3000,
+         )
+         response = st.write_stream(stream)
+     st.session_state.messages.append({"role": "assistant", "content": response})
+
+
+ # Previous Gradio implementation, kept for reference:
+ # from huggingface_hub import InferenceClient
+ # import gradio as gr
+ # client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+ # def format_prompt(message, history):
+ #     prompt = "<s>"
+ #     for user_prompt, bot_response in history:
+ #         prompt += f"[INST] {user_prompt} [/INST]"
+ #         prompt += f" {bot_response}</s> "
+ #     prompt += f"[INST] {message} [/INST]"
+ #     return prompt

+ # def generate(
+ #     prompt, history, temperature=0.2, max_new_tokens=3000, top_p=0.95, repetition_penalty=1.0,
+ # ):
+ #     temperature = float(temperature)
+ #     if temperature < 1e-2:
+ #         temperature = 1e-2
+ #     top_p = float(top_p)

+ #     generate_kwargs = dict(
+ #         temperature=temperature,
+ #         max_new_tokens=max_new_tokens,
+ #         top_p=top_p,
+ #         repetition_penalty=repetition_penalty,
+ #         do_sample=True,
+ #         seed=42,
+ #     )

+ #     formatted_prompt = format_prompt(prompt, history)

+ #     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+ #     output = ""

+ #     for response in stream:
+ #         output += response.token.text
+ #         yield output
+ #     return output

+ # mychatbot = gr.Chatbot(
+ #     avatar_images=["./user.png", "./bot.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)

+ # demo = gr.ChatInterface(fn=generate,
+ #                         chatbot=mychatbot,
+ #                         title="Mistral-Chat",
+ #                         retry_btn=None,
+ #                         undo_btn=None
+ # )

+ # demo.queue().launch(show_api=False)
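
For a quick sanity check of the token and endpoint outside Streamlit, here is a minimal standalone sketch. It reuses the same HUGGINGFACEHUB_API_TOKEN environment variable, base_url, and model id that app.py above uses; the prompt text is only an example:

    import os
    from openai import OpenAI

    # Same OpenAI-compatible Hugging Face endpoint as app.py
    client = OpenAI(
        base_url="https://api-inference.huggingface.co/v1",
        api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
    )

    # Stream a single chat completion, mirroring the app's call
    stream = client.chat.completions.create(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        temperature=0.5,
        max_tokens=100,
        stream=True,
    )
    for chunk in stream:
        # delta.content can be None on some chunks, hence the `or ""`
        print(chunk.choices[0].delta.content or "", end="")

The app itself is launched with `streamlit run app.py`; `load_dotenv()` picks the token up from a local .env file containing a line like HUGGINGFACEHUB_API_TOKEN=hf_xxx.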