ariankhalfani committed on
Commit 6116c39 · verified · 1 Parent(s): 7913569

Create app.py

Files changed (1)
  1. app.py +214 -0
app.py ADDED
@@ -0,0 +1,214 @@
import os
import requests
import time
import streamlit as st

# Get the Hugging Face API Token from environment variables
HF_API_TOKEN = os.getenv("HF_API_KEY")
if not HF_API_TOKEN:
    raise ValueError("Hugging Face API Token is not set in the environment variables.")

# Hugging Face API URLs and headers for models
MISTRAL_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"
MINICHAT_API_URL = "https://api-inference.huggingface.co/models/GeneZC/MiniChat-2-3B"
DIALOGPT_API_URL = "https://api-inference.huggingface.co/models/microsoft/DialoGPT-large"
PHI3_API_URL = "https://api-inference.huggingface.co/models/microsoft/Phi-3-mini-4k-instruct"
META_LLAMA_70B_API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct"
META_LLAMA_8B_API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
GEMMA_27B_API_URL = "https://api-inference.huggingface.co/models/google/gemma-2-27b"
GEMMA_27B_IT_API_URL = "https://api-inference.huggingface.co/models/google/gemma-2-27b-it"
HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
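
# Note: each query helper below POSTs a JSON payload of the form
# {"inputs": "<prompt>"}. For text-generation models the Inference API
# usually replies with a list like [{"generated_text": "..."}]; while a
# model is still loading it may instead reply with a dict such as
# {"error": "... is currently loading", "estimated_time": ...}, which is
# what the per-model parsing further down accounts for.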

def query_mistral(payload):
    response = requests.post(MISTRAL_API_URL, headers=HEADERS, json=payload)
    data = response.json()
    st.write(f"Mistral API response: {data}")  # Debugging log
    return data

def query_minichat(payload):
    response = requests.post(MINICHAT_API_URL, headers=HEADERS, json=payload)
    return response.json()

def query_dialogpt(payload):
    response = requests.post(DIALOGPT_API_URL, headers=HEADERS, json=payload)
    return response.json()

def query_phi3(payload):
    response = requests.post(PHI3_API_URL, headers=HEADERS, json=payload)
    return response.json()

def query_meta_llama_70b(payload):
    response = requests.post(META_LLAMA_70B_API_URL, headers=HEADERS, json=payload)
    return response.json()

def query_meta_llama_8b(payload):
    response = requests.post(META_LLAMA_8B_API_URL, headers=HEADERS, json=payload)
    return response.json()

def query_gemma_27b(payload):
    response = requests.post(GEMMA_27B_API_URL, headers=HEADERS, json=payload)
    return response.json()

def query_gemma_27b_it(payload):
    response = requests.post(GEMMA_27B_IT_API_URL, headers=HEADERS, json=payload)
    return response.json()
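
# Editorial sketch (hypothetical helper, not wired into the branches below):
# the duplicated response handling in the per-model blocks further down could
# share one best-effort parser along these lines.
def extract_generated_text(response, default="No response"):
    """Pull 'generated_text' out of either response shape the API returns."""
    if isinstance(response, dict):
        return response.get("generated_text", default)
    if isinstance(response, list) and response:
        return response[0].get("generated_text", default)
    return default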

def count_tokens(text):
    # Rough approximation: counts whitespace-separated words, not model tokens
    return len(text.split())

# Token limit handling. Streamlit re-runs this script on every interaction,
# so plain module-level globals would reset between messages; the counters
# live in st.session_state so they persist for the session.
MAX_TOKENS_PER_MINUTE = 1000
if "token_count" not in st.session_state:
    st.session_state.token_count = 0
if "start_time" not in st.session_state:
    st.session_state.start_time = time.time()

def handle_token_limit(text):
    current_time = time.time()
    if current_time - st.session_state.start_time > 60:
        st.session_state.token_count = 0
        st.session_state.start_time = current_time
    st.session_state.token_count += count_tokens(text)
    if st.session_state.token_count > MAX_TOKENS_PER_MINUTE:
        raise ValueError("Token limit exceeded. Please wait before sending more messages.")

def add_message_to_conversation(user_message, bot_message, model_name):
    st.session_state.conversation.append((user_message, bot_message, model_name))

# Streamlit app
st.set_page_config(page_title="Multi-LLM Chatbot Interface", layout="wide")
st.title("Multi-LLM Chatbot Interface")
st.write("Multi-LLM Chatbot Interface by Thariq Arian")

# Single source of truth for the model list, used for both the history
# initialization and the dropdown below
MODEL_NAMES = [
    "Mistral-8x7B",
    "Meta-Llama-3-70B-Instruct",
    "Meta-Llama-3-8B-Instruct",
    "MiniChat-2-3B",
    "DialoGPT (GPT-2-1.5B)",
    "Phi-3-mini-4k-instruct",
    "Gemma-2-27B",
    "Gemma-2-27B-IT",
]

# Initialize session state for conversation and model history
if "conversation" not in st.session_state:
    st.session_state.conversation = []
if "model_history" not in st.session_state:
    st.session_state.model_history = {model: [] for model in MODEL_NAMES}

# Dropdown for LLM selection
llm_selection = st.selectbox("Select Language Model", MODEL_NAMES)

# User input for question
question = st.text_input("Question", placeholder="Enter your question here...")

# Handle user input and LLM response
if st.button("Send") and question:
    try:
        handle_token_limit(question)
        with st.spinner("Waiting for the model to respond..."):
            chat_history = " ".join(st.session_state.model_history[llm_selection]) + f"User: {question}\n"
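            # The prompt is the running plain-text transcript for the selected
            # model, e.g. "User: hi\nMistral-8x7B: hello!\n User: <new question>\n"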
            if llm_selection == "Mistral-8x7B":
                mistral_response = query_mistral({"inputs": chat_history})
                if isinstance(mistral_response, list) and len(mistral_response) > 0:
                    mistral_answer = mistral_response[0].get("generated_text", "No response")
                else:
                    mistral_answer = "No response"
                add_message_to_conversation(question, mistral_answer, llm_selection)
                st.session_state.model_history[llm_selection].append(f"User: {question}\nMistral-8x7B: {mistral_answer}\n")
            elif llm_selection == "Meta-Llama-3-70B-Instruct":
                meta_llama_70b_response = query_meta_llama_70b({"inputs": chat_history})
                if isinstance(meta_llama_70b_response, dict) and "generated_text" in meta_llama_70b_response:
                    meta_llama_70b_answer = meta_llama_70b_response["generated_text"]
                elif isinstance(meta_llama_70b_response, list) and len(meta_llama_70b_response) > 0:
                    meta_llama_70b_answer = meta_llama_70b_response[0].get("generated_text", "No response")
                else:
                    meta_llama_70b_answer = "No response"
                add_message_to_conversation(question, meta_llama_70b_answer, llm_selection)
                st.session_state.model_history[llm_selection].append(f"User: {question}\nMeta-Llama-3-70B-Instruct: {meta_llama_70b_answer}\n")
            elif llm_selection == "Meta-Llama-3-8B-Instruct":
                meta_llama_8b_response = query_meta_llama_8b({"inputs": chat_history})
                if isinstance(meta_llama_8b_response, dict) and "generated_text" in meta_llama_8b_response:
                    meta_llama_8b_answer = meta_llama_8b_response["generated_text"]
                elif isinstance(meta_llama_8b_response, list) and len(meta_llama_8b_response) > 0:
                    meta_llama_8b_answer = meta_llama_8b_response[0].get("generated_text", "No response")
                else:
                    meta_llama_8b_answer = "No response"
                add_message_to_conversation(question, meta_llama_8b_answer, llm_selection)
                st.session_state.model_history[llm_selection].append(f"User: {question}\nMeta-Llama-3-8B-Instruct: {meta_llama_8b_answer}\n")
            elif llm_selection == "MiniChat-2-3B":
                minichat_response = query_minichat({"inputs": chat_history})
                # Guard with isinstance: the "model is loading" reply is a dict,
                # normal generation replies are lists
                if isinstance(minichat_response, dict) and "error" in minichat_response and "is currently loading" in minichat_response["error"]:
                    minichat_answer = f"Model is loading, please wait {minichat_response.get('estimated_time', 'a few')} seconds."
                elif isinstance(minichat_response, list) and len(minichat_response) > 0:
                    minichat_answer = minichat_response[0].get("generated_text", "No response")
                else:
                    minichat_answer = "No response"
                add_message_to_conversation(question, minichat_answer, llm_selection)
                st.session_state.model_history[llm_selection].append(f"User: {question}\nMiniChat-2-3B: {minichat_answer}\n")
            elif llm_selection == "DialoGPT (GPT-2-1.5B)":
                dialogpt_response = query_dialogpt({"inputs": chat_history})
                if isinstance(dialogpt_response, dict) and "generated_text" in dialogpt_response:
                    dialogpt_answer = dialogpt_response["generated_text"]
                elif isinstance(dialogpt_response, list) and len(dialogpt_response) > 0:
                    dialogpt_answer = dialogpt_response[0].get("generated_text", "No response")
                else:
                    dialogpt_answer = "No response"
                add_message_to_conversation(question, dialogpt_answer, llm_selection)
                st.session_state.model_history[llm_selection].append(f"User: {question}\nDialoGPT (GPT-2-1.5B): {dialogpt_answer}\n")
            elif llm_selection == "Phi-3-mini-4k-instruct":
                phi3_response = query_phi3({"inputs": chat_history})
                if isinstance(phi3_response, list) and len(phi3_response) > 0:
                    phi3_answer = phi3_response[0].get("generated_text", "No response")
                else:
                    phi3_answer = "No response"
                add_message_to_conversation(question, phi3_answer, llm_selection)
                st.session_state.model_history[llm_selection].append(f"User: {question}\nPhi-3-mini-4k-instruct: {phi3_answer}\n")
            elif llm_selection == "Gemma-2-27B":
                gemma_response = query_gemma_27b({"inputs": chat_history})
                if isinstance(gemma_response, dict) and "generated_text" in gemma_response:
                    gemma_answer = gemma_response["generated_text"]
                elif isinstance(gemma_response, list) and len(gemma_response) > 0:
                    gemma_answer = gemma_response[0].get("generated_text", "No response")
                else:
                    gemma_answer = "No response"
                add_message_to_conversation(question, gemma_answer, llm_selection)
                st.session_state.model_history[llm_selection].append(f"User: {question}\nGemma-2-27B: {gemma_answer}\n")
            elif llm_selection == "Gemma-2-27B-IT":
                gemma_27b_it_response = query_gemma_27b_it({"inputs": chat_history})
                if isinstance(gemma_27b_it_response, dict) and "generated_text" in gemma_27b_it_response:
                    gemma_27b_it_answer = gemma_27b_it_response["generated_text"]
                elif isinstance(gemma_27b_it_response, list) and len(gemma_27b_it_response) > 0:
                    gemma_27b_it_answer = gemma_27b_it_response[0].get("generated_text", "No response")
                else:
                    gemma_27b_it_answer = "No response"
                add_message_to_conversation(question, gemma_27b_it_answer, llm_selection)
                st.session_state.model_history[llm_selection].append(f"User: {question}\nGemma-2-27B-IT: {gemma_27b_it_answer}\n")
    except ValueError as e:
        st.error(str(e))

# Custom CSS for chat bubbles
st.markdown(
    """
    <style>
    .chat-bubble {
        padding: 10px 14px;
        border-radius: 14px;
        margin-bottom: 10px;
        display: inline-block;
        max-width: 80%;
        color: black;
    }
    .chat-bubble.user {
        background-color: #dcf8c6;
        align-self: flex-end;
    }
    .chat-bubble.bot {
        background-color: #fff;
        align-self: flex-start;
    }
    .chat-container {
        display: flex;
        flex-direction: column;
        gap: 10px;
        margin-top: 20px;
    }
    </style>
    """,
    unsafe_allow_html=True,
)

# Display the conversation
st.write('<div class="chat-container">', unsafe_allow_html=True)
for user_message, bot_message, model_name in st.session_state.conversation:
    st.write(f'<div class="chat-bubble user">You: {user_message}</div>', unsafe_allow_html=True)
    st.write(f'<div class="chat-bubble bot">{model_name}: {bot_message}</div>', unsafe_allow_html=True)
st.write('</div>', unsafe_allow_html=True)
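
To try the app locally, save the file as app.py, set the HF_API_KEY environment variable to a valid Hugging Face token (the name the code reads above), and start it with: streamlit run app.py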