hysts (HF staff) committed
Commit 2201358
1 Parent(s): 6b720a2

Disable api

Files changed (3):
  1. app_allenai.py +4 -1
  2. app_experimental.py +82 -96
  3. app_marco_o1.py +4 -1
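
The commit is the same one-line fix applied three times: every event handler registered on a Blocks app is stripped of its auto-generated API endpoint by setting api_name = False. A minimal sketch of the pattern, assuming a Gradio 4.x release where Blocks.fns is a mapping from function ids to event registrations (in older releases it is a plain list, so the loop would iterate demo.fns directly):

import gradio as gr

with gr.Blocks() as demo:
    box = gr.Textbox()
    btn = gr.Button("Echo")
    # Each event registration below becomes an entry in demo.fns and,
    # by default, a callable endpoint on the Space's API.
    btn.click(lambda s: s, box, box)

# Setting api_name to False removes the endpoint from the generated API
# (the "Use via API" page and the programmatic /api routes).
for fn in demo.fns.values():
    fn.api_name = False

if __name__ == "__main__":
    demo.launch()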
app_allenai.py CHANGED
@@ -5,5 +5,8 @@ import transformers_gradio
 demo = gr.load(name="allenai/Llama-3.1-Tulu-3-8B", src=transformers_gradio.registry)
 demo.fn = spaces.GPU()(demo.fn)
 
+for fn in demo.fns.values():
+    fn.api_name = False
+
 if __name__ == "__main__":
     demo.launch()
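
For the two gr.load-based apps the diff is identical apart from the model name. A quick way to check the result is to inspect the schema Gradio generates for the "Use via API" page; the sketch below assumes Blocks.get_api_info() is available in the installed Gradio version (it backs the /info route):

# With every fn's api_name set to False, the generated schema should be empty.
info = demo.get_api_info()
print(info["named_endpoints"])    # expected: {}
print(info["unnamed_endpoints"])  # expected: {}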
app_experimental.py CHANGED
@@ -1,12 +1,14 @@
 import os
-import gradio as gr
-from typing import List, Dict, Callable
 import random
+from typing import Dict, List
+
 import google.generativeai as genai
-from anthropic import Anthropic
+import gradio as gr
 import openai
+from anthropic import Anthropic
 from openai import OpenAI  # Add explicit OpenAI import
 
+
 def get_all_models():
     """Get all available models from the registries."""
     return [
@@ -28,8 +30,10 @@ def get_all_models():
         "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
     ]
 
+
 def generate_discussion_prompt(original_question: str, previous_responses: List[str]) -> str:
-    """Generate a prompt for models to discuss and build upon previous responses."""
+    """Generate a prompt for models to discuss and build upon previous
+    responses."""
     prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
 
 Previous responses from other AI models:
@@ -44,6 +48,7 @@ Please provide your perspective while:
 Keep your response focused and concise (max 3-4 paragraphs)."""
     return prompt
 
+
 def generate_consensus_prompt(original_question: str, discussion_history: List[str]) -> str:
     """Generate a prompt for final consensus building."""
     return f"""Review this multi-AI discussion about: "{original_question}"
@@ -59,67 +64,64 @@ As a final synthesizer, please:
 
 Keep the final consensus concise but complete."""
 
-def chat_with_openai(model: str, messages: List[Dict], api_key: str) -> str:
+
+def chat_with_openai(model: str, messages: List[Dict], api_key: str | None) -> str:
     import openai
+
     client = openai.OpenAI(api_key=api_key)
-    response = client.chat.completions.create(
-        model=model,
-        messages=messages
-    )
+    response = client.chat.completions.create(model=model, messages=messages)
     return response.choices[0].message.content
 
-def chat_with_anthropic(messages: List[Dict], api_key: str) -> str:
+
+def chat_with_anthropic(messages: List[Dict], api_key: str | None) -> str:
     """Chat with Anthropic's Claude model."""
     client = Anthropic(api_key=api_key)
-    response = client.messages.create(
-        model="claude-3-sonnet-20240229",
-        messages=messages,
-        max_tokens=1024
-    )
+    response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
     return response.content[0].text
 
-def chat_with_gemini(messages: List[Dict], api_key: str) -> str:
+
+def chat_with_gemini(messages: List[Dict], api_key: str | None) -> str:
     """Chat with Gemini Pro model."""
     genai.configure(api_key=api_key)
-    model = genai.GenerativeModel('gemini-pro')
-
+    model = genai.GenerativeModel("gemini-pro")
+
     # Convert messages to Gemini format
     gemini_messages = []
     for msg in messages:
         role = "user" if msg["role"] == "user" else "model"
         gemini_messages.append({"role": role, "parts": [msg["content"]]})
-
+
     response = model.generate_content([m["parts"][0] for m in gemini_messages])
     return response.text
 
-def chat_with_sambanova(messages: List[Dict], api_key: str, model_name: str = "Llama-3.2-90B-Vision-Instruct") -> str:
+
+def chat_with_sambanova(
+    messages: List[Dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
+) -> str:
     """Chat with SambaNova's models using their OpenAI-compatible API."""
     client = openai.OpenAI(
         api_key=api_key,
         base_url="https://api.sambanova.ai/v1",
     )
-
+
     response = client.chat.completions.create(
-        model=model_name,  # Use the specific model name passed in
-        messages=messages,
-        temperature=0.1,
-        top_p=0.1
+        model=model_name, messages=messages, temperature=0.1, top_p=0.1  # Use the specific model name passed in
     )
     return response.choices[0].message.content
 
-def chat_with_hyperbolic(messages: List[Dict], api_key: str, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct") -> str:
+
+def chat_with_hyperbolic(
+    messages: List[Dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
+) -> str:
     """Chat with Hyperbolic's models using their OpenAI-compatible API."""
-    client = OpenAI(
-        api_key=api_key,
-        base_url="https://api.hyperbolic.xyz/v1"
-    )
-
+    client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
+
     # Add system message to the start of the messages list
     full_messages = [
         {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
-        *messages
+        *messages,
     ]
-
+
     response = client.chat.completions.create(
         model=model_name,  # Use the specific model name passed in
         messages=full_messages,
@@ -128,152 +130,138 @@ def chat_with_hyperbolic(messages: List[Dict], api_key: str, model_name: str = "
     )
     return response.choices[0].message.content
 
+
 def multi_model_consensus(
-    question: str,
-    selected_models: List[str],
-    rounds: int = 3,
-    progress: gr.Progress = gr.Progress()
-) -> tuple[str, List[Dict]]:
+    question: str, selected_models: List[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
+) -> list[tuple[str, str]]:
     if not selected_models:
-        return "Please select at least one model to chat with.", []
-
+        raise gr.Error("Please select at least one model to chat with.")
+
     chat_history = []
     discussion_history = []
-
+
     # Initial responses
     progress(0, desc="Getting initial responses...")
     initial_responses = []
     for i, model in enumerate(selected_models):
         provider, model_name = model.split(": ", 1)
-
+
         try:
             if provider == "Anthropic":
                 api_key = os.getenv("ANTHROPIC_API_KEY")
-                response = chat_with_anthropic(
-                    messages=[{"role": "user", "content": question}],
-                    api_key=api_key
-                )
+                response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
             elif provider == "SambaNova":
                 api_key = os.getenv("SAMBANOVA_API_KEY")
                 response = chat_with_sambanova(
                     messages=[
                         {"role": "system", "content": "You are a helpful assistant"},
-                        {"role": "user", "content": question}
+                        {"role": "user", "content": question},
                     ],
-                    api_key=api_key
+                    api_key=api_key,
                 )
             elif provider == "Hyperbolic":  # Add Hyperbolic case
                 api_key = os.getenv("HYPERBOLIC_API_KEY")
-                response = chat_with_hyperbolic(
-                    messages=[{"role": "user", "content": question}],
-                    api_key=api_key
-                )
+                response = chat_with_hyperbolic(messages=[{"role": "user", "content": question}], api_key=api_key)
             else:  # Gemini
                 api_key = os.getenv("GEMINI_API_KEY")
-                response = chat_with_gemini(
-                    messages=[{"role": "user", "content": question}],
-                    api_key=api_key
-                )
-
+                response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
+
             initial_responses.append(f"{model}: {response}")
             discussion_history.append(f"Initial response from {model}:\n{response}")
             chat_history.append((f"Initial response from {model}", response))
         except Exception as e:
             chat_history.append((f"Error from {model}", str(e)))
-
+
     # Discussion rounds
     for round_num in range(rounds):
         progress((round_num + 1) / (rounds + 2), desc=f"Discussion round {round_num + 1}...")
         round_responses = []
-
+
         random.shuffle(selected_models)  # Randomize order each round
         for model in selected_models:
            provider, model_name = model.split(": ", 1)
-
+
             try:
                 discussion_prompt = generate_discussion_prompt(question, discussion_history)
                 if provider == "Anthropic":
                     api_key = os.getenv("ANTHROPIC_API_KEY")
                     response = chat_with_anthropic(
-                        messages=[{"role": "user", "content": discussion_prompt}],
-                        api_key=api_key
+                        messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
                     )
                 elif provider == "SambaNova":
                     api_key = os.getenv("SAMBANOVA_API_KEY")
                     response = chat_with_sambanova(
                         messages=[
                             {"role": "system", "content": "You are a helpful assistant"},
-                            {"role": "user", "content": discussion_prompt}
+                            {"role": "user", "content": discussion_prompt},
                         ],
-                        api_key=api_key
+                        api_key=api_key,
                     )
                 elif provider == "Hyperbolic":  # Add Hyperbolic case
                     api_key = os.getenv("HYPERBOLIC_API_KEY")
                     response = chat_with_hyperbolic(
-                        messages=[{"role": "user", "content": discussion_prompt}],
-                        api_key=api_key
+                        messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
                     )
                 else:  # Gemini
                     api_key = os.getenv("GEMINI_API_KEY")
                     response = chat_with_gemini(
-                        messages=[{"role": "user", "content": discussion_prompt}],
-                        api_key=api_key
+                        messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
                     )
-
+
                 round_responses.append(f"{model}: {response}")
                 discussion_history.append(f"Round {round_num + 1} - {model}:\n{response}")
                 chat_history.append((f"Round {round_num + 1} - {model}", response))
             except Exception as e:
                 chat_history.append((f"Error from {model} in round {round_num + 1}", str(e)))
-
+
     # Final consensus
     progress(0.9, desc="Building final consensus...")
     model = selected_models[0]
     provider, model_name = model.split(": ", 1)
-
+
     try:
         consensus_prompt = generate_consensus_prompt(question, discussion_history)
         if provider == "Anthropic":
             api_key = os.getenv("ANTHROPIC_API_KEY")
             final_consensus = chat_with_anthropic(
-                messages=[{"role": "user", "content": consensus_prompt}],
-                api_key=api_key
+                messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
             )
         elif provider == "SambaNova":
             api_key = os.getenv("SAMBANOVA_API_KEY")
             final_consensus = chat_with_sambanova(
                 messages=[
                     {"role": "system", "content": "You are a helpful assistant"},
-                    {"role": "user", "content": consensus_prompt}
+                    {"role": "user", "content": consensus_prompt},
                 ],
-                api_key=api_key
+                api_key=api_key,
             )
         elif provider == "Hyperbolic":  # Add Hyperbolic case
             api_key = os.getenv("HYPERBOLIC_API_KEY")
             final_consensus = chat_with_hyperbolic(
-                messages=[{"role": "user", "content": consensus_prompt}],
-                api_key=api_key
+                messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
             )
         else:  # Gemini
             api_key = os.getenv("GEMINI_API_KEY")
             final_consensus = chat_with_gemini(
-                messages=[{"role": "user", "content": consensus_prompt}],
-                api_key=api_key
+                messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
            )
     except Exception as e:
         final_consensus = f"Error getting consensus from {model}: {str(e)}"
-
+
     chat_history.append(("Final Consensus", final_consensus))
-
+
     progress(1.0, desc="Done!")
     return chat_history
 
+
 with gr.Blocks() as demo:
     gr.Markdown("# Experimental Multi-Model Consensus Chat")
-    gr.Markdown("""Select multiple models to collaborate on answering your question.
+    gr.Markdown(
+        """Select multiple models to collaborate on answering your question.
     The models will discuss with each other and attempt to reach a consensus.
-    Maximum 3 models can be selected at once.""")
-
+    Maximum 3 models can be selected at once."""
+    )
+
     with gr.Row():
         with gr.Column():
             model_selector = gr.Dropdown(
@@ -282,7 +270,7 @@ with gr.Blocks() as demo:
                 label="Select Models (max 3)",
                 info="Choose up to 3 models to participate in the discussion",
                 value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
-                max_choices=3
+                max_choices=3,
             )
             rounds_slider = gr.Slider(
                 minimum=1,
@@ -290,22 +278,20 @@ with gr.Blocks() as demo:
                 value=1,
                 step=1,
                 label="Discussion Rounds",
-                info="Number of rounds of discussion between models"
+                info="Number of rounds of discussion between models",
             )
-
+
     chatbot = gr.Chatbot(height=600, label="Multi-Model Discussion")
     msg = gr.Textbox(label="Your Question", placeholder="Ask a question for the models to discuss...")
-
+
     def respond(message, selected_models, rounds):
         chat_history = multi_model_consensus(message, selected_models, rounds)
         return chat_history
-
-    msg.submit(
-        respond,
-        [msg, model_selector, rounds_slider],
-        [chatbot],
-        api_name="consensus_chat"
-    )
+
+    msg.submit(respond, [msg, model_selector, rounds_slider], [chatbot], api_name="consensus_chat")
+
+    for fn in demo.fns.values():
+        fn.api_name = False
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
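
Most of this file's churn is a formatting pass (import sorting and black-style line wrapping); the two behavioral changes are the api_name = False loop over demo.fns and raise gr.Error(...) replacing the old error-tuple return, so an empty model selection now surfaces as an error modal in the UI instead of a malformed chatbot value. A minimal sketch of that error-handling pattern:

import gradio as gr

def respond(message, selected_models, rounds):
    # Raising gr.Error aborts the event and shows the message in the frontend,
    # which is cleaner than returning an error string the Chatbot can't render.
    if not selected_models:
        raise gr.Error("Please select at least one model to chat with.")
    return [(message, "placeholder reply")]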
app_marco_o1.py CHANGED
@@ -5,5 +5,8 @@ import transformers_gradio
 demo = gr.load(name="AIDC-AI/Marco-o1", src=transformers_gradio.registry)
 demo.fn = spaces.GPU()(demo.fn)
 
+for fn in demo.fns.values():
+    fn.api_name = False
+
 if __name__ == "__main__":
     demo.launch()
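
To confirm the effect from outside the Space, the official client can be pointed at it; view_api() prints the endpoint summary, which should report no usable endpoints once every api_name is False (the Space id below is a hypothetical placeholder):

from gradio_client import Client

client = Client("user/space-name")  # hypothetical Space id
client.view_api()  # expected: no named (or unnamed) API endpoints listed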