update full height
app.py CHANGED
@@ -27,12 +27,14 @@ with gr.Blocks(fill_height=True) as demo:
         gemini_interface = gr.load(
             name=gemini_model.value,
             src=gemini_gradio.registry,
+            full_height=True
         )
 
         def update_gemini_model(new_model):
             return gr.load(
                 name=new_model,
                 src=gemini_gradio.registry,
+                full_height=True
             )
 
         gemini_model.change(
@@ -70,14 +72,16 @@ with gr.Blocks(fill_height=True) as demo:
         chatgpt_interface = gr.load(
             name=model_choice.value,
             src=openai_gradio.registry,
-            accept_token=True
+            accept_token=True,
+            full_height=True
         )
 
         def update_model(new_model):
             return gr.load(
                 name=new_model,
                 src=openai_gradio.registry,
-                accept_token=True
+                accept_token=True,
+                full_height=True
             )
 
         model_choice.change(
@@ -103,14 +107,16 @@ with gr.Blocks(fill_height=True) as demo:
         claude_interface = gr.load(
             name=claude_model.value,
             src=anthropic_gradio.registry,
-            accept_token=True
+            accept_token=True,
+            full_height=True
         )
 
         def update_claude_model(new_model):
             return gr.load(
                 name=new_model,
                 src=anthropic_gradio.registry,
-                accept_token=True
+                accept_token=True,
+                full_height=True
             )
 
         claude_model.change(
@@ -139,7 +145,8 @@ with gr.Blocks(fill_height=True) as demo:
             name=llama_model.value,
             src=sambanova_gradio.registry,
             accept_token=True,
-            multimodal=True
+            multimodal=True,
+            full_height=True
         )
 
         def update_llama_model(new_model):
@@ -147,7 +154,8 @@ with gr.Blocks(fill_height=True) as demo:
                 name=new_model,
                 src=sambanova_gradio.registry,
                 accept_token=True,
-                multimodal=True
+                multimodal=True,
+                full_height=True
             )
 
         llama_model.change(
@@ -159,15 +167,17 @@ with gr.Blocks(fill_height=True) as demo:
         gr.Markdown("**Note:** You need to use a SambaNova API key from [SambaNova Cloud](https://cloud.sambanova.ai/).")
     with gr.Tab("Grok"):
         gr.load(
-
-
-
+            name='grok-beta',
+            src=xai_gradio.registry,
+            accept_token=True,
+            full_height=True
         )
     with gr.Tab("Qwen2.5 72B"):
         gr.load(
             name='Qwen/Qwen2.5-72B-Instruct',
             src=hyperbolic_gradio.registry,
-            accept_token=True
+            accept_token=True,
+            full_height=True
         )
         gr.Markdown("**Note:** You need to use a Hyperbolic API key from [Hyperbolic](https://app.hyperbolic.xyz/).")
     with gr.Tab("Perplexity"):
@@ -193,14 +203,16 @@ with gr.Blocks(fill_height=True) as demo:
         perplexity_interface = gr.load(
             name=perplexity_model.value,
             src=perplexity_gradio.registry,
-            accept_token=True
+            accept_token=True,
+            full_height=True
         )
 
         def update_perplexity_model(new_model):
             return gr.load(
                 name=new_model,
                 src=perplexity_gradio.registry,
-                accept_token=True
+                accept_token=True,
+                full_height=True
             )
 
         perplexity_model.change(
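For context, here is a minimal, self-contained sketch of the per-tab pattern this commit touches, using the ChatGPT tab as the example: a dropdown selects the model, gr.load pulls the chat interface from the provider's registry with accept_token and the newly added full_height, and the dropdown's change event rebuilds the interface. The dropdown choices and the exact .change wiring are assumptions for illustration (they are not visible in the diff hunks above), and it assumes the openai_gradio package is installed and that its registry accepts these keyword arguments, as the diff implies.

# Minimal sketch of the pattern this commit modifies; not the full app.
import gradio as gr
import openai_gradio

with gr.Blocks(fill_height=True) as demo:
    with gr.Tab("ChatGPT"):
        # Illustrative model choices only; the real app defines its own list.
        model_choice = gr.Dropdown(
            choices=["gpt-4o", "gpt-4o-mini"],
            value="gpt-4o",
            label="Select Model",
        )
        chatgpt_interface = gr.load(
            name=model_choice.value,
            src=openai_gradio.registry,
            accept_token=True,   # ask the user for their API key
            full_height=True     # let the loaded interface fill the tab
        )

        def update_model(new_model):
            # Rebuild the interface when a different model is picked,
            # mirroring the update_* helpers shown in the diff.
            return gr.load(
                name=new_model,
                src=openai_gradio.registry,
                accept_token=True,
                full_height=True
            )

        model_choice.change(
            fn=update_model,
            inputs=model_choice,
            outputs=chatgpt_interface,
        )

demo.launch()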