Spaces:
Runtime error
Runtime error
tricktreat
committed on
Commit
β’
ef7177c
1
Parent(s):
bff5b69
duplicate space
Browse files- app.py +1 -0
- models_server.py +4 -4
app.py
CHANGED
@@ -119,6 +119,7 @@ with gr.Blocks() as demo:
|
|
119 |
gr.Markdown("<h1><center>HuggingGPT</center></h1>")
|
120 |
gr.Markdown("<p align='center'><img src='https://i.ibb.co/qNH3Jym/logo.png' height='25' width='95'></p>")
|
121 |
gr.Markdown("<p align='center' style='font-size: 20px;'>A system to connect LLMs with ML community. See our <a href='https://github.com/microsoft/JARVIS'>Project</a> and <a href='http://arxiv.org/abs/2303.17580'>Paper</a>.</p>")
|
|
|
122 |
with gr.Row().style():
|
123 |
with gr.Column(scale=0.85):
|
124 |
openai_api_key = gr.Textbox(
|
|
|
119 |
gr.Markdown("<h1><center>HuggingGPT</center></h1>")
|
120 |
gr.Markdown("<p align='center'><img src='https://i.ibb.co/qNH3Jym/logo.png' height='25' width='95'></p>")
|
121 |
gr.Markdown("<p align='center' style='font-size: 20px;'>A system to connect LLMs with ML community. See our <a href='https://github.com/microsoft/JARVIS'>Project</a> and <a href='http://arxiv.org/abs/2303.17580'>Paper</a>.</p>")
|
122 |
+
gr.HTML('''<center><a href="https://huggingface.co/spaces/microsoft/HuggingGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key and Hugging Face Token</center>''')
|
123 |
with gr.Row().style():
|
124 |
with gr.Column(scale=0.85):
|
125 |
openai_api_key = gr.Textbox(
|
models_server.py
CHANGED
@@ -162,10 +162,10 @@ def load_pipes(local_deployment):
|
|
162 |
# "tokenizer": AutoTokenizer.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
|
163 |
# "device": "cuda:0"
|
164 |
# },
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
# "lambdalabs/sd-image-variations-diffusers": {
|
170 |
# "model": DiffusionPipeline.from_pretrained(f"{local_models}lambdalabs/sd-image-variations-diffusers"), #torch_dtype=torch.float16
|
171 |
# "device": "cuda:0"
|
|
|
162 |
# "tokenizer": AutoTokenizer.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
|
163 |
# "device": "cuda:0"
|
164 |
# },
|
165 |
+
"espnet/kan-bayashi_ljspeech_vits": {
|
166 |
+
"model": Text2Speech.from_pretrained("espnet/kan-bayashi_ljspeech_vits"),
|
167 |
+
"device": "cuda:0"
|
168 |
+
},
|
169 |
# "lambdalabs/sd-image-variations-diffusers": {
|
170 |
# "model": DiffusionPipeline.from_pretrained(f"{local_models}lambdalabs/sd-image-variations-diffusers"), #torch_dtype=torch.float16
|
171 |
# "device": "cuda:0"
|