Spaces:
Sleeping
Sleeping
change text (#4)
Browse files- change text (9528ff8d5a9e31853e23cedd979dfa887b04a89b)
app.py
CHANGED
@@ -51,9 +51,9 @@ def sql_generate(prompt, input_prompt_sql ):
|
|
51 |
demo = gr.Blocks()
|
52 |
|
53 |
with demo:
|
54 |
-
gr.Markdown("<h1><center>
|
55 |
gr.Markdown(
|
56 |
-
"""[BigScienceW Bloom](https://twitter.com/BigscienceW) \n\n Large language models have demonstrated a capability of Zero-Shot SQL generation. Some might say — You can get good results out of LLMs if you know how to speak to them. This space is an attempt at inspecting this behavior/capability in the new HuggingFace BigScienceW [Bloom](https://huggingface.co/bigscience/bloom) model.\n\nThe Prompt length is limited at the API end right now, thus there is a certain limitation in testing Bloom's capability thoroughly. This Space might sometime fail due to inference queue being full and logs would end up showing error as *'queue full, try again later'*, in such cases please try again after few minutes. Please note that, longer prompts might not work as well and the Space could error out with Response code [500] or *'A very long prompt, temporarily not accepting these'* message in the logs. Still iterating over the app, might be able to improve it further soon.. \n\nThis Space is created by
|
57 |
)
|
58 |
with gr.Row():
|
59 |
|
@@ -75,8 +75,7 @@ with demo:
|
|
75 |
b1 = gr.Button("Generate SQL")
|
76 |
b1.click(sql_generate,inputs=[example_prompt, input_prompt_sql], outputs=generated_txt)
|
77 |
|
78 |
-
|
79 |
-
gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=europython2022_zero-shot-sql-by-bloom)")
|
80 |
|
81 |
demo.launch(enable_queue=True, debug=True)
|
82 |
|
|
|
51 |
demo = gr.Blocks()
|
52 |
|
53 |
with demo:
|
54 |
+
gr.Markdown("<h1><center>Text to SQL by Bloom</center></h1>")
|
55 |
gr.Markdown(
|
56 |
+
"""[BigScienceW Bloom](https://twitter.com/BigscienceW) \n\n Large language models have demonstrated a capability of Zero-Shot SQL generation. Some might say — You can get good results out of LLMs if you know how to speak to them. This space is an attempt at inspecting this behavior/capability in the new HuggingFace BigScienceW [Bloom](https://huggingface.co/bigscience/bloom) model.\n\nThe Prompt length is limited at the API end right now, thus there is a certain limitation in testing Bloom's capability thoroughly. This Space might sometime fail due to inference queue being full and logs would end up showing error as *'queue full, try again later'*, in such cases please try again after few minutes. Please note that, longer prompts might not work as well and the Space could error out with Response code [500] or *'A very long prompt, temporarily not accepting these'* message in the logs. Still iterating over the app, might be able to improve it further soon.. \n\nThis Space is created by Yuvraj Sharma for Gradio EuroPython 2022 Demo and changed with [Ayoub Nikravan](https://www.linkedin.com/in/ayyoub-nikravan-862b1770/) ."""
|
57 |
)
|
58 |
with gr.Row():
|
59 |
|
|
|
75 |
b1 = gr.Button("Generate SQL")
|
76 |
b1.click(sql_generate,inputs=[example_prompt, input_prompt_sql], outputs=generated_txt)
|
77 |
|
78 |
+
|
|
|
79 |
|
80 |
demo.launch(enable_queue=True, debug=True)
|
81 |
|