Spaces: Running

only query is output #11
by nikravan - opened

app.py
CHANGED
```diff
@@ -1,67 +1,65 @@
 import gradio as gr
 import requests
 import os
 
 ##Bloom
 API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
 
-HF_TOKEN = "
+HF_TOKEN = "Token"
 headers = {"Authorization": f"Bearer {HF_TOKEN}"}
 
 
 def sql_generate(prompt):
-
-
     json_ = {"inputs": prompt,
              "parameters":
                  {
                      "top_p": 0.9,
                      "temperature": 1.1,
                      "max_new_tokens": 64,
                      "return_full_text": True,
                  },
              "options":
                  {"use_cache": False,
                   "wait_for_model": True,
-                  },}
+                  }, }
     response = requests.post(API_URL, json=json_)
     print(f"Response is : {response}")
     output = response.json()
     print(f"output is : {output}")
     output_tmp = output[0]['generated_text']
+    output_tmp = output_tmp.replace(prompt, '')
     print(f"output_tmp is: {output_tmp}")
     solution = output_tmp.split("\nQ:")[0]
     print(f"Final response after splits is: {solution}")
     if '\nOutput:' in solution:
         final_solution = solution.split("\nOutput:")[0]
         print(f"Response after removing output is: {final_solution}")
     elif '\n\n' in solution:
         final_solution = solution.split("\n\n")[0]
         print(f"Response after removing new line entries is: {final_solution}")
     else:
         final_solution = solution
-    #return final_solution
+    # return final_solution
     return final_solution
 
 demo = gr.Blocks()
 
 with demo:
-
-
+    gr.Markdown("<h1><center>Text to SQL by Bloom</center></h1>")
+    gr.Markdown(
     """[BigScienceW Bloom](https://twitter.com/BigscienceW) \n\n Large language models have demonstrated a capability of Zero-Shot SQL generation. Some might say β You can get good results out of LLMs if you know how to speak to them. This space is an attempt at inspecting this behavior/capability in the new HuggingFace BigScienceW [Bloom](https://huggingface.co/bigscience/bloom) model.\n\nThe Prompt length is limited at the API end right now, thus there is a certain limitation in testing Bloom's capability thoroughly.This Space might sometime fail due to inference queue being full and logs would end up showing error as *'queue full, try again later'*, in such cases please try again after few minutes. Please note that, longer prompts might not work as well and the Space could error out with Response code [500] or *'A very long prompt, temporarily not accepting these'* message in the logs. Still iterating over the app, might be able to improve it further soon.. \n\nThis Space is created by Yuvraj Sharma for Gradio EuroPython 2022 Demo and changed with [Ayoub Nikravan](https://www.linkedin.com/in/ayyoub-nikravan-862b1770/) ."""
-
-
-
-
-
-
+    )
+    with gr.Row():
+        input_prompt_sql = gr.Textbox(
+            label="Write text following the example pattern given below, to get SQL commands...",
+            value="Instruction: Given an input question, respond with syntactically correct PostgreSQL. Use table called 'department'.\nInput: Select names of all the departments in their descending alphabetical order.\nPostgreSQL query: ",
+            lines=6)
 
-
-
+    with gr.Row():
+        generated_txt = gr.Textbox(lines=3)
 
-
-
+    b1 = gr.Button("Generate SQL")
+    b1.click(sql_generate, inputs=[input_prompt_sql], outputs=generated_txt)
 
-
-# gr.Interface.load("models/bigscience/bloom").launch()
+demo.launch(enable_queue=True, debug=True)
```
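The substantive change is the added `output_tmp = output_tmp.replace(prompt, '')` line: since the payload sets `"return_full_text": True`, the Inference API echoes the prompt at the start of `generated_text`, and stripping that echo is what makes the Space output only the generated query (hence the title). A minimal sketch of the post-processing step in isolation, with made-up sample strings:

```python
# Minimal sketch of the prompt-stripping step; the sample strings are
# made up for illustration.
prompt = "PostgreSQL query: "
generated_text = prompt + "SELECT name FROM department ORDER BY name DESC;"

# With "return_full_text": True the API echoes the prompt, so remove it:
query_only = generated_text.replace(prompt, "")
print(query_only)  # SELECT name FROM department ORDER BY name DESC;
```

An alternative worth considering is sending `"return_full_text": False` in the `parameters` dict, which asks the API not to echo the prompt in the first place.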
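Two caveats that apply to both versions of the file: `headers` is built but never passed to `requests.post`, so the bearer token is never actually sent, and the token itself is hardcoded. A sketch of the usual Spaces pattern, assuming the token is stored as a Space secret (the environment variable name `HF_TOKEN` is an assumption):

```python
import os

import requests

API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"

# Read the token from the environment (e.g. a Space secret) instead of
# hardcoding it; the variable name HF_TOKEN is an assumption.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
headers = {"Authorization": f"Bearer {HF_TOKEN}"}

# Pass headers explicitly -- both versions of app.py omit this argument,
# so their requests go out unauthenticated.
response = requests.post(API_URL, headers=headers, json={"inputs": "..."})
```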
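Finally, the in-app description already warns about *'queue full'* and Response code [500] failures; in those cases `response.json()` returns an error dict rather than a list, and `output[0]['generated_text']` raises before the function can report anything useful. A hedged sketch of a guard (the function name and message format are illustrative, not part of the app):

```python
def extract_generated_text(output):
    """Illustrative guard: the hosted Inference API returns a list of
    {"generated_text": ...} dicts on success and a dict such as
    {"error": "..."} on failure."""
    if isinstance(output, dict) and "error" in output:
        return f"API error: {output['error']}"
    return output[0]["generated_text"]
```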