Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -16,41 +16,54 @@ gen_pipeline = pipeline(
|
|
16 |
)
|
17 |
|
18 |
# Define the function to generate text
|
19 |
-
def generate_text(question, options, context, max_new_tokens=
|
20 |
-
if context is None:
|
21 |
-
context = "nessun contesto fornito."
|
22 |
|
23 |
-
|
|
|
|
|
|
|
24 |
### Contesto:
|
25 |
{}
|
26 |
-
|
27 |
### Domanda:
|
28 |
{}
|
29 |
-
|
30 |
-
### Risposta:
|
31 |
-
"""
|
32 |
|
33 |
-
|
34 |
-
### Contesto:
|
35 |
{}
|
36 |
|
|
|
|
|
|
|
|
|
37 |
### Domanda:
|
38 |
{}
|
39 |
|
40 |
### Opzioni:
|
41 |
{}
|
42 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
### Risposta:
|
44 |
"""
|
45 |
|
46 |
-
|
47 |
-
|
|
|
|
|
|
|
|
|
|
|
48 |
else:
|
49 |
-
prompt =
|
50 |
|
51 |
output = gen_pipeline(
|
52 |
prompt,
|
53 |
-
max_new_tokens=
|
54 |
temperature=temperature,
|
55 |
return_full_text = False
|
56 |
)
|
@@ -58,9 +71,12 @@ def generate_text(question, options, context, max_new_tokens=50, temperature=1):
|
|
58 |
return f"<span>{question} </span><b style='color: blue;'>{generated_text}</b>"
|
59 |
|
60 |
# Create the Gradio interface
|
61 |
-
question = gr.Textbox(lines=
|
62 |
-
|
63 |
-
|
|
|
|
|
|
|
64 |
|
65 |
temperature = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature")
|
66 |
|
@@ -69,6 +85,11 @@ with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #
|
|
69 |
gr.Markdown("🐢💬 To guarantee a reasonable througput (<1 min to answer with default settings), this space employs a **GGUF quantized version of [Igea 1B](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1)**, optimized for **hardware-limited, CPU-only machines** like the free-tier HuggingFace space.")
|
70 |
gr.Markdown("⚠️ Read the **[bias, risks and limitations](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1#%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8-bias-risks-and-limitations-%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8)** of Igea before use!")
|
71 |
question.render()
|
|
|
|
|
|
|
|
|
|
|
72 |
options.render()
|
73 |
context.render()
|
74 |
with gr.Accordion("Advanced Options", open=False):
|
@@ -76,7 +97,7 @@ with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #
|
|
76 |
output = gr.HTML(label="Answer",elem_id="outbox")
|
77 |
|
78 |
btn = gr.Button("Answer")
|
79 |
-
btn.click(generate_text, [question, options, context, temperature], output)
|
80 |
|
81 |
# Launch the interface
|
82 |
if __name__ == "__main__":
|
|
|
16 |
)
|
17 |
|
18 |
# Define the function to generate text
|
19 |
def generate_text(question, options, context, max_new_tokens=30, temperature=1):
    """Build an Italian Q&A prompt and answer it with the global `gen_pipeline`.

    Parameters
    ----------
    question : str
        The question to answer.
    options : list
        Up to four multiple-choice options (A-D); entries may be None.
    context : str | None
        Optional supporting passage; None or empty means "no context".
    max_new_tokens : int
        Token budget forwarded to the generation pipeline.
    temperature : float
        Sampling temperature forwarded to the generation pipeline.

    Returns
    -------
    str
        HTML snippet echoing the question with the generated answer in blue.
    """
    # Drop missing options. Fixed: the original
    # `[option if option is not None for option in options]` is a
    # SyntaxError -- the filter clause belongs at the end of the comprehension.
    options_filtered = [option for option in options if option is not None]

    # Label the surviving options "A) " .. "D) ". Fixed: index the *filtered*
    # list; the original indexed `options[i]`, so a None in the middle of
    # `options` would be concatenated (TypeError) or misalign the labels.
    labels = ["A) ", "B) ", "C) ", "D) "]
    options_string = "; ".join(
        labels[i] + options_filtered[i] for i in range(len(options_filtered))
    ) + "."

    closed_prompt = """
### Contesto:
{}

### Domanda:
{}

### Opzioni:
{}

### Risposta:
"""

    closed_prompt_no_context = """
### Domanda:
{}

### Opzioni:
{}

### Risposta:
"""

    open_prompt = """
### Domanda:
{}

### Risposta:
"""

    # valid context, valid options
    if context is not None and len(context) > 1 and len(options_filtered) > 1:
        prompt = closed_prompt.format(context, question, options_string)
    # invalid context, valid options. Fixed: the original condition
    # `context is None or len(context) < 1 and len(options_filtered) > 1`
    # parsed as `context is None or (...)` because `and` binds tighter than
    # `or`, so a missing context selected this branch even with no options.
    elif (context is None or len(context) < 1) and len(options_filtered) > 1:
        prompt = closed_prompt_no_context.format(question, options_string)
    # invalid context, invalid options
    else:
        prompt = open_prompt.format(question)

    output = gen_pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        return_full_text=False,
    )
    # NOTE(review): this extraction falls in an elided diff hunk; the standard
    # transformers text-generation pipeline output shape is assumed -- confirm.
    generated_text = output[0]["generated_text"]

    return f"<span>{question} </span><b style='color: blue;'>{generated_text}</b>"
|
72 |
|
73 |
# Create the Gradio interface
|
74 |
# --- Gradio input components (constructed here, rendered later inside the
# Blocks layout via .render()) ---

# Main question; the option boxes below are optional.
question = gr.Textbox(
    lines=1,
    placeholder="L'ostruzione uretrale cronica dovuta a iperplasia prismatica benigna può portare al seguente cambiamento nel parenchima renale",
    label="Domanda (Opzioni facoltative)",
)

# Four optional multiple-choice answers (A-D).
opa = gr.Textbox(lines=1, placeholder="Iperplasia")
opb = gr.Textbox(lines=1, placeholder="Iperofia")
opc = gr.Textbox(lines=1, placeholder="Atrofia")
opd = gr.Textbox(lines=1, placeholder="Displasia")

# Optional supporting passage the model may ground its answer in.
context = gr.Textbox(
    lines=2,
    placeholder="L'ostruzione uretrale cronica dovuta a calcoli urinari, iperofia prostatica, tumori, gravidanza normale, tumori, prolasso uterino o disturbi funzionali causano idronefrosi che per definizione viene utilizzata per descrivere la dilatazione della pelvi renale e dei calcoli associati ad atrofia progressiva del rene dovuta a ostruzione dell'uretra. Deflusso di urina. Fare riferimento a Robbins 7yh/9,1012,9/e. P950.",
    label="Contesto (facoltativo)",
)

# Sampling temperature, exposed under the "Advanced Options" accordion.
temperature = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature")
|
82 |
|
|
|
85 |
gr.Markdown("🐢💬 To guarantee a reasonable througput (<1 min to answer with default settings), this space employs a **GGUF quantized version of [Igea 1B](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1)**, optimized for **hardware-limited, CPU-only machines** like the free-tier HuggingFace space.")
|
86 |
gr.Markdown("⚠️ Read the **[bias, risks and limitations](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1#%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8-bias-risks-and-limitations-%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8)** of Igea before use!")
|
87 |
question.render()
|
88 |
+
with gr.Row():
|
89 |
+
opa.render()
|
90 |
+
opb.render()
|
91 |
+
opc.render()
|
92 |
+
opd.render()
|
93 |
options.render()
|
94 |
context.render()
|
95 |
with gr.Accordion("Advanced Options", open=False):
|
|
|
97 |
output = gr.HTML(label="Answer",elem_id="outbox")
|
98 |
|
99 |
btn = gr.Button("Answer")
|
100 |
+
btn.click(generate_text, [question=question, options=[opa, opb, opc, opd], context=context, temperature=temperature], output)
|
101 |
|
102 |
# Launch the interface
|
103 |
if __name__ == "__main__":
|