Spaces:
Sleeping
ljyflores committed
Commit • a55c200 • 1 Parent(s): 6dbb060
Update app.py filename
- streamlit_app.py +0 -114

streamlit_app.py DELETED
@@ -1,114 +0,0 @@
import streamlit as st

from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM

# Example inputs displayed in the UI for each dataset
dataset_example_dictionary = {
    "cochrane": [
        """A total of 38 studies involving 7843 children were included. Following educational intervention delivered to children, their parents or both, there was a significantly reduced risk of subsequent emergency department visits (RR 0.73, 95% CI 0.65 to 0.81, N = 3008) and hospital admissions (RR 0.79, 95% CI 0.69 to 0.92, N = 4019) compared with control. There were also fewer unscheduled doctor visits (RR 0.68, 95% CI 0.57 to 0.81, N = 1009). Very few data were available for other outcomes (FEV1, PEF, rescue medication use, quality of life or symptoms) and there was no statistically significant difference between education and control. Asthma education aimed at children and their carers who present to the emergency department for acute exacerbations can result in lower risk of future emergency department presentation and hospital admission. There remains uncertainty as to the long-term effect of education on other markers of asthma morbidity such as quality of life, symptoms and lung function. It remains unclear as to what type, duration and intensity of educational packages are the most effective in reducing acute care utilisation.""",
        """Five trials of MSP/RESA vaccine with 217 participants were included; all five reported on safety, and two on efficacy. No severe or systemic adverse effects were reported at doses of 13 to 15 \u00b5g of each antigen (39 to 45 \u00b5g total). One small efficacy trial with 17 non-immune participants with blood-stage parasites showed no reduction or delay in parasite growth rates after artificial challenge. In the second efficacy trial in 120 children aged five to nine years in Papua New Guinea, episodes of clinical malaria were not reduced, but MSP/RESA significantly reduced parasite density only in children who had not been pretreated with an antimalarial drug (sulfadoxine-pyrimethamine). Infections with the 3D7 parasite subtype of MSP2 (the variant included in the vaccine) were reduced (RR 0.38, 95% CI 0.26 to 0.57; 719 participants) while those with the other main subtype, FC27, were not (720 participants). The MSP/RESA (Combination B) vaccine shows promise as a way to reduce the severity of malaria episodes, but the effect of the vaccine is MSP2 variant-specific. Pretreatment for malaria during a vaccine trial makes the results difficult to interpret, particularly with the relatively small sample sizes of early trials. The results show that blood-stage vaccines may play a role and merit further development."""
    ],
    "medeasi": [
        """Intervention for obese adolescents should be focused on developing healthy eating and exercise habits rather than on losing a specific amount of weight.""",
        """The liver may be enlarged, hard, or tender; massive hepatomegaly with easily palpable nodules signifies advanced disease."""
    ]
}

# Hugging Face Hub checkpoints for each dataset/variant pair
model_dictionary = {
    "cochrane": {
        "baseline": "ljyflores/bart_xsum_cochrane_finetune",
        "ul": "ljyflores/bart_xsum_cochrane_ul"
    },
    "medeasi": {
        "baseline": "ljyflores/bart_xsum_medeasi_finetune",
        "ul": "ljyflores/bart_xsum_medeasi_ul"
    }
}

st.title("Text Simplification Model")

@st.cache(allow_output_mutation=True)
def load(dataset_name, model_variant_name):
    return pipeline(
        "text2text-generation",
        model="lucadiliello/bart-small"  # model_dictionary[dataset_name][model_variant_name]
    )

def predict(text, pipeline):
    return pipeline(text, max_length=768)

# @st.cache_resource
# def load(dataset_name, model_variant_name):
#     tokenizer = AutoTokenizer.from_pretrained(model_dictionary[dataset_name][model_variant_name])
#     model = AutoModelForSeq2SeqLM.from_pretrained(model_dictionary[dataset_name][model_variant_name])
#     return pipeline("text2text-generation", model="ljyflores/bart_xsum_cochrane_finetune")

# def encode(text, _tokenizer):
#     """This function takes a batch of samples,
#     and tokenizes them into IDs for the model."""
#     # Tokenize the Findings (the input)
#     model_inputs = _tokenizer(
#         [text], padding=True, truncation=True, return_tensors="pt"
#     )
#     return model_inputs

# def predict(text, model, tokenizer):
#     model_inputs = encode(text, tokenizer)
#     model_outputs = model.generate(**model_inputs, max_length=768).detach()
#     return tokenizer.batch_decode(model_outputs)

def clean(s):
    # Strip the BOS/EOS tokens that BART leaves in decoded text
    return s.replace("<s>", "").replace("</s>", "")

# Get user input for model
dataset_option = st.selectbox(
    label="Dataset Selection",
    options=["cochrane", "medeasi"],
    index=0
)

st.subheader("Dataset Examples")
st.caption("Try out some of these examples by copying them into the space below!")
st.text(dataset_example_dictionary[dataset_option][0])
st.text(dataset_example_dictionary[dataset_option][1])

# Get user input for text
st.subheader("Input Text to Simplify")
st.text_area("Text to Simplify:", key="text", height=275)

# Load model and run inference
if st.button("Simplify!"):
    # Number 1: baseline variant (currently disabled)
    # # tokenizer_baseline, model_baseline = load(dataset_option, "baseline")
    # # model_outputs_baseline = predict(st.session_state.text, model_baseline, tokenizer_baseline)[0]

    # pipeline_baseline = load(dataset_option, "baseline")
    # # model_outputs_baseline = predict(st.session_state.text, pipeline_baseline)[0]["generated_text"]

    # # pipeline_baseline = pipeline(
    # #     "text2text-generation",
    # #     model=model_dictionary[dataset_option]["baseline"]
    # # )
    # model_outputs_baseline = pipeline_baseline(
    #     st.session_state.text,
    #     max_length=768,
    #     do_sample=False
    # )
    # st.write(f"Baseline: {clean(model_outputs_baseline)}")

    # Number 2: unlikelihood-trained variant
    # tokenizer_ul, model_ul = load(dataset_option, "ul")
    # model_outputs_ul = predict(st.session_state.text, model_ul, tokenizer_ul)[0]

    pipeline_ul = load(dataset_option, "ul")
    # model_outputs_ul = predict(st.session_state.text, pipeline_ul)[0]["generated_text"]

    # pipeline_ul = pipeline(
    #     "text2text-generation",
    #     model=model_dictionary[dataset_option]["ul"]
    # )
    model_outputs_ul = pipeline_ul(
        st.session_state.text,
        max_length=768,
        do_sample=False
    )
    # The pipeline returns a list of dicts, so unwrap the generated string
    # before stripping the special tokens
    st.write(f"Unlikelihood Learning: {clean(model_outputs_ul[0]['generated_text'])}")
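Two notes on the deleted file, for anyone resurrecting it under the new app.py name. First, a text2text-generation pipeline returns a list of {"generated_text": ...} dicts, which is why the final st.write has to unwrap index [0] before clean() strips the special tokens. Second, st.cache(allow_output_mutation=True) is Streamlit's legacy caching API; the file's own commented-out code already points at its replacement, st.cache_resource (available in Streamlit >= 1.18). Below is a minimal sketch of the loader against that newer API, reusing the checkpoint names from model_dictionary above; it is an illustration under those assumptions, not part of this commit:

import streamlit as st
from transformers import pipeline

model_dictionary = {
    "cochrane": {
        "baseline": "ljyflores/bart_xsum_cochrane_finetune",
        "ul": "ljyflores/bart_xsum_cochrane_ul",
    },
    "medeasi": {
        "baseline": "ljyflores/bart_xsum_medeasi_finetune",
        "ul": "ljyflores/bart_xsum_medeasi_ul",
    },
}

# st.cache_resource caches one pipeline per (dataset, variant) pair,
# so repeated button clicks reuse the loaded model
@st.cache_resource
def load(dataset_name, model_variant_name):
    # Resolve the real checkpoint instead of the hardcoded debug model
    return pipeline(
        "text2text-generation",
        model=model_dictionary[dataset_name][model_variant_name],
    )

def predict(text, pipe):
    # Unwrap the pipeline's [{"generated_text": ...}] return value
    return pipe(text, max_length=768, do_sample=False)[0]["generated_text"]

With this version, load(dataset_option, "ul") serves the checkpoint the UI actually selected, and predict() hands back a plain string ready for clean().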