Spaces (Running on Zero): Update app.py
app.py CHANGED

@@ -1,6 +1,6 @@
 import gradio as gr
 import spaces
-import 
+from transformers import pipeline
 import torch
 
 DESCRIPTION="""
@@ -21,32 +21,86 @@ ner_example = [["Benim adım Turna."]]
 t2t_example = [["Paraphrase: Bu üründen çok memnun kaldım."]]
 nli_example = [["Bunu çok beğendim. Bunu çok sevdim."]]
 
-t2t_gen = gr.load("huggingface/boun-tabi-LMG/TURNA", examples =t2t_example, title="Text-to-Text Generation", description="Please enter an instruction with a prefix to generate.")
-summarization = gr.load("huggingface/boun-tabi-LMG/turna_summarization_mlsum",examples =long_text, title="Summarization", description="TURNA fine-tuned on MLSUM. Enter a text to summarize below.")
-news_sum = gr.load("huggingface/boun-tabi-LMG/turna_summarization_tr_news",examples =long_text, title="News Summarization", description="TURNA fine-tuned on News summarization. Enter a news to summarize.")
-paraphrase = gr.load("huggingface/boun-tabi-LMG/turna_paraphrasing_tatoeba", examples =long_text,title="Paraphrasing")
-paraphrasing_sub = gr.load("huggingface/boun-tabi-LMG/turna_paraphrasing_opensubtitles",examples =long_text, title="Paraphrasing on Subtitles")
 
-… (ten more removed gr.load interfaces for the remaining tasks; their text is not rendered in this view)
 
 
-interface_list = ["t2t_gen","summarization", "news_sum", "paraphrase", "paraphrasing_sub", "ttc",
-                  "product_reviews", "title_gen", "sentiment", "pos", "nli", "pos_boun",
-                  "stsb", "ner", "ner_wikiann"]
 
+t2t_gen_model = pipeline(model="boun-tabi-LMG/TURNA", device=0)                                        # Text-to-Text Generation
+summarization_model = pipeline(model="boun-tabi-LMG/turna_summarization_mlsum", device=0)              # Summarization (MLSUM)
+news_sum = pipeline(model="boun-tabi-LMG/turna_summarization_tr_news", device=0)                       # News Summarization
+paraphrasing = pipeline(model="boun-tabi-LMG/turna_paraphrasing_tatoeba", device=0)                    # Paraphrasing (Tatoeba)
+paraphrasing_sub = pipeline(model="boun-tabi-LMG/turna_paraphrasing_opensubtitles", device=0)          # Paraphrasing on Subtitles
 
+ttc = pipeline(model="boun-tabi-LMG/turna_classification_ttc4900", device=0)                           # Text Categorization
+product_reviews = pipeline(model="boun-tabi-LMG/turna_classification_tr_product_reviews", device=0)    # Product Reviews Categorization
+title_gen = pipeline(model="boun-tabi-LMG/turna_title_generation_mlsum", device=0)                     # Title Generation
+sentiment_model = pipeline(model="boun-tabi-LMG/turna_classification_17bintweet_sentiment", device=0)  # Sentiment Analysis
+
+pos_imst = pipeline(model="boun-tabi-LMG/turna_pos_imst", device=0)                                    # Part of Speech Tagging (IMST)
+nli_model = pipeline(model="boun-tabi-LMG/turna_nli_nli_tr", device=0)                                 # Natural Language Inference
+pos_boun = pipeline(model="boun-tabi-LMG/turna_pos_boun", device=0)                                    # Part of Speech Tagging (BOUN)
+stsb_model = pipeline(model="boun-tabi-LMG/turna_semantic_similarity_stsb_tr", device=0)               # Semantic Similarity
+ner_model = pipeline(model="boun-tabi-LMG/turna_ner_milliyet", device=0)                               # NER (Milliyet)
+ner_wikiann = pipeline(model="boun-tabi-LMG/turna_ner_wikiann", device=0)                              # NER (WikiANN)
+
+
+@spaces.GPU
+def sentiment_analysis(input, sentiment=True):
+    if sentiment == True:
+        return sentiment_model(input)
+    else:
+        return product_reviews(input)
+
+@spaces.GPU
+def nli_stsb(input, nli=True):
+    if nli == True:
+        return nli_model(input)
+    else:
+        return stsb_model(input)
+
+@spaces.GPU
+def t2t(input):
+    return t2t_gen_model(input)
+
+@spaces.GPU
+def pos(input, boun=True):
+    if boun == True:
+        return pos_boun(input)
+    else:
+        return pos_imst(input)
+
+@spaces.GPU
+def ner(input, wikiann=True):
+    if wikiann == True:
+        return ner_wikiann(input)
+    else:
+        return ner_model(input)
+
+
+@spaces.GPU
+def paraphrase(input, model_choice="turna_paraphrasing_tatoeba"):
+    if model_choice == "turna_paraphrasing_tatoeba":
+        return paraphrasing(input)
+    else:
+        return paraphrasing_sub(input)
+
+@spaces.GPU
+def summarize(input, model_choice="turna_summarization_tr_news"):
+    if model_choice == "turna_summarization_tr_news":
+        return news_sum(input)
+    else:
+        return summarization_model(input)
 
 
 with gr.Blocks(theme="shivi/calm_seafoam") as demo:
     gr.Markdown("# TURNA 🐦")
     gr.Markdown(DESCRIPTION)
-
-
+    with gr.Tab("Summarization"):
+        gr.Markdown("TURNA fine-tuned on summarization. Enter a text to summarize and pick the model.")
+        with gr.Column():
+            with gr.Row():
+                sum_choice = gr.Radio(choices=["turna_summarization_mlsum", "turna_summarization_tr_news"])
+                sum_input = gr.Text()
+            sum_output = gr.Text()
+            sum_submit = gr.Button()
+            sum_submit.click(summarize, inputs=[sum_input, sum_choice], outputs=sum_output)
+            examples = gr.Examples(examples=long_text, inputs=sum_input, outputs=sum_output, fn=summarize, cache_examples=True)
 demo.launch()
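
The updated app.py defines @spaces.GPU wrappers for every task but only exposes summarization in the Blocks UI. The sketch below shows how one of the remaining wrappers could be wired up following the same pattern; it is not part of this commit, it assumes the pipelines, wrappers and the sentiment_example list already defined in app.py, and the sent_* component names are illustrative.

# Hypothetical extension (not in this commit): a Sentiment Analysis tab reusing the
# sentiment_analysis() wrapper defined above. It would sit inside the
# `with gr.Blocks(...) as demo:` block, before demo.launch().
with gr.Tab("Sentiment Analysis"):
    gr.Markdown("TURNA fine-tuned on sentiment classification. Enter a text and choose the model.")
    with gr.Column():
        with gr.Row():
            # A checkbox maps directly onto the boolean `sentiment` flag of sentiment_analysis().
            sent_choice = gr.Checkbox(value=True, label="Use the 17bintweet sentiment model (untick for product reviews)")
            sent_input = gr.Text()
        sent_output = gr.Text()
        sent_submit = gr.Button()
        sent_submit.click(sentiment_analysis, inputs=[sent_input, sent_choice], outputs=sent_output)
        sent_examples = gr.Examples(examples=sentiment_example, inputs=sent_input, outputs=sent_output, fn=sentiment_analysis)

A checkbox is used here instead of a gr.Radio of model names because sentiment_analysis() switches on a boolean flag rather than on a model id, unlike summarize().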