add app
Browse files
app.py
CHANGED
@@ -6,97 +6,93 @@ from transformers import AutoTokenizer, BertForSequenceClassification, AutoModel
|
|
6 |
from transformers import TextClassificationPipeline
|
7 |
import gradio as gr
|
8 |
|
9 |
-
tokenizer = AutoTokenizer.from_pretrained('momo/KcELECTRA-base_Hate_speech_Privacy_Detection')
|
10 |
-
model = AutoModelForSequenceClassification.from_pretrained(
|
11 |
-
'momo/KcELECTRA-base_Hate_speech_Privacy_Detection',
|
12 |
-
num_labels= 15,
|
13 |
-
problem_type="multi_label_classification"
|
14 |
-
)
|
15 |
-
|
16 |
-
|
17 |
-
pipe = TextClassificationPipeline(
|
18 |
-
model = model,
|
19 |
-
tokenizer = tokenizer,
|
20 |
-
return_all_scores=True,
|
21 |
-
function_to_apply='sigmoid'
|
22 |
-
)
|
23 |
-
|
24 |
-
def predict(text):
|
25 |
-
return pipe(text)[0]
|
26 |
-
|
27 |
-
iface = gr.Interface(
|
28 |
-
fn=predict,
|
29 |
-
inputs='text',
|
30 |
-
outputs='text',
|
31 |
-
examples=[["Hello! My name is Omar"]]
|
32 |
-
)
|
33 |
-
|
34 |
-
iface.launch()
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
# # global var
|
39 |
-
# MODEL_NAME = 'momo/KcBERT-base_Hate_speech_Privacy_Detection'
|
40 |
-
# tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
|
41 |
# model = AutoModelForSequenceClassification.from_pretrained(
|
42 |
-
#
|
43 |
# num_labels= 15,
|
44 |
# problem_type="multi_label_classification"
|
45 |
# )
|
46 |
|
47 |
-
#
|
48 |
-
#
|
49 |
-
#
|
50 |
-
#
|
51 |
-
#
|
52 |
-
|
53 |
-
# def change_model_name(name):
|
54 |
-
# MODEL_BUF["name"] = name
|
55 |
-
# MODEL_BUF["tokenizer"] = AutoTokenizer.from_pretrained(name)
|
56 |
-
# MODEL_BUF["model"] = AutoModelForSequenceClassification.from_pretrained(name)
|
57 |
|
58 |
-
# def predict(
|
59 |
-
#
|
60 |
-
# change_model_name(model_name)
|
61 |
-
|
62 |
-
# tokenizer = MODEL_BUF["tokenizer"]
|
63 |
-
# model = MODEL_BUF["model"]
|
64 |
|
65 |
-
#
|
66 |
-
#
|
|
|
|
|
|
|
|
|
67 |
|
68 |
-
#
|
69 |
-
# model.config.label2id = {label: i for i, label in zip(range(num_labels), unsmile_labels)}
|
70 |
|
71 |
-
# pipe = TextClassificationPipeline(
|
72 |
-
# model = model,
|
73 |
-
# tokenizer = tokenizer,
|
74 |
-
# return_all_scores=True,
|
75 |
-
# function_to_apply='sigmoid'
|
76 |
-
# )
|
77 |
|
78 |
-
# for result in pipe(text)[0]:
|
79 |
-
# output = result
|
80 |
|
81 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
82 |
|
83 |
-
|
84 |
-
|
|
|
|
|
|
|
85 |
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
100 |
|
101 |
|
102 |
# # global var
|
|
|
6 |
from transformers import TextClassificationPipeline
|
7 |
import gradio as gr
|
8 |
|
9 |
+
# tokenizer = AutoTokenizer.from_pretrained('momo/KcELECTRA-base_Hate_speech_Privacy_Detection')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
# model = AutoModelForSequenceClassification.from_pretrained(
|
11 |
+
# 'momo/KcELECTRA-base_Hate_speech_Privacy_Detection',
|
12 |
# num_labels= 15,
|
13 |
# problem_type="multi_label_classification"
|
14 |
# )
|
15 |
|
16 |
+
# pipe = TextClassificationPipeline(
|
17 |
+
# model = model,
|
18 |
+
# tokenizer = tokenizer,
|
19 |
+
# return_all_scores=True,
|
20 |
+
# function_to_apply='sigmoid'
|
21 |
+
# )
|
|
|
|
|
|
|
|
|
22 |
|
23 |
+
# def predict(text):
|
24 |
+
# return pipe(text)[0]
|
|
|
|
|
|
|
|
|
25 |
|
26 |
+
# iface = gr.Interface(
|
27 |
+
# fn=predict,
|
28 |
+
# inputs='text',
|
29 |
+
# outputs='text',
|
30 |
+
# examples=[["Hello! My name is Omar"]]
|
31 |
+
# )
|
32 |
|
33 |
+
# iface.launch()
|
|
|
34 |
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
|
|
|
|
|
36 |
|
37 |
+
# -------------------------------------------------------------------
# Global model state.
# A default tokenizer/model pair is loaded eagerly at import time and
# kept in MODEL_BUF so predict() can hot-swap checkpoints on demand.
# -------------------------------------------------------------------
MODEL_NAME = 'momo/KcBERT-base_Hate_speech_Privacy_Detection'

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(
    MODEL_NAME,
    num_labels=15,  # 9 hate-speech categories + "clean" + 5 privacy labels
    problem_type="multi_label_classification",
)

# Mutable buffer holding the currently active checkpoint; mutated in
# place by change_model_name() when the user selects another model.
MODEL_BUF = {
    "name": MODEL_NAME,
    "tokenizer": tokenizer,
    "model": model,
}
|
51 |
|
52 |
+
def change_model_name(name):
    """Swap the globally buffered tokenizer/model to checkpoint *name*.

    The new tokenizer and model are loaded *before* MODEL_BUF is touched,
    so a failed download/load (network error, bad checkpoint id) cannot
    leave the buffer half-updated — previously the name was overwritten
    first, which could pair a new name with the old, stale model.
    """
    new_tokenizer = AutoTokenizer.from_pretrained(name)
    new_model = AutoModelForSequenceClassification.from_pretrained(name)
    MODEL_BUF["name"] = name
    MODEL_BUF["tokenizer"] = new_tokenizer
    MODEL_BUF["model"] = new_model
|
56 |
|
57 |
+
def predict(model_name, text):
    """Run multi-label hate-speech / privacy classification on *text*.

    Parameters
    ----------
    model_name : str
        Checkpoint to use. If it differs from the currently buffered
        model, the global buffer is reloaded via change_model_name().
    text : str
        Input sentence to classify.

    Returns
    -------
    list[dict]
        One ``{"label": ..., "score": ...}`` entry per label. Scores are
        independent sigmoids, as expected for multi-label classification.
    """
    if model_name != MODEL_BUF["name"]:
        change_model_name(model_name)

    tokenizer = MODEL_BUF["tokenizer"]
    model = MODEL_BUF["model"]

    # 9 UnSmile hate-speech categories + "clean" + 5 privacy entity types.
    # NOTE(review): the original literals were encoding-corrupted (mojibake
    # with embedded line breaks); restored from the Smilegate UnSmile label
    # set -- confirm against the checkpoint's training labels.
    unsmile_labels = [
        "여성/가족", "남성", "성소수자", "인종/국적", "연령", "지역",
        "종교", "기타 혐오", "악플/욕설", "clean",
        'name', 'number', 'address', 'bank', 'person',
    ]
    num_labels = len(unsmile_labels)

    # Attach human-readable label names so pipeline output uses them
    # instead of the generic LABEL_0..LABEL_14 ids.
    model.config.id2label = {i: label for i, label in enumerate(unsmile_labels)}
    model.config.label2id = {label: i for i, label in enumerate(unsmile_labels)}

    pipe = TextClassificationPipeline(
        model=model,
        tokenizer=tokenizer,
        return_all_scores=True,      # emit a score for every label
        function_to_apply='sigmoid'  # multi-label: independent sigmoids
    )

    # A single string input comes back as a one-element batch; unwrap it.
    return pipe(text)[0]
|
78 |
+
|
79 |
+
if __name__ == '__main__':
    # NOTE(review): this sample sentence is encoding-corrupted (mojibake);
    # restore the intended Korean text from version-control history.
    text = '์ฟ๋ด๊ฑธ ํ๋ณฟ๊ธ ์ฟ๋๊ณญ ์์ ฉ๋๊ณ ์์์๋ฉ'

    # Checkpoints the user can pick from in the UI dropdown.
    model_name_list = [
        'momo/KcELECTRA-base_Hate_speech_Privacy_Detection',
        "momo/KcBERT-base_Hate_speech_Privacy_Detection",
    ]

    # Gradio app: model dropdown + free-text input, label output.
    # gr.inputs.* is the deprecated Gradio 2.x namespace (removed in
    # later releases); use the top-level gr.Dropdown component instead.
    app = gr.Interface(
        fn=predict,
        inputs=[gr.Dropdown(model_name_list, label="Model Name"), 'text'],
        outputs=['label'],
        # NOTE(review): second example string also looks mojibake-corrupted.
        examples=[[MODEL_BUF["name"], text], [MODEL_BUF["name"], "4=๐ฆ 4โ ๐ฆ"]],
        # Title restored from the corrupted original bytes; the English
        # parenthetical was intact and is kept verbatim.
        title="한국어 혐오표현, 개인정보 판별기 (Korean Hate Speech and Privacy Detection)",
        description="Korean Hate Speech and Privacy Detection."
    )
    app.launch()
|
96 |
|
97 |
|
98 |
# # global var
|