Spaces: Sleeping
sigmadream committed on
Commit • 7eb81bd
Parent(s): 593a37b
v1
Browse files
- app.py +217 -0
- examples.csv +51 -0
- gitattributes.txt +34 -0
- klue_roberta-small-2400.pt +3 -0
- lid.176.ftz +3 -0
- model-1900.pt +3 -0
- requirements.txt +7 -0
- roberta-base-1900.pt +3 -0
app.py
ADDED
@@ -0,0 +1,217 @@
import gradio as gr
import fasttext

from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer

import numpy as np
import pandas as pd
import torch


id2label = {0: "NEGATIVE", 1: "POSITIVE"}
label2id = {"NEGATIVE": 0, "POSITIVE": 1}


title = "Movie Review Score Discriminator"
description = "A program that classifies whether a movie review is positive or negative. \
You can choose between the Korean version and the English version. \
A 'Detect language' mode is also provided, which determines whether the input is Korean or English and then predicts."


class LanguageIdentification:
    def __init__(self):
        pretrained_lang_model = "./lid.176.ftz"  # fastText language-identification model
        self.model = fasttext.load_model(pretrained_lang_model)

    def predict_lang(self, text):
        predictions = self.model.predict(text, k=200)  # returns the top 200 matching languages
        return predictions


LANGUAGE = LanguageIdentification()
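
# A minimal sketch (assumption: fastText's predict() returns a tuple of label
# strings and a parallel NumPy array of probabilities) of how the detector
# output is consumed in builder() below; not used by the app itself:
def _detect_language_example(text="this movie was great"):
    labels, probs = LANGUAGE.predict_lang(text)
    # e.g. labels[0] == '__label__en' with probs[0] close to 1.0
    return labels[0], float(probs[0])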


def tokenized_data(tokenizer, inputs):
    # Encode a single string as a batch of one, padded/truncated to exactly 64 tokens.
    return tokenizer.batch_encode_plus(
        [inputs],
        return_tensors="pt",
        padding="max_length",
        max_length=64,
        truncation=True)
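
# Shape sketch for the helper above: every encoding has fixed-size tensors no
# matter how long the input is (hypothetical helper, unused by the app):
def _encoding_shape_example():
    enc = tokenized_data(eng_tokenizer, "a fine movie")  # eng_tokenizer is defined below
    return tuple(enc['input_ids'].shape)  # -> (1, 64)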


# Sample five English and five Korean demo examples; each row pairs a language
# tag with a review, matching the two inputs of the interface below.
examples = []
df = pd.read_csv('examples.csv', sep='\t', index_col='Unnamed: 0')
np.random.seed(100)

idx = np.random.choice(50, size=5, replace=False)
eng_examples = [ ['Eng', df.iloc[i, 0]] for i in idx ]
kor_examples = [ ['Kor', df.iloc[i, 1]] for i in idx ]
examples = eng_examples + kor_examples


eng_model_name = "roberta-base"
eng_step = 1900
eng_tokenizer = AutoTokenizer.from_pretrained(eng_model_name)
eng_file_name = "{}-{}.pt".format(eng_model_name, eng_step)
eng_state_dict = torch.load(eng_file_name)
eng_model = AutoModelForSequenceClassification.from_pretrained(
    eng_model_name, num_labels=2, id2label=id2label, label2id=label2id,
    state_dict=eng_state_dict
)


kor_model_name = "klue/roberta-small"
kor_step = 2400
kor_tokenizer = AutoTokenizer.from_pretrained(kor_model_name)
kor_file_name = "{}-{}.pt".format(kor_model_name.replace('/', '_'), kor_step)
kor_state_dict = torch.load(kor_file_name)
kor_model = AutoModelForSequenceClassification.from_pretrained(
    kor_model_name, num_labels=2, id2label=id2label, label2id=label2id,
    state_dict=kor_state_dict
)
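
# The *.pt checkpoints above hold fine-tuned weights; passing state_dict= to
# from_pretrained() loads them over the base architecture. A sketch of how such
# a checkpoint would have been produced after fine-tuning (an assumption, for
# illustration only):
def _save_checkpoint_example(model, model_name, step):
    torch.save(model.state_dict(), "{}-{}.pt".format(model_name.replace('/', '_'), step))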


def builder(Lang, Text):
    percent_kor, percent_eng = 0, 0
    p_kor, p_eng = 0, 0  # defaults, in case only one language is detected below
    text_list = Text.split(' ')


    # [ output_1 ]
    if Lang == 'Detect language':
        pred = LANGUAGE.predict_lang(Text)
        if '__label__en' in pred[0]:
            Lang = 'Eng'
            idx = pred[0].index('__label__en')
            p_eng = pred[1][idx]
        if '__label__ko' in pred[0]:
            Lang = 'Kor'
            idx = pred[0].index('__label__ko')
            p_kor = pred[1][idx]
        # Normalize the two scores so they sum to 1.
        percent_kor = p_kor / (p_kor + p_eng)
        percent_eng = p_eng / (p_kor + p_eng)

    if Lang == 'Eng':
        model = eng_model
        tokenizer = eng_tokenizer
        if percent_eng == 0: percent_eng = 1

    if Lang == 'Kor':
        model = kor_model
        tokenizer = kor_tokenizer
        if percent_kor == 0: percent_kor = 1


    # [ output_2 ]
    inputs = tokenized_data(tokenizer, Text)
    model.eval()
    with torch.no_grad():
        logits = model(input_ids=inputs['input_ids'],
                       attention_mask=inputs['attention_mask']).logits

    m = torch.nn.Softmax(dim=1)
    output = m(logits)
    # print(logits, output)


    # [ output_3 ]
    output_analysis = []
    for word in text_list:
        tokenized_word = tokenized_data(tokenizer, word)
        with torch.no_grad():
            logit = model(input_ids=tokenized_word['input_ids'],
                          attention_mask=tokenized_word['attention_mask']).logits
        word_output = m(logit)
        if word_output[0][1] > 0.99:
            output_analysis.append( (word, '+++') )
        elif word_output[0][1] > 0.9:
            output_analysis.append( (word, '++') )
        elif word_output[0][1] > 0.8:
            output_analysis.append( (word, '+') )
        elif word_output[0][1] < 0.01:
            output_analysis.append( (word, '---') )
        elif word_output[0][1] < 0.1:
            output_analysis.append( (word, '--') )
        elif word_output[0][1] < 0.2:
            output_analysis.append( (word, '-') )
        else:
            output_analysis.append( (word, None) )


    return [ {'Kor': percent_kor, 'Eng': percent_eng},
             {id2label[1]: output[0][1].item(), id2label[0]: output[0][0].item()},
             output_analysis ]

    # prediction = torch.argmax(logits, axis=1)
    # return id2label[prediction.item()]  # unreachable: superseded by the return above
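
# A sketch of builder()'s three return values, as consumed by the Label, Label,
# and HighlightedText components below (values illustrative):
def _builder_output_example():
    lang_scores, result_scores, analysis = builder('Eng', 'a great movie')
    # lang_scores   -> {'Kor': 0, 'Eng': 1}
    # result_scores -> {'POSITIVE': ..., 'NEGATIVE': ...}
    # analysis      -> [('a', None), ('great', '+++'), ('movie', '+')]
    return lang_scores, result_scores, analysis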


# demo3 = gr.Interface.load("models/mdj1412/movie_review_score_discriminator_eng", inputs="text", outputs="text",
#                           title=title, theme="peach",
#                           allow_flagging="auto",
#                           description=description, examples=examples)


# demo = gr.Interface(builder, inputs=[gr.inputs.Dropdown(['Default', 'Eng', 'Kor']), gr.Textbox(placeholder="Enter a review.")],
#                     outputs=[ gr.Label(num_top_classes=3, label='Lang'),
#                               gr.Label(num_top_classes=2, label='Result'),
#                               gr.HighlightedText(label="Analysis", combine_adjacent=False)
#                                   .style(color_map={"+++": "#CF0000", "++": "#FF3232", "+": "#FFD4D4", "---": "#0004FE", "--": "#4C47FF", "-": "#BEBDFF"}) ],
#                     # outputs='label',
#                     title=title, description=description, examples=examples)


with gr.Blocks() as demo1:
    gr.Markdown(
        """
        <h1 align="center">
        Movie Review Score Discriminator
        </h1>
        """)

    gr.Markdown(
        """
        A model that takes a movie review and determines whether it is positive or negative. \
        It supports English and Korean; you can select the language yourself, or let the model detect it.
        Given a review, you can check (1) the detected language, (2) the probability that the review is positive and the probability that it is negative, and (3) which words in the review influenced the positive/negative decision \
        (red for positive, blue for negative).
        """)

    with gr.Accordion(label="About the model (click here)", open=False):
        gr.Markdown(
            """
            The English model is based on roberta-base and was trained and evaluated on SST-2, an English movie-review sentiment dataset.
            The Korean model is based on klue/roberta-small. Since no public Korean movie-review sentiment dataset was available, reviews were crawled from Naver Movies to build one, which was then used to train and evaluate the model.
            The English model reaches 92.8% accuracy on SST-2 and the Korean model 94% on the Naver movie-review dataset (both on the test set).
            Language detection uses fastText's language detector. Per-word influence is measured by feeding each word to the model on its own and seeing whether the prediction comes out positive or negative.
            """)

    with gr.Row():
        with gr.Column():
            inputs_1 = gr.Dropdown(choices=['Detect language', 'Eng', 'Kor'], value='Detect language', label='Lang')
            inputs_2 = gr.Textbox(placeholder="Enter a review.", label='Text')
            with gr.Row():
                # btn2 = gr.Button("Clear")
                btn = gr.Button("Submit")
        with gr.Column():
            output_1 = gr.Label(num_top_classes=3, label='Lang')
            output_2 = gr.Label(num_top_classes=2, label='Result')
            output_3 = gr.HighlightedText(label="Analysis", combine_adjacent=False) \
                .style(color_map={"+++": "#CF0000", "++": "#FF3232", "+": "#FFD4D4", "---": "#0004FE", "--": "#4C47FF", "-": "#BEBDFF"})

    # btn2.click(fn=fn2, inputs=[None, None], output=[output_1, output_2, output_3])
    btn.click(fn=builder, inputs=[inputs_1, inputs_2], outputs=[output_1, output_2, output_3])
    gr.Examples(examples, inputs=[inputs_1, inputs_2])


if __name__ == "__main__":
    # print(examples)
    # demo.launch()
    demo1.launch()
examples.csv
ADDED
@@ -0,0 +1,51 @@
	eng	kor
0	of saucy	1μ λμκΉλ€4λͺ보λ€μ¬λ―Έμμ΄μ2λͺλκ°
1	cold movie	맀νΈλ¦μ€?γ΄γ΄ μ§μ₯ 묻μ μ‘΄μ
2	redundant concept	κ°μΈμ μ νμΌλ‘ 1μ μ 쀬μ΅λλ€
3	in world cinema	보λλ΄λ΄ λ μκ°λ§ λ¬λ€.
4	on all cylinders	μν보λ€κ° μ λ μ μ μ²μμ΄λ€μ
5	sit through ,	λ°λ»ν μνμμ~^^μΆμ²ν΄μ!
6	heroes	λ³λ‘μμ μκ°λ³΄λ€ λΈμΌμ
7	sharply	μ’μμ κ°μ‘±λ€κ³Ό 보기 μ’μμ
8	sometimes dry	β‘ μ¬λ°κ² μλ΄€μ΅λλ€γγ
9	disappointments	λ°μ νΈλΉ μ¬λν΄μ~
10	the horrors	λ₯λ μ΄λ° κ±°λν λ₯μ΄ μμλ€..
11	many pointless	κ°μ§λ¦½λλ€ λλ§λΉν μμμ§
12	a beautifully	μ΄κ²λ¬΄μ¨...λ§νλ€ λ§ν γγγ
13	a doa	7κ΄κ΅¬μ μλ²½μ μ΄λ£¨λ λ§μ
14	no apparent joy	μν 보λ€κ° μ€κ°μ λμμ΅λλ€
15	seem fresh	μ΅μκ·Έλ₯ λ³΄μ§ λ§μΈμμ§μ§ λΈμΌ
16	weak and	짱ꡬ κ·Ήμ₯νμ μΈμ λ μ΅κ³ μμ
17	skip this dreck ,	λ΄ μκ°μ μμ€ν κ±°λ€.
18	generates	κ²λ μ¬λ°λλ,,,,
19	funny yet	κ·Έλ₯ κ°μ¬λ°μ νμ λ―ΏμΌλ©΄ μλ¨
20	in memory	μ¬λ°κ² μλ΄£μ΅λλ€ λ무μ’μ΅λλ€μ
21	hawaiian shirt	λ°₯ λ¨ΉμΌλ©΄μ 보기 μ’μ μν
22	grievous but	μ¬λ―Έμ κ°λμ κ²ΈλΉν λͺμμλλ€!!
23	hopeless	μ¬κ°λ΄ κ°μ¬ν©λλ€.μ λ§λ‘
24	bring tissues .	λλ μ΄μ μ€λͺμ΄ νμν κΉ.
25	just too silly	μμ 믿보 ν©.μ .λ―Ό λ°°μ°λ~^^
26	cinematic bon bons	μ°μΆ+μ°κΈ°+μ€ν 리+μμλ―Έ+OST
27	irritates and	μΆμ΅μ 묻μ΄λμ§ κ·Έλ¬λ
28	collapse	μ΄μλ μ΅κ³ μ μ½λ―Έλ μν
29	no lika da	μ¬λ―Έμκ² κ΄λνμμ΅λλ€
30	a welcome relief	μ€λ§μ°κ·Έλ μμλ μ€λ¦΄μ΄ λ§λ€.
31	, compelling	μ²μμΌλ‘ κ·Ήμ₯μμ μ€μ΅λλ€
32	infectiously	λ무λλ μλ΄€μ΄μ κ΅Ώμλλ
33	imax in short	γγΉκ² μκΈ°κ³ μΌμλ€.γ
34	i hate it .	μ°λ§μ 보면 λν΄νλ€ μ λ§
35	a good one	κ·Έλ₯ κ²μμΌλ‘ λ΄μ§ κ·Έλ¬λ.
36	, plodding picture	μ§μ§ κ°μΆ μ΅κ³ μ νκ΅μν
37	inane and awful	μ§μ§μ΅μμλλ€...λͺμ μ보μΈμ
38	whole mess	λλ§μ 보μ§λ§μΈμ λ μκΉμ
39	enjoy the ride	μ΄κ±° λ³Ό μκ°μ μΌλμ΄λ λ΄λΌ
40	the horror	λ무λ무 μ¬λ°μ λ²μ¦ μ΅κ³ 
41	a dim	3μκ°μ΄ μ ν μκΉμ§ μμ
42	amazingly lame .	μ‘Έμμ΄λ€..
43	to spare wildlife	λΈμ°μΌμ€γ‘ μ΄λ§μ‘ μ΄μ μ±μ°κΈ°
44	carnage and	2022λμ΅κ³ νκ΅μν
45	second fiddle	μ¬λ―Έμλ€λ무μ¬λ―Έμλ€OSTμ§κ²Ήλ€
46	a stylish exercise	λλ¦ μ¬λ°κ² λ΄ κ°λ³κ² 보기 μ’μλ―
47	than this mess	μ...κ°λνμ΄ λμ’λ€... λκΈ΄λ°
48	valuable messages	κ°μκΈ° λκ²μ γΉγγγ
49	usual worst	λ³μ 1μ λ μ£ΌκΈ°κ° μκΉμ΄ μν..
gitattributes.txt
ADDED
@@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
klue_roberta-small-2400.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b572a576888999c3696750507168b1ec8c194b93e3b0a5fb69d5932cb61a410
size 272408049
lid.176.ftz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8f3472cfe8738a7b6099e8e999c3cbfae0dcd15696aac7d7738a8039db603e83
size 938013
model-1900.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f0dcb5d42751656f47868d0b1cd793c33bd2c497df57dde5514a2b15a791d05
size 498658641
requirements.txt
ADDED
@@ -0,0 +1,7 @@
gradio
datasets
transformers
torch
pandas
numpy
fasttext
roberta-base-1900.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f0dcb5d42751656f47868d0b1cd793c33bd2c497df57dde5514a2b15a791d05
size 498658641