Update app.py
app.py CHANGED
@@ -218,8 +218,8 @@ for d in dataset:
         data_per_lang[full] = data_per_lang.get(code, []) + [d[k]]
 
 
-def get_results(tokenizer_name, base_lang, comp_lang):
-    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
+def get_results(tokenizer_name, base_lang, comp_lang, HF_token=""):
+    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, token=HF_token if HF_token != "" else False)
     base_data = data_per_lang[base_lang]
     comp_data = data_per_lang[comp_lang]
 
@@ -251,8 +251,11 @@ def get_results(tokenizer_name, base_lang, comp_lang):
 
 with gr.Blocks() as demo:
     with gr.Column():
-        with gr.
-
+        with gr.Column():
+            with gr.Row():
+                tokenizer = gr.Textbox(label="Tokenizer name", value="bert-base-cased")
+            with gr.Row():
+                HF_token = gr.Textbox(label="your HF Token")
 
         with gr.Row():
             with gr.Column():
@@ -271,7 +274,7 @@ with gr.Blocks() as demo:
 
     btn.click(
         get_results,
-        inputs=[tokenizer, base_lang, comp_lang],
+        inputs=[tokenizer, base_lang, comp_lang, HF_token],
         outputs=[out_text],
         api_name=False,
     )