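# Gradio demo for EleutherAI/polyglot-ko-1.3b, a 1.3B-parameter Korean causal language model.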
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
theme = "darkgrass"
title = "Polyglot(Korean) Demo"
model_name = "EleutherAI/polyglot-ko-1.3b"
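
# Strings the model should never emit; converted to token-id lists for generate() below.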
bad_words = [
    '...',
    '....',
    '(중략)',  # "(omitted)"
    'http'
]
description = "polyglot (1.3B 파라미터 사이즈) 한국어 모델을 시연하는 데모페이지 입니다."  # "A demo page showcasing the polyglot (1.3B-parameter) Korean model."
article = "<p style='text-align: center'><a href='https://github.com/EleutherAI/polyglot' target='_blank'>Polyglot: Large Language Models of Well-balanced Competence in Multi-languages</a></p>"
examples = [
    ["CPU와 GPU의 차이는,"],  # "The difference between a CPU and a GPU is,"
    ["질문: 우크라이나 전쟁이 세계3차대전으로 확전이 될까요? \n답변:"],  # "Q: Will the war in Ukraine escalate into World War 3? \nA:"
    ["2040년 미국은, "]  # "In 2040, the United States, "
]
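
# Load the tokenizer and model weights from the Hugging Face Hub.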
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()  # inference mode: disables dropout and other train-time behavior
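
# Optional high-level wrapper around the same model; only the commented-out
# return in predict() below uses it.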
pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, eos_token_id=tokenizer.eos_token_id)

def predict(text):
    with torch.no_grad():
        tokens = tokenizer(text, return_tensors="pt").input_ids
        # Sample up to 64 new tokens; generation stops early once <|endoftext|> is produced.
        gen_tokens = model.generate(
            tokens,
            do_sample=True,
            temperature=0.8,         # mildly flatten the next-token distribution
            max_new_tokens=64,
            top_k=50,
            top_p=0.8,               # nucleus sampling
            no_repeat_ngram_size=3,  # forbid verbatim 3-gram repeats
            repetition_penalty=1.2,
            bad_words_ids=[tokenizer.encode(bad_word) for bad_word in bad_words],
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id
        )
        generated = tokenizer.batch_decode(gen_tokens)[0]
    return generated
    # return pipe(text)[0]['generated_text']

iface = gr.Interface(
    fn=predict,
    inputs='text',
    outputs='text',
    examples=examples,
    title=title,
    description=description,
    article=article,
    theme=theme
)
iface.launch()
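
# Note: when running outside Hugging Face Spaces, iface.launch(share=True)
# would additionally expose a temporary public URL for the demo.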