# NOTE: removed Hugging Face page-header residue that was pasted into this file
# ("jason9693's picture / modified model path / commit 34c0980") — it is not Python code.
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, StoppingCriteriaList
import os
import torch
# Gradio page settings for the demo UI.
theme = "darkgrass"
title = "Polyglot(Korean) Demo"
# Hugging Face Hub id of the 1.3B-parameter Korean Polyglot causal LM.
model_name = "EleutherAI/polyglot-ko-1.3b"
# Phrases banned from generation via `bad_words_ids` in predict():
# ellipses, "(์ค‘๋žต)" (Korean for "omitted"), and URL fragments.
bad_words = [
'...',
'....',
'(์ค‘๋žต)',
'http'
]
# Page description (Korean): "Demo page showcasing the polyglot
# (1.3B-parameter) Korean model."
description = "polyglot (1.3B ํŒŒ๋ผ๋ฏธํ„ฐ ์‚ฌ์ด์ฆˆ) ํ•œ๊ตญ์–ด ๋ชจ๋ธ์„ ์‹œ์—ฐํ•˜๋Š” ๋ฐ๋ชจํŽ˜์ด์ง€ ์ž…๋‹ˆ๋‹ค."
# Footer link to the Polyglot project repository.
article = "<p style='text-align: center'><a href='https://github.com/EleutherAI/polyglot' target='_blank'>Polyglot: Large Language Models of Well-balanced Competence in Multi-languages</a></p>"
# Clickable example prompts shown below the input box (Korean prompts:
# "The difference between CPU and GPU is," / a Q&A prompt about the war in
# Ukraine / "In 2040, the United States will,").
examples = [
["CPU์™€ GPU์˜ ์ฐจ์ด๋Š”,"],
["์งˆ๋ฌธ: ์šฐํฌ๋ผ์ด๋‚˜ ์ „์Ÿ์ด ์„ธ๊ณ„3์ฐจ๋Œ€์ „์œผ๋กœ ํ™•์ „์ด ๋ ๊นŒ์š”? \n๋‹ต๋ณ€:"],
["2040๋…„ ๋ฏธ๊ตญ์€, "]
]
# Module-level side effects: downloads (on first run) and loads the tokenizer
# and model weights from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
model_name
)
# Switch to inference mode (disables dropout etc.).
model.eval()
# NOTE(review): this pipeline is never used by predict() below — it is only
# referenced in commented-out code. Kept to preserve existing behavior.
pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, eos_token_id=tokenizer.eos_token_id)
def predict(text):
    """Generate a Korean continuation of *text* with the Polyglot model.

    Args:
        text: Prompt string entered in the Gradio textbox.

    Returns:
        The decoded generation (prompt plus sampled continuation) as a
        single plain string, with special tokens removed.
    """
    with torch.no_grad():  # inference only — no autograd bookkeeping needed
        tokens = tokenizer(text, return_tensors="pt").input_ids
        gen_tokens = model.generate(
            tokens,
            do_sample=True,
            temperature=0.8,
            max_new_tokens=64,
            top_k=50,
            top_p=0.8,
            no_repeat_ngram_size=3,
            repetition_penalty=1.2,
            # Ban ellipses / "(์ค‘๋žต)" / URL fragments from the output.
            bad_words_ids=[
                tokenizer.encode(bad_word) for bad_word in bad_words
            ],
            # Generation ends when <|endoftext|> is sampled.
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id
        )
        # Fix: skip_special_tokens=True so markers such as <|endoftext|>
        # are not shown to the demo user in the returned text.
        generated = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0]
        return generated
# Wire predict() into a text-in / text-out web UI and start the server.
# Fix: `title`, `description`, `article`, and `theme` were defined above but
# never passed to gr.Interface, so the demo page ignored them — pass them
# through so the configured page metadata actually appears.
iface = gr.Interface(
    fn=predict,
    inputs='text',
    outputs='text',
    examples=examples,
    title=title,
    description=description,
    article=article,
    theme=theme
)
iface.launch()
# Sample output from a previous run of `print(generated)` (Korean): ์ธ๊ฐ„์ฒ˜๋Ÿผ ์ƒ๊ฐํ•˜๊ณ , ํ–‰๋™ํ•˜๋Š” '์ง€๋Šฅ'์„ ํ†ตํ•ด ์ธ๋ฅ˜๊ฐ€ ์ด์ œ๊นŒ์ง€ ํ’€์ง€ ๋ชปํ–ˆ๋˜ ๋ฌธ์ œ์˜ ํ•ด๋‹ต์„ ์ฐพ์„ ์ˆ˜ ์žˆ์„ ๊ฒƒ์ด๋‹ค. ๊ณผํ•™๊ธฐ์ˆ ์ด ๊ณ ๋„๋กœ ๋ฐœ๋‹ฌํ•œ 21์„ธ๊ธฐ๋ฅผ ์‚ด์•„๊ฐˆ ์šฐ๋ฆฌ ์•„์ด๋“ค์—๊ฒŒ ๊ฐ€์žฅ ํ•„์š”ํ•œ ๊ฒƒ์€ ์‚ฌ๊ณ ๋ ฅ ํ›ˆ๋ จ์ด๋‹ค. ์‚ฌ๊ณ ๋ ฅ ํ›ˆ๋ จ์„ ํ†ตํ•ด, ์„ธ์ƒ