import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "cameltech/japanese-gpt-1b-PII-masking"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Task instruction prepended to every input
# (Japanese: "Mask the personal information in the text").
instruction = "文中の個人情報をマスキングせよ\n\n"

if torch.cuda.is_available():
    model = model.to("cuda")


def preprocess(text):
    # The model encodes line breaks as the literal string "<LB>".
    return text.replace("\n", "<LB>")


def postprocess(text):
    # Restore real line breaks in the model output.
    return text.replace("<LB>", "\n")


def generate(input_text):
    # Build the prompt: task instruction + user text, terminated with the
    # EOS token, with line breaks encoded for the model.
    input_text = instruction + input_text + tokenizer.eos_token
    input_text = preprocess(input_text)

    with torch.no_grad():
        token_ids = tokenizer.encode(input_text, add_special_tokens=False, return_tensors="pt")

        output_ids = model.generate(
            token_ids.to(model.device),
            max_new_tokens=256,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens (everything after the prompt).
    output = tokenizer.decode(output_ids.tolist()[0][token_ids.size(1):], skip_special_tokens=True)
    return postprocess(output)
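

# Quick smoke test without launching the UI (hypothetical input; the exact
# mask labels the model emits depend on its training data):
#
#     print(generate("山田太郎です。電話番号は090-1234-5678です。"))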


iface = gr.Interface(
    fn=generate,
    inputs="text",
    outputs="text",
)

iface.launch()
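
# launch() serves the app locally by default; to expose a temporary public
# URL (a standard Gradio option), call iface.launch(share=True) instead.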