# title_extraction_bart_logical / bart_demo_gradio.py
import gradio as gr
import torch
import transformers
# Load the fine-tuned model and tokenizer from a saved checkpoint.
def load_model(model_path):
    saved_data = torch.load(
        model_path,
        map_location="cpu",
    )
    bart_best = saved_data["model"]      # fine-tuned state_dict
    train_config = saved_data["config"]  # training configuration (unused here)
    tokenizer = transformers.PreTrainedTokenizerFast.from_pretrained("gogamza/kobart-base-v1")

    # Load the fine-tuned weights into the pretrained KoBART model.
    model = transformers.BartForConditionalGeneration.from_pretrained("gogamza/kobart-base-v1")
    model.load_state_dict(bart_best)

    return model, tokenizer
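
# For reference, a minimal sketch of how a compatible checkpoint could have
# been produced (an assumption, not taken from this repo): the training script
# would save a dict with "model" (the state_dict) and "config" keys, e.g.
#
#   torch.save(
#       {"model": model.state_dict(), "config": train_config},
#       "./kobart-model-logical.pth",
#   )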
# Inference entry point used by the Gradio demo.
def inference(prompt):
    model_path = "./kobart-model-logical.pth"
    # Note: the model is reloaded on every request; see the sketch at the
    # bottom of the file for loading it once at startup.
    model, tokenizer = load_model(model_path=model_path)

    input_ids = tokenizer.encode(prompt)
    input_ids = torch.tensor(input_ids)
    input_ids = input_ids.unsqueeze(0)  # add a batch dimension: (1, seq_len)

    output = model.generate(input_ids)
    output = tokenizer.decode(output[0], skip_special_tokens=True)
    return output
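
# Quick local sanity check (a sketch; assumes ./kobart-model-logical.pth
# exists next to this script):
#
#   print(inference("μ˜ˆμ‹œ ν•œκ΅­μ–΄ λ¬Έλ‹¨"))
#
# model.generate() is called with its defaults above; beam search or a length
# cap could be added, e.g. model.generate(input_ids, num_beams=4, max_length=64).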
demo = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",  # return value
)
# Setting share=True creates a temporary public link accessible from outside.
demo.launch(share=True)
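
# Optional optimization (a sketch, not part of the original script): load the
# model once at startup instead of on every request, e.g.
#
#   MODEL, TOKENIZER = load_model("./kobart-model-logical.pth")
#
#   def inference(prompt):
#       ids = TOKENIZER(prompt, return_tensors="pt").input_ids
#       output = MODEL.generate(ids)
#       return TOKENIZER.decode(output[0], skip_special_tokens=True)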