|
import gradio |
|
from transformers import GPT2LMHeadModel, GPT2Tokenizer, AdamW |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face model ID of a GPT-2 checkpoint fine-tuned (Alpaca-style)
# for English -> Sinhala translation prompts.
model_name = "zaanind/gpt2_finetune_alpaca"




# Downloads/loads the tokenizer from the Hub on first run.
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# GPT-2 has no pad token; reuse EOS so batching/generation can pad.
tokenizer.pad_token = tokenizer.eos_token



# Downloads/loads the model weights from the Hub on first run.
model = GPT2LMHeadModel.from_pretrained(model_name)
|
|
|
|
|
|
|
def translate(text):
    """Translate *text* to Sinhala with the fine-tuned GPT-2 model.

    Parameters
    ----------
    text : str
        Source sentence to translate.

    Returns
    -------
    str
        The generated translation only (the instruction prompt is
        stripped from the decoded sequence).
    """
    # The model was fine-tuned on this instruction format; the trailing
    # marker is where the answer begins in the generated sequence.
    marker = "sure,here the translation of the provided text - "
    prompt = f"<s>[INST] translate this sentence to sinhala - {text} [/INST] {marker}"

    input_ids = tokenizer.encode(prompt, return_tensors='pt')

    # pad_token_id is set explicitly because GPT-2 has none by default
    # (the tokenizer's pad token was remapped to EOS at load time).
    output = model.generate(
        input_ids,
        max_length=250,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )

    decoded = tokenizer.decode(output[0], skip_special_tokens=True)

    # Bug fix: generate() returns prompt + continuation, so the decoded
    # string starts with the whole instruction prompt. Return only the
    # text after the answer marker instead of echoing the prompt back.
    translation = decoded.split(marker, 1)[-1].strip()

    return translation
|
|
|
|
|
def nmtapifunc(text):
    """Gradio endpoint: delegate the input sentence to translate()."""
    return translate(text)
|
|
|
# Wire the translation function into a simple text-in / text-out web UI.
gradio_interface = gradio.Interface(

    fn=nmtapifunc,

    inputs="text",

    outputs="text",

    title="ZoomAI Inference Server",

    description="",

    article="© Zaanind 2023-2024"

)
# Start the local gradio web server (blocks until shut down).
gradio_interface.launch()