|
import requests |
|
import gradio as gr |
|
from PIL import Image |
|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline |
|
|
|
import os |
|
import io |
|
# --- Configuration: credentials, models, and inference endpoint --------------

# Hugging Face access token; a KeyError here makes a misconfigured
# deployment fail fast instead of failing later on the first API call.
token = os.environ['HF_TOKEN']

# Dzongkha -> English translation model (NLLB-200 distilled, 600M params).
translation_model = AutoModelForSeq2SeqLM.from_pretrained(
    "KarmaCST/nllb-200-distilled-600M-dz-to-en"
)
tokenizer = AutoTokenizer.from_pretrained(
    "KarmaCST/nllb-200-distilled-600M-dz-to-en"
)

# Hosted Inference API endpoint used for text-to-image generation.
API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"

# NOTE(security): never print or log the token -- it is a secret credential.
bearer_token = "Bearer " + str(token)

headers = {"Authorization": bearer_token}

# NLLB-200 language codes: Dzongkha (Tibetan script) -> English (Latin script).
src_lang = "dzo_Tibt"

tgt_lang = "eng_Latn"
|
|
|
# Cache the pipeline across calls: building it on every request re-wraps the
# model and tokenizer and adds noticeable per-request latency for no benefit.
_translation_pipeline = None


def translate_dzongkha_to_english(text):
    """Translate Dzongkha text to English with the NLLB-200 model.

    Parameters
    ----------
    text : str
        Input text in Dzongkha (Tibetan script).

    Returns
    -------
    str
        The English translation of *text*.
    """
    global _translation_pipeline
    if _translation_pipeline is None:
        # Lazily construct the pipeline once and reuse it for every call.
        _translation_pipeline = pipeline(
            "translation",
            model=translation_model,
            tokenizer=tokenizer,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )

    return _translation_pipeline(text)[0]['translation_text']
|
|
|
def query(payload):
    """Translate a Dzongkha prompt to English and generate an image from it.

    Parameters
    ----------
    payload : str
        Dzongkha prompt text entered by the user.

    Returns
    -------
    PIL.Image.Image
        The image generated by the hosted text-to-image model.

    Raises
    ------
    requests.HTTPError
        If the inference API responds with a non-success status code.
    """
    english_text = translate_dzongkha_to_english(payload)

    # The HF Inference API expects a JSON object of the form
    # {"inputs": <prompt>}, not a bare JSON-encoded string.
    response = requests.post(API_URL, headers=headers,
                             json={"inputs": english_text})

    # Fail loudly on API errors (rate limit, model loading, bad token);
    # otherwise PIL would choke on the JSON error body with a confusing
    # "cannot identify image file" exception.
    response.raise_for_status()

    return Image.open(io.BytesIO(response.content))
|
|
|
|
|
|
|
|
|
|
|
# Build the web UI.  The Interface is created inside a Blocks context so the
# pre-built textbox can be passed to it as the input component.
with gr.Blocks() as demo:

    # Dzongkha prompt input.  (Fixed user-facing typo: "test" -> "text".)
    inp = gr.components.Textbox(
        label="Input Text",
        placeholder=" Enter Dzongkha text here ...",
    )

    gr.Interface(
        fn=query,
        inputs=inp,
        outputs="image",
        title="Dzongkha Text to Image Generation",
        examples=["བྱི་ཅུང་ཚུ་གངས་རི་གི་ཐོག་ཁར་འཕུར།","པཱ་རོ་ཁྲོམ་གྱི་ཐོག་ཁར་གནམ་གྲུ་འཕུར།","པཱ་རོ་ཁྲོམ་གྱི་ཐོག་ཁར་ ཤིང་ཚུ་གི་བར་ན་ གནམ་གྲུ་འཕུར་བའི་འཐོང་གནང་།","སློབ་ཕྲུག་ཚུ་ ཆརཔ་ནང་རྐང་རྩེད་རྩེ་དེས།"],
        article="<h1>Created By:</h1>Mr. Karma Wangchuk<br>Lecturer<br>Information Technology Department<br>College of Science and Technology<br>Rinchending Phuentsholing<br>Chhukha Bhutan<br>",
    )

demo.launch()