import gradio as gr
import os
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import paddlehub as hub
# psutil is used below to log CPU and RAM usage
import psutil

HF_TOKEN = os.environ["HF_TOKEN"]
# Load the ERNIE-ViLG text-to-image module from PaddleHub
model = hub.Module(name='ernie_vilg')

def get_ernie_vilg(text_prompts, style):
  # Keep only the Chinese style name (the part before the '-') expected by ERNIE-ViLG
  style = style.split('-')[0]
  results = model.generate_image(text_prompts=text_prompts, style=style, visualization=False)
  # Log CPU and RAM usage after generation (psutil-based monitoring)
  print(f"ERNIE CPU percent is: {psutil.cpu_percent()}")
  print(f"ERNIE virtual memory is: {psutil.virtual_memory().percent}")

  return results[0]
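
# Minimal standalone sketch (assumption: with visualization=False the PaddleHub
# 'ernie_vilg' module returns a list of PIL images, which is what the
# gr.Image(type="pil") output below expects). Uncomment to smoke-test the wrapper
# outside of the Gradio UI; the prompt means "a cat wearing glasses":
#
#   sample_img = get_ernie_vilg("戴着眼镜的猫", "油画-OilPainting")
#   sample_img.save("ernie_sample.png")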
  
# Load the public Stable Diffusion Space (used by the currently disabled SD path below)
sd_inf = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion", use_auth_token=HF_TOKEN)
 
# NLLB model and tokenizer for English -> Simplified Chinese translation
nllb_model_name = 'facebook/nllb-200-distilled-600M'
nllb_model = AutoModelForSeq2SeqLM.from_pretrained(nllb_model_name)
nllb_tokenizer = AutoTokenizer.from_pretrained(nllb_model_name)

def get_chinese_translation(text):
  # Source and target languages are fixed: English -> Simplified Chinese (NLLB language codes)
  print("******** Inside get_chinese_translation ********")
  src = 'eng_Latn'
  tgt = 'zho_Hans'
  print(f"text is: {text}, source language is: {src}, target language is: {tgt}")

  translator = pipeline('translation', model=nllb_model, tokenizer=nllb_tokenizer, src_lang=src, tgt_lang=tgt)
  output = translator(text, max_length=400)
  print(f"initial output is: {output}")
  output = output[0]['translation_text']
  print(f"output is: {output}")

  # Log CPU and RAM usage after translation (psutil-based monitoring)
  print(f"CPU percent is: {psutil.cpu_percent()}")
  print(f"virtual memory is: {psutil.virtual_memory().percent}")

  return output
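
# Design note (a sketch, not part of the original flow): the translation pipeline above is
# rebuilt on every button click. Since the model and tokenizer are already loaded at module
# level, one cheaper alternative would be to build the pipeline once and reuse it, e.g.:
#
#   nllb_translator = pipeline('translation', model=nllb_model, tokenizer=nllb_tokenizer,
#                              src_lang='eng_Latn', tgt_lang='zho_Hans')
#   # ...and then call nllb_translator(text, max_length=400) inside get_chinese_translation.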
  
# Blocks-based inference against the Stable Diffusion Space is currently not working,
# so the button that calls this function is commented out in the UI below.
def get_sd(translated_txt, samples, steps, scale, seed):
  print("******** Inside get_sd ********")
  print(f"translated_txt is: {translated_txt}")
  sd_img_gallery = sd_inf(translated_txt, samples, steps, scale, seed, fn_index=1)[0]

  return sd_img_gallery
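
# Note on fn_index=1 above: when a Space is loaded via gr.Blocks.load, fn_index selects
# which of the remote app's event handlers is called, so this is tied to the internal
# layout of the stabilityai/stable-diffusion Space and may break whenever that Space
# changes (one assumed reason this path is disabled).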

demo = gr.Blocks()

with demo:
  gr.Markdown("<h1><center>ERNIE in English !</center></h1>")
  gr.Markdown("<h3><center>ERNIE-ViLG is a state-of-the-art text-to-image model that generates images from simplified Chinese text.</center></h3>")
  gr.Markdown("<h3><center>This app helps you in checking-out ERNIE in English. Note that due to limitations on available Ram, only one image is being generated at the moment<br><br>Please access the original model here - [ERNIE-ViLG](https://huggingface.co/spaces/PaddlePaddle/ERNIE-ViLG)</center></h3>")
  with gr.Row():
    with gr.Column():
      in_text_prompt = gr.Textbox(label="Enter English text here")
      out_text_chinese = gr.Textbox(label="Text in Simplified Chinese")
      
    b1 = gr.Button("English to Simplified Chinese")
    
    #s1 = gr.Slider(label='samples', value=4, visible=False)
    #s2 = gr.Slider(label='steps', value=45, visible=False)
    #s3 = gr.Slider(label='scale', value=7.5, visible=False)
    #s4 = gr.Slider(label='seed', value=1024, visible=False)
    
  with gr.Row():
    with gr.Column():
      in_styles = gr.Dropdown(['水彩-WaterColor', '油画-OilPainting', '粉笔画-ChalkDrawing', '卡通-Cartoon', '蜡笔画-Crayon', '儿童画-ChildrensDrawing', '探索无限-ExploringTheInfinite'])
      b2 = gr.Button("Generate Images from Ernie")
      
    out_ernie = gr.Image(type="pil", label="Ernie output for the given prompt")
    #out_gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery") #.style(grid=[2, 3], height="auto")
    #in_language_first = gr.Textbox(visible=False, value= 'eng_Latn') #'English'
    #in_language_second = gr.Textbox(visible=False, value= 'zho_Hans') #'Chinese (Simplified)'
    
    
    #out_sd = gr.Image(type="pil", label="SD output for the given prompt")
    #b3 = gr.Button("Generate Images from SD")
    
    b1.click(get_chinese_translation, in_text_prompt, out_text_chinese)
    b2.click(get_ernie_vilg, [out_text_chinese, in_styles], out_ernie)
    
    #b3.click(get_sd, [in_text_prompt,s1,s2,s3,s4], out_sd) #out_gallery )  
    
demo.launch(enable_queue=True, debug=True)
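
# Note (Gradio 3.x behaviour assumed, where enable_queue is a launch() argument):
# enable_queue=True queues incoming requests so that long-running ERNIE and translation
# calls are less likely to hit request timeouts.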