hysts HF staff committed on
Commit
fde1134
1 Parent(s): 185d817

Add a simpler interface

Browse files
Files changed (2) hide show
  1. app.py +90 -58
  2. model.py +12 -1
app.py CHANGED
@@ -6,8 +6,9 @@ import gradio as gr
6
 
7
  from model import AppModel
8
 
9
- DESCRIPTION = '''# <a href="https://github.com/THUDM/CogView2">CogView2</a> (text2image)
10
 
 
11
  The model accepts English or Chinese as input.
12
  In general, Chinese input produces better results than English input.
13
  By checking the "Translate to Chinese" checkbox, the results of English to Chinese translation with [this Space](https://huggingface.co/spaces/chinhon/translation_eng2ch) will be used as input. Since the translation model may mistranslate, you may want to use the translation results from other translation services.
@@ -31,64 +32,94 @@ def main():
31
  model = AppModel(max_inference_batch_size, only_first_stage)
32
 
33
  with gr.Blocks(css='style.css') as demo:
34
- gr.Markdown(DESCRIPTION)
35
-
36
- with gr.Row():
37
- with gr.Column():
38
- with gr.Group():
39
- text = gr.Textbox(label='Input Text')
40
- translate = gr.Checkbox(label='Translate to Chinese',
41
- value=False)
42
- style = gr.Dropdown(choices=[
43
- 'none',
44
- 'mainbody',
45
- 'photo',
46
- 'flat',
47
- 'comics',
48
- 'oil',
49
- 'sketch',
50
- 'isometric',
51
- 'chinese',
52
- 'watercolor',
53
- ],
54
- value='mainbody',
55
- label='Style')
56
- seed = gr.Slider(0,
57
- 100000,
58
- step=1,
59
- value=1234,
60
- label='Seed')
61
- only_first_stage = gr.Checkbox(
62
- label='Only First Stage',
63
- value=only_first_stage,
64
- visible=not only_first_stage)
65
- num_images = gr.Slider(1,
66
- 16,
67
- step=1,
68
- value=4,
69
- label='Number of Images')
70
- run_button = gr.Button('Run')
71
-
72
- with open('samples.txt') as f:
73
- samples = [
74
- line.strip().split('\t') for line in f.readlines()
75
- ]
76
- examples = gr.Dataset(components=[text, style],
77
- samples=samples)
78
-
79
- with gr.Column():
80
- with gr.Group():
81
- translated_text = gr.Textbox(label='Translated Text')
82
- with gr.Tabs():
83
- with gr.TabItem('Output (Grid View)'):
84
- result_grid = gr.Image(show_label=False)
85
- with gr.TabItem('Output (Gallery)'):
86
- result_gallery = gr.Gallery(show_label=False)
87
-
88
- gr.Markdown(NOTES)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  gr.Markdown(FOOTER)
90
 
91
- run_button.click(fn=model.run_with_translation,
 
 
 
92
  inputs=[
93
  text,
94
  translate,
@@ -104,7 +135,8 @@ def main():
104
  ])
105
  examples.click(fn=set_example_text,
106
  inputs=examples,
107
- outputs=examples.components)
 
108
 
109
  demo.launch(enable_queue=True)
110
 
6
 
7
  from model import AppModel
8
 
9
+ TITLE = '# <a href="https://github.com/THUDM/CogView2">CogView2</a> (text2image)'
10
 
11
+ DESCRIPTION = '''
12
  The model accepts English or Chinese as input.
13
  In general, Chinese input produces better results than English input.
14
  By checking the "Translate to Chinese" checkbox, the results of English to Chinese translation with [this Space](https://huggingface.co/spaces/chinhon/translation_eng2ch) will be used as input. Since the translation model may mistranslate, you may want to use the translation results from other translation services.
32
  model = AppModel(max_inference_batch_size, only_first_stage)
33
 
34
  with gr.Blocks(css='style.css') as demo:
35
+
36
+ with gr.Tabs():
37
+ with gr.TabItem('Simple Mode'):
38
+ gr.Markdown(TITLE)
39
+
40
+ with gr.Row().style(mobile_collapse=False, equal_height=True):
41
+ text_simple = gr.Textbox(placeholder='Enter your prompt',
42
+ show_label=False,
43
+ max_lines=1).style(
44
+ border=(True, False, True,
45
+ True),
46
+ rounded=(True, False, False,
47
+ True),
48
+ container=False,
49
+ )
50
+ run_button_simple = gr.Button('Run').style(
51
+ margin=False,
52
+ rounded=(False, True, True, False),
53
+ )
54
+ result_grid_simple = gr.Image(show_label=False)
55
+
56
+ with gr.TabItem('Advanced Mode'):
57
+ gr.Markdown(TITLE)
58
+ gr.Markdown(DESCRIPTION)
59
+
60
+ with gr.Row():
61
+ with gr.Column():
62
+ with gr.Group():
63
+ text = gr.Textbox(label='Input Text')
64
+ translate = gr.Checkbox(
65
+ label='Translate to Chinese', value=False)
66
+ style = gr.Dropdown(choices=[
67
+ 'none',
68
+ 'mainbody',
69
+ 'photo',
70
+ 'flat',
71
+ 'comics',
72
+ 'oil',
73
+ 'sketch',
74
+ 'isometric',
75
+ 'chinese',
76
+ 'watercolor',
77
+ ],
78
+ value='mainbody',
79
+ label='Style')
80
+ seed = gr.Slider(0,
81
+ 100000,
82
+ step=1,
83
+ value=1234,
84
+ label='Seed')
85
+ only_first_stage = gr.Checkbox(
86
+ label='Only First Stage',
87
+ value=only_first_stage,
88
+ visible=not only_first_stage)
89
+ num_images = gr.Slider(1,
90
+ 16,
91
+ step=1,
92
+ value=4,
93
+ label='Number of Images')
94
+ run_button = gr.Button('Run')
95
+
96
+ with open('samples.txt') as f:
97
+ samples = [
98
+ line.strip().split('\t')
99
+ for line in f.readlines()
100
+ ]
101
+ examples = gr.Dataset(components=[text, style],
102
+ samples=samples)
103
+
104
+ with gr.Column():
105
+ with gr.Group():
106
+ translated_text = gr.Textbox(
107
+ label='Translated Text')
108
+ with gr.Tabs():
109
+ with gr.TabItem('Output (Grid View)'):
110
+ result_grid = gr.Image(show_label=False)
111
+ with gr.TabItem('Output (Gallery)'):
112
+ result_gallery = gr.Gallery(
113
+ show_label=False)
114
+
115
+ gr.Markdown(NOTES)
116
+
117
  gr.Markdown(FOOTER)
118
 
119
+ run_button_simple.click(fn=model.run_simple,
120
+ inputs=text_simple,
121
+ outputs=result_grid_simple)
122
+ run_button.click(fn=model.run_advanced,
123
  inputs=[
124
  text,
125
  translate,
135
  ])
136
  examples.click(fn=set_example_text,
137
  inputs=examples,
138
+ outputs=examples.components,
139
+ queue=False)
140
 
141
  demo.launch(enable_queue=True)
142
 
model.py CHANGED
@@ -7,6 +7,7 @@ import functools
7
  import logging
8
  import os
9
  import pathlib
 
10
  import subprocess
11
  import sys
12
  import time
@@ -402,6 +403,7 @@ class AppModel(Model):
402
  super().__init__(max_inference_batch_size, only_first_stage)
403
  self.translator = gr.Interface.load(
404
  'spaces/chinhon/translation_eng2ch')
 
405
 
406
  def make_grid(self, images: list[np.ndarray] | None) -> np.ndarray | None:
407
  if images is None or len(images) == 0:
@@ -422,7 +424,7 @@ class AppModel(Model):
422
  grid[h * i:h * (i + 1), w * j:w * (j + 1)] = images[index]
423
  return grid
424
 
425
- def run_with_translation(
426
  self, text: str, translate: bool, style: str, seed: int,
427
  only_first_stage: bool, num: int
428
  ) -> tuple[str | None, np.ndarray | None, list[np.ndarray] | None]:
@@ -436,3 +438,12 @@ class AppModel(Model):
436
  results = self.run(text, style, seed, only_first_stage, num)
437
  grid_image = self.make_grid(results)
438
  return translated_text, grid_image, results
 
 
 
 
 
 
 
 
 
7
  import logging
8
  import os
9
  import pathlib
10
+ import random
11
  import subprocess
12
  import sys
13
  import time
403
  super().__init__(max_inference_batch_size, only_first_stage)
404
  self.translator = gr.Interface.load(
405
  'spaces/chinhon/translation_eng2ch')
406
+ self.rng = random.Random()
407
 
408
  def make_grid(self, images: list[np.ndarray] | None) -> np.ndarray | None:
409
  if images is None or len(images) == 0:
424
  grid[h * i:h * (i + 1), w * j:w * (j + 1)] = images[index]
425
  return grid
426
 
427
+ def run_advanced(
428
  self, text: str, translate: bool, style: str, seed: int,
429
  only_first_stage: bool, num: int
430
  ) -> tuple[str | None, np.ndarray | None, list[np.ndarray] | None]:
438
  results = self.run(text, style, seed, only_first_stage, num)
439
  grid_image = self.make_grid(results)
440
  return translated_text, grid_image, results
441
+
442
+ def run_simple(self, text: str) -> np.ndarray | None:
443
+ logger.info(f'{text=}')
444
+ if text.isascii():
445
+ text = self.translator(text)
446
+ seed = self.rng.randint(0, 100000)
447
+ results = self.run(text, 'photo', seed, False, 4)
448
+ grid_image = self.make_grid(results)
449
+ return grid_image