flash64 committed
Commit 8324511
1 Parent(s): 81e1e65

Create app.py

Files changed (1)
app.py +4 -77
app.py CHANGED
@@ -1,80 +1,7 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
 import gradio as gr

-from model import AppModel
-
-DESCRIPTION = '''# <a href="https://github.com/THUDM/CogVideo">CogVideo</a>
-
-Currently, this Space only supports the first stage of the CogVideo pipeline due to hardware limitations.
-
-The model accepts only Chinese as input.
-By checking the "Translate to Chinese" checkbox, the results of English to Chinese translation with [this Space](https://huggingface.co/spaces/chinhon/translation_eng2ch) will be used as input.
-Since the translation model may mistranslate, you may want to use the translation results from other translation services.
-'''
-NOTES = 'This app is adapted from <a href="https://github.com/hysts/CogVideo_demo">https://github.com/hysts/CogVideo_demo</a>. It would be recommended to use the repo if you want to run the app yourself.'
-FOOTER = '<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=THUDM.CogVideo" />'
-
-
-def main():
-    only_first_stage = True
-    model = AppModel(only_first_stage)
-
-    with gr.Blocks(css='style.css') as demo:
-        gr.Markdown(DESCRIPTION)
-
-        with gr.Row():
-            with gr.Column():
-                with gr.Group():
-                    text = gr.Textbox(label='Input Text')
-                    translate = gr.Checkbox(label='Translate to Chinese',
-                                            value=False)
-                    seed = gr.Slider(0,
-                                     100000,
-                                     step=1,
-                                     value=1234,
-                                     label='Seed')
-                    only_first_stage = gr.Checkbox(
-                        label='Only First Stage',
-                        value=only_first_stage,
-                        visible=not only_first_stage)
-                    image_prompt = gr.Image(type="filepath",
-                                            label="Image Prompt",
-                                            value=None)
-                    run_button = gr.Button('Run')
-
-            with gr.Column():
-                with gr.Group():
-                    translated_text = gr.Textbox(label='Translated Text')
-                    with gr.Tabs():
-                        with gr.TabItem('Output (Video)'):
-                            result_video = gr.Video(show_label=False)
-
-        examples = gr.Examples(
-            examples=[['骑滑板的皮卡丘', False, 1234, True, None],
-                      ['a cat playing chess', True, 1253, True, None]],
-            fn=model.run_with_translation,
-            inputs=[text, translate, seed, only_first_stage, image_prompt],
-            outputs=[translated_text, result_video],
-            cache_examples=True)
-
-        gr.Markdown(NOTES)
-        gr.Markdown(FOOTER)
-        print(gr.__version__)
-        run_button.click(fn=model.run_with_translation,
-                         inputs=[
-                             text,
-                             translate,
-                             seed,
-                             only_first_stage,
-                             image_prompt
-                         ],
-                         outputs=[translated_text, result_video])
-        print(gr.__version__)
-        demo.launch()
-
+def greet(name):
+    return "Hello " + name + "!!"

-if __name__ == '__main__':
-    main()
+iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+iface.launch()
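
The new app.py is the standard Gradio quickstart: greet() simply concatenates strings (greet("World") returns "Hello World!!"), and gr.Interface wires one text input to one text output before iface.launch() starts the demo. The removed Blocks UI, by contrast, depended on model.py, which this commit does not touch; the only contract it imposed was the call shape of AppModel.run_with_translation, wired above to five inputs and two outputs. A minimal stand-in with that shape, offered purely as a sketch (StubAppModel and its behavior are assumptions, not part of the repository), could look like:

# Hypothetical stand-in for the AppModel used by the removed Blocks UI.
# Only the call shape (text, translate, seed, only_first_stage, image_prompt)
# -> (translated_text, result_video) is taken from the wiring above; the real
# model.py performs the translation and CogVideo inference.
class StubAppModel:
    def __init__(self, only_first_stage: bool = True):
        self.only_first_stage = only_first_stage

    def run_with_translation(self, text, translate, seed, only_first_stage,
                             image_prompt):
        # Echo the prompt and return no video; a real implementation would
        # translate `text` to Chinese when `translate` is True and return a
        # path to the generated clip.
        translated_text = text
        result_video = None  # no video is produced; the gr.Video output stays empty
        return translated_text, result_video

Substituting such a stub for AppModel(only_first_stage) would let the old interface launch without the model weights, which can be handy when iterating on the UI alone.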