yufengzhu committed on
Commit
d61718f
1 Parent(s): fc459bd

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -10
app.py CHANGED
@@ -9,9 +9,7 @@ import gradio as gr
9
  MAINTENANCE_NOTICE='Sorry, due to computing resources issues, this space is under maintenance, and will be restored as soon as possible. '
10
 
11
  DESCRIPTION = '''# <a href="https://github.com/THUDM/CogVideo">CogVideo</a>
12
-
13
  Currently, this Space only supports the first stage of the CogVideo pipeline due to hardware limitations.
14
-
15
  The model accepts only Chinese as input.
16
  By checking the "Translate to Chinese" checkbox, the results of English to Chinese translation with [this Space](https://huggingface.co/spaces/chinhon/translation_eng2ch) will be used as input.
17
  Since the translation model may mistranslate, you may want to use the translation results from other translation services.
@@ -23,6 +21,7 @@ import json
23
  import requests
24
  import numpy as np
25
  import imageio.v2 as iio
 
26
 
27
  def post(
28
  text,
@@ -31,30 +30,32 @@ def post(
31
  only_first_stage,
32
  image_prompt
33
  ):
34
- url = 'https://ccb8is4fqtofrtdsfjebg.ml-platform-cn-beijing.volces.com/devinstance/di-20221130120908-bhpxq/proxy/6201'
35
  headers = {
36
  "Content-Type": "application/json; charset=UTF-8",
37
  "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36",
38
  }
39
-
 
40
  data = json.dumps({'text': text,
41
  'translate': translate,
42
  'seed': seed,
43
  'only_first_stage': only_first_stage,
44
- 'image_prompt': image_prompt
45
  })
46
  r = requests.post(url, data, headers=headers)
47
 
48
  translated_text = r.json()['data']['translated_text']
49
  result_video = r.json()['data']['result_video']
50
  frames = r.json()['data']['frames']
51
- for i in range(4):
52
  writer = iio.get_writer(result_video[i], fps=4)
53
  for frame in frames[i]:
54
  writer.append_data(np.array(frame))
55
  writer.close()
56
  print('finish')
57
- return result_video[0], result_video[1], result_video[2], result_video[3]
 
58
 
59
  def main():
60
  only_first_stage = True
@@ -92,8 +93,8 @@ def main():
92
  with gr.TabItem('Output (Video)'):
93
  result_video1 = gr.Video(show_label=False)
94
  result_video2 = gr.Video(show_label=False)
95
- result_video3 = gr.Video(show_label=False)
96
- result_video4 = gr.Video(show_label=False)
97
 
98
 
99
 
@@ -123,4 +124,4 @@ def main():
123
 
124
 
125
  if __name__ == '__main__':
126
- main()
 
9
  MAINTENANCE_NOTICE='Sorry, due to computing resources issues, this space is under maintenance, and will be restored as soon as possible. '
10
 
11
  DESCRIPTION = '''# <a href="https://github.com/THUDM/CogVideo">CogVideo</a>
 
12
  Currently, this Space only supports the first stage of the CogVideo pipeline due to hardware limitations.
 
13
  The model accepts only Chinese as input.
14
  By checking the "Translate to Chinese" checkbox, the results of English to Chinese translation with [this Space](https://huggingface.co/spaces/chinhon/translation_eng2ch) will be used as input.
15
  Since the translation model may mistranslate, you may want to use the translation results from other translation services.
 
21
  import requests
22
  import numpy as np
23
  import imageio.v2 as iio
24
+ import base64
25
 
26
  def post(
27
  text,
 
30
  only_first_stage,
31
  image_prompt
32
  ):
33
+ url = 'https://tianqi.aminer.cn/cogvideo/api/generate'
34
  headers = {
35
  "Content-Type": "application/json; charset=UTF-8",
36
  "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36",
37
  }
38
+ with open(image_prompt, "rb") as image_file:
39
+ encoded_img = base64.b64encode(image_file.read())
40
  data = json.dumps({'text': text,
41
  'translate': translate,
42
  'seed': seed,
43
  'only_first_stage': only_first_stage,
44
+ 'image_prompt': encoded_img
45
  })
46
  r = requests.post(url, data, headers=headers)
47
 
48
  translated_text = r.json()['data']['translated_text']
49
  result_video = r.json()['data']['result_video']
50
  frames = r.json()['data']['frames']
51
+ for i in range(2):
52
  writer = iio.get_writer(result_video[i], fps=4)
53
  for frame in frames[i]:
54
  writer.append_data(np.array(frame))
55
  writer.close()
56
  print('finish')
57
+ return result_video[0], result_video[1]
58
+ # return result_video[0], result_video[1], result_video[2], result_video[3]
59
 
60
  def main():
61
  only_first_stage = True
 
93
  with gr.TabItem('Output (Video)'):
94
  result_video1 = gr.Video(show_label=False)
95
  result_video2 = gr.Video(show_label=False)
96
+ # result_video3 = gr.Video(show_label=False)
97
+ # result_video4 = gr.Video(show_label=False)
98
 
99
 
100
 
 
124
 
125
 
126
  if __name__ == '__main__':
127
+ main()