File size: 4,292 Bytes
63e1dac f89a59f 63e1dac b4696c7 879070c 63e1dac |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 |
#!/usr/bin/env python
from __future__ import annotations
import gradio as gr
# from model import AppModel
# Banner shown while the space is offline (currently not rendered; see main()).
MAINTENANCE_NOTICE='Sorry, due to computing resources issues, this space is under maintenance, and will be restored as soon as possible. '
# Page header markdown: an (empty) anchor linking to the project site.
DESCRIPTION = '''# <a href="https://ic66.ml"></a></br>
'''
# Footer note rendered as HTML; the Chinese text says the project is
# a modification of Tsinghua's CogVideo. (Runtime string — do not edit.)
NOTES = '<p>本项目基于清华的CogVideo进行修改</p>'
FOOTER = ''
import json
import requests
import numpy as np
import imageio.v2 as iio
def post(
    text,
    translate,
    seed,
    only_first_stage,
    image_prompt
):
    """Send a generation request to the remote CogVideo service and write
    the returned frame sequences out as four local video files.

    Args:
        text: Prompt text for video generation.
        translate: Whether the server should translate the prompt.
        seed: Random seed forwarded to the server.
        only_first_stage: If True, the server runs only the first stage.
        image_prompt: Optional image file path used as an image prompt.

    Returns:
        The four result-video file paths reported by the server.
    """
    url = 'https://ccb8is4fqtofrtdsfjebg.ml-platform-cn-beijing.volces.com/devinstance/di-20221130120908-bhpxq/proxy/6201'
    headers = {
        "Content-Type": "application/json; charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36",
    }
    data = json.dumps({'text': text,
                       'translate': translate,
                       'seed': seed,
                       'only_first_stage': only_first_stage,
                       'image_prompt': image_prompt
                       })
    r = requests.post(url, data, headers=headers)
    # Parse the JSON body once instead of calling r.json() per field.
    payload = r.json()['data']
    result_video = payload['result_video']
    frames = payload['frames']
    # Encode each frame sequence to its corresponding video path.
    # NOTE(review): assumes the server returns exactly 4 videos, matching
    # the 4 Video outputs wired up in main() — confirm against the API.
    for path, frame_seq in zip(result_video[:4], frames):
        writer = iio.get_writer(path, fps=4)
        try:
            for frame in frame_seq:
                writer.append_data(np.array(frame))
        finally:
            # Always release the writer, even if encoding a frame fails.
            writer.close()
    print('finish')
    return result_video[0], result_video[1], result_video[2], result_video[3]
def main():
    """Build and launch the Gradio demo UI.

    The UI collects a text prompt, a translation flag, a seed, a stage flag
    and an optional image prompt, then displays the four generated videos
    returned by ``post`` when the Run button is clicked.
    """
    only_first_stage = True
    # model = AppModel(only_first_stage)
    with gr.Blocks(css='style.css') as demo:
        # gr.Markdown(MAINTENANCE_NOTICE)
        gr.Markdown(DESCRIPTION)
        with gr.Row():
            with gr.Column():
                with gr.Group():
                    text = gr.Textbox(label='Input Text')
                    translate = gr.Checkbox(label='Translate to Chinese',
                                            value=False)
                    seed = gr.Slider(0,
                                     100000,
                                     step=1,
                                     value=1234,
                                     label='Seed')
                    # Hidden because only_first_stage is forced True above;
                    # the checkbox still carries the value to post().
                    only_first_stage = gr.Checkbox(
                        label='Only First Stage',
                        value=only_first_stage,
                        visible=not only_first_stage)
                    image_prompt = gr.Image(type="filepath",
                                            label="Image Prompt",
                                            value=None)
                    run_button = gr.Button('Run')
            with gr.Column():
                with gr.Group():
                    #translated_text = gr.Textbox(label='Translated Text')
                    with gr.Tabs():
                        with gr.TabItem('Output (Video)'):
                            result_video1 = gr.Video(show_label=False)
                            result_video2 = gr.Video(show_label=False)
                            result_video3 = gr.Video(show_label=False)
                            result_video4 = gr.Video(show_label=False)
        gr.Markdown(NOTES)
        gr.Markdown(FOOTER)
        # Log the Gradio version once (the original printed it twice).
        print(gr.__version__)
        run_button.click(fn=post,
                         inputs=[
                             text,
                             translate,
                             seed,
                             only_first_stage,
                             image_prompt
                         ],
                         outputs=[result_video1, result_video2, result_video3, result_video4])
    demo.launch()
# Script entry point: build and launch the demo when run directly.
if __name__ == '__main__':
    main()
|