lalashechka committed
Commit 1198eae
1 Parent(s): 16d4e4c

Update app.py

Files changed (1):
  app.py: +132 -108
app.py CHANGED
@@ -17,119 +17,141 @@ import re
from gradio_client import Client


- def animate_img(encoded_string):
-     try:
-         r = requests.post("https://stable-video-diffusion.com/api/upload", files={"file": open(encoded_string, 'rb')})
-         hash_ = r.json()['hash']
-         time.sleep(10)
-         c = 0
-         while c < 10:
-             r2 = requests.get(f"https://stable-video-diffusion.com/result?hash={hash_}")
-             source_string = r2.text
-             if "Generation has been in progress for" in source_string:
-                 time.sleep(15)
-                 c += 1
-                 continue
-             if "Generation has been in progress for" not in source_string:
-                 pattern = r'https://storage.stable-video-diffusion.com/([a-f0-9]{32})\.mp4'
-                 matches = re.findall(pattern, source_string)
-                 sd_video = []
-                 for match in matches:
-                     sd_video.append(f"https://storage.stable-video-diffusion.com/{match}.mp4")
-                 if len(sd_video) != 0:
-                     print("s_1")
-                     return sd_video[0]
-                 else:
-                     _ = 1/0
-         print("f_1")
-     except:
-         print("2")
-         client1 = Client("https://emmadrex-stable-video-diffusion.hf.space")
-         result1 = client1.predict(encoded_string, api_name="/resize_image")
-         client = Client("https://emmadrex-stable-video-diffusion.hf.space")
-         result = client.predict(result1, 0, True, 1, 15, api_name="/video")
-         res = result[0]['video']
-         print("s_2")
-         return res
-
+ def animate_img(encoded_string, model):
+     if model == "Stable Video Diffusion":
+         try:
+             r = requests.post("https://stable-video-diffusion.com/api/upload", files={"file": open(encoded_string, 'rb')})
+             hash_ = r.json()['hash']
+             time.sleep(10)
+             c = 0
+             while c < 10:
+                 r2 = requests.get(f"https://stable-video-diffusion.com/result?hash={hash_}")
+                 source_string = r2.text
+                 if "Generation has been in progress for" in source_string:
+                     time.sleep(15)
+                     c += 1
+                     continue
+                 if "Generation has been in progress for" not in source_string:
+                     pattern = r'https://storage.stable-video-diffusion.com/([a-f0-9]{32})\.mp4'
+                     matches = re.findall(pattern, source_string)
+                     sd_video = []
+                     for match in matches:
+                         sd_video.append(f"https://storage.stable-video-diffusion.com/{match}.mp4")
+                     if len(sd_video) != 0:
+                         print("s_1")
+                         return sd_video[0]
+                     else:
+                         _ = 1/0
+             print("f_1")
+         except:
+             print("2")
+             client1 = Client("https://emmadrex-stable-video-diffusion.hf.space")
+             result1 = client1.predict(encoded_string, api_name="/resize_image")
+             client = Client("https://emmadrex-stable-video-diffusion.hf.space")
+             result = client.predict(result1, 0, True, 1, 15, api_name="/video")
+             res = result[0]['video']
+             print("s_2")
+             return res
+
+     if model == "AnimateDiff":
+         client = Client("https://ap123-animateimage.hf.space/--replicas/zlwk6/")
+         result = client.predict(encoded_string, "zoom-out", api_name="/predict")
+         return result


- def create_video(prompt):
+ def create_video(prompt, model):
      url_sd3 = os.getenv("url_sd3")
      url_sd4 = os.getenv("url_sd4")
-
-     try:
-         with closing(create_connection(f"{url_sd3}", timeout=120)) as conn:
-             conn.send('{"fn_index":3,"session_hash":""}')
-             conn.send(f'{{"data":["{prompt}","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry",7.5,"(No style)"],"event_data":null,"fn_index":3,"session_hash":""}}')
-             c = 0
-             while c < 60:
-                 status = json.loads(conn.recv())['msg']
-                 if status == 'estimation':
-                     c += 1
-                     time.sleep(1)
-                     continue
-                 if status == 'process_starts':
-                     break
-             photo = json.loads(conn.recv())['output']['data'][0][0]
-             base64_string = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')
-
-         image_bytes = base64.b64decode(base64_string)
-         with tempfile.NamedTemporaryFile(delete=False) as temp:
-             temp.write(image_bytes)
-             temp_file_path = temp.name
-         print("cs_1")
-
-     except:
-         print("c_2")
-         with closing(create_connection(f"{url_sd4}", timeout=120)) as conn:
-             conn.send('{"fn_index":0,"session_hash":""}')
-             conn.send(f'{{"data":["{prompt}","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry","dreamshaperXL10_alpha2.safetensors [c8afe2ef]",30,"DPM++ 2M Karras",7,1024,1024,-1],"event_data":null,"fn_index":0,"session_hash":""}}')
-             conn.recv()
-             conn.recv()
-             conn.recv()
-             conn.recv()
-             photo = json.loads(conn.recv())['output']['data'][0]
-             base64_string = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')
-
-         image_bytes = base64.b64decode(base64_string)
-         with tempfile.NamedTemporaryFile(delete=False) as temp:
-             temp.write(image_bytes)
-             temp_file_path = temp.name
-         print("cs_2")
-
-     try:
-         r = requests.post("https://stable-video-diffusion.com/api/upload", files={"file": open(temp_file_path, 'rb')})
-         print(r.text)
-         hash_ = r.json()['hash']
-         time.sleep(10)
-         c = 0
-         while c < 10:
-             r2 = requests.get(f"https://stable-video-diffusion.com/result?hash={hash_}")
-             source_string = r2.text
-             if "Generation has been in progress for" in source_string:
-                 time.sleep(15)
-                 c += 1
-                 continue
-             if "Generation has been in progress for" not in source_string:
-                 pattern = r'https://storage.stable-video-diffusion.com/([a-f0-9]{32})\.mp4'
-                 matches = re.findall(pattern, source_string)
-                 sd_video = []
-                 for match in matches:
-                     sd_video.append(f"https://storage.stable-video-diffusion.com/{match}.mp4")
-                 print(sd_video[0])
-                 if len(sd_video) != 0:
-                     return sd_video[0]
-                 else:
-                     _ = 1/0
-     except:
-         client1 = Client("https://emmadrex-stable-video-diffusion.hf.space")
-         result1 = client1.predict(encoded_string, api_name="/resize_image")
-         client = Client("https://emmadrex-stable-video-diffusion.hf.space")
-         result = client.predict(result1, 0, True, 1, 15, api_name="/video")
-         return result[0]['video']
+     if model == "Stable Video Diffusion":
+         try:
+             with closing(create_connection(f"{url_sd3}", timeout=120)) as conn:
+                 conn.send('{"fn_index":3,"session_hash":""}')
+                 conn.send(f'{{"data":["{prompt}","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry",7.5,"(No style)"],"event_data":null,"fn_index":3,"session_hash":""}}')
+                 c = 0
+                 while c < 60:
+                     status = json.loads(conn.recv())['msg']
+                     if status == 'estimation':
+                         c += 1
+                         time.sleep(1)
+                         continue
+                     if status == 'process_starts':
+                         break
+                 photo = json.loads(conn.recv())['output']['data'][0][0]
+                 base64_string = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')
+
+             image_bytes = base64.b64decode(base64_string)
+             with tempfile.NamedTemporaryFile(delete=False) as temp:
+                 temp.write(image_bytes)
+                 temp_file_path = temp.name
+             print("cs_1")
+
+         except:
+             print("c_2")
+             with closing(create_connection(f"{url_sd4}", timeout=120)) as conn:
+                 conn.send('{"fn_index":0,"session_hash":""}')
+                 conn.send(f'{{"data":["{prompt}","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry","dreamshaperXL10_alpha2.safetensors [c8afe2ef]",30,"DPM++ 2M Karras",7,1024,1024,-1],"event_data":null,"fn_index":0,"session_hash":""}}')
+                 conn.recv()
+                 conn.recv()
+                 conn.recv()
+                 conn.recv()
+                 photo = json.loads(conn.recv())['output']['data'][0]
+                 base64_string = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')
+
+             image_bytes = base64.b64decode(base64_string)
+             with tempfile.NamedTemporaryFile(delete=False) as temp:
+                 temp.write(image_bytes)
+                 temp_file_path = temp.name
+             print("cs_2")
+
+         try:
+             r = requests.post("https://stable-video-diffusion.com/api/upload", files={"file": open(temp_file_path, 'rb')})
+             print(r.text)
+             hash_ = r.json()['hash']
+             time.sleep(10)
+             c = 0
+             while c < 10:
+                 r2 = requests.get(f"https://stable-video-diffusion.com/result?hash={hash_}")
+                 source_string = r2.text
+                 if "Generation has been in progress for" in source_string:
+                     time.sleep(15)
+                     c += 1
+                     continue
+                 if "Generation has been in progress for" not in source_string:
+                     pattern = r'https://storage.stable-video-diffusion.com/([a-f0-9]{32})\.mp4'
+                     matches = re.findall(pattern, source_string)
+                     sd_video = []
+                     for match in matches:
+                         sd_video.append(f"https://storage.stable-video-diffusion.com/{match}.mp4")
+                     print(sd_video[0])
+                     if len(sd_video) != 0:
+                         return sd_video[0]
+                     else:
+                         _ = 1/0
+         except:
+             client1 = Client("https://emmadrex-stable-video-diffusion.hf.space")
+             result1 = client1.predict(encoded_string, api_name="/resize_image")
+             client = Client("https://emmadrex-stable-video-diffusion.hf.space")
+             result = client.predict(result1, 0, True, 1, 15, api_name="/video")
+             return result[0]['video']
+
+     if model == "AnimateDiff":
+         data = {"prompt": prompt, "negative_prompt": "EasyNegative"}
+         r = requests.post("https://sd.cuilutech.com/sdapi/async/txt2gif", json=data)
+         c = 0
+         while c < 60:
+             r2 = requests.post("https://sd.cuilutech.com/sdapi/get_task_info", json={'task_id': r.json()['data']['task_id']})
+             time.sleep(2)
+             if r2.json()['data']:
+                 photo = r2.json()['data']['image_urls'][0]
+                 break
+             c += 1
+         return photo


def flip_text1(prompt, motion):
    try:
        language = detect(prompt)

@@ -269,25 +291,27 @@ with gr.Blocks(css=css) as demo:
    with gr.Tab("Сгенерировать видео"):
        with gr.Column():
            prompt = gr.Textbox(placeholder="Введите описание видео...", show_label=True, label='Описание:', lines=3)
-             motion1 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=[
-                 "Приближение →←", "Отдаление ←→", "Вверх ↑", "Вниз ↓", "Влево ←", "Вправо →", "По часовой стрелке ⟳", "Против часовой стрелки ⟲"])
+             # motion1 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=["Приближение →←", "Отдаление ←→", "Вверх ↑", "Вниз ↓", "Влево ←", "Вправо →", "По часовой стрелке ⟳", "Против часовой стрелки ⟲"])
+             model = gr.Radio(interactive=True, value="Stable Video Diffusion", show_label=True,
+                              label="Модель нейросети:", choices=['Stable Video Diffusion', 'AnimateDiff'])
        with gr.Column():
            text_button = gr.Button("Сгенерировать видео", variant='primary', elem_id="generate")
        with gr.Column():
            video_output = gr.Video(show_label=True, label='Результат:', type="file")
-         text_button.click(create_video, inputs=[prompt], outputs=video_output)
+         text_button.click(create_video, inputs=[prompt, model], outputs=video_output)

    with gr.Tab("Анимировать изображение"):
        with gr.Column():
            prompt2 = gr.Image(show_label=True, interactive=True, type='filepath', label='Исходное изображение:')
            prompt12 = gr.Textbox(placeholder="Введите описание видео...", show_label=True, label='Описание видео (опционально):', lines=3)
-             motion2 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=[
-                 "Приближение →←", "Отдаление ←→", "Вверх ↑", "Вниз ↓", "Влево ←", "Вправо →", "По часовой стрелке ⟳", "Против часовой стрелки ⟲"])
+             # motion2 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=["Приближение →←", "Отдаление ←→", "Вверх ↑", "Вниз ↓", "Влево ←", "Вправо →", "По часовой стрелке ⟳", "Против часовой стрелки ⟲"])
+             model2 = gr.Radio(interactive=True, value="Stable Video Diffusion", show_label=True,
+                               label="Модель нейросети:", choices=['Stable Video Diffusion', 'AnimateDiff'])
        with gr.Column():
            text_button2 = gr.Button("Анимировать изображение", variant='primary', elem_id="generate")
        with gr.Column():
            video_output2 = gr.Video(show_label=True, label='Результат:', type="file")
-         text_button2.click(animate_img, inputs=[prompt2], outputs=video_output2)
+         text_button2.click(animate_img, inputs=[prompt2, model2], outputs=video_output2)

demo.queue(concurrency_count=12)
demo.launch()
 
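The Stable Video Diffusion path of animate_img and the tail of create_video inline the same upload-then-poll loop against stable-video-diffusion.com. A minimal sketch of that loop factored into a single helper — the helper name and the raised RuntimeError are illustrative, while the endpoints, progress marker, and retry counts mirror the diff:

    import re
    import time
    import requests

    VIDEO_RE = re.compile(r'https://storage\.stable-video-diffusion\.com/([a-f0-9]{32})\.mp4')

    def svd_upload_and_poll(image_path, attempts=10, delay=15):
        # Upload the source image; the API answers with a job hash.
        with open(image_path, 'rb') as f:
            r = requests.post("https://stable-video-diffusion.com/api/upload", files={"file": f})
        hash_ = r.json()['hash']
        time.sleep(10)
        # Poll the result page until the "in progress" banner disappears.
        for _ in range(attempts):
            page = requests.get(f"https://stable-video-diffusion.com/result?hash={hash_}").text
            if "Generation has been in progress for" in page:
                time.sleep(delay)
                continue
            matches = VIDEO_RE.findall(page)
            if matches:
                return f"https://storage.stable-video-diffusion.com/{matches[0]}.mp4"
            break
        # Plays the role of the diff's deliberate `_ = 1/0`: any failure
        # drops the caller into its except-branch fallback to the Space.
        raise RuntimeError("no video URL in result page")

Opening the upload inside a `with` block also closes the file handle, which the inlined version leaks.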
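create_video drives the text-to-image backends over a raw websocket in what appears to be the Gradio 3.x queue protocol: one frame joins the queue with fn_index and session_hash, a second frame carries the inputs, and the client reads messages until process_starts, treating the next frame as the output. A sketch of that exchange with the frames built by json.dumps (the helper name is hypothetical; serialising with json.dumps rather than an f-string also keeps a prompt containing a double quote from corrupting the payload):

    import json
    import time
    from contextlib import closing
    from websocket import create_connection  # websocket-client package

    def ws_predict(url, fn_index, data, polls=60):
        with closing(create_connection(url, timeout=120)) as conn:
            # Join the queue, then submit the inputs.
            conn.send(json.dumps({"fn_index": fn_index, "session_hash": ""}))
            conn.send(json.dumps({"data": data, "event_data": None,
                                  "fn_index": fn_index, "session_hash": ""}))
            # Wait out "estimation" heartbeats until the job actually starts.
            for _ in range(polls):
                msg = json.loads(conn.recv())['msg']
                if msg == 'estimation':
                    time.sleep(1)
                    continue
                if msg == 'process_starts':
                    break
            return json.loads(conn.recv())['output']['data']

create_video would call this as ws_predict(url_sd3, 3, [prompt, negative, 7.5, "(No style)"]) and base64-decode data[0][0] into the temp file. Note also that the final except branch of create_video references encoded_string, which is never defined in that function (its parameter is prompt), so that fallback raises NameError instead of returning a video.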
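The new AnimateDiff branch of create_video submits an asynchronous txt2gif task and then polls get_task_info every two seconds, up to 60 times. As written, if no poll ever returns data, the loop falls through and `return photo` raises NameError because photo was never assigned. A sketch with an explicit timeout path; the helper name is illustrative, the endpoints and payload shapes follow the diff:

    import time
    import requests

    def animatediff_txt2gif(prompt, attempts=60, delay=2):
        # Submit the task; the API returns a task_id to poll.
        r = requests.post("https://sd.cuilutech.com/sdapi/async/txt2gif",
                          json={"prompt": prompt, "negative_prompt": "EasyNegative"})
        task_id = r.json()['data']['task_id']
        for _ in range(attempts):
            time.sleep(delay)
            info = requests.post("https://sd.cuilutech.com/sdapi/get_task_info",
                                 json={"task_id": task_id}).json()
            if info['data']:  # empty until the task finishes
                return info['data']['image_urls'][0]
        raise TimeoutError("txt2gif task did not finish in time")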
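On the UI side, the camera-motion dropdowns are commented out and each tab gains a gr.Radio whose current value is forwarded to the handler as a second input. A minimal, self-contained sketch of that wiring, with a stub standing in for the real create_video:

    import gradio as gr

    def create_video_stub(prompt, model):
        # The real handler returns a video URL or file path for gr.Video.
        return None

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="Описание:", lines=3)
        model = gr.Radio(value="Stable Video Diffusion", label="Модель нейросети:",
                         choices=["Stable Video Diffusion", "AnimateDiff"])
        button = gr.Button("Сгенерировать видео", variant="primary")
        video_output = gr.Video(label="Результат:")
        # The Radio's selection arrives as the handler's second positional argument.
        button.click(create_video_stub, inputs=[prompt, model], outputs=video_output)

    demo.launch()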