robomaster2025 committed on
Commit
62200cb
·
verified ·
1 Parent(s): 51cccb3

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. robomaster/__init__.py +0 -0
  2. robomaster/api/api.py +173 -0
  3. robomaster/api/post_infer.py +89 -0
  4. robomaster/data/bucket_sampler.py +379 -0
  5. robomaster/data/dataset_image.py +76 -0
  6. robomaster/data/dataset_image_video.py +357 -0
  7. robomaster/data/dataset_video.py +262 -0
  8. robomaster/models/autoencoder_magvit.py +1646 -0
  9. robomaster/models/transformer3d.py +863 -0
  10. robomaster/pipeline/pipeline_cogvideox.py +877 -0
  11. robomaster/pipeline/pipeline_cogvideox_control.py +970 -0
  12. robomaster/pipeline/pipeline_cogvideox_inpaint.py +1156 -0
  13. robomaster/reward/MPS/README.md +1 -0
  14. robomaster/reward/MPS/trainer/models/base_model.py +7 -0
  15. robomaster/reward/MPS/trainer/models/clip_model.py +154 -0
  16. robomaster/reward/MPS/trainer/models/cross_modeling.py +291 -0
  17. robomaster/reward/aesthetic_predictor_v2_5/__init__.py +13 -0
  18. robomaster/reward/aesthetic_predictor_v2_5/siglip_v2_5.py +133 -0
  19. robomaster/reward/improved_aesthetic_predictor.py +49 -0
  20. robomaster/reward/reward_fn.py +385 -0
  21. robomaster/ui/ui.py +1634 -0
  22. robomaster/utils/__init__.py +0 -0
  23. robomaster/utils/discrete_sampler.py +46 -0
  24. robomaster/utils/lora_utils.py +477 -0
  25. robomaster/utils/utils.py +208 -0
  26. robomaster/video_caption/README.md +174 -0
  27. robomaster/video_caption/README_zh-CN.md +159 -0
  28. robomaster/video_caption/beautiful_prompt.py +103 -0
  29. robomaster/video_caption/caption_rewrite.py +224 -0
  30. robomaster/video_caption/compute_motion_score.py +186 -0
  31. robomaster/video_caption/compute_text_score.py +214 -0
  32. robomaster/video_caption/compute_video_quality.py +201 -0
  33. robomaster/video_caption/cutscene_detect.py +97 -0
  34. robomaster/video_caption/filter_meta_train.py +88 -0
  35. robomaster/video_caption/package_patches/easyocr_detection_patched.py +114 -0
  36. robomaster/video_caption/package_patches/vila_siglip_encoder_patched.py +42 -0
  37. robomaster/video_caption/prompt/beautiful_prompt.txt +9 -0
  38. robomaster/video_caption/prompt/rewrite.txt +9 -0
  39. robomaster/video_caption/requirements.txt +9 -0
  40. robomaster/video_caption/scripts/stage_1_video_splitting.sh +39 -0
  41. robomaster/video_caption/scripts/stage_2_video_filtering.sh +41 -0
  42. robomaster/video_caption/scripts/stage_3_video_recaptioning.sh +52 -0
  43. robomaster/video_caption/utils/filter.py +162 -0
  44. robomaster/video_caption/utils/gather_jsonl.py +55 -0
  45. robomaster/video_caption/utils/get_meta_file.py +74 -0
  46. robomaster/video_caption/utils/image_evaluator.py +248 -0
  47. robomaster/video_caption/utils/logger.py +36 -0
  48. robomaster/video_caption/utils/longclip/README.md +19 -0
  49. robomaster/video_caption/utils/longclip/__init__.py +1 -0
  50. robomaster/video_caption/utils/longclip/bpe_simple_vocab_16e6.txt.gz +3 -0
robomaster/__init__.py ADDED
File without changes
robomaster/api/api.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import gc
3
+ import base64
4
+ import torch
5
+ import gradio as gr
6
+ import tempfile
7
+ import hashlib
8
+ import os
9
+
10
+ from fastapi import FastAPI
11
+ from io import BytesIO
12
+ from PIL import Image
13
+
14
def encode_file_to_base64(file_path):
    """Read a file in binary mode and return its contents as Base64 bytes."""
    with open(file_path, "rb") as handle:
        raw = handle.read()
    return base64.b64encode(raw)
20
+
21
def update_edition_api(_: gr.Blocks, app: FastAPI, controller):
    """Register the POST /cogvideox_fun/update_edition endpoint on *app*.

    The endpoint forwards the requested edition to *controller* and returns
    a JSON message describing success or failure.
    """

    @app.post("/cogvideox_fun/update_edition")
    def _update_edition_api(datas: dict):
        # Default to 'v2' when the request body omits the edition field.
        requested_edition = datas.get('edition', 'v2')

        try:
            controller.update_edition(requested_edition)
            comment = "Success"
        except Exception as e:
            # Release cached GPU memory before reporting the failure.
            torch.cuda.empty_cache()
            comment = f"Error. error information is {str(e)}"

        return {"message": comment}
38
+
39
def update_diffusion_transformer_api(_: gr.Blocks, app: FastAPI, controller):
    """Register the POST /cogvideox_fun/update_diffusion_transformer endpoint.

    Forwards the requested transformer checkpoint path to *controller* and
    returns a JSON message describing success or failure.
    """

    @app.post("/cogvideox_fun/update_diffusion_transformer")
    def _update_diffusion_transformer_api(datas: dict):
        # 'none' is the sentinel the controller understands as "no change".
        requested_path = datas.get('diffusion_transformer_path', 'none')

        try:
            controller.update_diffusion_transformer(requested_path)
            comment = "Success"
        except Exception as e:
            # Release cached GPU memory before reporting the failure.
            torch.cuda.empty_cache()
            comment = f"Error. error information is {str(e)}"

        return {"message": comment}
56
+
57
def save_base64_video(base64_string):
    """Decode a Base64 video payload and write it to the system temp dir.

    The file is named after the MD5 of the decoded bytes, so identical
    payloads always map to the same path. Returns the written file path.
    """
    decoded = base64.b64decode(base64_string)

    digest = hashlib.md5(decoded).hexdigest()
    target = os.path.join(tempfile.gettempdir(), f"{digest}.mp4")

    with open(target, 'wb') as out_file:
        out_file.write(decoded)

    return target
70
+
71
def save_base64_image(base64_string):
    """Decode a Base64 image payload and write it to the system temp dir.

    The file is named after the MD5 of the decoded bytes, so identical
    payloads always map to the same path. Returns the written file path.
    """
    decoded = base64.b64decode(base64_string)

    digest = hashlib.md5(decoded).hexdigest()
    target = os.path.join(tempfile.gettempdir(), f"{digest}.jpg")

    with open(target, 'wb') as out_file:
        out_file.write(decoded)

    return target
84
+
85
def infer_forward_api(_: gr.Blocks, app: FastAPI, controller):
    """Register the POST /cogvideox_fun/infer_forward endpoint on *app*.

    The endpoint accepts a JSON body of generation parameters, decodes any
    Base64-encoded image/video conditioning inputs, delegates generation to
    ``controller.generate`` and returns the saved output path together with
    a Base64 encoding of the output file.
    """
    @app.post("/cogvideox_fun/infer_forward")
    def _infer_forward_api(
        datas: dict,
    ):
        # Model / LoRA selection ('none' means "keep current").
        base_model_path = datas.get('base_model_path', 'none')
        lora_model_path = datas.get('lora_model_path', 'none')
        lora_alpha_slider = datas.get('lora_alpha_slider', 0.55)
        # Positive and negative text prompts.
        prompt_textbox = datas.get('prompt_textbox', None)
        negative_prompt_textbox = datas.get('negative_prompt_textbox', 'The video is not of a high quality, it has a low resolution. Watermark present in each frame. The background is solid. Strange body and strange trajectory. Distortion. ')
        # Sampler / resolution configuration.
        sampler_dropdown = datas.get('sampler_dropdown', 'Euler')
        sample_step_slider = datas.get('sample_step_slider', 30)
        resize_method = datas.get('resize_method', "Generate by")
        width_slider = datas.get('width_slider', 672)
        height_slider = datas.get('height_slider', 384)
        base_resolution = datas.get('base_resolution', 512)
        # Generation mode and length settings.
        is_image = datas.get('is_image', False)
        generation_method = datas.get('generation_method', False)
        length_slider = datas.get('length_slider', 49)
        overlap_video_length = datas.get('overlap_video_length', 4)
        partial_video_length = datas.get('partial_video_length', 72)
        cfg_scale_slider = datas.get('cfg_scale_slider', 6)
        # Optional Base64-encoded conditioning inputs.
        start_image = datas.get('start_image', None)
        end_image = datas.get('end_image', None)
        validation_video = datas.get('validation_video', None)
        validation_video_mask = datas.get('validation_video_mask', None)
        control_video = datas.get('control_video', None)
        denoise_strength = datas.get('denoise_strength', 0.70)
        seed_textbox = datas.get("seed_textbox", 43)

        # 'is_image' overrides whatever generation method was requested.
        generation_method = "Image Generation" if is_image else generation_method

        # Images are decoded in memory; video payloads are written to temp
        # files because the pipeline expects file paths for them.
        if start_image is not None:
            start_image = base64.b64decode(start_image)
            start_image = [Image.open(BytesIO(start_image))]

        if end_image is not None:
            end_image = base64.b64decode(end_image)
            end_image = [Image.open(BytesIO(end_image))]

        if validation_video is not None:
            validation_video = save_base64_video(validation_video)

        if validation_video_mask is not None:
            validation_video_mask = save_base64_image(validation_video_mask)

        if control_video is not None:
            control_video = save_base64_video(control_video)

        try:
            save_sample_path, comment = controller.generate(
                "",
                base_model_path,
                lora_model_path,
                lora_alpha_slider,
                prompt_textbox,
                negative_prompt_textbox,
                sampler_dropdown,
                sample_step_slider,
                resize_method,
                width_slider,
                height_slider,
                base_resolution,
                generation_method,
                length_slider,
                overlap_video_length,
                partial_video_length,
                cfg_scale_slider,
                start_image,
                end_image,
                validation_video,
                validation_video_mask,
                control_video,
                denoise_strength,
                seed_textbox,
                is_api = True,
            )
        except Exception as e:
            # Reclaim GPU memory after a failed generation before replying.
            gc.collect()
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            save_sample_path = ""
            comment = f"Error. error information is {str(e)}"
            return {"message": comment}

        # On success, also ship the output back inline as Base64.
        if save_sample_path != "":
            return {"message": comment, "save_sample_path": save_sample_path, "base64_encoding": encode_file_to_base64(save_sample_path)}
        else:
            return {"message": comment, "save_sample_path": save_sample_path}
robomaster/api/post_infer.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import json
3
+ import sys
4
+ import time
5
+ from datetime import datetime
6
+ from io import BytesIO
7
+
8
+ import cv2
9
+ import requests
10
+ import base64
11
+
12
+
13
def post_diffusion_transformer(diffusion_transformer_path, url='http://127.0.0.1:7860'):
    """POST a diffusion-transformer checkpoint path to the server.

    Returns the server's raw UTF-8 response body.
    """
    payload = json.dumps({
        "diffusion_transformer_path": diffusion_transformer_path
    })
    response = requests.post(f'{url}/cogvideox_fun/update_diffusion_transformer', data=payload, timeout=1500)
    return response.content.decode('utf-8')
20
+
21
def post_update_edition(edition, url='http://127.0.0.1:7860'):
    """POST an edition switch request to the server.

    Returns the server's raw UTF-8 response body.

    Fix: the default URL used to be ``http://0.0.0.0:7860`` — 0.0.0.0 is a
    server *bind* wildcard, not a routable client destination (connecting to
    it fails on some platforms). Use the loopback address, consistent with
    the sibling helpers in this module.
    """
    datas = json.dumps({
        "edition": edition
    })
    r = requests.post(f'{url}/cogvideox_fun/update_edition', data=datas, timeout=1500)
    data = r.content.decode('utf-8')
    return data
28
+
29
def post_infer(generation_method, length_slider, url='http://127.0.0.1:7860'):
    """POST an inference request and return the server's raw UTF-8 reply.

    Args:
        generation_method: "Video Generation" or "Image Generation".
        length_slider: number of frames to generate.
        url: base URL of the serving endpoint.

    Fix: ``generation_method`` was accepted but ignored — the payload
    hard-coded "Video Generation", so image generation could never be
    requested through this helper. The parameter is now forwarded.
    """
    datas = json.dumps({
        "base_model_path": "none",
        "motion_module_path": "none",
        "lora_model_path": "none",
        "lora_alpha_slider": 0.55,
        "prompt_textbox": "A young woman with beautiful and clear eyes and blonde hair standing and white dress in a forest wearing a crown. She seems to be lost in thought, and the camera focuses on her face. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
        "negative_prompt_textbox": "The video is not of a high quality, it has a low resolution. Watermark present in each frame. The background is solid. Strange body and strange trajectory. Distortion. ",
        "sampler_dropdown": "Euler",
        "sample_step_slider": 50,
        "width_slider": 672,
        "height_slider": 384,
        "generation_method": generation_method,
        "length_slider": length_slider,
        "cfg_scale_slider": 6,
        "seed_textbox": 43,
    })
    r = requests.post(f'{url}/cogvideox_fun/infer_forward', data=datas, timeout=1500)
    data = r.content.decode('utf-8')
    return data
49
+
50
if __name__ == '__main__':
    # Record wall-clock start time for the end-of-run report.
    time_start = time.time()

    # -------------------------- #
    #   Step 1: update edition
    # -------------------------- #
    diffusion_transformer_path = "models/Diffusion_Transformer/CogVideoX-Fun-2b-InP"
    outputs = post_diffusion_transformer(diffusion_transformer_path)
    print('Output update edition: ', outputs)

    # -------------------------- #
    #   Step 2: infer
    # -------------------------- #
    # "Video Generation" and "Image Generation"
    generation_method = "Video Generation"
    length_slider = 49
    outputs = post_infer(generation_method, length_slider)

    # Decode the Base64-encoded result returned by the server.
    outputs = json.loads(outputs)
    base64_encoding = outputs["base64_encoding"]
    decoded_data = base64.b64decode(base64_encoding)

    # Single-frame results are saved as PNG, everything else as MP4.
    is_image = True if generation_method == "Image Generation" else False
    if is_image or length_slider == 1:
        file_path = "1.png"
    else:
        file_path = "1.mp4"
    with open(file_path, "wb") as file:
        file.write(decoded_data)

    # Fix: elapsed time was previously computed as ``(end - start) % 60``,
    # which silently under-reported any run longer than one minute.
    # (The unused ``now_date`` variable was also removed.)
    time_end = time.time()
    time_sum = time_end - time_start
    print('# --------------------------------------------------------- #')
    print(f'# Total expenditure: {time_sum}s')
    print('# --------------------------------------------------------- #')
robomaster/data/bucket_sampler.py ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import os
3
+ from typing import (Generic, Iterable, Iterator, List, Optional, Sequence,
4
+ Sized, TypeVar, Union)
5
+
6
+ import cv2
7
+ import numpy as np
8
+ import torch
9
+ from PIL import Image
10
+ from torch.utils.data import BatchSampler, Dataset, Sampler
11
+
12
# (height, width) training buckets keyed by aspect ratio (height / width);
# each bucket keeps roughly the same pixel area as a 512 x 512 image.
ASPECT_RATIO_512 = {
    '0.25': [256.0, 1024.0], '0.26': [256.0, 992.0], '0.27': [256.0, 960.0], '0.28': [256.0, 928.0],
    '0.32': [288.0, 896.0], '0.33': [288.0, 864.0], '0.35': [288.0, 832.0], '0.4': [320.0, 800.0],
    '0.42': [320.0, 768.0], '0.48': [352.0, 736.0], '0.5': [352.0, 704.0], '0.52': [352.0, 672.0],
    '0.57': [384.0, 672.0], '0.6': [384.0, 640.0], '0.68': [416.0, 608.0], '0.72': [416.0, 576.0],
    '0.78': [448.0, 576.0], '0.82': [448.0, 544.0], '0.88': [480.0, 544.0], '0.94': [480.0, 512.0],
    '1.0': [512.0, 512.0], '1.07': [512.0, 480.0], '1.13': [544.0, 480.0], '1.21': [544.0, 448.0],
    '1.29': [576.0, 448.0], '1.38': [576.0, 416.0], '1.46': [608.0, 416.0], '1.67': [640.0, 384.0],
    '1.75': [672.0, 384.0], '2.0': [704.0, 352.0], '2.09': [736.0, 352.0], '2.4': [768.0, 320.0],
    '2.5': [800.0, 320.0], '2.89': [832.0, 288.0], '3.0': [864.0, 288.0], '3.11': [896.0, 288.0],
    '3.62': [928.0, 256.0], '3.75': [960.0, 256.0], '3.88': [992.0, 256.0], '4.0': [1024.0, 256.0]
}
# Reduced bucket set used for random-crop training; a subset of the table above.
ASPECT_RATIO_RANDOM_CROP_512 = {
    '0.42': [320.0, 768.0], '0.5': [352.0, 704.0],
    '0.57': [384.0, 672.0], '0.68': [416.0, 608.0], '0.78': [448.0, 576.0], '0.88': [480.0, 544.0],
    '0.94': [480.0, 512.0], '1.0': [512.0, 512.0], '1.07': [512.0, 480.0],
    '1.13': [544.0, 480.0], '1.29': [576.0, 448.0], '1.46': [608.0, 416.0], '1.75': [672.0, 384.0],
    '2.0': [704.0, 352.0], '2.4': [768.0, 320.0]
}
# Un-normalized sampling weights for ASPECT_RATIO_RANDOM_CROP_512, in the same
# key order (near-square ratios get higher weight); normalized to a
# probability distribution on the next line.
ASPECT_RATIO_RANDOM_CROP_PROB = [
    1, 2,
    4, 4, 4, 4,
    8, 8, 8,
    4, 4, 4, 4,
    2, 1
]
ASPECT_RATIO_RANDOM_CROP_PROB = np.array(ASPECT_RATIO_RANDOM_CROP_PROB) / sum(ASPECT_RATIO_RANDOM_CROP_PROB)
39
+
40
def get_closest_ratio(height: float, width: float, ratios: dict = ASPECT_RATIO_512):
    """Return the bucket resolution and ratio key nearest to height/width."""
    target = height / width
    best_key = min(ratios, key=lambda key: abs(float(key) - target))
    return ratios[best_key], float(best_key)
44
+
45
def get_image_size_without_loading(path):
    """Return an image's (width, height) by reading only its header."""
    with Image.open(path) as img:
        size = img.size
    return size  # (width, height)
48
+
49
class RandomSampler(Sampler[int]):
    r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.

    If with replacement, then user can specify :attr:`num_samples` to draw.

    Args:
        data_source (Dataset): dataset to sample from
        replacement (bool): samples are drawn on-demand with replacement if ``True``, default=``False``
        num_samples (int): number of samples to draw, default=`len(dataset)`.
        generator (Generator): Generator used in sampling.

    Fix: removed a leftover debug ``print`` inside ``__iter__`` that logged
    the first ten indices of every fresh permutation.
    """

    data_source: Sized
    replacement: bool

    def __init__(self, data_source: Sized, replacement: bool = False,
                 num_samples: Optional[int] = None, generator=None) -> None:
        self.data_source = data_source
        self.replacement = replacement
        self._num_samples = num_samples
        self.generator = generator
        # Offset into the current permutation; lets a re-created iterator
        # resume from where the previous one stopped (non-replacement path).
        self._pos_start = 0

        if not isinstance(self.replacement, bool):
            raise TypeError(f"replacement should be a boolean value, but got replacement={self.replacement}")

        if not isinstance(self.num_samples, int) or self.num_samples <= 0:
            raise ValueError(f"num_samples should be a positive integer value, but got num_samples={self.num_samples}")

    @property
    def num_samples(self) -> int:
        # dataset size might change at runtime
        if self._num_samples is None:
            return len(self.data_source)
        return self._num_samples

    def __iter__(self) -> Iterator[int]:
        n = len(self.data_source)
        if self.generator is None:
            # No generator supplied: seed a fresh one from system entropy.
            seed = int(torch.empty((), dtype=torch.int64).random_().item())
            generator = torch.Generator()
            generator.manual_seed(seed)
        else:
            generator = self.generator

        if self.replacement:
            # Draw independent uniform indices in chunks of 32.
            for _ in range(self.num_samples // 32):
                yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=generator).tolist()
            yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=generator).tolist()
        else:
            for _ in range(self.num_samples // n):
                perm = torch.randperm(n, generator=generator).tolist()
                if self._pos_start >= n:
                    self._pos_start = 0
                # Yield the permutation from the resume offset onwards,
                # tracking progress so interruption mid-epoch is resumable.
                for idx in range(self._pos_start, n):
                    yield perm[idx]
                    self._pos_start = (self._pos_start + 1) % n
                self._pos_start = 0
            yield from torch.randperm(n, generator=generator).tolist()[:self.num_samples % n]

    def __len__(self) -> int:
        return self.num_samples
112
+
113
class AspectRatioBatchImageSampler(BatchSampler):
    """A sampler wrapper that batches images of similar aspect ratio together.

    Args:
        sampler (Sampler): Base sampler.
        dataset (Dataset): Dataset providing data information.
        batch_size (int): Size of mini-batch.
        train_folder (str): Optional root folder prepended to relative paths.
        aspect_ratios (dict): The predefined aspect-ratio buckets.
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``.
    """
    def __init__(
        self,
        sampler: Sampler,
        dataset: Dataset,
        batch_size: int,
        train_folder: str = None,
        aspect_ratios: dict = ASPECT_RATIO_512,
        drop_last: bool = False,
        config=None,
        **kwargs
    ) -> None:
        if not isinstance(sampler, Sampler):
            raise TypeError('sampler should be an instance of ``Sampler``, '
                            f'but got {sampler}')
        if not isinstance(batch_size, int) or batch_size <= 0:
            raise ValueError('batch_size should be a positive integer value, '
                             f'but got batch_size={batch_size}')
        self.sampler = sampler
        self.dataset = dataset
        self.train_folder = train_folder
        self.batch_size = batch_size
        self.aspect_ratios = aspect_ratios
        self.drop_last = drop_last
        self.config = config
        # One pending-index bucket per predefined aspect ratio.
        self._aspect_ratio_buckets = {ratio: [] for ratio in aspect_ratios}
        self.current_available_bucket_keys = list(aspect_ratios.keys())

    def __iter__(self):
        for idx in self.sampler:
            try:
                entry = self.dataset[idx]

                width = entry.get("width", None)
                height = entry.get("height", None)
                if width is None or height is None:
                    # Dimensions missing from the metadata: read the image
                    # header from disk instead.
                    image_id, name = entry['file_path'], entry['text']
                    if self.train_folder is None:
                        image_dir = image_id
                    else:
                        image_dir = os.path.join(self.train_folder, image_id)

                    width, height = get_image_size_without_loading(image_dir)
                else:
                    height = int(height)
                    width = int(width)
                ratio = height / width
            except Exception as e:
                # Unreadable entry: report and skip it.
                print(e)
                continue
            # Route the sample into the bucket with the nearest aspect ratio.
            closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))
            if closest_ratio not in self.current_available_bucket_keys:
                continue
            bucket = self._aspect_ratio_buckets[closest_ratio]
            bucket.append(idx)
            # Emit a copy of the full bucket and reset it in place.
            if len(bucket) == self.batch_size:
                yield bucket[:]
                del bucket[:]
186
+
187
class AspectRatioBatchSampler(BatchSampler):
    """A sampler wrapper for grouping videos with similar aspect ratio into a same batch.

    Args:
        sampler (Sampler): Base sampler.
        dataset (Dataset): Dataset providing data information.
        batch_size (int): Size of mini-batch.
        video_folder (str): Optional root folder prepended to relative paths.
        train_data_format (str): "normal" (file_path/text keys) or "webvid"
            (videoid/name/page_dir keys).
        aspect_ratios (dict): The predefined aspect ratios.
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``.

    Fixes:
    - ``height`` used to be bound to a stray name (``width, more = ...``),
      so the ``height is None`` check below raised NameError, which the
      ``except`` swallowed — every metadata-less sample was silently dropped.
    - the ``cv2.VideoCapture`` handle is now released instead of leaked.
    """
    def __init__(
        self,
        sampler: Sampler,
        dataset: Dataset,
        batch_size: int,
        video_folder: str = None,
        train_data_format: str = "webvid",
        aspect_ratios: dict = ASPECT_RATIO_512,
        drop_last: bool = False,
        config=None,
        **kwargs
    ) -> None:
        if not isinstance(sampler, Sampler):
            raise TypeError('sampler should be an instance of ``Sampler``, '
                            f'but got {sampler}')
        if not isinstance(batch_size, int) or batch_size <= 0:
            raise ValueError('batch_size should be a positive integer value, '
                             f'but got batch_size={batch_size}')
        self.sampler = sampler
        self.dataset = dataset
        self.video_folder = video_folder
        self.train_data_format = train_data_format
        self.batch_size = batch_size
        self.aspect_ratios = aspect_ratios
        self.drop_last = drop_last
        self.config = config
        # buckets for each aspect ratio
        self._aspect_ratio_buckets = {ratio: [] for ratio in aspect_ratios}
        self.current_available_bucket_keys = list(aspect_ratios.keys())

    def __iter__(self):
        for idx in self.sampler:
            try:
                video_dict = self.dataset[idx]
                # Fix: height was previously assigned to an unused variable.
                width, height = video_dict.get("width", None), video_dict.get("height", None)

                if width is None or height is None:
                    if self.train_data_format == "normal":
                        video_id, name = video_dict['file_path'], video_dict['text']
                        if self.video_folder is None:
                            video_dir = video_id
                        else:
                            video_dir = os.path.join(self.video_folder, video_id)
                    else:
                        videoid, name, page_dir = video_dict['videoid'], video_dict['name'], video_dict['page_dir']
                        video_dir = os.path.join(self.video_folder, f"{videoid}.mp4")
                    cap = cv2.VideoCapture(video_dir)
                    try:
                        # Read the frame size from the container header.
                        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                    finally:
                        # Release the capture handle (previously leaked).
                        cap.release()

                    ratio = height / width
                else:
                    height = int(height)
                    width = int(width)
                    ratio = height / width
            except Exception as e:
                # Unreadable entry: report and skip it.
                print(e)
                continue
            # find the closest aspect ratio
            closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))
            if closest_ratio not in self.current_available_bucket_keys:
                continue
            bucket = self._aspect_ratio_buckets[closest_ratio]
            bucket.append(idx)
            # yield a batch of indices in the same aspect ratio group
            if len(bucket) == self.batch_size:
                yield bucket[:]
                del bucket[:]
269
+
270
class AspectRatioBatchImageVideoSampler(BatchSampler):
    """A sampler wrapper for grouping images/videos with similar aspect ratio into a same batch.

    Keeps two independent bucket sets ('image' and 'video') so each emitted
    batch is homogeneous in both media type and aspect ratio.

    Args:
        sampler (Sampler): Base sampler.
        dataset (Dataset): Dataset providing data information.
        batch_size (int): Size of mini-batch.
        train_folder (str): Optional root folder prepended to relative paths.
        aspect_ratios (dict): The predefined aspect ratios.
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``.

    Fixes:
    - the ``cv2.VideoCapture`` handle is now released instead of leaked.
    - each index's metadata is fetched from the dataset once instead of twice.
    """

    def __init__(self,
                 sampler: Sampler,
                 dataset: Dataset,
                 batch_size: int,
                 train_folder: str = None,
                 aspect_ratios: dict = ASPECT_RATIO_512,
                 drop_last: bool = False
                 ) -> None:
        if not isinstance(sampler, Sampler):
            raise TypeError('sampler should be an instance of ``Sampler``, '
                            f'but got {sampler}')
        if not isinstance(batch_size, int) or batch_size <= 0:
            raise ValueError('batch_size should be a positive integer value, '
                             f'but got batch_size={batch_size}')
        self.sampler = sampler
        self.dataset = dataset
        self.train_folder = train_folder
        self.batch_size = batch_size
        self.aspect_ratios = aspect_ratios
        self.drop_last = drop_last

        # Separate buckets per media type and aspect ratio.
        self.current_available_bucket_keys = list(aspect_ratios.keys())
        self.bucket = {
            'image':{ratio: [] for ratio in aspect_ratios},
            'video':{ratio: [] for ratio in aspect_ratios}
        }

    def __iter__(self):
        for idx in self.sampler:
            # Fetch the metadata once per index.
            sample = self.dataset[idx]
            content_type = sample.get('type', 'image')
            media = 'image' if content_type == 'image' else 'video'
            try:
                width, height = sample.get("width", None), sample.get("height", None)

                if width is None or height is None:
                    # The 'text' lookup is kept: entries lacking a caption
                    # raise KeyError and are skipped as before.
                    file_id, name = sample['file_path'], sample['text']
                    if self.train_folder is None:
                        file_dir = file_id
                    else:
                        file_dir = os.path.join(self.train_folder, file_id)

                    if media == 'image':
                        # Only the image header is read, not the pixel data.
                        width, height = get_image_size_without_loading(file_dir)
                    else:
                        cap = cv2.VideoCapture(file_dir)
                        try:
                            # Read the frame size from the container header.
                            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        finally:
                            # Release the capture handle (previously leaked).
                            cap.release()

                    ratio = height / width
                else:
                    height = int(height)
                    width = int(width)
                    ratio = height / width
            except Exception as e:
                # Unreadable entry: report and skip it.
                print(e)
                continue
            # find the closest aspect ratio
            closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))
            if closest_ratio not in self.current_available_bucket_keys:
                continue
            bucket = self.bucket[media][closest_ratio]
            bucket.append(idx)
            # yield a batch of indices in the same aspect ratio group
            if len(bucket) == self.batch_size:
                yield bucket[:]
                del bucket[:]
robomaster/data/dataset_image.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import random
4
+
5
+ import numpy as np
6
+ import torch
7
+ import torchvision.transforms as transforms
8
+ from PIL import Image
9
+ from torch.utils.data.dataset import Dataset
10
+
11
+
12
class CC15M(Dataset):
    """Image-text dataset backed by a JSON annotation list.

    Each annotation entry must provide 'file_path' and 'text'. When
    ``enable_bucket`` is False, images are resized/center-cropped/normalized
    to tensors; when True, raw numpy arrays are returned so an aspect-ratio
    bucket sampler can handle resizing downstream.

    Args:
        json_path: path to the JSON annotation file.
        video_folder: optional root folder prepended to relative file paths.
        resolution: target size, either an int (square) or an (h, w) pair.
        enable_bucket: skip the fixed-resolution transform pipeline.

    Fix: the annotation file is now opened with a context manager so the
    file handle is closed deterministically (it was left to the GC before).
    """
    def __init__(
        self,
        json_path,
        video_folder=None,
        resolution=512,
        enable_bucket=False,
    ):
        print(f"loading annotations from {json_path} ...")
        with open(json_path, 'r') as annotation_file:
            self.dataset = json.load(annotation_file)
        self.length = len(self.dataset)
        print(f"data scale: {self.length}")

        self.enable_bucket = enable_bucket
        self.video_folder = video_folder

        # Accept either a single int (square) or an explicit (h, w) pair.
        resolution = tuple(resolution) if not isinstance(resolution, int) else (resolution, resolution)
        self.pixel_transforms = transforms.Compose([
            transforms.Resize(resolution[0]),
            transforms.CenterCrop(resolution),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
        ])

    def get_batch(self, idx):
        """Load one (PIL image, caption) pair; raises on unreadable entries."""
        video_dict = self.dataset[idx]
        video_id, name = video_dict['file_path'], video_dict['text']

        if self.video_folder is None:
            video_dir = video_id
        else:
            video_dir = os.path.join(self.video_folder, video_id)

        pixel_values = Image.open(video_dir).convert("RGB")
        return pixel_values, name

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # Retry with a random index until a readable sample is found.
        while True:
            try:
                pixel_values, name = self.get_batch(idx)
                break
            except Exception as e:
                print(e)
                idx = random.randint(0, self.length-1)

        if not self.enable_bucket:
            pixel_values = self.pixel_transforms(pixel_values)
        else:
            pixel_values = np.array(pixel_values)

        sample = dict(pixel_values=pixel_values, text=name)
        return sample
67
+
68
if __name__ == "__main__":
    # Fix: the keyword used to be ``csv_path``, which does not exist in
    # CC15M.__init__ and raised a TypeError; the parameter is ``json_path``.
    dataset = CC15M(
        json_path="/mnt_wg/zhoumo.xjq/CCUtils/cc15m_add_index.json",
        resolution=512,
    )

    # Smoke-test iteration: print tensor shape and caption count per batch.
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=4, num_workers=0,)
    for idx, batch in enumerate(dataloader):
        print(batch["pixel_values"].shape, len(batch["text"]))
robomaster/data/dataset_image_video.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import io
3
+ import json
4
+ import math
5
+ import os
6
+ import random
7
+ from threading import Thread
8
+
9
+ import albumentations
10
+ import cv2
11
+ import gc
12
+ import numpy as np
13
+ import torch
14
+ import torchvision.transforms as transforms
15
+
16
+ from func_timeout import func_timeout, FunctionTimedOut
17
+ from decord import VideoReader
18
+ from PIL import Image
19
+ from torch.utils.data import BatchSampler, Sampler
20
+ from torch.utils.data.dataset import Dataset
21
+ from contextlib import contextmanager
22
+
23
+ import tensorflow as tf
24
+ import tensorflow_datasets as tfds
25
+ from PIL import Image
26
+ from IPython import display
27
+ import tqdm
28
+
29
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
30
+ VIDEO_READER_TIMEOUT = 20
31
+
32
def dataset2path(dataset_name):
    """Return the local TFDS builder directory for an Open-X-Embodiement dataset.

    Known datasets map to their pinned versions; everything else defaults
    to version ``0.1.0``.
    """
    known_versions = {'robo_net': '1.0.0', 'language_table': '0.0.1'}
    version = known_versions.get(dataset_name, '0.1.0')
    return f'/m2v_intern/fuxiao/Open-X-Embodiement/dataset/{dataset_name}/{version}'
40
+
41
def get_random_mask(shape):
    """Sample a random binary inpaint mask of shape (f, 1, h, w), dtype uint8.

    For multi-frame inputs one of ten patterns is drawn (static block,
    full-frame, temporal prefix/middle spans, moving block, per-pixel noise,
    per-frame small blocks, ellipse, circle, random frames); single images
    only choose between patterns 0 and 1. Values are in {0, 1}, where 1
    marks masked pixels.
    """
    f, c, h, w = shape

    if f != 1:
        mask_index = np.random.choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], p=[0.05, 0.2, 0.2, 0.2, 0.05, 0.05, 0.05, 0.1, 0.05, 0.05])
    else:
        mask_index = np.random.choice([0, 1], p = [0.2, 0.8])
    mask = torch.zeros((f, 1, h, w), dtype=torch.uint8)

    if mask_index == 0:
        # Static rectangular block over all frames.
        center_x = torch.randint(0, w, (1,)).item()
        center_y = torch.randint(0, h, (1,)).item()
        block_size_x = torch.randint(w // 4, w // 4 * 3, (1,)).item()  # block width range
        block_size_y = torch.randint(h // 4, h // 4 * 3, (1,)).item()  # block height range

        start_x = max(center_x - block_size_x // 2, 0)
        end_x = min(center_x + block_size_x // 2, w)
        start_y = max(center_y - block_size_y // 2, 0)
        end_y = min(center_y + block_size_y // 2, h)
        mask[:, :, start_y:end_y, start_x:end_x] = 1
    elif mask_index == 1:
        # Mask everything.
        mask[:, :, :, :] = 1
    elif mask_index == 2:
        # Keep a short prefix of frames, mask the rest.
        mask_frame_index = np.random.randint(1, 5)
        mask[mask_frame_index:, :, :, :] = 1
    elif mask_index == 3:
        # Keep a prefix and a suffix, mask the middle span.
        mask_frame_index = np.random.randint(1, 5)
        mask[mask_frame_index:-mask_frame_index, :, :, :] = 1
    elif mask_index == 4:
        # Rectangular block over a random temporal span.
        center_x = torch.randint(0, w, (1,)).item()
        center_y = torch.randint(0, h, (1,)).item()
        block_size_x = torch.randint(w // 4, w // 4 * 3, (1,)).item()  # block width range
        block_size_y = torch.randint(h // 4, h // 4 * 3, (1,)).item()  # block height range

        start_x = max(center_x - block_size_x // 2, 0)
        end_x = min(center_x + block_size_x // 2, w)
        start_y = max(center_y - block_size_y // 2, 0)
        end_y = min(center_y + block_size_y // 2, h)

        mask_frame_before = np.random.randint(0, f // 2)
        mask_frame_after = np.random.randint(f // 2, f)
        mask[mask_frame_before:mask_frame_after, :, start_y:end_y, start_x:end_x] = 1
    elif mask_index == 5:
        # Per-pixel Bernoulli noise.
        mask = torch.randint(0, 2, (f, 1, h, w), dtype=torch.uint8)
    elif mask_index == 6:
        # Small random block on a random subset of frames.
        num_frames_to_mask = random.randint(1, max(f // 2, 1))
        frames_to_mask = random.sample(range(f), num_frames_to_mask)

        for i in frames_to_mask:
            block_height = random.randint(1, h // 4)
            block_width = random.randint(1, w // 4)
            top_left_y = random.randint(0, h - block_height)
            top_left_x = random.randint(0, w - block_width)
            mask[i, 0, top_left_y:top_left_y + block_height, top_left_x:top_left_x + block_width] = 1
    elif mask_index == 7:
        # Elliptical mask, identical on every frame.
        center_x = torch.randint(0, w, (1,)).item()
        center_y = torch.randint(0, h, (1,)).item()
        a = torch.randint(min(w, h) // 8, min(w, h) // 4, (1,)).item()  # semi-major axis
        b = torch.randint(min(h, w) // 8, min(h, w) // 4, (1,)).item()  # semi-minor axis

        # Fix (perf): vectorized replacement for the original O(h*w) Python
        # double loop; float64 grids reproduce the original Python-float math.
        ys = torch.arange(h, dtype=torch.float64).view(-1, 1)
        xs = torch.arange(w, dtype=torch.float64).view(1, -1)
        inside = ((ys - center_y) ** 2) / (b ** 2) + ((xs - center_x) ** 2) / (a ** 2) < 1
        mask[:, :, inside] = 1
    elif mask_index == 8:
        # Circular mask, identical on every frame (vectorized, exact int math).
        center_x = torch.randint(0, w, (1,)).item()
        center_y = torch.randint(0, h, (1,)).item()
        radius = torch.randint(min(h, w) // 8, min(h, w) // 4, (1,)).item()
        ys = torch.arange(h, dtype=torch.int64).view(-1, 1)
        xs = torch.arange(w, dtype=torch.int64).view(1, -1)
        inside = (ys - center_y) ** 2 + (xs - center_x) ** 2 < radius ** 2
        mask[:, :, inside] = 1
    elif mask_index == 9:
        # Mask each frame independently with probability 0.5.
        for idx in range(f):
            if np.random.rand() > 0.5:
                mask[idx, :, :, :] = 1
    else:
        raise ValueError(f"The mask_index {mask_index} is not define")
    return mask
120
+
121
class ImageVideoSampler(BatchSampler):
    """Batch sampler that keeps images and videos in separate batches.

    Indices produced by ``sampler`` are routed into an 'image' or 'video'
    bucket according to the dataset entry's ``type`` field (default
    'image'); a batch is emitted as soon as either bucket fills up.
    Trailing indices that never fill a bucket are dropped at epoch end.

    Args:
        sampler (Sampler): Base sampler producing dataset indices.
        dataset (Dataset): Dataset providing per-entry metadata via
            ``dataset.dataset`` (a list of dicts).
        batch_size (int): Size of mini-batch.
        drop_last (bool): Stored for API compatibility; incomplete
            buckets are always dropped regardless of this flag.
    """

    def __init__(self,
                 sampler: Sampler,
                 dataset: Dataset,
                 batch_size: int,
                 drop_last: bool = False
                 ) -> None:
        if not isinstance(sampler, Sampler):
            raise TypeError('sampler should be an instance of ``Sampler``, '
                            f'but got {sampler}')
        if not isinstance(batch_size, int) or batch_size <= 0:
            raise ValueError('batch_size should be a positive integer value, '
                             f'but got batch_size={batch_size}')
        self.sampler = sampler
        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last

        # One bucket per media type.
        self.bucket = {'image': [], 'video': []}

    def __iter__(self):
        for sample_idx in self.sampler:
            media_type = self.dataset.dataset[sample_idx].get('type', 'image')
            self.bucket[media_type].append(sample_idx)

            # Flush whichever bucket just reached a full batch. Only the
            # bucket that received the new index can have filled up, so
            # checking both is equivalent to the video-then-image order.
            for key in ('video', 'image'):
                group = self.bucket[key]
                if len(group) == self.batch_size:
                    yield group[:]
                    del group[:]
167
+
168
@contextmanager
def VideoReader_contextmanager(*args, **kwargs):
    """Open a decord ``VideoReader`` and guarantee it is released on exit.

    The reader is deleted and the garbage collector is run in ``finally``
    so decoder resources are freed even if the body raises.
    """
    reader = VideoReader(*args, **kwargs)
    try:
        yield reader
    finally:
        del reader
        gc.collect()
176
+
177
def get_video_reader_batch(video_reader, batch_index):
    """Decode the frames selected by ``batch_index`` as a numpy array."""
    return video_reader.get_batch(batch_index).asnumpy()
180
+
181
def resize_frame(frame, target_short_side):
    """Downscale an (h, w, c) frame so its short side equals ``target_short_side``.

    Frames whose short side is already smaller than the target are returned
    untouched (never upscaled). The long side is scaled proportionally and
    truncated to an int, matching cv2's (width, height) argument order.
    """
    h, w, _ = frame.shape
    if h < w:
        if target_short_side > h:
            return frame
        new_h, new_w = target_short_side, int(target_short_side * w / h)
    else:
        if target_short_side > w:
            return frame
        new_h, new_w = int(target_short_side * h / w), target_short_side

    return cv2.resize(frame, (new_w, new_h))
196
+
197
class ImageVideoDataset(Dataset):
    """Mixed image/video dataset for Open-X-Embodiement style training.

    Entries in ``self.dataset`` are dicts with ``file_path``, ``text`` and
    an optional ``type`` ('image' or 'video', default 'image'). Samples are
    returned as dict(pixel_values, text, data_type, idx), plus inpaint
    tensors when ``enable_inpaint`` is set.
    """

    def __init__(
        self,
        data_root=None,
        video_sample_size_h=256,
        video_sample_size_w=320,
        video_sample_stride=4,
        video_sample_n_frames=16,
        image_sample_size=512,
        text_drop_ratio=0.1,
        enable_bucket=False,
        video_length_drop_start=0.0,
        video_length_drop_end=1.0,
        enable_inpaint=False,
    ):
        # Loading annotations from files
        print(f"loading dataset from {data_root} ...")
        self.data_root = data_root
        self.dataset = []

        b = tfds.builder_from_directory(builder_dir=dataset2path('fractal20220817_data'))
        ds = b.as_dataset(split='train')

        # NOTE(review): this loop reads each episode but never appends
        # anything to self.dataset, so self.length stays 0 -- the
        # annotation-building step looks unfinished upstream; confirm what
        # should be stored per episode.
        for i, batch in tqdm.tqdm(enumerate(ds), desc="Loading Open-X-Embodiement dataset"):
            episode = batch['steps']

        # Fix: the original `del dataset` raised NameError (no name
        # `dataset` exists here); the TFDS handle to release is `ds`.
        del ds

        self.length = len(self.dataset)
        print(f"data scale: {self.length}")
        # TODO: enable bucket training
        self.enable_bucket = enable_bucket
        self.text_drop_ratio = text_drop_ratio
        self.enable_inpaint = enable_inpaint

        self.video_length_drop_start = video_length_drop_start
        self.video_length_drop_end = video_length_drop_end

        # Video params
        self.video_sample_stride = video_sample_stride
        self.video_sample_n_frames = video_sample_n_frames
        self.video_sample_size = (video_sample_size_h, video_sample_size_w)
        self.video_transforms = transforms.Compose(
            [
                transforms.Resize(min(self.video_sample_size)),
                transforms.CenterCrop(self.video_sample_size),
                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
            ]
        )

        # Image params. Fix: `image_sample_size` was accepted but never used,
        # while get_batch referenced the undefined `self.image_transforms`.
        self.image_sample_size = tuple(image_sample_size) if not isinstance(image_sample_size, int) else (image_sample_size, image_sample_size)
        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(min(self.image_sample_size)),
                transforms.CenterCrop(self.image_sample_size),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
            ]
        )

        # Fix: get_batch resized frames with the undefined attribute
        # `self.larger_side_of_image_and_video`. Define it as the larger of
        # the two target short sides so neither the image nor the video crop
        # is starved of pixels after the pre-resize.
        self.larger_side_of_image_and_video = max(min(self.image_sample_size), min(self.video_sample_size))

    def get_batch(self, idx):
        """Load one sample; returns (pixels, caption, data_type)."""
        data_info = self.dataset[idx % len(self.dataset)]

        if data_info.get('type', 'image') == 'video':
            video_id, text = data_info['file_path'], data_info['text']

            if self.data_root is None:
                video_dir = video_id
            else:
                video_dir = os.path.join(self.data_root, video_id)

            with VideoReader_contextmanager(video_dir, num_threads=2) as video_reader:
                # Frames we can take given the stride and the usable span.
                min_sample_n_frames = min(
                    self.video_sample_n_frames,
                    int(len(video_reader) * (self.video_length_drop_end - self.video_length_drop_start) // self.video_sample_stride)
                )
                if min_sample_n_frames == 0:
                    raise ValueError(f"No Frames in video.")

                video_length = int(self.video_length_drop_end * len(video_reader))
                clip_length = min(video_length, (min_sample_n_frames - 1) * self.video_sample_stride + 1)
                start_idx = random.randint(int(self.video_length_drop_start * video_length), video_length - clip_length) if video_length != clip_length else 0
                batch_index = np.linspace(start_idx, start_idx + clip_length - 1, min_sample_n_frames, dtype=int)

                try:
                    sample_args = (video_reader, batch_index)
                    pixel_values = func_timeout(
                        VIDEO_READER_TIMEOUT, get_video_reader_batch, args=sample_args
                    )
                    # Pre-shrink each frame so the later crop works on a
                    # reasonably sized array.
                    resized_frames = []
                    for i in range(len(pixel_values)):
                        frame = pixel_values[i]
                        resized_frame = resize_frame(frame, self.larger_side_of_image_and_video)
                        resized_frames.append(resized_frame)
                    pixel_values = np.array(resized_frames)
                except FunctionTimedOut:
                    raise ValueError(f"Read {idx} timeout.")
                except Exception as e:
                    raise ValueError(f"Failed to extract frames from video. Error is {e}.")

                if not self.enable_bucket:
                    # (f, h, w, c) uint8 -> (f, c, h, w) float in [0, 1].
                    pixel_values = torch.from_numpy(pixel_values).permute(0, 3, 1, 2).contiguous()
                    pixel_values = pixel_values / 255.
                    del video_reader
                    pixel_values = self.video_transforms(pixel_values)

                # Random use no text generation
                if random.random() < self.text_drop_ratio:
                    text = ''
            return pixel_values, text, 'video'
        else:
            image_path, text = data_info['file_path'], data_info['text']
            if self.data_root is not None:
                image_path = os.path.join(self.data_root, image_path)
            image = Image.open(image_path).convert('RGB')
            if not self.enable_bucket:
                # Add a singleton frame axis so images match video layout.
                image = self.image_transforms(image).unsqueeze(0)
            else:
                image = np.expand_dims(np.array(image), 0)
            if random.random() < self.text_drop_ratio:
                text = ''
            return image, text, 'image'

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        data_info = self.dataset[idx % len(self.dataset)]
        data_type = data_info.get('type', 'image')
        while True:
            sample = {}
            try:
                data_info_local = self.dataset[idx % len(self.dataset)]
                data_type_local = data_info_local.get('type', 'image')
                if data_type_local != data_type:
                    # Keep the media type fixed across retries so batches
                    # stay homogeneous.
                    raise ValueError("data_type_local != data_type")

                pixel_values, name, data_type = self.get_batch(idx)
                sample["pixel_values"] = pixel_values
                sample["text"] = name
                sample["data_type"] = data_type
                sample["idx"] = idx

                if len(sample) > 0:
                    break
            except Exception as e:
                print(e, self.dataset[idx % len(self.dataset)])
                idx = random.randint(0, self.length-1)

        if self.enable_inpaint and not self.enable_bucket:
            # Masked pixels are replaced with -1 (the normalized "black").
            mask = get_random_mask(pixel_values.size())
            mask_pixel_values = pixel_values * (1 - mask) + torch.ones_like(pixel_values) * -1 * mask
            sample["mask_pixel_values"] = mask_pixel_values
            sample["mask"] = mask

            # First frame, de-normalized back to [0, 255] HWC for CLIP-style use.
            clip_pixel_values = sample["pixel_values"][0].permute(1, 2, 0).contiguous()
            clip_pixel_values = (clip_pixel_values * 0.5 + 0.5) * 255
            sample["clip_pixel_values"] = clip_pixel_values

            ref_pixel_values = sample["pixel_values"][0].unsqueeze(0)
            if (mask == 1).all():
                # Fully-masked clips get a blank (-1) reference frame.
                ref_pixel_values = torch.ones_like(ref_pixel_values) * -1
            sample["ref_pixel_values"] = ref_pixel_values

        return sample
356
+
357
+
robomaster/data/dataset_video.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import gc
3
+ import io
4
+ import json
5
+ import math
6
+ import os
7
+ import random
8
+ from contextlib import contextmanager
9
+ from threading import Thread
10
+
11
+ import albumentations
12
+ import cv2
13
+ import numpy as np
14
+ import torch
15
+ import torchvision.transforms as transforms
16
+ from decord import VideoReader
17
+ from einops import rearrange
18
+ from func_timeout import FunctionTimedOut, func_timeout
19
+ from PIL import Image
20
+ from torch.utils.data import BatchSampler, Sampler
21
+ from torch.utils.data.dataset import Dataset
22
+
23
+ VIDEO_READER_TIMEOUT = 20
24
+
25
def get_random_mask(shape):
    """Sample one of four random binary inpaint masks, shape (f, 1, h, w).

    Returned dtype is uint8 with values in {0, 1}; 1 marks masked pixels.
    Patterns: 0 = all but the first frame, 1 = all but first/last frame,
    2 = static rectangle on every frame, 3 = rectangle on a temporal span.
    """
    num_frames, _, height, width = shape

    mask_index = np.random.randint(0, 4)
    mask = torch.zeros((num_frames, 1, height, width), dtype=torch.uint8)

    if mask_index == 0:
        # Everything except the first frame.
        mask[1:, :, :, :] = 1
        return mask
    if mask_index == 1:
        # Everything except the first and last frame.
        mask[1:-1, :, :, :] = 1
        return mask

    # Patterns 2 and 3 share a random rectangle (same draw order as before).
    cx = torch.randint(0, width, (1,)).item()
    cy = torch.randint(0, height, (1,)).item()
    rect_w = torch.randint(width // 4, width // 4 * 3, (1,)).item()    # rectangle width range
    rect_h = torch.randint(height // 4, height // 4 * 3, (1,)).item()  # rectangle height range

    x0, x1 = max(cx - rect_w // 2, 0), min(cx + rect_w // 2, width)
    y0, y1 = max(cy - rect_h // 2, 0), min(cy + rect_h // 2, height)

    if mask_index == 2:
        mask[:, :, y0:y1, x0:x1] = 1
    elif mask_index == 3:
        frame_lo = np.random.randint(0, num_frames // 2)
        frame_hi = np.random.randint(num_frames // 2, num_frames)
        mask[frame_lo:frame_hi, :, y0:y1, x0:x1] = 1
    else:
        raise ValueError(f"The mask_index {mask_index} is not define")
    return mask
63
+
64
+
65
@contextmanager
def VideoReader_contextmanager(*args, **kwargs):
    """Yield a decord ``VideoReader``; delete it and run a GC pass on exit,
    even when the body raises."""
    video_reader = VideoReader(*args, **kwargs)
    try:
        yield video_reader
    finally:
        del video_reader
        gc.collect()
73
+
74
+
75
def get_video_reader_batch(video_reader, batch_index):
    """Fetch and decode the requested frame indices as a numpy array."""
    decoded = video_reader.get_batch(batch_index)
    return decoded.asnumpy()
78
+
79
+
80
class WebVid10M(Dataset):
    """WebVid-10M loader: CSV-annotated videos decoded with decord.

    Each CSV row provides ``videoid``, ``name`` (the caption) and
    ``page_dir``. Samples are returned as dict(pixel_values, text), plus a
    random inpaint mask when ``enable_inpaint`` is set.
    """

    def __init__(
        self,
        csv_path, video_folder,
        sample_size=256, sample_stride=4, sample_n_frames=16,
        enable_bucket=False, enable_inpaint=False, is_image=False,
    ):
        print(f"loading annotations from {csv_path} ...")
        with open(csv_path, 'r') as csvfile:
            self.dataset = list(csv.DictReader(csvfile))
        self.length = len(self.dataset)
        print(f"data scale: {self.length}")

        self.video_folder = video_folder
        self.sample_stride = sample_stride      # temporal stride between sampled frames
        self.sample_n_frames = sample_n_frames  # frames per clip
        self.enable_bucket = enable_bucket      # if True, skip tensor conversion/transforms
        self.enable_inpaint = enable_inpaint
        self.is_image = is_image                # if True, sample a single random frame

        # Accept an int (square crop) or an explicit (h, w) pair.
        sample_size = tuple(sample_size) if not isinstance(sample_size, int) else (sample_size, sample_size)
        self.pixel_transforms = transforms.Compose([
            transforms.Resize(sample_size[0]),
            transforms.CenterCrop(sample_size),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
        ])

    def get_batch(self, idx):
        # Decode one clip (or one frame when is_image) and return it with its caption.
        video_dict = self.dataset[idx]
        videoid, name, page_dir = video_dict['videoid'], video_dict['name'], video_dict['page_dir']

        # NOTE(review): `page_dir` is read but not used in the path below --
        # confirm the videos really are stored flat under `video_folder`.
        video_dir = os.path.join(self.video_folder, f"{videoid}.mp4")
        video_reader = VideoReader(video_dir)
        video_length = len(video_reader)

        if not self.is_image:
            # Longest stride-spaced clip that fits, at a random start offset.
            clip_length = min(video_length, (self.sample_n_frames - 1) * self.sample_stride + 1)
            start_idx = random.randint(0, video_length - clip_length)
            batch_index = np.linspace(start_idx, start_idx + clip_length - 1, self.sample_n_frames, dtype=int)
        else:
            batch_index = [random.randint(0, video_length - 1)]

        if not self.enable_bucket:
            # (f, h, w, c) uint8 -> (f, c, h, w) float in [0, 1].
            pixel_values = torch.from_numpy(video_reader.get_batch(batch_index).asnumpy()).permute(0, 3, 1, 2).contiguous()
            pixel_values = pixel_values / 255.
            del video_reader
        else:
            pixel_values = video_reader.get_batch(batch_index).asnumpy()

        if self.is_image:
            pixel_values = pixel_values[0]
        return pixel_values, name

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # Retry with random indices until a sample decodes successfully.
        while True:
            try:
                pixel_values, name = self.get_batch(idx)
                break

            except Exception as e:
                print("Error info:", e)
                idx = random.randint(0, self.length-1)

        if not self.enable_bucket:
            pixel_values = self.pixel_transforms(pixel_values)
        if self.enable_inpaint:
            # Masked pixels are replaced with -1 (the normalized "black").
            mask = get_random_mask(pixel_values.size())
            mask_pixel_values = pixel_values * (1 - mask) + torch.ones_like(pixel_values) * -1 * mask
            sample = dict(pixel_values=pixel_values, mask_pixel_values=mask_pixel_values, mask=mask, text=name)
        else:
            sample = dict(pixel_values=pixel_values, text=name)
        return sample
155
+
156
+
157
class VideoDataset(Dataset):
    """Video-caption dataset driven by a JSON annotation list.

    Every entry supplies ``file_path`` (optionally relative to
    ``video_folder``) and ``text``. Clips are decoded with decord under a
    timeout; samples come back as dict(pixel_values, text), plus inpaint
    tensors when ``enable_inpaint`` is set.
    """

    def __init__(
        self,
        json_path, video_folder=None,
        sample_size=256, sample_stride=4, sample_n_frames=16,
        enable_bucket=False, enable_inpaint=False
    ):
        print(f"loading annotations from {json_path} ...")
        self.dataset = json.load(open(json_path, 'r'))
        self.length = len(self.dataset)
        print(f"data scale: {self.length}")

        self.video_folder = video_folder
        self.sample_stride = sample_stride
        self.sample_n_frames = sample_n_frames
        self.enable_bucket = enable_bucket
        self.enable_inpaint = enable_inpaint

        # Accept a single int (square crop) or an explicit (h, w) pair.
        if isinstance(sample_size, int):
            sample_size = (sample_size, sample_size)
        else:
            sample_size = tuple(sample_size)
        self.pixel_transforms = transforms.Compose(
            [
                transforms.Resize(sample_size[0]),
                transforms.CenterCrop(sample_size),
                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
            ]
        )

    def get_batch(self, idx):
        """Decode one clip; returns (frames, caption)."""
        entry = self.dataset[idx]
        rel_path, caption = entry['file_path'], entry['text']

        path = rel_path if self.video_folder is None else os.path.join(self.video_folder, rel_path)

        with VideoReader_contextmanager(path, num_threads=2) as video_reader:
            total_frames = len(video_reader)

            # Longest stride-spaced clip that fits, at a random start offset.
            clip_length = min(total_frames, (self.sample_n_frames - 1) * self.sample_stride + 1)
            start_idx = random.randint(0, total_frames - clip_length)
            batch_index = np.linspace(start_idx, start_idx + clip_length - 1, self.sample_n_frames, dtype=int)

            try:
                pixel_values = func_timeout(
                    VIDEO_READER_TIMEOUT, get_video_reader_batch, args=(video_reader, batch_index)
                )
            except FunctionTimedOut:
                raise ValueError(f"Read {idx} timeout.")
            except Exception as e:
                raise ValueError(f"Failed to extract frames from video. Error is {e}.")

            if self.enable_bucket:
                # Raw (f, h, w, c) uint8 frames; transforms happen downstream.
                return pixel_values, caption

            # (f, h, w, c) uint8 -> (f, c, h, w) float in [0, 1].
            pixel_values = torch.from_numpy(pixel_values).permute(0, 3, 1, 2).contiguous() / 255.
            del video_reader
            return pixel_values, caption

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # Retry with random indices until a clip decodes successfully.
        while True:
            try:
                pixel_values, name = self.get_batch(idx)
            except Exception as e:
                print("Error info:", e)
                idx = random.randint(0, self.length-1)
            else:
                break

        if not self.enable_bucket:
            pixel_values = self.pixel_transforms(pixel_values)
        if self.enable_inpaint:
            # Masked pixels are replaced with -1 (the normalized "black").
            mask = get_random_mask(pixel_values.size())
            mask_pixel_values = pixel_values * (1 - mask) + torch.ones_like(pixel_values) * -1 * mask
            return dict(pixel_values=pixel_values, mask_pixel_values=mask_pixel_values, mask=mask, text=name)
        return dict(pixel_values=pixel_values, text=name)
241
+
242
+
243
if __name__ == "__main__":
    # Flip to exercise the WebVid10M loader instead of the JSON one.
    use_webvid = False

    if not use_webvid:
        dataset = VideoDataset(
            json_path="/home/zhoumo.xjq/disk3/datasets/webvidval/results_2M_val.json",
            sample_size=256,
            sample_stride=4, sample_n_frames=16,
        )
    else:
        dataset = WebVid10M(
            csv_path="/mnt/petrelfs/guoyuwei/projects/datasets/webvid/results_2M_val.csv",
            video_folder="/mnt/petrelfs/guoyuwei/projects/datasets/webvid/2M_val",
            sample_size=256,
            sample_stride=4, sample_n_frames=16,
            is_image=False,
        )

    dataloader = torch.utils.data.DataLoader(dataset, batch_size=4, num_workers=0)
    for idx, batch in enumerate(dataloader):
        print(batch["pixel_values"].shape, len(batch["text"]))
robomaster/models/autoencoder_magvit.py ADDED
@@ -0,0 +1,1646 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Dict, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
24
+ from diffusers.loaders.single_file_model import FromOriginalModelMixin
25
+ from diffusers.utils import logging
26
+ from diffusers.utils.accelerate_utils import apply_forward_hook
27
+ from diffusers.models.activations import get_activation
28
+ from diffusers.models.downsampling import CogVideoXDownsample3D
29
+ from diffusers.models.modeling_outputs import AutoencoderKLOutput
30
+ from diffusers.models.modeling_utils import ModelMixin
31
+ from diffusers.models.upsampling import CogVideoXUpsample3D
32
+ from diffusers.models.autoencoders.vae import DecoderOutput, DiagonalGaussianDistribution
33
+
34
+
35
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
+
37
+
38
class CogVideoXSafeConv3d(nn.Conv3d):
    r"""
    A 3D convolution that chunks oversized inputs along the temporal axis to
    avoid OOM in the CogVideoX model. Chunks overlap by ``kernel_size - 1``
    frames so the concatenated output matches a single full convolution.
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Estimated activation footprint in GB (2 bytes per element).
        memory_count = (
            (input.shape[0] * input.shape[1] * input.shape[2] * input.shape[3] * input.shape[4]) * 2 / 1024**3
        )

        # Below the 2GB CuDNN-friendly threshold: plain convolution.
        if memory_count <= 2:
            return super().forward(input)

        kernel_size = self.kernel_size[0]
        part_num = int(memory_count / 2) + 1
        input_chunks = list(torch.chunk(input, part_num, dim=2))

        if kernel_size > 1:
            # Prepend the tail of the previous chunk so temporal receptive
            # fields are preserved across chunk boundaries.
            overlapped = [input_chunks[0]]
            for prev, cur in zip(input_chunks[:-1], input_chunks[1:]):
                overlapped.append(torch.cat((prev[:, :, -kernel_size + 1:], cur), dim=2))
            input_chunks = overlapped

        # Explicit super(...) form: zero-arg super() is unavailable inside
        # the comprehension scope.
        output_chunks = [super(CogVideoXSafeConv3d, self).forward(chunk) for chunk in input_chunks]
        return torch.cat(output_chunks, dim=2)
67
+
68
+
69
+ class CogVideoXCausalConv3d(nn.Module):
70
+ r"""A 3D causal convolution layer that pads the input tensor to ensure causality in CogVideoX Model.
71
+
72
+ Args:
73
+ in_channels (`int`): Number of channels in the input tensor.
74
+ out_channels (`int`): Number of output channels produced by the convolution.
75
+ kernel_size (`int` or `Tuple[int, int, int]`): Kernel size of the convolutional kernel.
76
+ stride (`int`, defaults to `1`): Stride of the convolution.
77
+ dilation (`int`, defaults to `1`): Dilation rate of the convolution.
78
+ pad_mode (`str`, defaults to `"constant"`): Padding mode.
79
+ """
80
+
81
+ def __init__(
82
+ self,
83
+ in_channels: int,
84
+ out_channels: int,
85
+ kernel_size: Union[int, Tuple[int, int, int]],
86
+ stride: int = 1,
87
+ dilation: int = 1,
88
+ pad_mode: str = "constant",
89
+ ):
90
+ super().__init__()
91
+
92
+ if isinstance(kernel_size, int):
93
+ kernel_size = (kernel_size,) * 3
94
+
95
+ time_kernel_size, height_kernel_size, width_kernel_size = kernel_size
96
+
97
+ # TODO(aryan): configure calculation based on stride and dilation in the future.
98
+ # Since CogVideoX does not use it, it is currently tailored to "just work" with Mochi
99
+ time_pad = time_kernel_size - 1
100
+ height_pad = (height_kernel_size - 1) // 2
101
+ width_pad = (width_kernel_size - 1) // 2
102
+
103
+ self.pad_mode = pad_mode
104
+ self.height_pad = height_pad
105
+ self.width_pad = width_pad
106
+ self.time_pad = time_pad
107
+ self.time_causal_padding = (width_pad, width_pad, height_pad, height_pad, time_pad, 0)
108
+
109
+ self.temporal_dim = 2
110
+ self.time_kernel_size = time_kernel_size
111
+
112
+ stride = stride if isinstance(stride, tuple) else (stride, 1, 1)
113
+ dilation = (dilation, 1, 1)
114
+ self.conv = CogVideoXSafeConv3d(
115
+ in_channels=in_channels,
116
+ out_channels=out_channels,
117
+ kernel_size=kernel_size,
118
+ stride=stride,
119
+ dilation=dilation,
120
+ )
121
+
122
    def fake_context_parallel_forward(
        self, inputs: torch.Tensor, conv_cache: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Prepend the temporal context frames that make the 3D convolution causal.

        With `"replicate"` padding, the full causal pad is applied directly. Otherwise the
        `time_kernel_size - 1` context frames are taken from `conv_cache` (frames carried over
        from the previous chunk) or, at the start of a stream, from repeating the first frame.
        """
        if self.pad_mode == "replicate":
            inputs = F.pad(inputs, self.time_causal_padding, mode="replicate")
        else:
            kernel_size = self.time_kernel_size
            if kernel_size > 1:
                # NOTE(review): a non-None conv_cache is assumed to already contain
                # kernel_size - 1 frames — confirm against the caching done in `forward`.
                cached_inputs = [conv_cache] if conv_cache is not None else [inputs[:, :, :1]] * (kernel_size - 1)
                inputs = torch.cat(cached_inputs + [inputs], dim=2)
        return inputs
133
+
134
    def forward(
        self, inputs: torch.Tensor, conv_cache: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Run the causal convolution, returning `(output, conv_cache)`.

        `conv_cache` is the trailing frames of the (context-extended) input, to be passed
        back in on the next chunk; it is `None` in `"replicate"` pad mode.
        """
        # Attach temporal context (cache or replicated first frame) so the conv is causal.
        inputs = self.fake_context_parallel_forward(inputs, conv_cache)

        if self.pad_mode == "replicate":
            # Replicate mode already applied the full spatio-temporal padding; nothing to cache.
            conv_cache = None
        else:
            padding_2d = (self.width_pad, self.width_pad, self.height_pad, self.height_pad)
            # Cache the last time_kernel_size - 1 frames as context for the next chunk.
            conv_cache = inputs[:, :, -self.time_kernel_size + 1 :].clone()
            inputs = F.pad(inputs, padding_2d, mode="constant", value=0)

        output = self.conv(inputs)
        return output, conv_cache
146
+
147
+
148
class CogVideoXSpatialNorm3D(nn.Module):
    r"""
    Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002, specialized here for
    3D video-like data.

    CogVideoXSafeConv3d is used instead of nn.Conv3d to avoid OOM in CogVideoX Model.

    Args:
        f_channels (`int`):
            The number of channels for input to group normalization layer, and output of the spatial norm layer.
        zq_channels (`int`):
            The number of channels for the quantized vector as described in the paper.
        groups (`int`):
            Number of groups to separate the channels into for group normalization.
    """

    def __init__(
        self,
        f_channels: int,
        zq_channels: int,
        groups: int = 32,
    ):
        super().__init__()
        self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=groups, eps=1e-6, affine=True)
        # 1x1x1 causal convs predict the per-pixel scale (conv_y) and shift (conv_b) from zq.
        self.conv_y = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1)
        self.conv_b = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1)

    def forward(
        self, f: torch.Tensor, zq: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None
    ) -> torch.Tensor:
        """Normalize `f` and modulate it spatially with `zq`; returns `(out, new_conv_cache)`."""
        conv_cache = conv_cache or {}
        new_conv_cache = {}

        num_frames = f.shape[2]
        if num_frames > 1 and num_frames % 2 == 1:
            # Odd frame count: resize the lone first frame and the remaining frames
            # separately so the (1 + 2k)-frame layout of causal video latents is preserved.
            z_first = F.interpolate(zq[:, :, :1], size=f[:, :, :1].shape[-3:])
            z_rest = F.interpolate(zq[:, :, 1:], size=f[:, :, 1:].shape[-3:])
            zq = torch.cat([z_first, z_rest], dim=2)
        else:
            zq = F.interpolate(zq, size=f.shape[-3:])

        conv_y, new_conv_cache["conv_y"] = self.conv_y(zq, conv_cache=conv_cache.get("conv_y"))
        conv_b, new_conv_cache["conv_b"] = self.conv_b(zq, conv_cache=conv_cache.get("conv_b"))

        modulated = self.norm_layer(f) * conv_y + conv_b
        return modulated, new_conv_cache
197
+
198
+
199
class CogVideoXUpsample3D(nn.Module):
    r"""
    A 3D upsample layer used in CogVideoX by Tsinghua University & ZhipuAI # Todo: Wait for paper release.

    Args:
        in_channels (`int`):
            Number of channels in the input image.
        out_channels (`int`):
            Number of channels produced by the convolution.
        kernel_size (`int`, defaults to `3`):
            Size of the convolving kernel.
        stride (`int`, defaults to `1`):
            Stride of the convolution.
        padding (`int`, defaults to `1`):
            Padding added to all four sides of the input.
        compress_time (`bool`, defaults to `False`):
            Whether or not to compress the time dimension.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        padding: int = 1,
        compress_time: bool = False,
    ) -> None:
        super().__init__()

        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
        self.compress_time = compress_time

        # When auto_split_process is disabled, callers drive chunked decoding manually
        # and flag the chunk that holds the lone first frame via first_frame_flag.
        self.auto_split_process = True
        self.first_frame_flag = False

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Upsample spatially by 2x (and temporally by 2x when `compress_time` applies)."""
        if not self.compress_time:
            # Spatial-only upsampling: fold time into the batch and interpolate frame-wise.
            batch, channels, frames, height, width = inputs.shape
            inputs = inputs.permute(0, 2, 1, 3, 4).reshape(batch * frames, channels, height, width)
            inputs = F.interpolate(inputs, scale_factor=2.0)
            inputs = inputs.reshape(batch, frames, channels, *inputs.shape[2:]).permute(0, 2, 1, 3, 4)
        elif self.auto_split_process:
            num_frames = inputs.shape[2]
            if num_frames == 1:
                # A single frame is upsampled spatially only.
                inputs = F.interpolate(inputs.squeeze(2), scale_factor=2.0)[:, :, None, :, :]
            elif num_frames % 2 == 1:
                # Odd frame count: keep the first frame spatial-only, upsample the
                # remaining frames in time as well.
                x_first = F.interpolate(inputs[:, :, 0], scale_factor=2.0)
                x_rest = F.interpolate(inputs[:, :, 1:], scale_factor=2.0)
                inputs = torch.cat([x_first[:, :, None, :, :], x_rest], dim=2)
            else:
                inputs = F.interpolate(inputs, scale_factor=2.0)
        elif self.first_frame_flag:
            # Manual chunked decoding: this chunk holds only the first frame.
            inputs = F.interpolate(inputs.squeeze(2), scale_factor=2.0)[:, :, None, :, :]
        else:
            inputs = F.interpolate(inputs, scale_factor=2.0)

        # Apply the 2D convolution frame-by-frame.
        batch, channels, frames, height, width = inputs.shape
        inputs = inputs.permute(0, 2, 1, 3, 4).reshape(batch * frames, channels, height, width)
        inputs = self.conv(inputs)
        inputs = inputs.reshape(batch, frames, *inputs.shape[1:]).permute(0, 2, 1, 3, 4)

        return inputs
272
+
273
+
274
class CogVideoXResnetBlock3D(nn.Module):
    r"""
    A 3D ResNet block used in the CogVideoX model.

    Args:
        in_channels (`int`):
            Number of input channels.
        out_channels (`int`, *optional*):
            Number of output channels. If None, defaults to `in_channels`.
        dropout (`float`, defaults to `0.0`):
            Dropout rate.
        temb_channels (`int`, defaults to `512`):
            Number of time embedding channels.
        groups (`int`, defaults to `32`):
            Number of groups to separate the channels into for group normalization.
        eps (`float`, defaults to `1e-6`):
            Epsilon value for normalization layers.
        non_linearity (`str`, defaults to `"swish"`):
            Activation function to use.
        conv_shortcut (bool, defaults to `False`):
            Whether or not to use a convolution shortcut.
        spatial_norm_dim (`int`, *optional*):
            The dimension to use for spatial norm if it is to be used instead of group norm.
        pad_mode (str, defaults to `"first"`):
            Padding mode.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: Optional[int] = None,
        dropout: float = 0.0,
        temb_channels: int = 512,
        groups: int = 32,
        eps: float = 1e-6,
        non_linearity: str = "swish",
        conv_shortcut: bool = False,
        spatial_norm_dim: Optional[int] = None,
        pad_mode: str = "first",
    ):
        super().__init__()

        out_channels = out_channels or in_channels

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.nonlinearity = get_activation(non_linearity)
        self.use_conv_shortcut = conv_shortcut
        self.spatial_norm_dim = spatial_norm_dim

        # GroupNorm in the encoder; zq-conditioned spatial norm in the decoder
        # (when a spatial_norm_dim is supplied).
        if spatial_norm_dim is None:
            self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=groups, eps=eps)
            self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=groups, eps=eps)
        else:
            self.norm1 = CogVideoXSpatialNorm3D(
                f_channels=in_channels,
                zq_channels=spatial_norm_dim,
                groups=groups,
            )
            self.norm2 = CogVideoXSpatialNorm3D(
                f_channels=out_channels,
                zq_channels=spatial_norm_dim,
                groups=groups,
            )

        self.conv1 = CogVideoXCausalConv3d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
        )

        # Time-embedding projection only exists when temb_channels > 0.
        if temb_channels > 0:
            self.temb_proj = nn.Linear(in_features=temb_channels, out_features=out_channels)

        self.dropout = nn.Dropout(dropout)
        self.conv2 = CogVideoXCausalConv3d(
            in_channels=out_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
        )

        # Shortcut projection is only needed when the channel count changes.
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = CogVideoXCausalConv3d(
                    in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
                )
            else:
                self.conv_shortcut = CogVideoXSafeConv3d(
                    in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0
                )

    def forward(
        self,
        inputs: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        zq: Optional[torch.Tensor] = None,
        conv_cache: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.Tensor:
        """Residual forward pass; returns `(hidden_states, new_conv_cache)`.

        `conv_cache` threads per-submodule causal-conv caches between chunked calls;
        keys are "norm1"/"norm2" (dicts) and "conv1"/"conv2"/"conv_shortcut" (tensors).
        """
        new_conv_cache = {}
        conv_cache = conv_cache or {}

        hidden_states = inputs

        # norm -> act -> conv (first half)
        if zq is not None:
            hidden_states, new_conv_cache["norm1"] = self.norm1(hidden_states, zq, conv_cache=conv_cache.get("norm1"))
        else:
            hidden_states = self.norm1(hidden_states)

        hidden_states = self.nonlinearity(hidden_states)
        hidden_states, new_conv_cache["conv1"] = self.conv1(hidden_states, conv_cache=conv_cache.get("conv1"))

        # Add the (broadcast) time embedding between the two convs.
        if temb is not None:
            hidden_states = hidden_states + self.temb_proj(self.nonlinearity(temb))[:, :, None, None, None]

        # norm -> act -> dropout -> conv (second half)
        if zq is not None:
            hidden_states, new_conv_cache["norm2"] = self.norm2(hidden_states, zq, conv_cache=conv_cache.get("norm2"))
        else:
            hidden_states = self.norm2(hidden_states)

        hidden_states = self.nonlinearity(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states, new_conv_cache["conv2"] = self.conv2(hidden_states, conv_cache=conv_cache.get("conv2"))

        # Project the residual when the channel count changes.
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                inputs, new_conv_cache["conv_shortcut"] = self.conv_shortcut(
                    inputs, conv_cache=conv_cache.get("conv_shortcut")
                )
            else:
                inputs = self.conv_shortcut(inputs)

        hidden_states = hidden_states + inputs
        return hidden_states, new_conv_cache
403
+
404
+
405
class CogVideoXDownBlock3D(nn.Module):
    r"""
    A downsampling block used in the CogVideoX model.

    Args:
        in_channels (`int`):
            Number of input channels.
        out_channels (`int`, *optional*):
            Number of output channels. If None, defaults to `in_channels`.
        temb_channels (`int`, defaults to `512`):
            Number of time embedding channels.
        num_layers (`int`, defaults to `1`):
            Number of resnet layers.
        dropout (`float`, defaults to `0.0`):
            Dropout rate.
        resnet_eps (`float`, defaults to `1e-6`):
            Epsilon value for normalization layers.
        resnet_act_fn (`str`, defaults to `"swish"`):
            Activation function to use.
        resnet_groups (`int`, defaults to `32`):
            Number of groups to separate the channels into for group normalization.
        add_downsample (`bool`, defaults to `True`):
            Whether or not to use a downsampling layer. If not used, output dimension would be same as input dimension.
        compress_time (`bool`, defaults to `False`):
            Whether or not to downsample across temporal dimension.
        pad_mode (str, defaults to `"first"`):
            Padding mode.
    """

    _supports_gradient_checkpointing = True

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        add_downsample: bool = True,
        downsample_padding: int = 0,
        compress_time: bool = False,
        pad_mode: str = "first",
    ):
        super().__init__()

        # First resnet maps in_channels -> out_channels; the rest keep out_channels.
        self.resnets = nn.ModuleList(
            [
                CogVideoXResnetBlock3D(
                    in_channels=in_channels if layer_idx == 0 else out_channels,
                    out_channels=out_channels,
                    dropout=dropout,
                    temb_channels=temb_channels,
                    groups=resnet_groups,
                    eps=resnet_eps,
                    non_linearity=resnet_act_fn,
                    pad_mode=pad_mode,
                )
                for layer_idx in range(num_layers)
            ]
        )

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    CogVideoXDownsample3D(
                        out_channels, out_channels, padding=downsample_padding, compress_time=compress_time
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        zq: Optional[torch.Tensor] = None,
        conv_cache: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.Tensor:
        r"""Forward method of the `CogVideoXDownBlock3D` class."""
        conv_cache = conv_cache or {}
        new_conv_cache = {}
        use_checkpointing = torch.is_grad_enabled() and self.gradient_checkpointing

        for index, block in enumerate(self.resnets):
            cache_key = f"resnet_{index}"

            if use_checkpointing:

                def make_forward(module):
                    def wrapped(*args):
                        return module(*args)

                    return wrapped

                hidden_states, new_conv_cache[cache_key] = torch.utils.checkpoint.checkpoint(
                    make_forward(block), hidden_states, temb, zq, conv_cache.get(cache_key)
                )
            else:
                hidden_states, new_conv_cache[cache_key] = block(
                    hidden_states, temb, zq, conv_cache=conv_cache.get(cache_key)
                )

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

        return hidden_states, new_conv_cache
523
+
524
+
525
class CogVideoXMidBlock3D(nn.Module):
    r"""
    A middle block used in the CogVideoX model.

    Args:
        in_channels (`int`):
            Number of input channels.
        temb_channels (`int`, defaults to `512`):
            Number of time embedding channels.
        dropout (`float`, defaults to `0.0`):
            Dropout rate.
        num_layers (`int`, defaults to `1`):
            Number of resnet layers.
        resnet_eps (`float`, defaults to `1e-6`):
            Epsilon value for normalization layers.
        resnet_act_fn (`str`, defaults to `"swish"`):
            Activation function to use.
        resnet_groups (`int`, defaults to `32`):
            Number of groups to separate the channels into for group normalization.
        spatial_norm_dim (`int`, *optional*):
            The dimension to use for spatial norm if it is to be used instead of group norm.
        pad_mode (str, defaults to `"first"`):
            Padding mode.
    """

    _supports_gradient_checkpointing = True

    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        spatial_norm_dim: Optional[int] = None,
        pad_mode: str = "first",
    ):
        super().__init__()

        # All resnets in the mid block keep the channel count unchanged.
        self.resnets = nn.ModuleList(
            [
                CogVideoXResnetBlock3D(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    dropout=dropout,
                    temb_channels=temb_channels,
                    groups=resnet_groups,
                    eps=resnet_eps,
                    spatial_norm_dim=spatial_norm_dim,
                    non_linearity=resnet_act_fn,
                    pad_mode=pad_mode,
                )
                for _ in range(num_layers)
            ]
        )

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        zq: Optional[torch.Tensor] = None,
        conv_cache: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.Tensor:
        r"""Forward method of the `CogVideoXMidBlock3D` class."""
        conv_cache = conv_cache or {}
        new_conv_cache = {}
        use_checkpointing = torch.is_grad_enabled() and self.gradient_checkpointing

        for index, block in enumerate(self.resnets):
            cache_key = f"resnet_{index}"

            if use_checkpointing:

                def make_forward(module):
                    def wrapped(*args):
                        return module(*args)

                    return wrapped

                hidden_states, new_conv_cache[cache_key] = torch.utils.checkpoint.checkpoint(
                    make_forward(block), hidden_states, temb, zq, conv_cache.get(cache_key)
                )
            else:
                hidden_states, new_conv_cache[cache_key] = block(
                    hidden_states, temb, zq, conv_cache=conv_cache.get(cache_key)
                )

        return hidden_states, new_conv_cache
617
+
618
+
619
class CogVideoXUpBlock3D(nn.Module):
    r"""
    An upsampling block used in the CogVideoX model.

    Args:
        in_channels (`int`):
            Number of input channels.
        out_channels (`int`, *optional*):
            Number of output channels. If None, defaults to `in_channels`.
        temb_channels (`int`, defaults to `512`):
            Number of time embedding channels.
        dropout (`float`, defaults to `0.0`):
            Dropout rate.
        num_layers (`int`, defaults to `1`):
            Number of resnet layers.
        resnet_eps (`float`, defaults to `1e-6`):
            Epsilon value for normalization layers.
        resnet_act_fn (`str`, defaults to `"swish"`):
            Activation function to use.
        resnet_groups (`int`, defaults to `32`):
            Number of groups to separate the channels into for group normalization.
        spatial_norm_dim (`int`, defaults to `16`):
            The dimension to use for spatial norm if it is to be used instead of group norm.
        add_upsample (`bool`, defaults to `True`):
            Whether or not to use a upsampling layer. If not used, output dimension would be same as input dimension.
        compress_time (`bool`, defaults to `False`):
            Whether or not to downsample across temporal dimension.
        pad_mode (str, defaults to `"first"`):
            Padding mode.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        spatial_norm_dim: int = 16,
        add_upsample: bool = True,
        upsample_padding: int = 1,
        compress_time: bool = False,
        pad_mode: str = "first",
    ):
        super().__init__()

        # First resnet maps in_channels -> out_channels; the rest keep out_channels.
        self.resnets = nn.ModuleList(
            [
                CogVideoXResnetBlock3D(
                    in_channels=in_channels if layer_idx == 0 else out_channels,
                    out_channels=out_channels,
                    dropout=dropout,
                    temb_channels=temb_channels,
                    groups=resnet_groups,
                    eps=resnet_eps,
                    non_linearity=resnet_act_fn,
                    spatial_norm_dim=spatial_norm_dim,
                    pad_mode=pad_mode,
                )
                for layer_idx in range(num_layers)
            ]
        )

        if add_upsample:
            self.upsamplers = nn.ModuleList(
                [
                    CogVideoXUpsample3D(
                        out_channels, out_channels, padding=upsample_padding, compress_time=compress_time
                    )
                ]
            )
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        zq: Optional[torch.Tensor] = None,
        conv_cache: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.Tensor:
        r"""Forward method of the `CogVideoXUpBlock3D` class."""
        conv_cache = conv_cache or {}
        new_conv_cache = {}
        use_checkpointing = torch.is_grad_enabled() and self.gradient_checkpointing

        for index, block in enumerate(self.resnets):
            cache_key = f"resnet_{index}"

            if use_checkpointing:

                def make_forward(module):
                    def wrapped(*args):
                        return module(*args)

                    return wrapped

                hidden_states, new_conv_cache[cache_key] = torch.utils.checkpoint.checkpoint(
                    make_forward(block), hidden_states, temb, zq, conv_cache.get(cache_key)
                )
            else:
                hidden_states, new_conv_cache[cache_key] = block(
                    hidden_states, temb, zq, conv_cache=conv_cache.get(cache_key)
                )

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)

        return hidden_states, new_conv_cache
739
+
740
+
741
class CogVideoXEncoder3D(nn.Module):
    r"""
    The `CogVideoXEncoder3D` layer of a variational autoencoder that encodes its input into a latent representation.

    Args:
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            The number of output channels.
        down_block_types (`Tuple[str, ...]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            The types of down blocks to use. See `~diffusers.models.unet_2d_blocks.get_down_block` for available
            options.
        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
            The number of output channels for each block.
        act_fn (`str`, *optional*, defaults to `"silu"`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups for normalization.
    """

    _supports_gradient_checkpointing = True

    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 16,
        down_block_types: Tuple[str, ...] = (
            "CogVideoXDownBlock3D",
            "CogVideoXDownBlock3D",
            "CogVideoXDownBlock3D",
            "CogVideoXDownBlock3D",
        ),
        block_out_channels: Tuple[int, ...] = (128, 256, 256, 512),
        layers_per_block: int = 3,
        act_fn: str = "silu",
        norm_eps: float = 1e-6,
        norm_num_groups: int = 32,
        dropout: float = 0.0,
        pad_mode: str = "first",
        temporal_compression_ratio: float = 4,
    ):
        super().__init__()

        # log2 of temporal_compress_times
        temporal_compress_level = int(np.log2(temporal_compression_ratio))

        self.conv_in = CogVideoXCausalConv3d(in_channels, block_out_channels[0], kernel_size=3, pad_mode=pad_mode)
        self.down_blocks = nn.ModuleList([])

        # down blocks
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            # Only the first `temporal_compress_level` levels downsample across time.
            compress_time = i < temporal_compress_level

            if down_block_type == "CogVideoXDownBlock3D":
                down_block = CogVideoXDownBlock3D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    temb_channels=0,
                    dropout=dropout,
                    num_layers=layers_per_block,
                    resnet_eps=norm_eps,
                    resnet_act_fn=act_fn,
                    resnet_groups=norm_num_groups,
                    add_downsample=not is_final_block,
                    compress_time=compress_time,
                )
            else:
                raise ValueError("Invalid `down_block_type` encountered. Must be `CogVideoXDownBlock3D`")

            self.down_blocks.append(down_block)

        # mid block
        self.mid_block = CogVideoXMidBlock3D(
            in_channels=block_out_channels[-1],
            temb_channels=0,
            dropout=dropout,
            num_layers=2,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            resnet_groups=norm_num_groups,
            pad_mode=pad_mode,
        )

        self.norm_out = nn.GroupNorm(norm_num_groups, block_out_channels[-1], eps=1e-6)
        self.conv_act = nn.SiLU()
        # 2 * out_channels: the output carries both the mean and log-variance of the latent.
        self.conv_out = CogVideoXCausalConv3d(
            block_out_channels[-1], 2 * out_channels, kernel_size=3, pad_mode=pad_mode
        )

        self.gradient_checkpointing = False

    def forward(
        self,
        sample: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        conv_cache: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.Tensor:
        r"""The forward method of the `CogVideoXEncoder3D` class.

        Returns `(hidden_states, new_conv_cache)`; `conv_cache` threads the causal-conv
        caches of every submodule between chunked calls.
        """

        new_conv_cache = {}
        conv_cache = conv_cache or {}

        hidden_states, new_conv_cache["conv_in"] = self.conv_in(sample, conv_cache=conv_cache.get("conv_in"))

        if torch.is_grad_enabled() and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # 1. Down
            for i, down_block in enumerate(self.down_blocks):
                conv_cache_key = f"down_block_{i}"
                hidden_states, new_conv_cache[conv_cache_key] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(down_block),
                    hidden_states,
                    temb,
                    None,
                    conv_cache.get(conv_cache_key),
                )

            # 2. Mid
            hidden_states, new_conv_cache["mid_block"] = torch.utils.checkpoint.checkpoint(
                create_custom_forward(self.mid_block),
                hidden_states,
                temb,
                None,
                conv_cache.get("mid_block"),
            )
        else:
            # 1. Down
            for i, down_block in enumerate(self.down_blocks):
                conv_cache_key = f"down_block_{i}"
                hidden_states, new_conv_cache[conv_cache_key] = down_block(
                    hidden_states, temb, None, conv_cache=conv_cache.get(conv_cache_key)
                )

            # 2. Mid
            hidden_states, new_conv_cache["mid_block"] = self.mid_block(
                hidden_states, temb, None, conv_cache=conv_cache.get("mid_block")
            )

        # 3. Post-process
        hidden_states = self.norm_out(hidden_states)
        hidden_states = self.conv_act(hidden_states)

        hidden_states, new_conv_cache["conv_out"] = self.conv_out(hidden_states, conv_cache=conv_cache.get("conv_out"))

        return hidden_states, new_conv_cache
898
+
899
+
900
class CogVideoXDecoder3D(nn.Module):
    r"""
    The `CogVideoXDecoder3D` layer of a variational autoencoder that decodes its latent representation into an output
    sample.

    Args:
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            The number of output channels.
        up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
            The number of output channels for each block.
        act_fn (`str`, *optional*, defaults to `"silu"`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups for normalization.
    """

    _supports_gradient_checkpointing = True

    def __init__(
        self,
        in_channels: int = 16,
        out_channels: int = 3,
        up_block_types: Tuple[str, ...] = (
            "CogVideoXUpBlock3D",
            "CogVideoXUpBlock3D",
            "CogVideoXUpBlock3D",
            "CogVideoXUpBlock3D",
        ),
        block_out_channels: Tuple[int, ...] = (128, 256, 256, 512),
        layers_per_block: int = 3,
        act_fn: str = "silu",
        norm_eps: float = 1e-6,
        norm_num_groups: int = 32,
        dropout: float = 0.0,
        pad_mode: str = "first",
        temporal_compression_ratio: float = 4,
    ):
        super().__init__()

        # The decoder mirrors the encoder, so channel counts are traversed in reverse.
        reversed_block_out_channels = list(reversed(block_out_channels))

        self.conv_in = CogVideoXCausalConv3d(
            in_channels, reversed_block_out_channels[0], kernel_size=3, pad_mode=pad_mode
        )

        # mid block
        # spatial_norm_dim=in_channels: all decoder norms are conditioned on the latent sample (zq).
        self.mid_block = CogVideoXMidBlock3D(
            in_channels=reversed_block_out_channels[0],
            temb_channels=0,
            num_layers=2,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            resnet_groups=norm_num_groups,
            spatial_norm_dim=in_channels,
            pad_mode=pad_mode,
        )

        # up blocks
        self.up_blocks = nn.ModuleList([])

        output_channel = reversed_block_out_channels[0]
        temporal_compress_level = int(np.log2(temporal_compression_ratio))

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            # Only the first `temporal_compress_level` levels upsample across time.
            compress_time = i < temporal_compress_level

            if up_block_type == "CogVideoXUpBlock3D":
                up_block = CogVideoXUpBlock3D(
                    in_channels=prev_output_channel,
                    out_channels=output_channel,
                    temb_channels=0,
                    dropout=dropout,
                    num_layers=layers_per_block + 1,
                    resnet_eps=norm_eps,
                    resnet_act_fn=act_fn,
                    resnet_groups=norm_num_groups,
                    spatial_norm_dim=in_channels,
                    add_upsample=not is_final_block,
                    compress_time=compress_time,
                    pad_mode=pad_mode,
                )
                prev_output_channel = output_channel
            else:
                raise ValueError("Invalid `up_block_type` encountered. Must be `CogVideoXUpBlock3D`")

            self.up_blocks.append(up_block)

        self.norm_out = CogVideoXSpatialNorm3D(reversed_block_out_channels[-1], in_channels, groups=norm_num_groups)
        self.conv_act = nn.SiLU()
        self.conv_out = CogVideoXCausalConv3d(
            reversed_block_out_channels[-1], out_channels, kernel_size=3, pad_mode=pad_mode
        )

        self.gradient_checkpointing = False

    def forward(
        self,
        sample: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        conv_cache: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.Tensor:
        r"""The forward method of the `CogVideoXDecoder3D` class.

        Returns `(hidden_states, new_conv_cache)`. Note that `sample` (the latent) is also
        passed as the `zq` conditioning input to every spatially-normalized submodule.
        """

        new_conv_cache = {}
        conv_cache = conv_cache or {}

        hidden_states, new_conv_cache["conv_in"] = self.conv_in(sample, conv_cache=conv_cache.get("conv_in"))

        if torch.is_grad_enabled() and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # 1. Mid
            hidden_states, new_conv_cache["mid_block"] = torch.utils.checkpoint.checkpoint(
                create_custom_forward(self.mid_block),
                hidden_states,
                temb,
                sample,
                conv_cache.get("mid_block"),
            )

            # 2. Up
            for i, up_block in enumerate(self.up_blocks):
                conv_cache_key = f"up_block_{i}"
                hidden_states, new_conv_cache[conv_cache_key] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(up_block),
                    hidden_states,
                    temb,
                    sample,
                    conv_cache.get(conv_cache_key),
                )
        else:
            # 1. Mid
            hidden_states, new_conv_cache["mid_block"] = self.mid_block(
                hidden_states, temb, sample, conv_cache=conv_cache.get("mid_block")
            )

            # 2. Up
            for i, up_block in enumerate(self.up_blocks):
                conv_cache_key = f"up_block_{i}"
                hidden_states, new_conv_cache[conv_cache_key] = up_block(
                    hidden_states, temb, sample, conv_cache=conv_cache.get(conv_cache_key)
                )

        # 3. Post-process
        hidden_states, new_conv_cache["norm_out"] = self.norm_out(
            hidden_states, sample, conv_cache=conv_cache.get("norm_out")
        )
        hidden_states = self.conv_act(hidden_states)
        hidden_states, new_conv_cache["conv_out"] = self.conv_out(hidden_states, conv_cache=conv_cache.get("conv_out"))

        return hidden_states, new_conv_cache
1065
+
1066
+
1067
class AutoencoderKLCogVideoX(ModelMixin, ConfigMixin, FromOriginalModelMixin):
    r"""
    A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in
    [CogVideoX](https://github.com/THUDM/CogVideo).

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        scaling_factor (`float`, *optional*, defaults to `1.15258426`):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
        force_upcast (`bool`, *optional*, default to `True`):
            If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
            can be fine-tuned / trained to a lower range without losing too much precision in which case
            `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
    """

    _supports_gradient_checkpointing = True
    _no_split_modules = ["CogVideoXResnetBlock3D"]

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = (
            "CogVideoXDownBlock3D",
            "CogVideoXDownBlock3D",
            "CogVideoXDownBlock3D",
            "CogVideoXDownBlock3D",
        ),
        up_block_types: Tuple[str] = (
            "CogVideoXUpBlock3D",
            "CogVideoXUpBlock3D",
            "CogVideoXUpBlock3D",
            "CogVideoXUpBlock3D",
        ),
        block_out_channels: Tuple[int] = (128, 256, 256, 512),
        latent_channels: int = 16,
        layers_per_block: int = 3,
        act_fn: str = "silu",
        norm_eps: float = 1e-6,
        norm_num_groups: int = 32,
        temporal_compression_ratio: float = 4,
        sample_height: int = 480,
        sample_width: int = 720,
        scaling_factor: float = 1.15258426,
        shift_factor: Optional[float] = None,
        latents_mean: Optional[Tuple[float]] = None,
        latents_std: Optional[Tuple[float]] = None,
        force_upcast: bool = True,
        use_quant_conv: bool = False,
        use_post_quant_conv: bool = False,
        invert_scale_latents: bool = False,
    ):
        super().__init__()

        self.encoder = CogVideoXEncoder3D(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_eps=norm_eps,
            norm_num_groups=norm_num_groups,
            temporal_compression_ratio=temporal_compression_ratio,
        )
        self.decoder = CogVideoXDecoder3D(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_eps=norm_eps,
            norm_num_groups=norm_num_groups,
            temporal_compression_ratio=temporal_compression_ratio,
        )
        # NOTE(review): the conv widths use `out_channels` (image channels) rather than
        # `latent_channels`; this mirrors upstream diffusers and is harmless for CogVideoX
        # because both flags default to False — confirm before enabling either flag.
        self.quant_conv = CogVideoXSafeConv3d(2 * out_channels, 2 * out_channels, 1) if use_quant_conv else None
        self.post_quant_conv = CogVideoXSafeConv3d(out_channels, out_channels, 1) if use_post_quant_conv else None

        # Runtime toggles; flipped via the enable_*/disable_* methods below.
        self.use_slicing = False
        self.use_tiling = False
        self.auto_split_process = False

        # Can be increased to decode more latent frames at once, but comes at a reasonable memory cost and it is not
        # recommended because the temporal parts of the VAE, here, are tricky to understand.
        # If you decode X latent frames together, the number of output frames is:
        #     (X + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) => X + 6 frames
        #
        # Example with num_latent_frames_batch_size = 2:
        #     - 12 latent frames: (0, 1), (2, 3), (4, 5), (6, 7), (8, 9), (10, 11) are processed together
        #         => (12 // 2 frame slices) * ((2 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale))
        #         => 6 * 8 = 48 frames
        #     - 13 latent frames: (0, 1, 2) (special case), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12) are processed together
        #         => (1 frame slice) * ((3 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) +
        #            ((13 - 3) // 2) * ((2 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale))
        #         => 1 * 9 + 5 * 8 = 49 frames
        # It has been implemented this way so as to not have "magic values" in the code base that would be hard to explain. Note that
        # setting it to anything other than 2 would give poor results because the VAE hasn't been trained to be adaptive with different
        # number of temporal frames.
        self.num_latent_frames_batch_size = 2
        self.num_sample_frames_batch_size = 8

        # We make the minimum height and width of sample for tiling half that of the generally supported
        self.tile_sample_min_height = sample_height // 2
        self.tile_sample_min_width = sample_width // 2
        # Latent tile size follows from the spatial downscale factor 2**(num_blocks - 1).
        self.tile_latent_min_height = int(
            self.tile_sample_min_height / (2 ** (len(self.config.block_out_channels) - 1))
        )
        self.tile_latent_min_width = int(self.tile_sample_min_width / (2 ** (len(self.config.block_out_channels) - 1)))

        # These are experimental overlap factors that were chosen based on experimentation and seem to work best for
        # 720x480 (WxH) resolution. The above resolution is the strongly recommended generation resolution in CogVideoX
        # and so the tiling implementation has only been tested on those specific resolutions.
        self.tile_overlap_factor_height = 1 / 6
        self.tile_overlap_factor_width = 1 / 5

    def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
        # Only the encoder/decoder submodules support checkpointing; other modules ignore it.
        if isinstance(module, (CogVideoXEncoder3D, CogVideoXDecoder3D)):
            module.gradient_checkpointing = value

    def enable_tiling(
        self,
        tile_sample_min_height: Optional[int] = None,
        tile_sample_min_width: Optional[int] = None,
        tile_overlap_factor_height: Optional[float] = None,
        tile_overlap_factor_width: Optional[float] = None,
    ) -> None:
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.

        Args:
            tile_sample_min_height (`int`, *optional*):
                The minimum height required for a sample to be separated into tiles across the height dimension.
            tile_sample_min_width (`int`, *optional*):
                The minimum width required for a sample to be separated into tiles across the width dimension.
            tile_overlap_factor_height (`int`, *optional*):
                The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are
                no tiling artifacts produced across the height dimension. Must be between 0 and 1. Setting a higher
                value might cause more tiles to be processed leading to slow down of the decoding process.
            tile_overlap_factor_width (`int`, *optional*):
                The minimum amount of overlap between two consecutive horizontal tiles. This is to ensure that there
                are no tiling artifacts produced across the width dimension. Must be between 0 and 1. Setting a higher
                value might cause more tiles to be processed leading to slow down of the decoding process.
        """
        self.use_tiling = True
        # `or` keeps the current value when the caller passes None (or 0, which is not a valid size anyway).
        self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
        self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
        self.tile_latent_min_height = int(
            self.tile_sample_min_height / (2 ** (len(self.config.block_out_channels) - 1))
        )
        self.tile_latent_min_width = int(self.tile_sample_min_width / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor_height = tile_overlap_factor_height or self.tile_overlap_factor_height
        self.tile_overlap_factor_width = tile_overlap_factor_width or self.tile_overlap_factor_width

    def disable_tiling(self) -> None:
        r"""
        Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.use_tiling = False

    def enable_slicing(self) -> None:
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.use_slicing = True

    def disable_slicing(self) -> None:
        r"""
        Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.use_slicing = False

    def _set_first_frame(self) -> None:
        """Put every temporal upsampler into 'first frame' mode (used by the manual split decode path)."""
        for name, module in self.named_modules():
            if isinstance(module, CogVideoXUpsample3D):
                module.auto_split_process = False
                module.first_frame_flag = True

    def _set_rest_frame(self) -> None:
        """Put every temporal upsampler into 'remaining frames' mode (used by the manual split decode path)."""
        for name, module in self.named_modules():
            if isinstance(module, CogVideoXUpsample3D):
                module.auto_split_process = False
                module.first_frame_flag = False

    def enable_auto_split_process(self) -> None:
        """Let the upsamplers split frames automatically instead of using the manual first/rest protocol."""
        self.auto_split_process = True
        for name, module in self.named_modules():
            if isinstance(module, CogVideoXUpsample3D):
                module.auto_split_process = True

    def disable_auto_split_process(self) -> None:
        # NOTE(review): this only clears the model-level flag; the upsamplers keep their
        # per-module flag until _set_first_frame/_set_rest_frame is next called — confirm intended.
        self.auto_split_process = False

    def _encode(self, x: torch.Tensor) -> torch.Tensor:
        """Encode pixels (B, C, T, H, W) to latent moments, chunking along time to bound memory."""
        batch_size, num_channels, num_frames, height, width = x.shape

        if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height):
            return self.tiled_encode(x)

        frame_batch_size = self.num_sample_frames_batch_size
        # Note: We expect the number of frames to be either `1` or `frame_batch_size * k` or `frame_batch_size * k + 1` for some k.
        # As the extra single frame is handled inside the loop, it is not required to round up here.
        num_batches = max(num_frames // frame_batch_size, 1)
        conv_cache = None
        enc = []

        for i in range(num_batches):
            # The first chunk absorbs the remainder frames (e.g. the extra +1 frame), so every
            # later chunk is exactly `frame_batch_size` frames and the conv cache stays aligned.
            remaining_frames = num_frames % frame_batch_size
            start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames)
            end_frame = frame_batch_size * (i + 1) + remaining_frames
            x_intermediate = x[:, :, start_frame:end_frame]
            # conv_cache carries causal-conv state across chunks so chunked == full-clip encoding.
            x_intermediate, conv_cache = self.encoder(x_intermediate, conv_cache=conv_cache)
            if self.quant_conv is not None:
                x_intermediate = self.quant_conv(x_intermediate)
            enc.append(x_intermediate)

        enc = torch.cat(enc, dim=2)
        return enc

    @apply_forward_hook
    def encode(
        self, x: torch.Tensor, return_dict: bool = True
    ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
        """
        Encode a batch of images into latents.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
            The latent representations of the encoded videos. If `return_dict` is True, a
            [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
        """
        if self.use_slicing and x.shape[0] > 1:
            # Process one batch element at a time to reduce peak memory.
            encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self._encode(x)

        posterior = DiagonalGaussianDistribution(h)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        """Decode latents (B, C, T, H, W) to pixels, chunking along time to bound memory."""
        batch_size, num_channels, num_frames, height, width = z.shape

        if self.use_tiling and (width > self.tile_latent_min_width or height > self.tile_latent_min_height):
            return self.tiled_decode(z, return_dict=return_dict)

        if self.auto_split_process:
            # Upsamplers handle the first-frame special case themselves; same
            # remainder-absorbing chunking scheme as `_encode`.
            frame_batch_size = self.num_latent_frames_batch_size
            num_batches = max(num_frames // frame_batch_size, 1)
            conv_cache = None
            dec = []

            for i in range(num_batches):
                remaining_frames = num_frames % frame_batch_size
                start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames)
                end_frame = frame_batch_size * (i + 1) + remaining_frames
                z_intermediate = z[:, :, start_frame:end_frame]
                if self.post_quant_conv is not None:
                    z_intermediate = self.post_quant_conv(z_intermediate)
                z_intermediate, conv_cache = self.decoder(z_intermediate, conv_cache=conv_cache)
                dec.append(z_intermediate)
        else:
            # Manual protocol: decode latent frame 0 alone with the upsamplers in
            # first-frame mode, then the rest in fixed-size chunks in rest-frame mode.
            conv_cache = None
            start_frame = 0
            end_frame = 1
            dec = []

            self._set_first_frame()
            z_intermediate = z[:, :, start_frame:end_frame]
            if self.post_quant_conv is not None:
                z_intermediate = self.post_quant_conv(z_intermediate)
            z_intermediate, conv_cache = self.decoder(z_intermediate, conv_cache=conv_cache)
            dec.append(z_intermediate)

            self._set_rest_frame()
            start_frame = end_frame
            end_frame += self.num_latent_frames_batch_size

            while start_frame < num_frames:
                z_intermediate = z[:, :, start_frame:end_frame]
                if self.post_quant_conv is not None:
                    z_intermediate = self.post_quant_conv(z_intermediate)
                z_intermediate, conv_cache = self.decoder(z_intermediate, conv_cache=conv_cache)
                dec.append(z_intermediate)
                start_frame = end_frame
                end_frame += self.num_latent_frames_batch_size

        dec = torch.cat(dec, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        """
        Decode a batch of images.

        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        """Linearly blend the bottom rows of tile `a` into the top rows of tile `b` (in place on `b`)."""
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for y in range(blend_extent):
            b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
                y / blend_extent
            )
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        """Linearly blend the right columns of tile `a` into the left columns of tile `b` (in place on `b`)."""
        blend_extent = min(a.shape[4], b.shape[4], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
                x / blend_extent
            )
        return b

    def tiled_encode(self, x: torch.Tensor) -> torch.Tensor:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
        different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
        output, but they should be much less noticeable.

        Args:
            x (`torch.Tensor`): Input batch of videos.

        Returns:
            `torch.Tensor`:
                The latent representation of the encoded videos.
        """
        # For a rough memory estimate, take a look at the `tiled_decode` method.
        batch_size, num_channels, num_frames, height, width = x.shape

        # Tile stride in pixel space; blend extent and crop limit in latent space.
        overlap_height = int(self.tile_sample_min_height * (1 - self.tile_overlap_factor_height))
        overlap_width = int(self.tile_sample_min_width * (1 - self.tile_overlap_factor_width))
        blend_extent_height = int(self.tile_latent_min_height * self.tile_overlap_factor_height)
        blend_extent_width = int(self.tile_latent_min_width * self.tile_overlap_factor_width)
        row_limit_height = self.tile_latent_min_height - blend_extent_height
        row_limit_width = self.tile_latent_min_width - blend_extent_width
        frame_batch_size = self.num_sample_frames_batch_size

        # Split x into overlapping tiles and encode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, height, overlap_height):
            row = []
            for j in range(0, width, overlap_width):
                # Note: We expect the number of frames to be either `1` or `frame_batch_size * k` or `frame_batch_size * k + 1` for some k.
                # As the extra single frame is handled inside the loop, it is not required to round up here.
                num_batches = max(num_frames // frame_batch_size, 1)
                conv_cache = None
                time = []

                for k in range(num_batches):
                    remaining_frames = num_frames % frame_batch_size
                    start_frame = frame_batch_size * k + (0 if k == 0 else remaining_frames)
                    end_frame = frame_batch_size * (k + 1) + remaining_frames
                    tile = x[
                        :,
                        :,
                        start_frame:end_frame,
                        i : i + self.tile_sample_min_height,
                        j : j + self.tile_sample_min_width,
                    ]
                    tile, conv_cache = self.encoder(tile, conv_cache=conv_cache)
                    if self.quant_conv is not None:
                        tile = self.quant_conv(tile)
                    time.append(tile)

                row.append(torch.cat(time, dim=2))
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent_height)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent_width)
                result_row.append(tile[:, :, :, :row_limit_height, :row_limit_width])
            result_rows.append(torch.cat(result_row, dim=4))

        enc = torch.cat(result_rows, dim=3)
        return enc

    def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        r"""
        Decode a batch of images using a tiled decoder.

        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        # Rough memory assessment:
        #   - In CogVideoX-2B, there are a total of 24 CausalConv3d layers.
        #   - The biggest intermediate dimensions are: [1, 128, 9, 480, 720].
        #   - Assume fp16 (2 bytes per value).
        # Memory required: 1 * 128 * 9 * 480 * 720 * 24 * 2 / 1024**3 = 17.8 GB
        #
        # Memory assessment when using tiling:
        #   - Assume everything as above but now HxW is 240x360 by tiling in half
        # Memory required: 1 * 128 * 9 * 240 * 360 * 24 * 2 / 1024**3 = 4.5 GB

        batch_size, num_channels, num_frames, height, width = z.shape

        # Tile stride in latent space; blend extent and crop limit in pixel space.
        overlap_height = int(self.tile_latent_min_height * (1 - self.tile_overlap_factor_height))
        overlap_width = int(self.tile_latent_min_width * (1 - self.tile_overlap_factor_width))
        blend_extent_height = int(self.tile_sample_min_height * self.tile_overlap_factor_height)
        blend_extent_width = int(self.tile_sample_min_width * self.tile_overlap_factor_width)
        row_limit_height = self.tile_sample_min_height - blend_extent_height
        row_limit_width = self.tile_sample_min_width - blend_extent_width
        frame_batch_size = self.num_latent_frames_batch_size

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, height, overlap_height):
            row = []
            for j in range(0, width, overlap_width):
                if self.auto_split_process:
                    # Same chunking scheme as `_decode` (auto-split branch), applied per tile.
                    num_batches = max(num_frames // frame_batch_size, 1)
                    conv_cache = None
                    time = []

                    for k in range(num_batches):
                        remaining_frames = num_frames % frame_batch_size
                        start_frame = frame_batch_size * k + (0 if k == 0 else remaining_frames)
                        end_frame = frame_batch_size * (k + 1) + remaining_frames
                        tile = z[
                            :,
                            :,
                            start_frame:end_frame,
                            i : i + self.tile_latent_min_height,
                            j : j + self.tile_latent_min_width,
                        ]
                        if self.post_quant_conv is not None:
                            tile = self.post_quant_conv(tile)
                        tile, conv_cache = self.decoder(tile, conv_cache=conv_cache)
                        time.append(tile)

                    row.append(torch.cat(time, dim=2))
                else:
                    # Manual first/rest-frame protocol, same as `_decode`, applied per tile.
                    conv_cache = None
                    start_frame = 0
                    end_frame = 1
                    dec = []

                    tile = z[
                        :,
                        :,
                        start_frame:end_frame,
                        i : i + self.tile_latent_min_height,
                        j : j + self.tile_latent_min_width,
                    ]

                    self._set_first_frame()
                    if self.post_quant_conv is not None:
                        tile = self.post_quant_conv(tile)
                    tile, conv_cache = self.decoder(tile, conv_cache=conv_cache)
                    dec.append(tile)

                    self._set_rest_frame()
                    start_frame = end_frame
                    end_frame += self.num_latent_frames_batch_size

                    while start_frame < num_frames:
                        tile = z[
                            :,
                            :,
                            start_frame:end_frame,
                            i : i + self.tile_latent_min_height,
                            j : j + self.tile_latent_min_width,
                        ]
                        if self.post_quant_conv is not None:
                            tile = self.post_quant_conv(tile)
                        tile, conv_cache = self.decoder(tile, conv_cache=conv_cache)
                        dec.append(tile)
                        start_frame = end_frame
                        end_frame += self.num_latent_frames_batch_size

                    row.append(torch.cat(dec, dim=2))
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent_height)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent_width)
                result_row.append(tile[:, :, :, :row_limit_height, :row_limit_width])
            result_rows.append(torch.cat(result_row, dim=4))

        dec = torch.cat(result_rows, dim=3)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.Tensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, Tuple[DecoderOutput]]:
        """Full encode -> (sample or mode) -> decode round trip.

        Args:
            sample: Input pixels of shape (B, C, T, H, W).
            sample_posterior: If True, sample from the posterior; otherwise use its mode.
            return_dict: Whether to return the decode result directly or wrapped in a tuple.
            generator: Optional RNG used when sampling the posterior.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z)
        # NOTE(review): `dec` is already a DecoderOutput here, so `return_dict=False`
        # yields a tuple containing a DecoderOutput rather than a raw tensor — confirm callers.
        if not return_dict:
            return (dec,)
        return dec
robomaster/models/transformer3d.py ADDED
@@ -0,0 +1,863 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Any, Dict, Optional, Tuple, Union
17
+
18
+ import os
19
+ import json
20
+ import torch
21
+ import glob
22
+ import torch.nn.functional as F
23
+ from torch import nn
24
+
25
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
26
+ from diffusers.models.embeddings import CogVideoXPatchEmbed
27
+ from diffusers.utils import is_torch_version, logging
28
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
29
+ from diffusers.models.attention import Attention, FeedForward
30
+ from diffusers.models.attention_processor import AttentionProcessor, CogVideoXAttnProcessor2_0, FusedCogVideoXAttnProcessor2_0
31
+ from diffusers.models.embeddings import TimestepEmbedding, Timesteps, get_3d_sincos_pos_embed, get_2d_sincos_pos_embed
32
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
33
+ from diffusers.models.modeling_utils import ModelMixin
34
+ from diffusers.models.normalization import AdaLayerNorm, CogVideoXLayerNormZero
35
+
36
+ from einops import rearrange
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
def zero_module(module):
    """Reset every parameter of ``module`` to zero in place and return the module."""
    with torch.no_grad():
        for param in module.parameters():
            param.zero_()
    return module
47
+
48
class FloatGroupNorm(nn.GroupNorm):
    """GroupNorm that normalizes in the parameter dtype and casts back.

    The input is upcast (or downcast) to the dtype of the affine parameters
    before normalization, then the result is returned in the caller's dtype.
    This keeps the reduction numerically stable under mixed precision.
    """

    def forward(self, x):
        input_dtype = x.dtype
        normalized = super().forward(x.to(self.bias.dtype))
        return normalized.type(input_dtype)
51
+
52
class CogVideoXPatchEmbed(nn.Module):
    """Joint text/video patch embedding for the CogVideoX transformer.

    Video latents are patchified (2D convolution for CogVideoX 1.0; flattened
    3D patches through a linear layer for CogVideoX 1.5, i.e. when
    ``patch_size_t`` is set) and projected to ``embed_dim``. Text embeddings
    are projected with a separate linear layer and prepended to the video
    tokens. When enabled, fixed 3D sincos (or learned) positional embeddings
    are added, trilinearly resampled to the runtime spatial resolution.
    """

    def __init__(
        self,
        patch_size: int = 2,
        patch_size_t: Optional[int] = None,
        in_channels: int = 16,
        embed_dim: int = 1920,
        text_embed_dim: int = 4096,
        bias: bool = True,
        sample_width: int = 90,
        sample_height: int = 60,
        sample_frames: int = 49,
        temporal_compression_ratio: int = 4,
        max_text_seq_length: int = 226,
        spatial_interpolation_scale: float = 1.875,
        temporal_interpolation_scale: float = 1.0,
        use_positional_embeddings: bool = True,
        use_learned_positional_embeddings: bool = True,
    ) -> None:
        super().__init__()

        # Token-grid extents after spatial patching and VAE temporal compression.
        post_patch_height = sample_height // patch_size
        post_patch_width = sample_width // patch_size
        post_time_compression_frames = (sample_frames - 1) // temporal_compression_ratio + 1
        self.num_patches = post_patch_height * post_patch_width * post_time_compression_frames
        self.post_patch_height = post_patch_height
        self.post_patch_width = post_patch_width
        self.post_time_compression_frames = post_time_compression_frames
        self.patch_size = patch_size
        self.patch_size_t = patch_size_t
        self.embed_dim = embed_dim
        self.sample_height = sample_height
        self.sample_width = sample_width
        self.sample_frames = sample_frames
        self.temporal_compression_ratio = temporal_compression_ratio
        self.max_text_seq_length = max_text_seq_length
        self.spatial_interpolation_scale = spatial_interpolation_scale
        self.temporal_interpolation_scale = temporal_interpolation_scale
        self.use_positional_embeddings = use_positional_embeddings
        self.use_learned_positional_embeddings = use_learned_positional_embeddings

        if patch_size_t is None:
            # CogVideoX 1.0 checkpoints: per-frame 2D convolutional patchify.
            self.proj = nn.Conv2d(
                in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
            )
        else:
            # CogVideoX 1.5 checkpoints: 3D patches flattened into a linear layer.
            self.proj = nn.Linear(in_channels * patch_size * patch_size * patch_size_t, embed_dim)

        self.text_proj = nn.Linear(text_embed_dim, embed_dim)

        if use_positional_embeddings or use_learned_positional_embeddings:
            # Learned embeddings must be persisted with the checkpoint; fixed
            # sincos tables can be regenerated, so they are kept non-persistent.
            persistent = use_learned_positional_embeddings
            pos_embedding = self._get_positional_embeddings(sample_height, sample_width, sample_frames)
            self.register_buffer("pos_embedding", pos_embedding, persistent=persistent)

    def _get_positional_embeddings(self, sample_height: int, sample_width: int, sample_frames: int) -> torch.Tensor:
        # Build fixed 3D sincos positional embeddings for the joint
        # [text tokens | video tokens] sequence; the text slots stay zero.
        post_patch_height = sample_height // self.patch_size
        post_patch_width = sample_width // self.patch_size
        post_time_compression_frames = (sample_frames - 1) // self.temporal_compression_ratio + 1
        num_patches = post_patch_height * post_patch_width * post_time_compression_frames

        pos_embedding = get_3d_sincos_pos_embed(
            self.embed_dim,
            (post_patch_width, post_patch_height),
            post_time_compression_frames,
            self.spatial_interpolation_scale,
            self.temporal_interpolation_scale,
        )
        # get_3d_sincos_pos_embed returns a numpy array; flatten (frames, h*w)
        # into a single token axis.
        pos_embedding = torch.from_numpy(pos_embedding).flatten(0, 1)
        joint_pos_embedding = torch.zeros(
            1, self.max_text_seq_length + num_patches, self.embed_dim, requires_grad=False
        )
        joint_pos_embedding.data[:, self.max_text_seq_length :].copy_(pos_embedding)

        return joint_pos_embedding

    def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor, flow_embeds: Optional[torch.Tensor] = None):
        r"""
        Args:
            text_embeds (`torch.Tensor`):
                Input text embeddings. Expected shape: (batch_size, seq_length, embedding_dim).
            image_embeds (`torch.Tensor`):
                Input image embeddings. Expected shape: (batch_size, num_frames, channels, height, width).
            flow_embeds (`torch.Tensor`, *optional*):
                Optical-flow conditioning; indexed below as
                (batch_size, num_frames, channels, height, width) -- TODO confirm with callers.

        Returns:
            Tuple of the joint ``[text | video]`` token sequence and the
            (possibly 3D-patchified, unprojected) flow embeddings.
        """
        text_embeds = self.text_proj(text_embeds)

        text_batch_size, text_seq_length, text_channels = text_embeds.shape
        batch_size, num_frames, channels, height, width = image_embeds.shape

        if self.patch_size_t is None:
            # 2D patchify: fold frames into the batch axis for the conv.
            image_embeds = image_embeds.reshape(-1, channels, height, width)
            image_embeds = self.proj(image_embeds)
            image_embeds = image_embeds.view(batch_size, num_frames, *image_embeds.shape[1:])
            image_embeds = image_embeds.flatten(3).transpose(2, 3)  # [batch, num_frames, height x width, channels]
            image_embeds = image_embeds.flatten(1, 2)  # [batch, num_frames x height x width, channels]
        else:
            p = self.patch_size
            p_t = self.patch_size_t

            image_embeds = image_embeds.permute(0, 1, 3, 4, 2)
            # b, f, h, w, c => b, f // 2, 2, h // 2, 2, w // 2, 2, c
            image_embeds = image_embeds.reshape(
                batch_size, num_frames // p_t, p_t, height // p, p, width // p, p, channels
            )
            # b, f // 2, 2, h // 2, 2, w // 2, 2, c => b, f // 2, h // 2, w // 2, c, 2, 2, 2
            image_embeds = image_embeds.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(4, 7).flatten(1, 3)

            image_embeds = self.proj(image_embeds)

        embeds = torch.cat(
            [text_embeds, image_embeds], dim=1
        ).contiguous()  # [batch, seq_length + num_frames x height x width, channels]

        if self.use_positional_embeddings or self.use_learned_positional_embeddings:
            seq_length = height * width * num_frames // (self.patch_size**2)
            # pos_embeds = self.pos_embedding[:, : text_seq_length + seq_length]
            pos_embeds = self.pos_embedding
            emb_size = embeds.size()[-1]
            # Trilinearly resample the video part of the positional table from
            # the build-time grid to the runtime (frames, height, width) grid.
            # NOTE(review): the table was built with `max_text_seq_length` text
            # slots, but is sliced here with the runtime `text_seq_length` --
            # confirm callers always pad text to `max_text_seq_length`.
            pos_embeds_without_text = pos_embeds[:, text_seq_length: ].view(1, self.post_time_compression_frames, self.post_patch_height, self.post_patch_width, emb_size)
            pos_embeds_without_text = pos_embeds_without_text.permute([0, 4, 1, 2, 3])
            pos_embeds_without_text = F.interpolate(pos_embeds_without_text,size=[self.post_time_compression_frames, height // self.patch_size, width // self.patch_size], mode='trilinear', align_corners=False)
            pos_embeds_without_text = pos_embeds_without_text.permute([0, 2, 3, 4, 1]).view(1, -1, emb_size)
            pos_embeds = torch.cat([pos_embeds[:, :text_seq_length], pos_embeds_without_text], dim = 1)
            pos_embeds = pos_embeds[:, : text_seq_length + seq_length]
            embeds = embeds + pos_embeds

        if flow_embeds is not None:

            if self.patch_size_t is not None:
                _, _, flow_channels, _, _ = flow_embeds.shape

                # Apply the same 3D patch grouping to the flow tensor (no linear
                # projection) so it aligns with the video token grid.
                flow_embeds = flow_embeds.permute(0, 1, 3, 4, 2)
                # b, f, h, w, c => b, f // 2, 2, h // 2, 2, w // 2, 2, c
                flow_embeds = flow_embeds.reshape(
                    batch_size, num_frames // p_t, p_t, height // p, p, width // p, p, flow_channels
                )
                # b, f // 2, 2, h // 2, 2, w // 2, 2, c => b, f // 2, h // 2, w // 2, c, 2, 2, 2
                flow_embeds = flow_embeds.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(4, 7)

        return embeds, flow_embeds
194
+
195
+
196
@maybe_allow_in_graph
class CogVideoXBlock(nn.Module):
    r"""
    Transformer block used in [CogVideoX](https://github.com/THUDM/CogVideo) model,
    extended here with optional optical-flow conditioning layers.

    Parameters:
        block_idx (`int`):
            Index of this block within the transformer stack; flow-conditioning
            layers are only created when `block_idx % block_interval == 0`.
        dim (`int`):
            The number of channels in the input and output.
        num_attention_heads (`int`):
            The number of heads to use for multi-head attention.
        attention_head_dim (`int`):
            The number of channels in each head.
        time_embed_dim (`int`):
            The number of channels in timestep embedding.
        block_interval (`int`, defaults to `1`):
            Spacing (in blocks) between blocks that receive flow conditioning.
        flow_in_dim (`int`, defaults to `128`):
            Number of input channels of the flow conditioning tensor.
        out_dim (`int`, defaults to `3072`):
            Output channel count of the flow-conditioning branch; presumably
            matches `dim` so the gated residual can be added -- TODO confirm.
        dropout (`float`, defaults to `0.0`):
            The dropout probability to use.
        activation_fn (`str`, defaults to `"gelu-approximate"`):
            Activation function to be used in feed-forward.
        attention_bias (`bool`, defaults to `False`):
            Whether or not to use bias in attention projection layers.
        qk_norm (`bool`, defaults to `True`):
            Whether or not to use normalization after query and key projections in Attention.
        norm_elementwise_affine (`bool`, defaults to `True`):
            Whether to use learnable elementwise affine parameters for normalization.
        norm_eps (`float`, defaults to `1e-5`):
            Epsilon value for normalization layers.
        final_dropout (`bool` defaults to `False`):
            Whether to apply a final dropout after the last feed-forward layer.
        ff_inner_dim (`int`, *optional*, defaults to `None`):
            Custom hidden dimension of Feed-forward layer. If not provided, `4 * dim` is used.
        ff_bias (`bool`, defaults to `True`):
            Whether or not to use bias in Feed-forward layer.
        attention_out_bias (`bool`, defaults to `True`):
            Whether or not to use bias in Attention output projection layer.
        finetune_init (`bool`, defaults to `False`):
            When True, skip creating the flow-conditioning layers entirely.
    """

    def __init__(
        self,
        block_idx: int,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        time_embed_dim: int,
        block_interval: int = 1,
        flow_in_dim: int = 128,
        out_dim: int = 3072,
        dropout: float = 0.0,
        activation_fn: str = "gelu-approximate",
        attention_bias: bool = False,
        qk_norm: bool = True,
        norm_elementwise_affine: bool = True,
        norm_eps: float = 1e-5,
        final_dropout: bool = True,
        ff_inner_dim: Optional[int] = None,
        ff_bias: bool = True,
        attention_out_bias: bool = True,
        finetune_init: bool = False,
    ):
        super().__init__()


        # 1. Self Attention
        self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True)

        self.attn1 = Attention(
            query_dim=dim,
            dim_head=attention_head_dim,
            heads=num_attention_heads,
            qk_norm="layer_norm" if qk_norm else None,
            eps=1e-6,
            bias=attention_bias,
            out_bias=attention_out_bias,
            processor=CogVideoXAttnProcessor2_0(),
        )

        # Optional flow-conditioning branch: a spatial conv followed by a
        # zero-initialized temporal conv (so the branch starts as a no-op).
        if not finetune_init and (block_idx%block_interval==0):
            self.flow_spatial = nn.Conv2d(flow_in_dim, out_dim // 4, 3, padding=1)
            self.flow_temporal = zero_module(
                nn.Conv1d(
                    out_dim // 4,
                    out_dim,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    padding_mode="replicate",
                )
            )
            self.flow_cond_norm = FloatGroupNorm(32, out_dim)

        # 2. Feed Forward
        self.norm2 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True)

        self.ff = FeedForward(
            dim,
            dropout=dropout,
            activation_fn=activation_fn,
            final_dropout=final_dropout,
            inner_dim=ff_inner_dim,
            bias=ff_bias,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        flow_states: torch.Tensor,
        temb: torch.Tensor,
        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> torch.Tensor:
        # Run joint text/video self-attention with AdaLN-Zero gating, inject
        # the (optional) flow conditioning, then the gated feed-forward.
        text_seq_length = encoder_hidden_states.size(1)

        # norm & modulate
        norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1(
            hidden_states, encoder_hidden_states, temb
        )

        # attention
        attn_hidden_states, attn_encoder_hidden_states = self.attn1(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=norm_encoder_hidden_states,
            image_rotary_emb=image_rotary_emb,
        )

        hidden_states = hidden_states + gate_msa * attn_hidden_states
        encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states

        # Flow conditioning (only in blocks that own the flow layers):
        # spatial conv per frame, temporal conv per pixel, then a gated
        # residual where the group-normed activations scale the raw ones.
        if hasattr(self, "flow_spatial") and flow_states is not None:

            bz, f, h, w, c = flow_states.shape

            flow_states = rearrange(flow_states, "bz f h w c -> (bz f) c h w")
            flow_states = self.flow_spatial(flow_states) # [5, 128, 30, 40] -> [5, 960, 30, 40]

            flow_states = rearrange(flow_states, "(bz f) c h w -> (bz h w) c f", f=f)
            flow_states = self.flow_temporal(flow_states) # [1200, 960, 5] -> [1200, 1920, 5]
            flow_states = rearrange(flow_states, "(bz h w) c f -> bz (f h w) c", f=f, h=h, w=w)

            norm_flow_states = self.flow_cond_norm(rearrange(flow_states, "bz (f h w) c -> (bz f) c h w", h=h, w=w))
            norm_flow_states = rearrange(norm_flow_states, "(bz f) c h w -> bz (f h w) c", f=f, h=h, w=w)

            hidden_states = hidden_states + norm_flow_states * flow_states

        # norm & modulate
        norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2(
            hidden_states, encoder_hidden_states, temb
        )

        # feed-forward
        norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1)
        ff_output = self.ff(norm_hidden_states)

        hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:]
        encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length]

        return hidden_states, encoder_hidden_states
351
+
352
+
353
+ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin):
354
+ """
355
+ A Transformer model for video-like data in [CogVideoX](https://github.com/THUDM/CogVideo).
356
+
357
+ Parameters:
358
+ num_attention_heads (`int`, defaults to `30`):
359
+ The number of heads to use for multi-head attention.
360
+ attention_head_dim (`int`, defaults to `64`):
361
+ The number of channels in each head.
362
+ in_channels (`int`, defaults to `16`):
363
+ The number of channels in the input.
364
+ out_channels (`int`, *optional*, defaults to `16`):
365
+ The number of channels in the output.
366
+ flip_sin_to_cos (`bool`, defaults to `True`):
367
+ Whether to flip the sin to cos in the time embedding.
368
+ time_embed_dim (`int`, defaults to `512`):
369
+ Output dimension of timestep embeddings.
370
+ text_embed_dim (`int`, defaults to `4096`):
371
+ Input dimension of text embeddings from the text encoder.
372
+ num_layers (`int`, defaults to `30`):
373
+ The number of layers of Transformer blocks to use.
374
+ dropout (`float`, defaults to `0.0`):
375
+ The dropout probability to use.
376
+ attention_bias (`bool`, defaults to `True`):
377
+ Whether or not to use bias in the attention projection layers.
378
+ sample_width (`int`, defaults to `90`):
379
+ The width of the input latents.
380
+ sample_height (`int`, defaults to `60`):
381
+ The height of the input latents.
382
+ sample_frames (`int`, defaults to `49`):
383
+ The number of frames in the input latents. Note that this parameter was incorrectly initialized to 49
384
+ instead of 13 because CogVideoX processed 13 latent frames at once in its default and recommended settings,
385
+ but cannot be changed to the correct value to ensure backwards compatibility. To create a transformer with
386
+ K latent frames, the correct value to pass here would be: ((K - 1) * temporal_compression_ratio + 1).
387
+ patch_size (`int`, defaults to `2`):
388
+ The size of the patches to use in the patch embedding layer.
389
+ temporal_compression_ratio (`int`, defaults to `4`):
390
+ The compression ratio across the temporal dimension. See documentation for `sample_frames`.
391
+ max_text_seq_length (`int`, defaults to `226`):
392
+ The maximum sequence length of the input text embeddings.
393
+ activation_fn (`str`, defaults to `"gelu-approximate"`):
394
+ Activation function to use in feed-forward.
395
+ timestep_activation_fn (`str`, defaults to `"silu"`):
396
+ Activation function to use when generating the timestep embeddings.
397
+ norm_elementwise_affine (`bool`, defaults to `True`):
398
+ Whether or not to use elementwise affine in normalization layers.
399
+ norm_eps (`float`, defaults to `1e-5`):
400
+ The epsilon value to use in normalization layers.
401
+ spatial_interpolation_scale (`float`, defaults to `1.875`):
402
+ Scaling factor to apply in 3D positional embeddings across spatial dimensions.
403
+ temporal_interpolation_scale (`float`, defaults to `1.0`):
404
+ Scaling factor to apply in 3D positional embeddings across temporal dimensions.
405
+ """
406
+
407
+ _supports_gradient_checkpointing = True
408
+
409
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 30,
        attention_head_dim: int = 64,
        in_channels: int = 16,
        out_channels: Optional[int] = 16,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        time_embed_dim: int = 512,
        text_embed_dim: int = 4096,
        num_layers: int = 30,
        dropout: float = 0.0,
        attention_bias: bool = True,
        sample_width: int = 90,
        sample_height: int = 60,
        sample_frames: int = 49,
        patch_size: int = 2,
        patch_size_t: Optional[int] = None,
        temporal_compression_ratio: int = 4,
        max_text_seq_length: int = 226,
        activation_fn: str = "gelu-approximate",
        timestep_activation_fn: str = "silu",
        norm_elementwise_affine: bool = True,
        norm_eps: float = 1e-5,
        spatial_interpolation_scale: float = 1.875,
        temporal_interpolation_scale: float = 1.0,
        use_rotary_positional_embeddings: bool = False,
        use_learned_positional_embeddings: bool = False,
        patch_bias: bool = True,
        add_noise_in_inpaint_model: bool = False,
        finetune_init: bool = False,
    ):
        # Build the full stack: patch embedding, timestep embedding, the
        # transformer blocks (with optional flow conditioning controlled by
        # `finetune_init`), and the AdaLN output head.
        super().__init__()
        inner_dim = num_attention_heads * attention_head_dim
        self.patch_size_t = patch_size_t
        if not use_rotary_positional_embeddings and use_learned_positional_embeddings:
            raise ValueError(
                "There are no CogVideoX checkpoints available with disable rotary embeddings and learned positional "
                "embeddings. If you're using a custom model and/or believe this should be supported, please open an "
                "issue at https://github.com/huggingface/diffusers/issues."
            )

        # 1. Patch embedding
        self.patch_embed = CogVideoXPatchEmbed(
            patch_size=patch_size,
            patch_size_t=patch_size_t,
            in_channels=in_channels,
            embed_dim=inner_dim,
            text_embed_dim=text_embed_dim,
            bias=patch_bias,
            sample_width=sample_width,
            sample_height=sample_height,
            sample_frames=sample_frames,
            temporal_compression_ratio=temporal_compression_ratio,
            max_text_seq_length=max_text_seq_length,
            spatial_interpolation_scale=spatial_interpolation_scale,
            temporal_interpolation_scale=temporal_interpolation_scale,
            use_positional_embeddings=not use_rotary_positional_embeddings,
            use_learned_positional_embeddings=use_learned_positional_embeddings,
        )
        self.embedding_dropout = nn.Dropout(dropout)

        # 2. Time embeddings
        self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn)

        # 3. Define spatio-temporal transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                CogVideoXBlock(
                    block_idx=idx,
                    dim=inner_dim,
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    time_embed_dim=time_embed_dim,
                    dropout=dropout,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    norm_elementwise_affine=norm_elementwise_affine,
                    norm_eps=norm_eps,
                    finetune_init=finetune_init,
                )
                for idx in range(num_layers)
            ]
        )
        self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine)

        # 4. Output blocks
        self.norm_out = AdaLayerNorm(
            embedding_dim=time_embed_dim,
            output_dim=2 * inner_dim,
            norm_elementwise_affine=norm_elementwise_affine,
            norm_eps=norm_eps,
            chunk_dim=1,
        )

        if patch_size_t is None:
            # For CogVideox 1.0: unpatchify 2D spatial patches only.
            output_dim = patch_size * patch_size * out_channels
        else:
            # For CogVideoX 1.5: unpatchify spatio-temporal patches.
            output_dim = patch_size * patch_size * patch_size_t * out_channels

        self.proj_out = nn.Linear(inner_dim, output_dim)

        self.gradient_checkpointing = False
516
+
517
+ def _set_gradient_checkpointing(self, module, value=False):
518
+ self.gradient_checkpointing = value
519
+
520
    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            # Any submodule exposing `get_processor` (i.e. an Attention layer)
            # is recorded under its dotted weight-name path.
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
544
+
545
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        # A dict must cover every attention layer exactly once; keys are the
        # same dotted paths produced by the `attn_processors` property.
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    # Single processor instance: shared by all layers.
                    module.set_processor(processor)
                else:
                    # Dict: consume the entry matching this layer's path.
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
579
+
580
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedCogVideoXAttnProcessor2_0
    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        self.original_attn_processors = None

        # Fusion is unsupported for processors with added KV projections.
        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        # Keep the current processors so `unfuse_qkv_projections` can restore them.
        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)

        self.set_attn_processor(FusedCogVideoXAttnProcessor2_0())
605
+
606
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
607
+ def unfuse_qkv_projections(self):
608
+ """Disables the fused QKV projection if enabled.
609
+
610
+ <Tip warning={true}>
611
+
612
+ This API is 🧪 experimental.
613
+
614
+ </Tip>
615
+
616
+ """
617
+ if self.original_attn_processors is not None:
618
+ self.set_attn_processor(self.original_attn_processors)
619
+
620
    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        timestep: Union[int, float, torch.LongTensor],
        timestep_cond: Optional[torch.Tensor] = None,
        inpaint_latents: Optional[torch.Tensor] = None,
        flow_latents: Optional[torch.Tensor] = None,
        control_latents: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        return_dict: bool = True,
    ):
        """Run one denoising pass of the transformer.

        Args:
            hidden_states: Video latents, shape (batch, frames, channels, height, width).
            encoder_hidden_states: Text embeddings, shape (batch, text_seq_len, text_embed_dim).
            timestep: Current diffusion timestep(s) fed to the time embedding.
            timestep_cond: Optional extra conditioning for the time embedding.
            inpaint_latents: Optional latents concatenated channel-wise before patchify.
            flow_latents: Optional optical-flow conditioning, forwarded through
                the patch embed to each transformer block.
            control_latents: Optional control latents concatenated channel-wise.
            image_rotary_emb: Optional rotary embeddings (cos, sin) for attention.
            return_dict: If False, return a 1-tuple ``(sample,)`` instead of
                a ``Transformer2DModelOutput``.
        """

        batch_size, num_frames, channels, height, width = hidden_states.shape
        # CogVideoX 1.5 patchifies frames in pairs; pad a single frame with a
        # zero frame so the temporal patch size divides evenly, and trim after.
        # NOTE(review): flow_latents is not padded here -- confirm callers never
        # pass flow conditioning together with single-frame input.
        if num_frames == 1 and self.patch_size_t is not None:
            hidden_states = torch.cat([hidden_states, torch.zeros_like(hidden_states)], dim=1)
            if inpaint_latents is not None:
                inpaint_latents = torch.concat([inpaint_latents, torch.zeros_like(inpaint_latents)], dim=1)
            if control_latents is not None:
                control_latents = torch.concat([control_latents, torch.zeros_like(control_latents)], dim=1)
            local_num_frames = num_frames + 1
        else:
            local_num_frames = num_frames

        # 1. Time embedding
        timesteps = timestep
        t_emb = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=hidden_states.dtype)
        emb = self.time_embedding(t_emb, timestep_cond)

        # 2. Patch embedding (extra conditioning latents join on the channel axis)
        if inpaint_latents is not None:
            hidden_states = torch.concat([hidden_states, inpaint_latents], 2)
        if control_latents is not None:
            hidden_states = torch.concat([hidden_states, control_latents], 2)

        hidden_states, flow_states = self.patch_embed(encoder_hidden_states, hidden_states, flow_latents)
        hidden_states = self.embedding_dropout(hidden_states)

        # The patch embed returns [text | video] tokens; split them back apart.
        text_seq_length = encoder_hidden_states.shape[1]
        encoder_hidden_states = hidden_states[:, :text_seq_length]
        hidden_states = hidden_states[:, text_seq_length:]

        # 3. Transformer blocks
        for i, block in enumerate(self.transformer_blocks):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states, encoder_hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    encoder_hidden_states,
                    flow_states,
                    emb,
                    image_rotary_emb,
                    **ckpt_kwargs,
                )
            else:
                hidden_states, encoder_hidden_states = block(
                    hidden_states=hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    flow_states=flow_states,
                    temb=emb,
                    image_rotary_emb=image_rotary_emb,
                )

        if not self.config.use_rotary_positional_embeddings:
            # CogVideoX-2B
            hidden_states = self.norm_final(hidden_states)
        else:
            # CogVideoX-5B: final norm is applied over the joint sequence,
            # then the text tokens are discarded.
            hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
            hidden_states = self.norm_final(hidden_states)
            hidden_states = hidden_states[:, text_seq_length:]

        # 4. Final block
        hidden_states = self.norm_out(hidden_states, temb=emb)
        hidden_states = self.proj_out(hidden_states)

        # 5. Unpatchify
        p = self.config.patch_size
        p_t = self.config.patch_size_t

        if p_t is None:
            output = hidden_states.reshape(batch_size, local_num_frames, height // p, width // p, -1, p, p)
            output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4)
        else:
            output = hidden_states.reshape(
                batch_size, (local_num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p
            )
            output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2)

        # Drop the zero-padded frame added for the single-frame case above.
        if num_frames == 1:
            output = output[:, :num_frames, :]

        if not return_dict:
            return (output,)
        return Transformer2DModelOutput(sample=output)
728
+
729
+ @classmethod
730
+ def from_pretrained_2d(
731
+ cls, pretrained_model_path, subfolder=None, transformer_additional_kwargs={},
732
+ finetune_init=False, low_cpu_mem_usage=False, torch_dtype=torch.bfloat16
733
+ ):
734
+ if subfolder is not None:
735
+ pretrained_model_path = os.path.join(pretrained_model_path, subfolder)
736
+ print(f"loaded 3D transformer's pretrained weights from {pretrained_model_path} ...")
737
+
738
+ config_file = os.path.join(pretrained_model_path, 'config.json')
739
+ if not os.path.isfile(config_file):
740
+ raise RuntimeError(f"{config_file} does not exist")
741
+ with open(config_file, "r") as f:
742
+ config = json.load(f)
743
+ config['finetune_init'] = finetune_init
744
+
745
+ from diffusers.utils import WEIGHTS_NAME
746
+ model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
747
+ model_file_safetensors = model_file.replace(".bin", ".safetensors")
748
+
749
+ if low_cpu_mem_usage:
750
+ try:
751
+ import re
752
+ from diffusers.utils import is_accelerate_available
753
+ from diffusers.models.modeling_utils import load_model_dict_into_meta
754
+ if is_accelerate_available():
755
+ import accelerate
756
+
757
+ # Instantiate model with empty weights
758
+ with accelerate.init_empty_weights():
759
+ model = cls.from_config(config, **transformer_additional_kwargs)
760
+
761
+ param_device = "cpu"
762
+ if os.path.exists(model_file):
763
+ state_dict = torch.load(model_file, map_location="cpu")
764
+ elif os.path.exists(model_file_safetensors):
765
+ from safetensors.torch import load_file, safe_open
766
+ state_dict = load_file(model_file_safetensors)
767
+ else:
768
+ from safetensors.torch import load_file, safe_open
769
+ model_files_safetensors = glob.glob(os.path.join(pretrained_model_path, "*.safetensors"))
770
+ state_dict = {}
771
+ for _model_file_safetensors in model_files_safetensors:
772
+ _state_dict = load_file(_model_file_safetensors)
773
+ for key in _state_dict:
774
+ state_dict[key] = _state_dict[key]
775
+ model._convert_deprecated_attention_blocks(state_dict)
776
+ # move the params from meta device to cpu
777
+ missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
778
+ if len(missing_keys) > 0:
779
+ raise ValueError(
780
+ f"Cannot load {cls} from {pretrained_model_path} because the following keys are"
781
+ f" missing: \n {', '.join(missing_keys)}. \n Please make sure to pass"
782
+ " `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize"
783
+ " those weights or else make sure your checkpoint file is correct."
784
+ )
785
+
786
+ unexpected_keys = load_model_dict_into_meta(
787
+ model,
788
+ state_dict,
789
+ device=param_device,
790
+ dtype=torch_dtype,
791
+ model_name_or_path=pretrained_model_path,
792
+ )
793
+
794
+ if cls._keys_to_ignore_on_load_unexpected is not None:
795
+ for pat in cls._keys_to_ignore_on_load_unexpected:
796
+ unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
797
+
798
+ if len(unexpected_keys) > 0:
799
+ print(
800
+ f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}"
801
+ )
802
+ return model
803
+ except Exception as e:
804
+ print(
805
+ f"The low_cpu_mem_usage mode is not work because {e}. Use low_cpu_mem_usage=False instead."
806
+ )
807
+
808
+ model = cls.from_config(config, **transformer_additional_kwargs)
809
+ if os.path.exists(model_file):
810
+ state_dict = torch.load(model_file, map_location="cpu")
811
+ elif os.path.exists(model_file_safetensors):
812
+ from safetensors.torch import load_file, safe_open
813
+ state_dict = load_file(model_file_safetensors)
814
+ else:
815
+ from safetensors.torch import load_file, safe_open
816
+ model_files_safetensors = glob.glob(os.path.join(pretrained_model_path, "*.safetensors"))
817
+ state_dict = {}
818
+ for _model_file_safetensors in model_files_safetensors:
819
+ _state_dict = load_file(_model_file_safetensors)
820
+ for key in _state_dict:
821
+ state_dict[key] = _state_dict[key]
822
+
823
+ if model.state_dict()['patch_embed.proj.weight'].size() != state_dict['patch_embed.proj.weight'].size():
824
+ new_shape = model.state_dict()['patch_embed.proj.weight'].size()
825
+ if len(new_shape) == 5:
826
+ state_dict['patch_embed.proj.weight'] = state_dict['patch_embed.proj.weight'].unsqueeze(2).expand(new_shape).clone()
827
+ state_dict['patch_embed.proj.weight'][:, :, :-1] = 0
828
+ elif len(new_shape) == 2:
829
+ if model.state_dict()['patch_embed.proj.weight'].size()[1] > state_dict['patch_embed.proj.weight'].size()[1]:
830
+ model.state_dict()['patch_embed.proj.weight'][:, :state_dict['patch_embed.proj.weight'].size()[1]] = state_dict['patch_embed.proj.weight']
831
+ model.state_dict()['patch_embed.proj.weight'][:, state_dict['patch_embed.proj.weight'].size()[1]:] = 0
832
+ state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight']
833
+ else:
834
+ model.state_dict()['patch_embed.proj.weight'][:, :] = state_dict['patch_embed.proj.weight'][:, :model.state_dict()['patch_embed.proj.weight'].size()[1]]
835
+ state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight']
836
+ else:
837
+ if model.state_dict()['patch_embed.proj.weight'].size()[1] > state_dict['patch_embed.proj.weight'].size()[1]:
838
+ model.state_dict()['patch_embed.proj.weight'][:, :state_dict['patch_embed.proj.weight'].size()[1], :, :] = state_dict['patch_embed.proj.weight']
839
+ model.state_dict()['patch_embed.proj.weight'][:, state_dict['patch_embed.proj.weight'].size()[1]:, :, :] = 0
840
+ state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight']
841
+ else:
842
+ model.state_dict()['patch_embed.proj.weight'][:, :, :, :] = state_dict['patch_embed.proj.weight'][:, :model.state_dict()['patch_embed.proj.weight'].size()[1], :, :]
843
+ state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight']
844
+
845
+ tmp_state_dict = {}
846
+ for key in state_dict:
847
+ if key in model.state_dict().keys() and model.state_dict()[key].size() == state_dict[key].size():
848
+ tmp_state_dict[key] = state_dict[key]
849
+ else:
850
+ import ipdb; ipdb.set_trace()
851
+ print(key, "Size don't match, skip")
852
+
853
+ state_dict = tmp_state_dict
854
+
855
+ m, u = model.load_state_dict(state_dict, strict=False)
856
+ print(f"### missing keys: {len(m)}; \n### unexpected keys: {len(u)};")
857
+ print(m)
858
+
859
+ params = [p.numel() if "." in n else 0 for n, p in model.named_parameters()]
860
+ print(f"### All Parameters: {sum(params) / 1e6} M")
861
+
862
+ model = model.to(torch_dtype)
863
+ return model
robomaster/pipeline/pipeline_cogvideox.py ADDED
@@ -0,0 +1,877 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import inspect
17
+ import math
18
+ import numpy as np
19
+ from dataclasses import dataclass
20
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ from transformers import T5EncoderModel, T5Tokenizer
24
+
25
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
26
+ from diffusers.models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
27
+ from diffusers.models.embeddings import get_1d_rotary_pos_embed
28
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
29
+ from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
30
+ from diffusers.utils import BaseOutput, logging, replace_example_docstring
31
+ from diffusers.utils.torch_utils import randn_tensor
32
+ from diffusers.video_processor import VideoProcessor
33
+
34
+
35
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
+
37
+
38
+ EXAMPLE_DOC_STRING = """
39
+ Examples:
40
+ ```python
41
+ >>> import torch
42
+ >>> from diffusers import CogVideoX_Fun_Pipeline
43
+ >>> from diffusers.utils import export_to_video
44
+
45
+ >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"
46
+ >>> pipe = CogVideoX_Fun_Pipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16).to("cuda")
47
+ >>> prompt = (
48
+ ... "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
49
+ ... "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
50
+ ... "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
51
+ ... "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
52
+ ... "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
53
+ ... "atmosphere of this unique musical performance."
54
+ ... )
55
+ >>> video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
56
+ >>> export_to_video(video, "output.mp4", fps=8)
57
+ ```
58
+ """
59
+
60
+
61
# Copied from diffusers.models.embeddings.get_3d_rotary_pos_embed
def get_3d_rotary_pos_embed(
    embed_dim,
    crops_coords,
    grid_size,
    temporal_size,
    theta: int = 10000,
    use_real: bool = True,
    grid_type: str = "linspace",
    max_size: Optional[Tuple[int, int]] = None,
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    RoPE for video tokens with 3D structure.

    Args:
        embed_dim: (`int`):
            The embedding dimension size, corresponding to hidden_size_head.
        crops_coords (`Tuple[int]`):
            The top-left and bottom-right coordinates of the crop. Only used when `grid_type` is "linspace".
        grid_size (`Tuple[int]`):
            The grid size of the spatial positional embedding (height, width).
        temporal_size (`int`):
            The size of the temporal dimension.
        theta (`float`):
            Scaling factor for frequency computation.
        grid_type (`str`):
            Whether to use "linspace" or "slice" to compute grids.
        max_size (`Tuple[int, int]`, *optional*):
            Maximum (height, width) grid to precompute; required when `grid_type` is "slice".

    Returns:
        `torch.Tensor`: positional embedding with shape `(temporal_size * grid_size[0] * grid_size[1], embed_dim/2)`.
    """
    if use_real is not True:
        raise ValueError(" `use_real = False` is not currently supported for get_3d_rotary_pos_embed")

    if grid_type == "linspace":
        start, stop = crops_coords
        grid_size_h, grid_size_w = grid_size
        grid_h = np.linspace(start[0], stop[0], grid_size_h, endpoint=False, dtype=np.float32)
        grid_w = np.linspace(start[1], stop[1], grid_size_w, endpoint=False, dtype=np.float32)
        # Fix: the original assigned `np.arange(temporal_size)` here and then
        # immediately overwrote it with this equivalent linspace; keep a single
        # assignment.
        grid_t = np.linspace(0, temporal_size, temporal_size, endpoint=False, dtype=np.float32)
    elif grid_type == "slice":
        max_h, max_w = max_size
        grid_size_h, grid_size_w = grid_size
        grid_h = np.arange(max_h, dtype=np.float32)
        grid_w = np.arange(max_w, dtype=np.float32)
        grid_t = np.arange(temporal_size, dtype=np.float32)
    else:
        raise ValueError("Invalid value passed for `grid_type`.")

    # Split the head dim across axes: 1/4 temporal, 3/8 height, 3/8 width.
    dim_t = embed_dim // 4
    dim_h = embed_dim // 8 * 3
    dim_w = embed_dim // 8 * 3

    # Fix: `theta` was accepted but silently ignored; forward it so non-default
    # values take effect (matches upstream diffusers). The default (10000)
    # produces identical behavior to the previous code.
    # Temporal frequencies
    freqs_t = get_1d_rotary_pos_embed(dim_t, grid_t, theta=theta, use_real=True)
    # Spatial frequencies for height and width
    freqs_h = get_1d_rotary_pos_embed(dim_h, grid_h, theta=theta, use_real=True)
    freqs_w = get_1d_rotary_pos_embed(dim_w, grid_w, theta=theta, use_real=True)

    # Broadcast and concatenate temporal and spatial frequencies (height and
    # width) into one table indexed by flattened (t, h, w) position.
    def combine_time_height_width(freqs_t, freqs_h, freqs_w):
        freqs_t = freqs_t[:, None, None, :].expand(
            -1, grid_size_h, grid_size_w, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_t
        freqs_h = freqs_h[None, :, None, :].expand(
            temporal_size, -1, grid_size_w, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_h
        freqs_w = freqs_w[None, None, :, :].expand(
            temporal_size, grid_size_h, -1, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_w

        freqs = torch.cat(
            [freqs_t, freqs_h, freqs_w], dim=-1
        )  # temporal_size, grid_size_h, grid_size_w, (dim_t + dim_h + dim_w)
        freqs = freqs.view(
            temporal_size * grid_size_h * grid_size_w, -1
        )  # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w)
        return freqs

    t_cos, t_sin = freqs_t  # both t_cos and t_sin have shape: temporal_size, dim_t
    h_cos, h_sin = freqs_h  # both h_cos and h_sin have shape: grid_size_h, dim_h
    w_cos, w_sin = freqs_w  # both w_cos and w_sin have shape: grid_size_w, dim_w

    if grid_type == "slice":
        # Trim the precomputed max-size tables down to the requested grid.
        t_cos, t_sin = t_cos[:temporal_size], t_sin[:temporal_size]
        h_cos, h_sin = h_cos[:grid_size_h], h_sin[:grid_size_h]
        w_cos, w_sin = w_cos[:grid_size_w], w_sin[:grid_size_w]

    cos = combine_time_height_width(t_cos, h_cos, w_cos)
    sin = combine_time_height_width(t_sin, h_sin, w_sin)
    return cos, sin
154
+
155
+
156
# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
    """
    Compute the centered region that a (height, width) grid occupies after
    aspect-preserving resize into a target canvas.

    Args:
        src: source (height, width) pair.
        tgt_width: target canvas width.
        tgt_height: target canvas height.

    Returns:
        ((top, left), (bottom, right)) coordinates of the fitted region.
    """
    src_h, src_w = src

    if src_h / src_w > tgt_height / tgt_width:
        # Source is taller than the target aspect ratio: match the height.
        fit_h = tgt_height
        fit_w = int(round(tgt_height / src_h * src_w))
    else:
        # Source is wider (or equal): match the width.
        fit_w = tgt_width
        fit_h = int(round(tgt_width / src_w * src_h))

    # Center the fitted region inside the target canvas.
    top = int(round((tgt_height - fit_h) / 2.0))
    left = int(round((tgt_width - fit_w) / 2.0))

    return (top, left), (top + fit_h, left + fit_w)
173
+
174
+
175
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    """
    Configure `scheduler` via `set_timesteps` and return the resulting schedule.

    Exactly one of `num_inference_steps`, `timesteps`, or `sigmas` drives the
    schedule. Custom `timesteps`/`sigmas` are only forwarded when the
    scheduler's `set_timesteps` signature accepts them; otherwise a
    `ValueError` is raised. Any extra `kwargs` are passed through unchanged.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            Number of denoising steps; must be `None` when `timesteps` is used.
        device (`str` or `torch.device`, *optional*):
            Device the timesteps should be moved to (`None` keeps them put).
        timesteps (`List[int]`, *optional*):
            Explicit timestep schedule (mutually exclusive with `sigmas`).
        sigmas (`List[float]`, *optional*):
            Explicit sigma schedule (mutually exclusive with `timesteps`).

    Returns:
        `Tuple[torch.Tensor, int]`: the scheduler's timestep schedule and the
        number of inference steps it contains.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")

    if timesteps is None and sigmas is None:
        # Default path: derive the schedule from a plain step count.
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        return scheduler.timesteps, num_inference_steps

    # A custom schedule was requested; verify the scheduler supports it.
    custom_kind = "timesteps" if timesteps is not None else "sigmas"
    accepted = set(inspect.signature(scheduler.set_timesteps).parameters.keys())
    if custom_kind not in accepted:
        # Keep the original wording: "timestep schedules" vs "sigmas schedules".
        kind_word = "timestep" if timesteps is not None else "sigmas"
        raise ValueError(
            f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
            f" {kind_word} schedules. Please check whether you are using the correct scheduler."
        )

    if timesteps is not None:
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
    else:
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)

    schedule = scheduler.timesteps
    return schedule, len(schedule)
233
+
234
+
235
@dataclass
class CogVideoX_Fun_PipelineOutput(BaseOutput):
    r"""
    Output class for CogVideo pipelines.

    Args:
        videos (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
            Generated video output. Either a nested list of length `batch_size`
            where each entry holds `num_frames` denoised PIL images, or a NumPy
            array / Torch tensor of shape
            `(batch_size, num_frames, channels, height, width)`.
    """

    videos: torch.Tensor
248
+
249
+
250
+ class CogVideoX_Fun_Pipeline(DiffusionPipeline):
251
+ r"""
252
+ Pipeline for text-to-video generation using CogVideoX_Fun.
253
+
254
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
255
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
256
+
257
+ Args:
258
+ vae ([`AutoencoderKL`]):
259
+ Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
260
+ text_encoder ([`T5EncoderModel`]):
261
+ Frozen text-encoder. CogVideoX uses
262
+ [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
263
+ [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
264
+ tokenizer (`T5Tokenizer`):
265
+ Tokenizer of class
266
+ [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
267
+ transformer ([`CogVideoXTransformer3DModel`]):
268
+ A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
269
+ scheduler ([`SchedulerMixin`]):
270
+ A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
271
+ """
272
+
273
+ _optional_components = []
274
+ model_cpu_offload_seq = "text_encoder->transformer->vae"
275
+
276
+ _callback_tensor_inputs = [
277
+ "latents",
278
+ "prompt_embeds",
279
+ "negative_prompt_embeds",
280
+ ]
281
+
282
+ def __init__(
283
+ self,
284
+ tokenizer: T5Tokenizer,
285
+ text_encoder: T5EncoderModel,
286
+ vae: AutoencoderKLCogVideoX,
287
+ transformer: CogVideoXTransformer3DModel,
288
+ scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
289
+ ):
290
+ super().__init__()
291
+
292
+ self.register_modules(
293
+ tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
294
+ )
295
+ self.vae_scale_factor_spatial = (
296
+ 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
297
+ )
298
+ self.vae_scale_factor_temporal = (
299
+ self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4
300
+ )
301
+
302
+ self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
303
+
304
+ def _get_t5_prompt_embeds(
305
+ self,
306
+ prompt: Union[str, List[str]] = None,
307
+ num_videos_per_prompt: int = 1,
308
+ max_sequence_length: int = 226,
309
+ device: Optional[torch.device] = None,
310
+ dtype: Optional[torch.dtype] = None,
311
+ ):
312
+ device = device or self._execution_device
313
+ dtype = dtype or self.text_encoder.dtype
314
+
315
+ prompt = [prompt] if isinstance(prompt, str) else prompt
316
+ batch_size = len(prompt)
317
+
318
+ text_inputs = self.tokenizer(
319
+ prompt,
320
+ padding="max_length",
321
+ max_length=max_sequence_length,
322
+ truncation=True,
323
+ add_special_tokens=True,
324
+ return_tensors="pt",
325
+ )
326
+ text_input_ids = text_inputs.input_ids
327
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
328
+
329
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
330
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
331
+ logger.warning(
332
+ "The following part of your input was truncated because `max_sequence_length` is set to "
333
+ f" {max_sequence_length} tokens: {removed_text}"
334
+ )
335
+
336
+ prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
337
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
338
+
339
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
340
+ _, seq_len, _ = prompt_embeds.shape
341
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
342
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
343
+
344
+ return prompt_embeds
345
+
346
+ def encode_prompt(
347
+ self,
348
+ prompt: Union[str, List[str]],
349
+ negative_prompt: Optional[Union[str, List[str]]] = None,
350
+ do_classifier_free_guidance: bool = True,
351
+ num_videos_per_prompt: int = 1,
352
+ prompt_embeds: Optional[torch.Tensor] = None,
353
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
354
+ max_sequence_length: int = 226,
355
+ device: Optional[torch.device] = None,
356
+ dtype: Optional[torch.dtype] = None,
357
+ ):
358
+ r"""
359
+ Encodes the prompt into text encoder hidden states.
360
+
361
+ Args:
362
+ prompt (`str` or `List[str]`, *optional*):
363
+ prompt to be encoded
364
+ negative_prompt (`str` or `List[str]`, *optional*):
365
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
366
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
367
+ less than `1`).
368
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
369
+ Whether to use classifier free guidance or not.
370
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
371
+ Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
372
+ prompt_embeds (`torch.Tensor`, *optional*):
373
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
374
+ provided, text embeddings will be generated from `prompt` input argument.
375
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
376
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
377
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
378
+ argument.
379
+ device: (`torch.device`, *optional*):
380
+ torch device
381
+ dtype: (`torch.dtype`, *optional*):
382
+ torch dtype
383
+ """
384
+ device = device or self._execution_device
385
+
386
+ prompt = [prompt] if isinstance(prompt, str) else prompt
387
+ if prompt is not None:
388
+ batch_size = len(prompt)
389
+ else:
390
+ batch_size = prompt_embeds.shape[0]
391
+
392
+ if prompt_embeds is None:
393
+ prompt_embeds = self._get_t5_prompt_embeds(
394
+ prompt=prompt,
395
+ num_videos_per_prompt=num_videos_per_prompt,
396
+ max_sequence_length=max_sequence_length,
397
+ device=device,
398
+ dtype=dtype,
399
+ )
400
+
401
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
402
+ negative_prompt = negative_prompt or ""
403
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
404
+
405
+ if prompt is not None and type(prompt) is not type(negative_prompt):
406
+ raise TypeError(
407
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
408
+ f" {type(prompt)}."
409
+ )
410
+ elif batch_size != len(negative_prompt):
411
+ raise ValueError(
412
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
413
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
414
+ " the batch size of `prompt`."
415
+ )
416
+
417
+ negative_prompt_embeds = self._get_t5_prompt_embeds(
418
+ prompt=negative_prompt,
419
+ num_videos_per_prompt=num_videos_per_prompt,
420
+ max_sequence_length=max_sequence_length,
421
+ device=device,
422
+ dtype=dtype,
423
+ )
424
+
425
+ return prompt_embeds, negative_prompt_embeds
426
+
427
+ def prepare_latents(
428
+ self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
429
+ ):
430
+ if isinstance(generator, list) and len(generator) != batch_size:
431
+ raise ValueError(
432
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
433
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
434
+ )
435
+
436
+ shape = (
437
+ batch_size,
438
+ (num_frames - 1) // self.vae_scale_factor_temporal + 1,
439
+ num_channels_latents,
440
+ height // self.vae_scale_factor_spatial,
441
+ width // self.vae_scale_factor_spatial,
442
+ )
443
+
444
+ if latents is None:
445
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
446
+ else:
447
+ latents = latents.to(device)
448
+
449
+ # scale the initial noise by the standard deviation required by the scheduler
450
+ latents = latents * self.scheduler.init_noise_sigma
451
+ return latents
452
+
453
+ def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
454
+ latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width]
455
+ latents = 1 / self.vae.config.scaling_factor * latents
456
+
457
+ frames = self.vae.decode(latents).sample
458
+ frames = (frames / 2 + 0.5).clamp(0, 1)
459
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
460
+ frames = frames.cpu().float().numpy()
461
+ return frames
462
+
463
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
464
+ def prepare_extra_step_kwargs(self, generator, eta):
465
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
466
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
467
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
468
+ # and should be between [0, 1]
469
+
470
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
471
+ extra_step_kwargs = {}
472
+ if accepts_eta:
473
+ extra_step_kwargs["eta"] = eta
474
+
475
+ # check if the scheduler accepts generator
476
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
477
+ if accepts_generator:
478
+ extra_step_kwargs["generator"] = generator
479
+ return extra_step_kwargs
480
+
481
+ # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs
482
+ def check_inputs(
483
+ self,
484
+ prompt,
485
+ height,
486
+ width,
487
+ negative_prompt,
488
+ callback_on_step_end_tensor_inputs,
489
+ prompt_embeds=None,
490
+ negative_prompt_embeds=None,
491
+ ):
492
+ if height % 8 != 0 or width % 8 != 0:
493
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
494
+
495
+ if callback_on_step_end_tensor_inputs is not None and not all(
496
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
497
+ ):
498
+ raise ValueError(
499
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
500
+ )
501
+ if prompt is not None and prompt_embeds is not None:
502
+ raise ValueError(
503
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
504
+ " only forward one of the two."
505
+ )
506
+ elif prompt is None and prompt_embeds is None:
507
+ raise ValueError(
508
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
509
+ )
510
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
511
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
512
+
513
+ if prompt is not None and negative_prompt_embeds is not None:
514
+ raise ValueError(
515
+ f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
516
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
517
+ )
518
+
519
+ if negative_prompt is not None and negative_prompt_embeds is not None:
520
+ raise ValueError(
521
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
522
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
523
+ )
524
+
525
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
526
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
527
+ raise ValueError(
528
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
529
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
530
+ f" {negative_prompt_embeds.shape}."
531
+ )
532
+
533
    def fuse_qkv_projections(self) -> None:
        r"""Enables fused QKV projections.

        Delegates to the transformer's ``fuse_qkv_projections`` and records the
        state in ``self.fusing_transformer`` so ``unfuse_qkv_projections`` can
        undo it later.
        """
        # Record fusion state before delegating to the transformer.
        self.fusing_transformer = True
        self.transformer.fuse_qkv_projections()
537
+
538
+ def unfuse_qkv_projections(self) -> None:
539
+ r"""Disable QKV projection fusion if enabled."""
540
+ if not self.fusing_transformer:
541
+ logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
542
+ else:
543
+ self.transformer.unfuse_qkv_projections()
544
+ self.fusing_transformer = False
545
+
546
+ def _prepare_rotary_positional_embeddings(
547
+ self,
548
+ height: int,
549
+ width: int,
550
+ num_frames: int,
551
+ device: torch.device,
552
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
553
+ grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
554
+ grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
555
+
556
+ p = self.transformer.config.patch_size
557
+ p_t = self.transformer.config.patch_size_t
558
+
559
+ base_size_width = self.transformer.config.sample_width // p
560
+ base_size_height = self.transformer.config.sample_height // p
561
+
562
+ if p_t is None:
563
+ # CogVideoX 1.0
564
+ grid_crops_coords = get_resize_crop_region_for_grid(
565
+ (grid_height, grid_width), base_size_width, base_size_height
566
+ )
567
+ freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
568
+ embed_dim=self.transformer.config.attention_head_dim,
569
+ crops_coords=grid_crops_coords,
570
+ grid_size=(grid_height, grid_width),
571
+ temporal_size=num_frames,
572
+ )
573
+ else:
574
+ # CogVideoX 1.5
575
+ base_num_frames = (num_frames + p_t - 1) // p_t
576
+
577
+ freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
578
+ embed_dim=self.transformer.config.attention_head_dim,
579
+ crops_coords=None,
580
+ grid_size=(grid_height, grid_width),
581
+ temporal_size=base_num_frames,
582
+ grid_type="slice",
583
+ max_size=(base_size_height, base_size_width),
584
+ )
585
+
586
+ freqs_cos = freqs_cos.to(device=device)
587
+ freqs_sin = freqs_sin.to(device=device)
588
+ return freqs_cos, freqs_sin
589
+
590
    @property
    def guidance_scale(self):
        # Read-only view of the backing field `_guidance_scale`
        # (presumably assigned in `__call__` — confirm; `__call__` is defined below).
        return self._guidance_scale

    @property
    def num_timesteps(self):
        # Read-only view of `_num_timesteps` (the length of the denoising schedule).
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        # Read-only view of `_attention_kwargs` (extra kwargs for attention processors).
        return self._attention_kwargs

    @property
    def interrupt(self):
        # Read-only view of `_interrupt`; NOTE(review): looks like a flag that
        # callbacks can set to stop the denoising loop early — confirm in `__call__`.
        return self._interrupt
605
+
606
+ @torch.no_grad()
607
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
608
+ def __call__(
609
+ self,
610
+ prompt: Optional[Union[str, List[str]]] = None,
611
+ negative_prompt: Optional[Union[str, List[str]]] = None,
612
+ height: int = 480,
613
+ width: int = 720,
614
+ num_frames: int = 49,
615
+ num_inference_steps: int = 50,
616
+ timesteps: Optional[List[int]] = None,
617
+ guidance_scale: float = 6,
618
+ use_dynamic_cfg: bool = False,
619
+ num_videos_per_prompt: int = 1,
620
+ eta: float = 0.0,
621
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
622
+ latents: Optional[torch.FloatTensor] = None,
623
+ prompt_embeds: Optional[torch.FloatTensor] = None,
624
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
625
+ output_type: str = "numpy",
626
+ return_dict: bool = False,
627
+ callback_on_step_end: Optional[
628
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
629
+ ] = None,
630
+ attention_kwargs: Optional[Dict[str, Any]] = None,
631
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
632
+ max_sequence_length: int = 226,
633
+ ) -> Union[CogVideoX_Fun_PipelineOutput, Tuple]:
634
+ """
635
+ Function invoked when calling the pipeline for generation.
636
+
637
+ Args:
638
+ prompt (`str` or `List[str]`, *optional*):
639
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
640
+ instead.
641
+ negative_prompt (`str` or `List[str]`, *optional*):
642
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
643
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
644
+ less than `1`).
645
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
646
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
647
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
648
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
649
+ num_frames (`int`, defaults to `48`):
650
+ Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will
651
+ contain 1 extra frame because CogVideoX is conditioned with (num_seconds * fps + 1) frames where
652
+ num_seconds is 6 and fps is 4. However, since videos can be saved at any fps, the only condition that
653
+ needs to be satisfied is that of divisibility mentioned above.
654
+ num_inference_steps (`int`, *optional*, defaults to 50):
655
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
656
+ expense of slower inference.
657
+ timesteps (`List[int]`, *optional*):
658
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
659
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
660
+ passed will be used. Must be in descending order.
661
+ guidance_scale (`float`, *optional*, defaults to 7.0):
662
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
663
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
664
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
665
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
666
+ usually at the expense of lower image quality.
667
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
668
+ The number of videos to generate per prompt.
669
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
670
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
671
+ to make generation deterministic.
672
+ latents (`torch.FloatTensor`, *optional*):
673
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
674
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
675
+ tensor will ge generated by sampling using the supplied random `generator`.
676
+ prompt_embeds (`torch.FloatTensor`, *optional*):
677
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
678
+ provided, text embeddings will be generated from `prompt` input argument.
679
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
680
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
681
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
682
+ argument.
683
+ output_type (`str`, *optional*, defaults to `"pil"`):
684
+ The output format of the generate image. Choose between
685
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
686
+ return_dict (`bool`, *optional*, defaults to `True`):
687
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
688
+ of a plain tuple.
689
+ callback_on_step_end (`Callable`, *optional*):
690
+ A function that calls at the end of each denoising steps during the inference. The function is called
691
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
692
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
693
+ `callback_on_step_end_tensor_inputs`.
694
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
695
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
696
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
697
+ `._callback_tensor_inputs` attribute of your pipeline class.
698
+ max_sequence_length (`int`, defaults to `226`):
699
+ Maximum sequence length in encoded prompt. Must be consistent with
700
+ `self.transformer.config.max_text_seq_length` otherwise may lead to poor results.
701
+
702
+ Examples:
703
+
704
+ Returns:
705
+ [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] or `tuple`:
706
+ [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] if `return_dict` is True, otherwise a
707
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
708
+ """
709
+
710
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
711
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
712
+
713
+ height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
714
+ width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
715
+ num_frames = num_frames or self.transformer.config.sample_frames
716
+
717
+ num_videos_per_prompt = 1
718
+
719
+ # 1. Check inputs. Raise error if not correct
720
+ self.check_inputs(
721
+ prompt,
722
+ height,
723
+ width,
724
+ negative_prompt,
725
+ callback_on_step_end_tensor_inputs,
726
+ prompt_embeds,
727
+ negative_prompt_embeds,
728
+ )
729
+ self._guidance_scale = guidance_scale
730
+ self._attention_kwargs = attention_kwargs
731
+ self._interrupt = False
732
+
733
+ # 2. Default call parameters
734
+ if prompt is not None and isinstance(prompt, str):
735
+ batch_size = 1
736
+ elif prompt is not None and isinstance(prompt, list):
737
+ batch_size = len(prompt)
738
+ else:
739
+ batch_size = prompt_embeds.shape[0]
740
+
741
+ device = self._execution_device
742
+
743
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
744
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
745
+ # corresponds to doing no classifier free guidance.
746
+ do_classifier_free_guidance = guidance_scale > 1.0
747
+
748
+ # 3. Encode input prompt
749
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
750
+ prompt,
751
+ negative_prompt,
752
+ do_classifier_free_guidance,
753
+ num_videos_per_prompt=num_videos_per_prompt,
754
+ prompt_embeds=prompt_embeds,
755
+ negative_prompt_embeds=negative_prompt_embeds,
756
+ max_sequence_length=max_sequence_length,
757
+ device=device,
758
+ )
759
+ if do_classifier_free_guidance:
760
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
761
+
762
+ # 4. Prepare timesteps
763
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
764
+ self._num_timesteps = len(timesteps)
765
+
766
+ # 5. Prepare latents
767
+ latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
768
+
769
+ # For CogVideoX 1.5, the latent frames should be padded to make it divisible by patch_size_t
770
+ patch_size_t = self.transformer.config.patch_size_t
771
+ additional_frames = 0
772
+ if num_frames != 1 and patch_size_t is not None and latent_frames % patch_size_t != 0:
773
+ additional_frames = patch_size_t - latent_frames % patch_size_t
774
+ num_frames += additional_frames * self.vae_scale_factor_temporal
775
+
776
+ latent_channels = self.transformer.config.in_channels
777
+ latents = self.prepare_latents(
778
+ batch_size * num_videos_per_prompt,
779
+ latent_channels,
780
+ num_frames,
781
+ height,
782
+ width,
783
+ prompt_embeds.dtype,
784
+ device,
785
+ generator,
786
+ latents,
787
+ )
788
+
789
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
790
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
791
+
792
+ # 7. Create rotary embeds if required
793
+ image_rotary_emb = (
794
+ self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
795
+ if self.transformer.config.use_rotary_positional_embeddings
796
+ else None
797
+ )
798
+
799
+ # 8. Denoising loop
800
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
801
+
802
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
803
+ # for DPM-solver++
804
+ old_pred_original_sample = None
805
+ for i, t in enumerate(timesteps):
806
+ if self.interrupt:
807
+ continue
808
+
809
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
810
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
811
+
812
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
813
+ timestep = t.expand(latent_model_input.shape[0])
814
+
815
+ # predict noise model_output
816
+ noise_pred = self.transformer(
817
+ hidden_states=latent_model_input,
818
+ encoder_hidden_states=prompt_embeds,
819
+ timestep=timestep,
820
+ image_rotary_emb=image_rotary_emb,
821
+ return_dict=False,
822
+ )[0]
823
+ noise_pred = noise_pred.float()
824
+
825
+ # perform guidance
826
+ if use_dynamic_cfg:
827
+ self._guidance_scale = 1 + guidance_scale * (
828
+ (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
829
+ )
830
+ if do_classifier_free_guidance:
831
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
832
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
833
+
834
+ # compute the previous noisy sample x_t -> x_t-1
835
+ if not isinstance(self.scheduler, CogVideoXDPMScheduler):
836
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
837
+ else:
838
+ latents, old_pred_original_sample = self.scheduler.step(
839
+ noise_pred,
840
+ old_pred_original_sample,
841
+ t,
842
+ timesteps[i - 1] if i > 0 else None,
843
+ latents,
844
+ **extra_step_kwargs,
845
+ return_dict=False,
846
+ )
847
+ latents = latents.to(prompt_embeds.dtype)
848
+
849
+ # call the callback, if provided
850
+ if callback_on_step_end is not None:
851
+ callback_kwargs = {}
852
+ for k in callback_on_step_end_tensor_inputs:
853
+ callback_kwargs[k] = locals()[k]
854
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
855
+
856
+ latents = callback_outputs.pop("latents", latents)
857
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
858
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
859
+
860
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
861
+ progress_bar.update()
862
+
863
+ if output_type == "numpy":
864
+ video = self.decode_latents(latents)
865
+ elif not output_type == "latent":
866
+ video = self.decode_latents(latents)
867
+ video = self.video_processor.postprocess_video(video=video, output_type=output_type)
868
+ else:
869
+ video = latents
870
+
871
+ # Offload all models
872
+ self.maybe_free_model_hooks()
873
+
874
+ if not return_dict:
875
+ video = torch.from_numpy(video)
876
+
877
+ return CogVideoX_Fun_PipelineOutput(videos=video)
robomaster/pipeline/pipeline_cogvideox_control.py ADDED
@@ -0,0 +1,970 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import inspect
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.nn.functional as F
24
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
25
+ from diffusers.image_processor import VaeImageProcessor
26
+ from diffusers.models import (AutoencoderKLCogVideoX,
27
+ CogVideoXTransformer3DModel)
28
+ from diffusers.models.embeddings import (get_1d_rotary_pos_embed,
29
+ get_3d_rotary_pos_embed)
30
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
31
+ from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
32
+ from diffusers.utils import BaseOutput, logging, replace_example_docstring
33
+ from diffusers.utils.torch_utils import randn_tensor
34
+ from diffusers.video_processor import VideoProcessor
35
+ from einops import rearrange
36
+ from transformers import T5EncoderModel, T5Tokenizer
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+
41
+ EXAMPLE_DOC_STRING = """
42
+ Examples:
43
+ ```python
44
+ >>> import torch
45
+ >>> from diffusers import CogVideoX_Fun_Pipeline
46
+ >>> from diffusers.utils import export_to_video
47
+
48
+ >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"
49
+ >>> pipe = CogVideoX_Fun_Pipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16).to("cuda")
50
+ >>> prompt = (
51
+ ... "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
52
+ ... "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
53
+ ... "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
54
+ ... "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
55
+ ... "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
56
+ ... "atmosphere of this unique musical performance."
57
+ ... )
58
+ >>> video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
59
+ >>> export_to_video(video, "output.mp4", fps=8)
60
+ ```
61
+ """
62
+
63
+
64
# Copied from diffusers.models.embeddings.get_3d_rotary_pos_embed
def get_3d_rotary_pos_embed(
    embed_dim,
    crops_coords,
    grid_size,
    temporal_size,
    theta: int = 10000,
    use_real: bool = True,
    grid_type: str = "linspace",
    max_size: Optional[Tuple[int, int]] = None,
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    RoPE for video tokens with 3D structure.

    Args:
        embed_dim: (`int`):
            The embedding dimension size, corresponding to hidden_size_head.
        crops_coords (`Tuple[int]`):
            The top-left and bottom-right coordinates of the crop. Only used when `grid_type` is `"linspace"`.
        grid_size (`Tuple[int]`):
            The grid size of the spatial positional embedding (height, width).
        temporal_size (`int`):
            The size of the temporal dimension.
        theta (`float`):
            Scaling factor for frequency computation.
        use_real (`bool`):
            Must be `True`; the cos and sin components are returned as separate tensors.
        grid_type (`str`):
            Whether to use "linspace" or "slice" to compute grids.
        max_size (`Tuple[int, int]`, *optional*):
            Maximum (height, width) of the spatial grid; required when `grid_type` is `"slice"`.

    Returns:
        `Tuple[torch.Tensor, torch.Tensor]`: (cos, sin) positional embeddings, each with shape
        `(temporal_size * grid_size[0] * grid_size[1], embed_dim/2)`.
    """
    if use_real is not True:
        raise ValueError(" `use_real = False` is not currently supported for get_3d_rotary_pos_embed")

    if grid_type == "linspace":
        start, stop = crops_coords
        grid_size_h, grid_size_w = grid_size
        grid_h = np.linspace(start[0], stop[0], grid_size_h, endpoint=False, dtype=np.float32)
        grid_w = np.linspace(start[1], stop[1], grid_size_w, endpoint=False, dtype=np.float32)
        # Fix: the original assigned grid_t twice (an np.arange immediately overwritten by an
        # equivalent np.linspace); only the effective linspace form is kept.
        grid_t = np.linspace(0, temporal_size, temporal_size, endpoint=False, dtype=np.float32)
    elif grid_type == "slice":
        if max_size is None:
            # Fail with a clear message instead of an opaque unpack TypeError.
            raise ValueError("`max_size` must be provided when `grid_type` is 'slice'.")
        max_h, max_w = max_size
        grid_size_h, grid_size_w = grid_size
        grid_h = np.arange(max_h, dtype=np.float32)
        grid_w = np.arange(max_w, dtype=np.float32)
        grid_t = np.arange(temporal_size, dtype=np.float32)
    else:
        raise ValueError("Invalid value passed for `grid_type`.")

    # Split the head dimension across the three axes: 1/4 temporal, 3/8 height, 3/8 width.
    dim_t = embed_dim // 4
    dim_h = embed_dim // 8 * 3
    dim_w = embed_dim // 8 * 3

    # Fix: `theta` was accepted but silently ignored; forward it to the 1D RoPE helper.
    # Behavior is unchanged for the default theta=10000 (the helper's own default).
    # Temporal frequencies
    freqs_t = get_1d_rotary_pos_embed(dim_t, grid_t, theta=theta, use_real=True)
    # Spatial frequencies for height and width
    freqs_h = get_1d_rotary_pos_embed(dim_h, grid_h, theta=theta, use_real=True)
    freqs_w = get_1d_rotary_pos_embed(dim_w, grid_w, theta=theta, use_real=True)

    # Broadcast and concatenate temporal and spatial frequencies (height and width) into a 3D tensor.
    def combine_time_height_width(freqs_t, freqs_h, freqs_w):
        freqs_t = freqs_t[:, None, None, :].expand(
            -1, grid_size_h, grid_size_w, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_t
        freqs_h = freqs_h[None, :, None, :].expand(
            temporal_size, -1, grid_size_w, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_h
        freqs_w = freqs_w[None, None, :, :].expand(
            temporal_size, grid_size_h, -1, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_w

        freqs = torch.cat(
            [freqs_t, freqs_h, freqs_w], dim=-1
        )  # temporal_size, grid_size_h, grid_size_w, (dim_t + dim_h + dim_w)
        freqs = freqs.view(
            temporal_size * grid_size_h * grid_size_w, -1
        )  # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w)
        return freqs

    t_cos, t_sin = freqs_t  # both t_cos and t_sin have shape: temporal_size, dim_t
    h_cos, h_sin = freqs_h  # both h_cos and h_sin have shape: grid_size_h, dim_h
    w_cos, w_sin = freqs_w  # both w_cos and w_sin have shape: grid_size_w, dim_w

    if grid_type == "slice":
        # The grids were built over the maximum sizes; slice back to the requested extent.
        t_cos, t_sin = t_cos[:temporal_size], t_sin[:temporal_size]
        h_cos, h_sin = h_cos[:grid_size_h], h_sin[:grid_size_h]
        w_cos, w_sin = w_cos[:grid_size_w], w_sin[:grid_size_w]

    cos = combine_time_height_width(t_cos, h_cos, w_cos)
    sin = combine_time_height_width(t_sin, h_sin, w_sin)
    return cos, sin
157
+
158
+
159
# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
    """Center-crop coordinates for fitting a (height, width) source grid into a target box.

    The source is resized to fill the target while preserving its aspect ratio; the
    returned pair of corners ((top, left), (bottom, right)) is the centered region the
    resized source occupies inside the target grid.
    """
    src_h, src_w = src

    # Fit along whichever axis is the binding constraint.
    if src_h / src_w > tgt_height / tgt_width:
        resize_h, resize_w = tgt_height, int(round(tgt_height / src_h * src_w))
    else:
        resize_h, resize_w = int(round(tgt_width / src_w * src_h)), tgt_width

    # Center the resized region inside the target box.
    top = int(round((tgt_height - resize_h) / 2.0))
    left = int(round((tgt_width - resize_w) / 2.0))

    return (top, left), (top + resize_h, left + resize_w)
176
+
177
+
178
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is
            passed, `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
        the second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")

    def _accepts(arg_name):
        # A scheduler only supports a custom schedule if `set_timesteps` takes that argument.
        return arg_name in set(inspect.signature(scheduler.set_timesteps).parameters.keys())

    if timesteps is not None:
        if not _accepts("timesteps"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        return timesteps, len(timesteps)

    if sigmas is not None:
        if not _accepts("sigmas"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        return timesteps, len(timesteps)

    # Default path: let the scheduler build its own schedule; the caller's requested
    # step count is returned unchanged (it is not recomputed from the schedule length).
    scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
    return scheduler.timesteps, num_inference_steps
236
+
237
+
238
@dataclass
class CogVideoX_Fun_PipelineOutput(BaseOutput):
    r"""
    Output class for CogVideo pipelines.

    Attributes:
        videos (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
            List of video outputs - It can be a nested list of length `batch_size`, with each sub-list containing
            denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of
            shape `(batch_size, num_frames, channels, height, width)`.
    """

    videos: torch.Tensor
251
+
252
+
253
+ class CogVideoX_Fun_Pipeline_Control(DiffusionPipeline):
254
+ r"""
255
+ Pipeline for text-to-video generation using CogVideoX.
256
+
257
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
258
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
259
+
260
+ Args:
261
+ vae ([`AutoencoderKL`]):
262
+ Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
263
+ text_encoder ([`T5EncoderModel`]):
264
+ Frozen text-encoder. CogVideoX_Fun uses
265
+ [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
266
+ [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
267
+ tokenizer (`T5Tokenizer`):
268
+ Tokenizer of class
269
+ [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
270
+ transformer ([`CogVideoXTransformer3DModel`]):
271
+ A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
272
+ scheduler ([`SchedulerMixin`]):
273
+ A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
274
+ """
275
+
276
+ _optional_components = []
277
+ model_cpu_offload_seq = "text_encoder->transformer->vae"
278
+
279
+ _callback_tensor_inputs = [
280
+ "latents",
281
+ "prompt_embeds",
282
+ "negative_prompt_embeds",
283
+ ]
284
+
285
+ def __init__(
286
+ self,
287
+ tokenizer: T5Tokenizer,
288
+ text_encoder: T5EncoderModel,
289
+ vae: AutoencoderKLCogVideoX,
290
+ transformer: CogVideoXTransformer3DModel,
291
+ scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
292
+ ):
293
+ super().__init__()
294
+
295
+ self.register_modules(
296
+ tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
297
+ )
298
+ self.vae_scale_factor_spatial = (
299
+ 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
300
+ )
301
+ self.vae_scale_factor_temporal = (
302
+ self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4
303
+ )
304
+
305
+ self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
306
+
307
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
308
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
309
+ self.mask_processor = VaeImageProcessor(
310
+ vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
311
+ )
312
+
313
+ def _get_t5_prompt_embeds(
314
+ self,
315
+ prompt: Union[str, List[str]] = None,
316
+ num_videos_per_prompt: int = 1,
317
+ max_sequence_length: int = 226,
318
+ device: Optional[torch.device] = None,
319
+ dtype: Optional[torch.dtype] = None,
320
+ ):
321
+ device = device or self._execution_device
322
+ dtype = dtype or self.text_encoder.dtype
323
+
324
+ prompt = [prompt] if isinstance(prompt, str) else prompt
325
+ batch_size = len(prompt)
326
+
327
+ text_inputs = self.tokenizer(
328
+ prompt,
329
+ padding="max_length",
330
+ max_length=max_sequence_length,
331
+ truncation=True,
332
+ add_special_tokens=True,
333
+ return_tensors="pt",
334
+ )
335
+ text_input_ids = text_inputs.input_ids
336
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
337
+
338
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
339
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
340
+ logger.warning(
341
+ "The following part of your input was truncated because `max_sequence_length` is set to "
342
+ f" {max_sequence_length} tokens: {removed_text}"
343
+ )
344
+
345
+ prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
346
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
347
+
348
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
349
+ _, seq_len, _ = prompt_embeds.shape
350
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
351
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
352
+
353
+ return prompt_embeds
354
+
355
+ def encode_prompt(
356
+ self,
357
+ prompt: Union[str, List[str]],
358
+ negative_prompt: Optional[Union[str, List[str]]] = None,
359
+ do_classifier_free_guidance: bool = True,
360
+ num_videos_per_prompt: int = 1,
361
+ prompt_embeds: Optional[torch.Tensor] = None,
362
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
363
+ max_sequence_length: int = 226,
364
+ device: Optional[torch.device] = None,
365
+ dtype: Optional[torch.dtype] = None,
366
+ ):
367
+ r"""
368
+ Encodes the prompt into text encoder hidden states.
369
+
370
+ Args:
371
+ prompt (`str` or `List[str]`, *optional*):
372
+ prompt to be encoded
373
+ negative_prompt (`str` or `List[str]`, *optional*):
374
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
375
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
376
+ less than `1`).
377
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
378
+ Whether to use classifier free guidance or not.
379
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
380
+ Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
381
+ prompt_embeds (`torch.Tensor`, *optional*):
382
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
383
+ provided, text embeddings will be generated from `prompt` input argument.
384
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
385
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
386
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
387
+ argument.
388
+ device: (`torch.device`, *optional*):
389
+ torch device
390
+ dtype: (`torch.dtype`, *optional*):
391
+ torch dtype
392
+ """
393
+ device = device or self._execution_device
394
+
395
+ prompt = [prompt] if isinstance(prompt, str) else prompt
396
+ if prompt is not None:
397
+ batch_size = len(prompt)
398
+ else:
399
+ batch_size = prompt_embeds.shape[0]
400
+
401
+ if prompt_embeds is None:
402
+ prompt_embeds = self._get_t5_prompt_embeds(
403
+ prompt=prompt,
404
+ num_videos_per_prompt=num_videos_per_prompt,
405
+ max_sequence_length=max_sequence_length,
406
+ device=device,
407
+ dtype=dtype,
408
+ )
409
+
410
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
411
+ negative_prompt = negative_prompt or ""
412
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
413
+
414
+ if prompt is not None and type(prompt) is not type(negative_prompt):
415
+ raise TypeError(
416
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
417
+ f" {type(prompt)}."
418
+ )
419
+ elif batch_size != len(negative_prompt):
420
+ raise ValueError(
421
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
422
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
423
+ " the batch size of `prompt`."
424
+ )
425
+
426
+ negative_prompt_embeds = self._get_t5_prompt_embeds(
427
+ prompt=negative_prompt,
428
+ num_videos_per_prompt=num_videos_per_prompt,
429
+ max_sequence_length=max_sequence_length,
430
+ device=device,
431
+ dtype=dtype,
432
+ )
433
+
434
+ return prompt_embeds, negative_prompt_embeds
435
+
436
+ def prepare_latents(
437
+ self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
438
+ ):
439
+ shape = (
440
+ batch_size,
441
+ (num_frames - 1) // self.vae_scale_factor_temporal + 1,
442
+ num_channels_latents,
443
+ height // self.vae_scale_factor_spatial,
444
+ width // self.vae_scale_factor_spatial,
445
+ )
446
+ if isinstance(generator, list) and len(generator) != batch_size:
447
+ raise ValueError(
448
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
449
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
450
+ )
451
+
452
+ if latents is None:
453
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
454
+ else:
455
+ latents = latents.to(device)
456
+
457
+ # scale the initial noise by the standard deviation required by the scheduler
458
+ latents = latents * self.scheduler.init_noise_sigma
459
+ return latents
460
+
461
+ def prepare_control_latents(
462
+ self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
463
+ ):
464
+ # resize the mask to latents shape as we concatenate the mask to the latents
465
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
466
+ # and half precision
467
+
468
+ if mask is not None:
469
+ mask = mask.to(device=device, dtype=self.vae.dtype)
470
+ bs = 1
471
+ new_mask = []
472
+ for i in range(0, mask.shape[0], bs):
473
+ mask_bs = mask[i : i + bs]
474
+ mask_bs = self.vae.encode(mask_bs)[0]
475
+ mask_bs = mask_bs.mode()
476
+ new_mask.append(mask_bs)
477
+ mask = torch.cat(new_mask, dim = 0)
478
+ mask = mask * self.vae.config.scaling_factor
479
+
480
+ if masked_image is not None:
481
+ masked_image = masked_image.to(device=device, dtype=self.vae.dtype)
482
+ bs = 1
483
+ new_mask_pixel_values = []
484
+ for i in range(0, masked_image.shape[0], bs):
485
+ mask_pixel_values_bs = masked_image[i : i + bs]
486
+ mask_pixel_values_bs = self.vae.encode(mask_pixel_values_bs)[0]
487
+ mask_pixel_values_bs = mask_pixel_values_bs.mode()
488
+ new_mask_pixel_values.append(mask_pixel_values_bs)
489
+ masked_image_latents = torch.cat(new_mask_pixel_values, dim = 0)
490
+ masked_image_latents = masked_image_latents * self.vae.config.scaling_factor
491
+ else:
492
+ masked_image_latents = None
493
+
494
+ return mask, masked_image_latents
495
+
496
+ def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
497
+ latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width]
498
+ latents = 1 / self.vae.config.scaling_factor * latents
499
+
500
+ frames = self.vae.decode(latents).sample
501
+ frames = (frames / 2 + 0.5).clamp(0, 1)
502
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
503
+ frames = frames.cpu().float().numpy()
504
+ return frames
505
+
506
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
507
+ def prepare_extra_step_kwargs(self, generator, eta):
508
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
509
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
510
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
511
+ # and should be between [0, 1]
512
+
513
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
514
+ extra_step_kwargs = {}
515
+ if accepts_eta:
516
+ extra_step_kwargs["eta"] = eta
517
+
518
+ # check if the scheduler accepts generator
519
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
520
+ if accepts_generator:
521
+ extra_step_kwargs["generator"] = generator
522
+ return extra_step_kwargs
523
+
524
+ # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs
525
+ def check_inputs(
526
+ self,
527
+ prompt,
528
+ height,
529
+ width,
530
+ negative_prompt,
531
+ callback_on_step_end_tensor_inputs,
532
+ prompt_embeds=None,
533
+ negative_prompt_embeds=None,
534
+ ):
535
+ if height % 8 != 0 or width % 8 != 0:
536
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
537
+
538
+ if callback_on_step_end_tensor_inputs is not None and not all(
539
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
540
+ ):
541
+ raise ValueError(
542
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
543
+ )
544
+ if prompt is not None and prompt_embeds is not None:
545
+ raise ValueError(
546
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
547
+ " only forward one of the two."
548
+ )
549
+ elif prompt is None and prompt_embeds is None:
550
+ raise ValueError(
551
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
552
+ )
553
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
554
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
555
+
556
+ if prompt is not None and negative_prompt_embeds is not None:
557
+ raise ValueError(
558
+ f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
559
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
560
+ )
561
+
562
+ if negative_prompt is not None and negative_prompt_embeds is not None:
563
+ raise ValueError(
564
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
565
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
566
+ )
567
+
568
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
569
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
570
+ raise ValueError(
571
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
572
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
573
+ f" {negative_prompt_embeds.shape}."
574
+ )
575
+
576
    def fuse_qkv_projections(self) -> None:
        r"""Enables fused QKV projections."""
        # Remember the fusion state so `unfuse_qkv_projections` can warn / no-op correctly.
        self.fusing_transformer = True
        self.transformer.fuse_qkv_projections()
580
+
581
+ def unfuse_qkv_projections(self) -> None:
582
+ r"""Disable QKV projection fusion if enabled."""
583
+ if not self.fusing_transformer:
584
+ logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
585
+ else:
586
+ self.transformer.unfuse_qkv_projections()
587
+ self.fusing_transformer = False
588
+
589
+ def _prepare_rotary_positional_embeddings(
590
+ self,
591
+ height: int,
592
+ width: int,
593
+ num_frames: int,
594
+ device: torch.device,
595
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
596
+ grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
597
+ grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
598
+
599
+ p = self.transformer.config.patch_size
600
+ p_t = self.transformer.config.patch_size_t
601
+
602
+ base_size_width = self.transformer.config.sample_width // p
603
+ base_size_height = self.transformer.config.sample_height // p
604
+
605
+ if p_t is None:
606
+ # CogVideoX 1.0
607
+ grid_crops_coords = get_resize_crop_region_for_grid(
608
+ (grid_height, grid_width), base_size_width, base_size_height
609
+ )
610
+ freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
611
+ embed_dim=self.transformer.config.attention_head_dim,
612
+ crops_coords=grid_crops_coords,
613
+ grid_size=(grid_height, grid_width),
614
+ temporal_size=num_frames,
615
+ )
616
+ else:
617
+ # CogVideoX 1.5
618
+ base_num_frames = (num_frames + p_t - 1) // p_t
619
+
620
+ freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
621
+ embed_dim=self.transformer.config.attention_head_dim,
622
+ crops_coords=None,
623
+ grid_size=(grid_height, grid_width),
624
+ temporal_size=base_num_frames,
625
+ grid_type="slice",
626
+ max_size=(base_size_height, base_size_width),
627
+ )
628
+
629
+ freqs_cos = freqs_cos.to(device=device)
630
+ freqs_sin = freqs_sin.to(device=device)
631
+ return freqs_cos, freqs_sin
632
+
633
    @property
    def guidance_scale(self):
        # Current CFG scale; updated per-step inside `__call__` when `use_dynamic_cfg` is enabled.
        return self._guidance_scale

    @property
    def num_timesteps(self):
        # Number of scheduler timesteps prepared by the latest `__call__`.
        return self._num_timesteps

    @property
    def interrupt(self):
        # When True, the denoising loop in `__call__` skips the remaining steps.
        return self._interrupt
644
+
645
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
646
+ def get_timesteps(self, num_inference_steps, strength, device):
647
+ # get the original timestep using init_timestep
648
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
649
+
650
+ t_start = max(num_inference_steps - init_timestep, 0)
651
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
652
+
653
+ return timesteps, num_inference_steps - t_start
654
+
655
+ @torch.no_grad()
656
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
657
+ def __call__(
658
+ self,
659
+ prompt: Optional[Union[str, List[str]]] = None,
660
+ negative_prompt: Optional[Union[str, List[str]]] = None,
661
+ height: int = 480,
662
+ width: int = 720,
663
+ video: Union[torch.FloatTensor] = None,
664
+ control_video: Union[torch.FloatTensor] = None,
665
+ num_frames: int = 49,
666
+ num_inference_steps: int = 50,
667
+ timesteps: Optional[List[int]] = None,
668
+ guidance_scale: float = 6,
669
+ use_dynamic_cfg: bool = False,
670
+ num_videos_per_prompt: int = 1,
671
+ eta: float = 0.0,
672
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
673
+ latents: Optional[torch.FloatTensor] = None,
674
+ prompt_embeds: Optional[torch.FloatTensor] = None,
675
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
676
+ output_type: str = "numpy",
677
+ return_dict: bool = False,
678
+ callback_on_step_end: Optional[
679
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
680
+ ] = None,
681
+ attention_kwargs: Optional[Dict[str, Any]] = None,
682
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
683
+ max_sequence_length: int = 226,
684
+ comfyui_progressbar: bool = False,
685
+ ) -> Union[CogVideoX_Fun_PipelineOutput, Tuple]:
686
+ """
687
+ Function invoked when calling the pipeline for generation.
688
+
689
+ Args:
690
+ prompt (`str` or `List[str]`, *optional*):
691
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
692
+ instead.
693
+ negative_prompt (`str` or `List[str]`, *optional*):
694
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
695
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
696
+ less than `1`).
697
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
698
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
699
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
700
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
701
+ num_frames (`int`, defaults to `48`):
702
+ Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will
703
+ contain 1 extra frame because CogVideoX_Fun is conditioned with (num_seconds * fps + 1) frames where
704
+ num_seconds is 6 and fps is 4. However, since videos can be saved at any fps, the only condition that
705
+ needs to be satisfied is that of divisibility mentioned above.
706
+ num_inference_steps (`int`, *optional*, defaults to 50):
707
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
708
+ expense of slower inference.
709
+ timesteps (`List[int]`, *optional*):
710
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
711
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
712
+ passed will be used. Must be in descending order.
713
+ guidance_scale (`float`, *optional*, defaults to 7.0):
714
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
715
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
716
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
717
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
718
+ usually at the expense of lower image quality.
719
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
720
+ The number of videos to generate per prompt.
721
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
722
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
723
+ to make generation deterministic.
724
+ latents (`torch.FloatTensor`, *optional*):
725
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
726
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
727
+ tensor will ge generated by sampling using the supplied random `generator`.
728
+ prompt_embeds (`torch.FloatTensor`, *optional*):
729
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
730
+ provided, text embeddings will be generated from `prompt` input argument.
731
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
732
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
733
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
734
+ argument.
735
+ output_type (`str`, *optional*, defaults to `"pil"`):
736
+ The output format of the generate image. Choose between
737
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
738
+ return_dict (`bool`, *optional*, defaults to `True`):
739
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
740
+ of a plain tuple.
741
+ callback_on_step_end (`Callable`, *optional*):
742
+ A function that calls at the end of each denoising steps during the inference. The function is called
743
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
744
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
745
+ `callback_on_step_end_tensor_inputs`.
746
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
747
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
748
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
749
+ `._callback_tensor_inputs` attribute of your pipeline class.
750
+ max_sequence_length (`int`, defaults to `226`):
751
+ Maximum sequence length in encoded prompt. Must be consistent with
752
+ `self.transformer.config.max_text_seq_length` otherwise may lead to poor results.
753
+
754
+ Examples:
755
+
756
+ Returns:
757
+ [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] or `tuple`:
758
+ [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] if `return_dict` is True, otherwise a
759
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
760
+ """
761
+
762
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
763
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
764
+
765
+ height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
766
+ width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
767
+ num_frames = num_frames or self.transformer.config.sample_frames
768
+
769
+ num_videos_per_prompt = 1
770
+
771
+ # 1. Check inputs. Raise error if not correct
772
+ self.check_inputs(
773
+ prompt,
774
+ height,
775
+ width,
776
+ negative_prompt,
777
+ callback_on_step_end_tensor_inputs,
778
+ prompt_embeds,
779
+ negative_prompt_embeds,
780
+ )
781
+ self._guidance_scale = guidance_scale
782
+ self._attention_kwargs = attention_kwargs
783
+ self._interrupt = False
784
+
785
+ # 2. Default call parameters
786
+ if prompt is not None and isinstance(prompt, str):
787
+ batch_size = 1
788
+ elif prompt is not None and isinstance(prompt, list):
789
+ batch_size = len(prompt)
790
+ else:
791
+ batch_size = prompt_embeds.shape[0]
792
+
793
+ device = self._execution_device
794
+
795
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
796
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
797
+ # corresponds to doing no classifier free guidance.
798
+ do_classifier_free_guidance = guidance_scale > 1.0
799
+
800
+ # 3. Encode input prompt
801
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
802
+ prompt,
803
+ negative_prompt,
804
+ do_classifier_free_guidance,
805
+ num_videos_per_prompt=num_videos_per_prompt,
806
+ prompt_embeds=prompt_embeds,
807
+ negative_prompt_embeds=negative_prompt_embeds,
808
+ max_sequence_length=max_sequence_length,
809
+ device=device,
810
+ )
811
+ if do_classifier_free_guidance:
812
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
813
+
814
+ # 4. Prepare timesteps
815
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
816
+ self._num_timesteps = len(timesteps)
817
+ if comfyui_progressbar:
818
+ from comfy.utils import ProgressBar
819
+ pbar = ProgressBar(num_inference_steps + 2)
820
+
821
+ if control_video is not None:
822
+ video_length = control_video.shape[2]
823
+ control_video = self.image_processor.preprocess(rearrange(control_video, "b c f h w -> (b f) c h w"), height=height, width=width)
824
+ control_video = control_video.to(dtype=torch.float32)
825
+ control_video = rearrange(control_video, "(b f) c h w -> b c f h w", f=video_length)
826
+ else:
827
+ control_video = None
828
+
829
+ # Magvae needs the number of frames to be 4n + 1.
830
+ local_latent_length = (num_frames - 1) // self.vae_scale_factor_temporal + 1
831
+ # For CogVideoX 1.5, the latent frames should be clipped to make it divisible by patch_size_t
832
+ patch_size_t = self.transformer.config.patch_size_t
833
+ additional_frames = 0
834
+ if patch_size_t is not None and local_latent_length % patch_size_t != 0:
835
+ additional_frames = local_latent_length % patch_size_t
836
+ num_frames -= additional_frames * self.vae_scale_factor_temporal
837
+ if num_frames <= 0:
838
+ num_frames = 1
839
+ if video_length > num_frames:
840
+ logger.warning("The length of condition video is not right, the latent frames should be clipped to make it divisible by patch_size_t. ")
841
+ video_length = num_frames
842
+ control_video = control_video[:, :, :video_length]
843
+
844
+ # 5. Prepare latents.
845
+ latent_channels = self.vae.config.latent_channels
846
+ latents = self.prepare_latents(
847
+ batch_size * num_videos_per_prompt,
848
+ latent_channels,
849
+ num_frames,
850
+ height,
851
+ width,
852
+ prompt_embeds.dtype,
853
+ device,
854
+ generator,
855
+ latents,
856
+ )
857
+ if comfyui_progressbar:
858
+ pbar.update(1)
859
+
860
+ control_video_latents = self.prepare_control_latents(
861
+ None,
862
+ control_video,
863
+ batch_size,
864
+ height,
865
+ width,
866
+ prompt_embeds.dtype,
867
+ device,
868
+ generator,
869
+ do_classifier_free_guidance
870
+ )[1]
871
+ control_video_latents_input = (
872
+ torch.cat([control_video_latents] * 2) if do_classifier_free_guidance else control_video_latents
873
+ )
874
+ control_latents = rearrange(control_video_latents_input, "b c f h w -> b f c h w")
875
+
876
+ if comfyui_progressbar:
877
+ pbar.update(1)
878
+
879
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
880
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
881
+
882
+ # 7. Create rotary embeds if required
883
+ image_rotary_emb = (
884
+ self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
885
+ if self.transformer.config.use_rotary_positional_embeddings
886
+ else None
887
+ )
888
+
889
+ # 8. Denoising loop
890
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
891
+
892
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
893
+ # for DPM-solver++
894
+ old_pred_original_sample = None
895
+ for i, t in enumerate(timesteps):
896
+ if self.interrupt:
897
+ continue
898
+
899
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
900
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
901
+
902
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
903
+ timestep = t.expand(latent_model_input.shape[0])
904
+
905
+ # predict noise model_output
906
+ noise_pred = self.transformer(
907
+ hidden_states=latent_model_input,
908
+ encoder_hidden_states=prompt_embeds,
909
+ timestep=timestep,
910
+ image_rotary_emb=image_rotary_emb,
911
+ return_dict=False,
912
+ control_latents=control_latents,
913
+ )[0]
914
+ noise_pred = noise_pred.float()
915
+
916
+ # perform guidance
917
+ if use_dynamic_cfg:
918
+ self._guidance_scale = 1 + guidance_scale * (
919
+ (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
920
+ )
921
+ if do_classifier_free_guidance:
922
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
923
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
924
+
925
+ # compute the previous noisy sample x_t -> x_t-1
926
+ if not isinstance(self.scheduler, CogVideoXDPMScheduler):
927
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
928
+ else:
929
+ latents, old_pred_original_sample = self.scheduler.step(
930
+ noise_pred,
931
+ old_pred_original_sample,
932
+ t,
933
+ timesteps[i - 1] if i > 0 else None,
934
+ latents,
935
+ **extra_step_kwargs,
936
+ return_dict=False,
937
+ )
938
+ latents = latents.to(prompt_embeds.dtype)
939
+
940
+ # call the callback, if provided
941
+ if callback_on_step_end is not None:
942
+ callback_kwargs = {}
943
+ for k in callback_on_step_end_tensor_inputs:
944
+ callback_kwargs[k] = locals()[k]
945
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
946
+
947
+ latents = callback_outputs.pop("latents", latents)
948
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
949
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
950
+
951
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
952
+ progress_bar.update()
953
+ if comfyui_progressbar:
954
+ pbar.update(1)
955
+
956
+ if output_type == "numpy":
957
+ video = self.decode_latents(latents)
958
+ elif not output_type == "latent":
959
+ video = self.decode_latents(latents)
960
+ video = self.video_processor.postprocess_video(video=video, output_type=output_type)
961
+ else:
962
+ video = latents
963
+
964
+ # Offload all models
965
+ self.maybe_free_model_hooks()
966
+
967
+ if not return_dict:
968
+ video = torch.from_numpy(video)
969
+
970
+ return CogVideoX_Fun_PipelineOutput(videos=video)
robomaster/pipeline/pipeline_cogvideox_inpaint.py ADDED
@@ -0,0 +1,1156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import inspect
17
+ import math
18
+ import numpy as np
19
+ from dataclasses import dataclass
20
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn.functional as F
24
+ from einops import rearrange
25
+ from transformers import T5EncoderModel, T5Tokenizer
26
+
27
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
28
+ from diffusers.models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
29
+ from diffusers.models.embeddings import get_1d_rotary_pos_embed
30
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
31
+ from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
32
+ from diffusers.utils import BaseOutput, logging, replace_example_docstring
33
+ from diffusers.utils.torch_utils import randn_tensor
34
+ from diffusers.video_processor import VideoProcessor
35
+ from diffusers.image_processor import VaeImageProcessor
36
+ from einops import rearrange
37
+
38
+
39
# Module-level logger, following the diffusers convention.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


# Usage example injected into `__call__`'s docstring via `replace_example_docstring`.
EXAMPLE_DOC_STRING = """
    Examples:
        ```python
        >>> import torch
        >>> from diffusers import CogVideoX_Fun_Pipeline
        >>> from diffusers.utils import export_to_video

        >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"
        >>> pipe = CogVideoX_Fun_Pipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16).to("cuda")
        >>> prompt = (
        ...     "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
        ...     "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
        ...     "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
        ...     "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
        ...     "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
        ...     "atmosphere of this unique musical performance."
        ... )
        >>> video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
        >>> export_to_video(video, "output.mp4", fps=8)
        ```
"""
63
+
64
# Copied from diffusers.models.embeddings.get_3d_rotary_pos_embed
def get_3d_rotary_pos_embed(
    embed_dim,
    crops_coords,
    grid_size,
    temporal_size,
    theta: int = 10000,
    use_real: bool = True,
    grid_type: str = "linspace",
    max_size: Optional[Tuple[int, int]] = None,
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    RoPE for video tokens with 3D structure.

    Args:
        embed_dim: (`int`):
            The embedding dimension size, corresponding to hidden_size_head.
        crops_coords (`Tuple[int]`):
            The top-left and bottom-right coordinates of the crop. Only used when `grid_type="linspace"`.
        grid_size (`Tuple[int]`):
            The grid size of the spatial positional embedding (height, width).
        temporal_size (`int`):
            The size of the temporal dimension.
        theta (`float`):
            Scaling factor for frequency computation. NOTE(review): currently unused — the per-axis
            frequencies are computed by `get_1d_rotary_pos_embed` with its own default; kept for
            interface compatibility.
        use_real (`bool`):
            Must be `True`; the function returns separate (cos, sin) tensors.
        grid_type (`str`):
            Whether to use "linspace" or "slice" to compute grids.
        max_size (`Tuple[int, int]`, *optional*):
            Maximum (height, width) of the spatial grid; required when `grid_type="slice"`.

    Returns:
        `Tuple[torch.Tensor, torch.Tensor]`: (cos, sin) positional embeddings, each with shape
        `(temporal_size * grid_size[0] * grid_size[1], embed_dim/2)`.
    """
    if use_real is not True:
        raise ValueError(" `use_real = False` is not currently supported for get_3d_rotary_pos_embed")

    if grid_type == "linspace":
        start, stop = crops_coords
        grid_size_h, grid_size_w = grid_size
        grid_h = np.linspace(start[0], stop[0], grid_size_h, endpoint=False, dtype=np.float32)
        grid_w = np.linspace(start[1], stop[1], grid_size_w, endpoint=False, dtype=np.float32)
        # Fixed: removed a dead `grid_t = np.arange(...)` assignment that was immediately
        # overwritten by the linspace below.
        grid_t = np.linspace(0, temporal_size, temporal_size, endpoint=False, dtype=np.float32)
    elif grid_type == "slice":
        max_h, max_w = max_size
        grid_size_h, grid_size_w = grid_size
        grid_h = np.arange(max_h, dtype=np.float32)
        grid_w = np.arange(max_w, dtype=np.float32)
        grid_t = np.arange(temporal_size, dtype=np.float32)
    else:
        raise ValueError("Invalid value passed for `grid_type`.")

    # Split the head dimension across axes: 1/4 temporal, 3/8 height, 3/8 width.
    dim_t = embed_dim // 4
    dim_h = embed_dim // 8 * 3
    dim_w = embed_dim // 8 * 3

    # Per-axis 1D rotary frequencies; each call returns a (cos, sin) pair.
    freqs_t = get_1d_rotary_pos_embed(dim_t, grid_t, use_real=True)
    freqs_h = get_1d_rotary_pos_embed(dim_h, grid_h, use_real=True)
    freqs_w = get_1d_rotary_pos_embed(dim_w, grid_w, use_real=True)

    # Broadcast and concatenate temporal and spatial frequencies (height and width) into a 3D tensor.
    def combine_time_height_width(freqs_t, freqs_h, freqs_w):
        freqs_t = freqs_t[:, None, None, :].expand(
            -1, grid_size_h, grid_size_w, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_t
        freqs_h = freqs_h[None, :, None, :].expand(
            temporal_size, -1, grid_size_w, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_h
        freqs_w = freqs_w[None, None, :, :].expand(
            temporal_size, grid_size_h, -1, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_w

        freqs = torch.cat(
            [freqs_t, freqs_h, freqs_w], dim=-1
        )  # temporal_size, grid_size_h, grid_size_w, (dim_t + dim_h + dim_w)
        freqs = freqs.view(
            temporal_size * grid_size_h * grid_size_w, -1
        )  # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w)
        return freqs

    t_cos, t_sin = freqs_t  # both t_cos and t_sin have shape: temporal_size, dim_t
    h_cos, h_sin = freqs_h  # both h_cos and h_sin have shape: grid_size_h, dim_h
    w_cos, w_sin = freqs_w  # both w_cos and w_sin have shape: grid_size_w, dim_w

    if grid_type == "slice":
        # The grids were built over the max size; slice down to the requested geometry.
        t_cos, t_sin = t_cos[:temporal_size], t_sin[:temporal_size]
        h_cos, h_sin = h_cos[:grid_size_h], h_sin[:grid_size_h]
        w_cos, w_sin = w_cos[:grid_size_w], w_sin[:grid_size_w]

    cos = combine_time_height_width(t_cos, h_cos, w_cos)
    sin = combine_time_height_width(t_sin, h_sin, w_sin)
    return cos, sin
157
+
158
+
159
# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
    """Scale `src = (h, w)` to fill a `tgt_height` x `tgt_width` box while preserving the
    aspect ratio, and return the centered crop region as ((top, left), (bottom, right))."""
    src_h, src_w = src

    # If the source is proportionally taller than the target box, pin the height;
    # otherwise pin the width. The other side is scaled to preserve aspect ratio.
    if src_h / src_w > tgt_height / tgt_width:
        resize_height = tgt_height
        resize_width = int(round(tgt_height / src_h * src_w))
    else:
        resize_width = tgt_width
        resize_height = int(round(tgt_width / src_w * src_h))

    # Center the resized region inside the target box.
    crop_top = int(round((tgt_height - resize_height) / 2.0))
    crop_left = int(round((tgt_width - resize_width) / 2.0))

    return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)
176
+
177
+
178
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    """Configure ``scheduler`` via ``set_timesteps`` and return its schedule.

    Exactly one of ``num_inference_steps``, ``timesteps`` or ``sigmas`` drives
    the schedule; custom ``timesteps``/``sigmas`` are only allowed when the
    scheduler's ``set_timesteps`` signature accepts them. Any extra ``kwargs``
    are forwarded to ``set_timesteps``.

    Returns:
        ``(timesteps, num_inference_steps)`` — the schedule tensor and its
        length.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")

    def _accepts(param_name):
        # Check whether this scheduler's `set_timesteps` takes the keyword.
        return param_name in set(inspect.signature(scheduler.set_timesteps).parameters.keys())

    if timesteps is not None:
        if not _accepts("timesteps"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
    elif sigmas is not None:
        if not _accepts("sigmas"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)

    timesteps = scheduler.timesteps
    return timesteps, len(timesteps)
236
+
237
+
238
def resize_mask(mask, latent, process_first_frame_only=True):
    """Trilinearly resize a 5D video ``mask`` to the (frames, height, width) of
    ``latent``.

    When ``process_first_frame_only`` is True, the first frame is resized on
    its own into a single latent frame and the remaining frames fill the rest,
    mirroring the VAE's special handling of the first frame.
    """
    # Unpack to enforce the expected [B, C, F, H, W] layout.
    batch_size, channels, num_frames, height, width = mask.shape
    full_target = list(latent.size()[2:])

    if not process_first_frame_only:
        return F.interpolate(
            mask,
            size=full_target,
            mode='trilinear',
            align_corners=False,
        )

    # First frame maps to exactly one latent frame.
    head_target = list(full_target)
    head_target[0] = 1
    head = F.interpolate(
        mask[:, :, 0:1, :, :],
        size=head_target,
        mode='trilinear',
        align_corners=False,
    )

    # Remaining frames share the rest of the latent frames, if any.
    tail_target = list(full_target)
    tail_target[0] = tail_target[0] - 1
    if tail_target[0] == 0:
        return head
    tail = F.interpolate(
        mask[:, :, 1:, :, :],
        size=tail_target,
        mode='trilinear',
        align_corners=False,
    )
    return torch.cat([head, tail], dim=2)
273
+
274
+
275
def add_noise_to_reference_video(image, ratio=None):
    """Add per-sample Gaussian noise to a reference video tensor.

    When ``ratio`` is None the noise scale is drawn from a log-normal
    distribution (exp of N(-3, 0.5)) per batch element; otherwise ``ratio`` is
    used as a fixed scale. Pixels exactly equal to -1 (padding/invalid) are
    left untouched.
    """
    batch = image.shape[0]
    if ratio is None:
        # Random per-sample sigma: log-normal via exp of a normal sample.
        log_sigma = torch.normal(mean=-3.0, std=0.5, size=(batch,)).to(image.device)
        sigma = torch.exp(log_sigma).to(image.dtype)
    else:
        sigma = ratio * torch.ones((batch,)).to(image.device, image.dtype)

    noise = torch.randn_like(image) * sigma[:, None, None, None, None]
    # Keep padding pixels (== -1) noise-free.
    noise = torch.where(image == -1, torch.zeros_like(image), noise)
    return image + noise
286
+
287
+
288
@dataclass
class CogVideoX_Fun_PipelineOutput(BaseOutput):
    r"""
    Output class for CogVideo pipelines.

    Args:
        video (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
            List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing
            denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape
            `(batch_size, num_frames, channels, height, width)`.
    """

    # Generated video frames; see the class docstring for the accepted shapes.
    videos: torch.Tensor
301
+
302
+
303
+ class CogVideoX_Fun_Pipeline_Inpaint(DiffusionPipeline):
304
+ r"""
305
+ Pipeline for text-to-video generation using CogVideoX.
306
+
307
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
308
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
309
+
310
+ Args:
311
+ vae ([`AutoencoderKL`]):
312
+ Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
313
+ text_encoder ([`T5EncoderModel`]):
314
+ Frozen text-encoder. CogVideoX_Fun uses
315
+ [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
316
+ [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
317
+ tokenizer (`T5Tokenizer`):
318
+ Tokenizer of class
319
+ [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
320
+ transformer ([`CogVideoXTransformer3DModel`]):
321
+ A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
322
+ scheduler ([`SchedulerMixin`]):
323
+ A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
324
+ """
325
+
326
+ _optional_components = []
327
+ model_cpu_offload_seq = "text_encoder->transformer->vae"
328
+
329
+ _callback_tensor_inputs = [
330
+ "latents",
331
+ "prompt_embeds",
332
+ "negative_prompt_embeds",
333
+ ]
334
+
335
+ def __init__(
336
+ self,
337
+ tokenizer: T5Tokenizer,
338
+ text_encoder: T5EncoderModel,
339
+ vae: AutoencoderKLCogVideoX,
340
+ transformer: CogVideoXTransformer3DModel,
341
+ scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
342
+ ):
343
+ super().__init__()
344
+
345
+ self.register_modules(
346
+ tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
347
+ )
348
+ self.vae_scale_factor_spatial = (
349
+ 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
350
+ )
351
+ self.vae_scale_factor_temporal = (
352
+ self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4
353
+ )
354
+
355
+ self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
356
+
357
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
358
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
359
+ self.mask_processor = VaeImageProcessor(
360
+ vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
361
+ )
362
+
363
+ def _get_t5_prompt_embeds(
364
+ self,
365
+ prompt: Union[str, List[str]] = None,
366
+ num_videos_per_prompt: int = 1,
367
+ max_sequence_length: int = 226,
368
+ device: Optional[torch.device] = None,
369
+ dtype: Optional[torch.dtype] = None,
370
+ ):
371
+ device = device or self._execution_device
372
+ dtype = dtype or self.text_encoder.dtype
373
+
374
+ prompt = [prompt] if isinstance(prompt, str) else prompt
375
+ batch_size = len(prompt)
376
+
377
+ text_inputs = self.tokenizer(
378
+ prompt,
379
+ padding="max_length",
380
+ max_length=max_sequence_length,
381
+ truncation=True,
382
+ add_special_tokens=True,
383
+ return_tensors="pt",
384
+ )
385
+ text_input_ids = text_inputs.input_ids
386
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
387
+
388
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
389
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
390
+ logger.warning(
391
+ "The following part of your input was truncated because `max_sequence_length` is set to "
392
+ f" {max_sequence_length} tokens: {removed_text}"
393
+ )
394
+
395
+ prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
396
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
397
+
398
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
399
+ _, seq_len, _ = prompt_embeds.shape
400
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
401
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
402
+
403
+ return prompt_embeds
404
+
405
+ def encode_prompt(
406
+ self,
407
+ prompt: Union[str, List[str]],
408
+ negative_prompt: Optional[Union[str, List[str]]] = None,
409
+ do_classifier_free_guidance: bool = True,
410
+ num_videos_per_prompt: int = 1,
411
+ prompt_embeds: Optional[torch.Tensor] = None,
412
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
413
+ max_sequence_length: int = 226,
414
+ device: Optional[torch.device] = None,
415
+ dtype: Optional[torch.dtype] = None,
416
+ ):
417
+ r"""
418
+ Encodes the prompt into text encoder hidden states.
419
+
420
+ Args:
421
+ prompt (`str` or `List[str]`, *optional*):
422
+ prompt to be encoded
423
+ negative_prompt (`str` or `List[str]`, *optional*):
424
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
425
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
426
+ less than `1`).
427
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
428
+ Whether to use classifier free guidance or not.
429
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
430
+ Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
431
+ prompt_embeds (`torch.Tensor`, *optional*):
432
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
433
+ provided, text embeddings will be generated from `prompt` input argument.
434
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
435
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
436
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
437
+ argument.
438
+ device: (`torch.device`, *optional*):
439
+ torch device
440
+ dtype: (`torch.dtype`, *optional*):
441
+ torch dtype
442
+ """
443
+ device = device or self._execution_device
444
+
445
+ prompt = [prompt] if isinstance(prompt, str) else prompt
446
+ if prompt is not None:
447
+ batch_size = len(prompt)
448
+ else:
449
+ batch_size = prompt_embeds.shape[0]
450
+
451
+ if prompt_embeds is None:
452
+ prompt_embeds = self._get_t5_prompt_embeds(
453
+ prompt=prompt,
454
+ num_videos_per_prompt=num_videos_per_prompt,
455
+ max_sequence_length=max_sequence_length,
456
+ device=device,
457
+ dtype=dtype,
458
+ )
459
+
460
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
461
+ negative_prompt = negative_prompt or ""
462
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
463
+
464
+ if prompt is not None and type(prompt) is not type(negative_prompt):
465
+ raise TypeError(
466
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
467
+ f" {type(prompt)}."
468
+ )
469
+ elif batch_size != len(negative_prompt):
470
+ raise ValueError(
471
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
472
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
473
+ " the batch size of `prompt`."
474
+ )
475
+
476
+ negative_prompt_embeds = self._get_t5_prompt_embeds(
477
+ prompt=negative_prompt,
478
+ num_videos_per_prompt=num_videos_per_prompt,
479
+ max_sequence_length=max_sequence_length,
480
+ device=device,
481
+ dtype=dtype,
482
+ )
483
+
484
+ return prompt_embeds, negative_prompt_embeds
485
+
486
    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        height,
        width,
        video_length,
        dtype,
        device,
        generator,
        latents=None,
        video=None,
        timestep=None,
        is_strength_max=True,
        return_noise=False,
        return_video_latents=False,
    ):
        """Create the initial latent tensor for the denoising loop.

        When `is_strength_max` is True the latents start as pure noise scaled
        by the scheduler's init sigma; otherwise the encoded `video` is noised
        to `timestep`. Optionally also returns the raw noise and/or the clean
        video latents.

        Returns:
            Tuple `(latents[, noise][, video_latents])` depending on the
            `return_noise` / `return_video_latents` flags.
        """
        # Latent shape: frames are temporally compressed by the VAE. Note the
        # [B, F, C, H, W] layout here — frames come before channels.
        shape = (
            batch_size,
            (video_length - 1) // self.vae_scale_factor_temporal + 1,
            num_channels_latents,
            height // self.vae_scale_factor_spatial,
            width // self.vae_scale_factor_spatial,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        # Video latents are needed either for the caller or to initialize
        # partially-noised latents (strength < 1 with no explicit latents).
        if return_video_latents or (latents is None and not is_strength_max):
            video = video.to(device=device, dtype=self.vae.dtype)

            # Encode one sample at a time to bound peak VAE memory usage.
            bs = 1
            new_video = []
            for i in range(0, video.shape[0], bs):
                video_bs = video[i : i + bs]
                video_bs = self.vae.encode(video_bs)[0]
                video_bs = video_bs.sample()
                new_video.append(video_bs)
            video = torch.cat(new_video, dim = 0)
            video = video * self.vae.config.scaling_factor

            # Broadcast encoded video across the batch, then swap frames in
            # front of channels to match the latent layout above.
            video_latents = video.repeat(batch_size // video.shape[0], 1, 1, 1, 1)
            video_latents = video_latents.to(device=device, dtype=dtype)
            video_latents = rearrange(video_latents, "b c f h w -> b f c h w")

        if latents is None:
            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            # if strength is 1. then initialise the latents to noise, else initial to image + noise
            latents = noise if is_strength_max else self.scheduler.add_noise(video_latents, noise, timestep)
            # if pure noise then scale the initial latents by the Scheduler's init sigma
            latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
        else:
            # Caller-provided latents are treated as the noise source.
            noise = latents.to(device)
            latents = noise * self.scheduler.init_noise_sigma

        # scale the initial noise by the standard deviation required by the scheduler
        outputs = (latents,)

        if return_noise:
            outputs += (noise,)

        if return_video_latents:
            outputs += (video_latents,)

        return outputs
553
+
554
+ def prepare_mask_latents(
555
+ self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance, noise_aug_strength
556
+ ):
557
+ # resize the mask to latents shape as we concatenate the mask to the latents
558
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
559
+ # and half precision
560
+
561
+ if mask is not None:
562
+ mask = mask.to(device=device, dtype=self.vae.dtype)
563
+ bs = 1
564
+ new_mask = []
565
+ for i in range(0, mask.shape[0], bs):
566
+ mask_bs = mask[i : i + bs]
567
+ mask_bs = self.vae.encode(mask_bs)[0]
568
+ mask_bs = mask_bs.mode()
569
+ new_mask.append(mask_bs)
570
+ mask = torch.cat(new_mask, dim = 0)
571
+ mask = mask * self.vae.config.scaling_factor
572
+
573
+ if masked_image is not None:
574
+ if self.transformer.config.add_noise_in_inpaint_model:
575
+ masked_image = add_noise_to_reference_video(masked_image, ratio=noise_aug_strength)
576
+ masked_image = masked_image.to(device=device, dtype=self.vae.dtype)
577
+ bs = 1
578
+ new_mask_pixel_values = []
579
+ for i in range(0, masked_image.shape[0], bs):
580
+ mask_pixel_values_bs = masked_image[i : i + bs]
581
+ mask_pixel_values_bs = self.vae.encode(mask_pixel_values_bs)[0]
582
+ mask_pixel_values_bs = mask_pixel_values_bs.mode()
583
+ new_mask_pixel_values.append(mask_pixel_values_bs)
584
+ masked_image_latents = torch.cat(new_mask_pixel_values, dim = 0)
585
+ masked_image_latents = masked_image_latents * self.vae.config.scaling_factor
586
+ else:
587
+ masked_image_latents = None
588
+
589
+ return mask, masked_image_latents
590
+
591
+ def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
592
+ latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width]
593
+ latents = 1 / self.vae.config.scaling_factor * latents
594
+
595
+ frames = self.vae.decode(latents).sample
596
+ frames = (frames / 2 + 0.5).clamp(0, 1)
597
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
598
+ frames = frames.cpu().float().numpy()
599
+ return frames
600
+
601
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
602
+ def prepare_extra_step_kwargs(self, generator, eta):
603
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
604
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
605
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
606
+ # and should be between [0, 1]
607
+
608
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
609
+ extra_step_kwargs = {}
610
+ if accepts_eta:
611
+ extra_step_kwargs["eta"] = eta
612
+
613
+ # check if the scheduler accepts generator
614
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
615
+ if accepts_generator:
616
+ extra_step_kwargs["generator"] = generator
617
+ return extra_step_kwargs
618
+
619
    # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt,
        callback_on_step_end_tensor_inputs,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        """Validate the user-facing `__call__` arguments.

        Raises:
            ValueError: if dimensions are not divisible by 8, callback tensor
                names are unknown, prompt/embedding combinations conflict, or
                embedding shapes mismatch.
        """
        # Spatial dims must be divisible by 8 (VAE spatial downsampling).
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        # Requested callback tensors must be ones this pipeline tracks.
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )
        # Exactly one of `prompt` / `prompt_embeds` must be supplied.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        # A raw prompt cannot be combined with pre-computed negative embeddings.
        if prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        # Positive/negative embeddings must agree in shape for CFG concat.
        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
671
    def fuse_qkv_projections(self) -> None:
        r"""Enables fused QKV projections."""
        # Track fusion state so `unfuse_qkv_projections` can validate later.
        self.fusing_transformer = True
        self.transformer.fuse_qkv_projections()
675
+
676
    def unfuse_qkv_projections(self) -> None:
        r"""Disable QKV projection fusion if enabled."""
        # NOTE(review): `fusing_transformer` is only set by
        # `fuse_qkv_projections`; calling this first raises AttributeError —
        # consider initializing the flag in `__init__`.
        if not self.fusing_transformer:
            logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
        else:
            self.transformer.unfuse_qkv_projections()
            self.fusing_transformer = False
683
+
684
+ def _prepare_rotary_positional_embeddings(
685
+ self,
686
+ height: int,
687
+ width: int,
688
+ num_frames: int,
689
+ device: torch.device,
690
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
691
+ grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
692
+ grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
693
+
694
+ p = self.transformer.config.patch_size
695
+ p_t = self.transformer.config.patch_size_t
696
+
697
+ base_size_width = self.transformer.config.sample_width // p
698
+ base_size_height = self.transformer.config.sample_height // p
699
+
700
+ if p_t is None:
701
+ # CogVideoX 1.0
702
+ grid_crops_coords = get_resize_crop_region_for_grid(
703
+ (grid_height, grid_width), base_size_width, base_size_height
704
+ )
705
+ freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
706
+ embed_dim=self.transformer.config.attention_head_dim,
707
+ crops_coords=grid_crops_coords,
708
+ grid_size=(grid_height, grid_width),
709
+ temporal_size=num_frames,
710
+ )
711
+ else:
712
+ # CogVideoX 1.5
713
+ base_num_frames = (num_frames + p_t - 1) // p_t
714
+
715
+ freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
716
+ embed_dim=self.transformer.config.attention_head_dim,
717
+ crops_coords=None,
718
+ grid_size=(grid_height, grid_width),
719
+ temporal_size=base_num_frames,
720
+ grid_type="slice",
721
+ max_size=(base_size_height, base_size_width),
722
+ )
723
+
724
+ freqs_cos = freqs_cos.to(device=device)
725
+ freqs_sin = freqs_sin.to(device=device)
726
+ return freqs_cos, freqs_sin
727
+
728
    @property
    def guidance_scale(self):
        # Classifier-free guidance weight `w` set by `__call__`.
        return self._guidance_scale
731
+
732
    @property
    def num_timesteps(self):
        # Number of denoising steps of the current/last run, set by `__call__`.
        return self._num_timesteps
735
+
736
    @property
    def attention_kwargs(self):
        # Extra kwargs forwarded to the transformer's attention processors.
        return self._attention_kwargs
739
+
740
    @property
    def interrupt(self):
        # Cooperative cancellation flag checked inside the denoising loop.
        return self._interrupt
743
+
744
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
745
+ def get_timesteps(self, num_inference_steps, strength, device):
746
+ # get the original timestep using init_timestep
747
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
748
+
749
+ t_start = max(num_inference_steps - init_timestep, 0)
750
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
751
+
752
+ return timesteps, num_inference_steps - t_start
753
+
754
+ @torch.no_grad()
755
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
756
+ def __call__(
757
+ self,
758
+ prompt: Optional[Union[str, List[str]]] = None,
759
+ negative_prompt: Optional[Union[str, List[str]]] = None,
760
+ height: int = 480,
761
+ width: int = 720,
762
+ video: Union[torch.FloatTensor] = None,
763
+ mask_video: Union[torch.FloatTensor] = None,
764
+ masked_video_latents: Union[torch.FloatTensor] = None,
765
+ flow_latents: Optional[torch.FloatTensor] = None,
766
+ num_frames: int = 49,
767
+ num_inference_steps: int = 50,
768
+ timesteps: Optional[List[int]] = None,
769
+ guidance_scale: float = 6,
770
+ use_dynamic_cfg: bool = False,
771
+ num_videos_per_prompt: int = 1,
772
+ eta: float = 0.0,
773
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
774
+ latents: Optional[torch.FloatTensor] = None,
775
+ prompt_embeds: Optional[torch.FloatTensor] = None,
776
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
777
+ output_type: str = "numpy",
778
+ return_dict: bool = False,
779
+ callback_on_step_end: Optional[
780
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
781
+ ] = None,
782
+ attention_kwargs: Optional[Dict[str, Any]] = None,
783
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
784
+ max_sequence_length: int = 226,
785
+ strength: float = 1,
786
+ noise_aug_strength: float = 0.0563,
787
+ comfyui_progressbar: bool = False,
788
+ ) -> Union[CogVideoX_Fun_PipelineOutput, Tuple]:
789
+ """
790
+ Function invoked when calling the pipeline for generation.
791
+
792
+ Args:
793
+ prompt (`str` or `List[str]`, *optional*):
794
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
795
+ instead.
796
+ negative_prompt (`str` or `List[str]`, *optional*):
797
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
798
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
799
+ less than `1`).
800
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
801
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
802
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
803
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
804
+ num_frames (`int`, defaults to `48`):
805
+ Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will
806
+ contain 1 extra frame because CogVideoX_Fun is conditioned with (num_seconds * fps + 1) frames where
807
+ num_seconds is 6 and fps is 4. However, since videos can be saved at any fps, the only condition that
808
+ needs to be satisfied is that of divisibility mentioned above.
809
+ num_inference_steps (`int`, *optional*, defaults to 50):
810
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
811
+ expense of slower inference.
812
+ timesteps (`List[int]`, *optional*):
813
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
814
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
815
+ passed will be used. Must be in descending order.
816
+ guidance_scale (`float`, *optional*, defaults to 7.0):
817
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
818
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
819
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
820
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
821
+ usually at the expense of lower image quality.
822
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
823
+ The number of videos to generate per prompt.
824
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
825
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
826
+ to make generation deterministic.
827
+ latents (`torch.FloatTensor`, *optional*):
828
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
829
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
830
+ tensor will ge generated by sampling using the supplied random `generator`.
831
+ prompt_embeds (`torch.FloatTensor`, *optional*):
832
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
833
+ provided, text embeddings will be generated from `prompt` input argument.
834
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
835
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
836
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
837
+ argument.
838
+ output_type (`str`, *optional*, defaults to `"pil"`):
839
+ The output format of the generate image. Choose between
840
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
841
+ return_dict (`bool`, *optional*, defaults to `True`):
842
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
843
+ of a plain tuple.
844
+ callback_on_step_end (`Callable`, *optional*):
845
+ A function that calls at the end of each denoising steps during the inference. The function is called
846
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
847
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
848
+ `callback_on_step_end_tensor_inputs`.
849
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
850
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
851
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
852
+ `._callback_tensor_inputs` attribute of your pipeline class.
853
+ max_sequence_length (`int`, defaults to `226`):
854
+ Maximum sequence length in encoded prompt. Must be consistent with
855
+ `self.transformer.config.max_text_seq_length` otherwise may lead to poor results.
856
+
857
+ Examples:
858
+
859
+ Returns:
860
+ [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] or `tuple`:
861
+ [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] if `return_dict` is True, otherwise a
862
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
863
+ """
864
+
865
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
866
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
867
+
868
+ height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
869
+ width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
870
+ num_frames = num_frames or self.transformer.config.sample_frames
871
+
872
+ num_videos_per_prompt = 1
873
+
874
+ # 1. Check inputs. Raise error if not correct
875
+ self.check_inputs(
876
+ prompt,
877
+ height,
878
+ width,
879
+ negative_prompt,
880
+ callback_on_step_end_tensor_inputs,
881
+ prompt_embeds,
882
+ negative_prompt_embeds,
883
+ )
884
+ self._guidance_scale = guidance_scale
885
+ self._attention_kwargs = attention_kwargs
886
+ self._interrupt = False
887
+
888
+ # 2. Default call parameters
889
+ if prompt is not None and isinstance(prompt, str):
890
+ batch_size = 1
891
+ elif prompt is not None and isinstance(prompt, list):
892
+ batch_size = len(prompt)
893
+ else:
894
+ batch_size = prompt_embeds.shape[0]
895
+
896
+ device = self._execution_device
897
+
898
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
899
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
900
+ # corresponds to doing no classifier free guidance.
901
+ do_classifier_free_guidance = guidance_scale > 1.0
902
+
903
+ # 3. Encode input prompt
904
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
905
+ prompt,
906
+ negative_prompt,
907
+ do_classifier_free_guidance,
908
+ num_videos_per_prompt=num_videos_per_prompt,
909
+ prompt_embeds=prompt_embeds,
910
+ negative_prompt_embeds=negative_prompt_embeds,
911
+ max_sequence_length=max_sequence_length,
912
+ device=device,
913
+ )
914
+ if do_classifier_free_guidance:
915
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
916
+
917
+ # 4. set timesteps
918
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
919
+ timesteps, num_inference_steps = self.get_timesteps(
920
+ num_inference_steps=num_inference_steps, strength=strength, device=device
921
+ )
922
+ self._num_timesteps = len(timesteps)
923
+ if comfyui_progressbar:
924
+ from comfy.utils import ProgressBar
925
+ pbar = ProgressBar(num_inference_steps + 2)
926
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
927
+ latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt)
928
+ # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
929
+ is_strength_max = strength == 1.0
930
+
931
+ # 5. Prepare latents.
932
+ if video is not None:
933
+ video_length = video.shape[2]
934
+ init_video = self.image_processor.preprocess(rearrange(video, "b c f h w -> (b f) c h w"), height=height, width=width)
935
+ init_video = init_video.to(dtype=torch.float32)
936
+ init_video = rearrange(init_video, "(b f) c h w -> b c f h w", f=video_length)
937
+ else:
938
+ init_video = None
939
+
940
+ # Magvae needs the number of frames to be 4n + 1.
941
+ local_latent_length = (num_frames - 1) // self.vae_scale_factor_temporal + 1
942
+ # For CogVideoX 1.5, the latent frames should be clipped to make it divisible by patch_size_t
943
+ patch_size_t = self.transformer.config.patch_size_t
944
+ additional_frames = 0
945
+ if patch_size_t is not None and local_latent_length % patch_size_t != 0:
946
+ additional_frames = local_latent_length % patch_size_t
947
+ num_frames -= additional_frames * self.vae_scale_factor_temporal
948
+ if num_frames <= 0:
949
+ num_frames = 1
950
+ if video_length > num_frames:
951
+ logger.warning("The length of condition video is not right, the latent frames should be clipped to make it divisible by patch_size_t. ")
952
+ video_length = num_frames
953
+ video = video[:, :, :video_length]
954
+ init_video = init_video[:, :, :video_length]
955
+ mask_video = mask_video[:, :, :video_length]
956
+
957
+ num_channels_latents = self.vae.config.latent_channels
958
+ num_channels_transformer = self.transformer.config.in_channels
959
+ return_image_latents = num_channels_transformer == num_channels_latents
960
+
961
+ latents_outputs = self.prepare_latents(
962
+ batch_size * num_videos_per_prompt,
963
+ num_channels_latents,
964
+ height,
965
+ width,
966
+ video_length,
967
+ prompt_embeds.dtype,
968
+ device,
969
+ generator,
970
+ latents,
971
+ video=init_video,
972
+ timestep=latent_timestep,
973
+ is_strength_max=is_strength_max,
974
+ return_noise=True,
975
+ return_video_latents=return_image_latents,
976
+ )
977
+ if return_image_latents:
978
+ latents, noise, image_latents = latents_outputs
979
+ else:
980
+ latents, noise = latents_outputs
981
+ if comfyui_progressbar:
982
+ pbar.update(1)
983
+
984
+ if mask_video is not None:
985
+ if (mask_video == 255).all():
986
+ mask_latents = torch.zeros_like(latents)[:, :, :1].to(latents.device, latents.dtype)
987
+ masked_video_latents = torch.zeros_like(latents).to(latents.device, latents.dtype)
988
+
989
+ mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
990
+ masked_video_latents_input = (
991
+ torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
992
+ )
993
+ inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)
994
+ else:
995
+ # Prepare mask latent variables
996
+ video_length = video.shape[2]
997
+ mask_condition = self.mask_processor.preprocess(rearrange(mask_video, "b c f h w -> (b f) c h w"), height=height, width=width)
998
+ mask_condition = mask_condition.to(dtype=torch.float32)
999
+ mask_condition = rearrange(mask_condition, "(b f) c h w -> b c f h w", f=video_length)
1000
+
1001
+ if num_channels_transformer != num_channels_latents:
1002
+ mask_condition_tile = torch.tile(mask_condition, [1, 3, 1, 1, 1])
1003
+ if masked_video_latents is None:
1004
+ masked_video = init_video * (mask_condition_tile < 0.5) + torch.ones_like(init_video) * (mask_condition_tile > 0.5) * -1
1005
+ else:
1006
+ masked_video = masked_video_latents
1007
+
1008
+ _, masked_video_latents = self.prepare_mask_latents(
1009
+ None,
1010
+ masked_video,
1011
+ batch_size,
1012
+ height,
1013
+ width,
1014
+ prompt_embeds.dtype,
1015
+ device,
1016
+ generator,
1017
+ do_classifier_free_guidance,
1018
+ noise_aug_strength=noise_aug_strength,
1019
+ )
1020
+ mask_latents = resize_mask(1 - mask_condition, masked_video_latents)
1021
+ mask_latents = mask_latents.to(masked_video_latents.device) * self.vae.config.scaling_factor
1022
+
1023
+ mask = torch.tile(mask_condition, [1, num_channels_latents, 1, 1, 1])
1024
+ mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
1025
+
1026
+ mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
1027
+ masked_video_latents_input = (
1028
+ torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
1029
+ )
1030
+
1031
+ mask = rearrange(mask, "b c f h w -> b f c h w")
1032
+ mask_input = rearrange(mask_input, "b c f h w -> b f c h w")
1033
+ masked_video_latents_input = rearrange(masked_video_latents_input, "b c f h w -> b f c h w")
1034
+
1035
+ inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)
1036
+ else:
1037
+ mask = torch.tile(mask_condition, [1, num_channels_latents, 1, 1, 1])
1038
+ mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
1039
+ mask = rearrange(mask, "b c f h w -> b f c h w")
1040
+
1041
+ inpaint_latents = None
1042
+ else:
1043
+ if num_channels_transformer != num_channels_latents:
1044
+ mask = torch.zeros_like(latents).to(latents.device, latents.dtype)
1045
+ masked_video_latents = torch.zeros_like(latents).to(latents.device, latents.dtype)
1046
+
1047
+ mask_input = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
1048
+ masked_video_latents_input = (
1049
+ torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
1050
+ )
1051
+ inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=1).to(latents.dtype)
1052
+ else:
1053
+ mask = torch.zeros_like(init_video[:, :1])
1054
+ mask = torch.tile(mask, [1, num_channels_latents, 1, 1, 1])
1055
+ mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
1056
+ mask = rearrange(mask, "b c f h w -> b f c h w")
1057
+
1058
+ inpaint_latents = None
1059
+ if comfyui_progressbar:
1060
+ pbar.update(1)
1061
+
1062
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1063
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1064
+
1065
+ # 7. Create rotary embeds if required
1066
+ image_rotary_emb = (
1067
+ self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
1068
+ if self.transformer.config.use_rotary_positional_embeddings
1069
+ else None
1070
+ )
1071
+
1072
+ # 8. Denoising loop
1073
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1074
+ if flow_latents is not None:
1075
+ flow_latents = torch.cat([flow_latents] * 2) if do_classifier_free_guidance else latents
1076
+
1077
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1078
+ # for DPM-solver++
1079
+ old_pred_original_sample = None
1080
+ for i, t in enumerate(timesteps):
1081
+ if self.interrupt:
1082
+ continue
1083
+
1084
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1085
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1086
+
1087
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
1088
+ timestep = t.expand(latent_model_input.shape[0])
1089
+
1090
+ # predict noise model_output
1091
+ noise_pred = self.transformer(
1092
+ hidden_states=latent_model_input,
1093
+ encoder_hidden_states=prompt_embeds,
1094
+ timestep=timestep,
1095
+ image_rotary_emb=image_rotary_emb,
1096
+ return_dict=False,
1097
+ inpaint_latents=inpaint_latents,
1098
+ flow_latents=flow_latents,
1099
+ )[0]
1100
+ noise_pred = noise_pred.float()
1101
+
1102
+ # perform guidance
1103
+ if use_dynamic_cfg:
1104
+ self._guidance_scale = 1 + guidance_scale * (
1105
+ (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
1106
+ )
1107
+ if do_classifier_free_guidance:
1108
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1109
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1110
+
1111
+ # compute the previous noisy sample x_t -> x_t-1
1112
+ if not isinstance(self.scheduler, CogVideoXDPMScheduler):
1113
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1114
+ else:
1115
+ latents, old_pred_original_sample = self.scheduler.step(
1116
+ noise_pred,
1117
+ old_pred_original_sample,
1118
+ t,
1119
+ timesteps[i - 1] if i > 0 else None,
1120
+ latents,
1121
+ **extra_step_kwargs,
1122
+ return_dict=False,
1123
+ )
1124
+ latents = latents.to(prompt_embeds.dtype)
1125
+
1126
+ # call the callback, if provided
1127
+ if callback_on_step_end is not None:
1128
+ callback_kwargs = {}
1129
+ for k in callback_on_step_end_tensor_inputs:
1130
+ callback_kwargs[k] = locals()[k]
1131
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1132
+
1133
+ latents = callback_outputs.pop("latents", latents)
1134
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1135
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1136
+
1137
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1138
+ progress_bar.update()
1139
+ if comfyui_progressbar:
1140
+ pbar.update(1)
1141
+
1142
+ if output_type == "numpy":
1143
+ video = self.decode_latents(latents)
1144
+ elif not output_type == "latent":
1145
+ video = self.decode_latents(latents)
1146
+ video = self.video_processor.postprocess_video(video=video, output_type=output_type)
1147
+ else:
1148
+ video = latents
1149
+
1150
+ # Offload all models
1151
+ self.maybe_free_model_hooks()
1152
+
1153
+ if not return_dict:
1154
+ video = torch.from_numpy(video)
1155
+
1156
+ return CogVideoX_Fun_PipelineOutput(videos=video)
robomaster/reward/MPS/README.md ADDED
@@ -0,0 +1 @@
 
 
1
+ This folder is modified from the official [MPS](https://github.com/Kwai-Kolors/MPS/tree/main) repository.
robomaster/reward/MPS/trainer/models/base_model.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+
3
+
4
+
5
@dataclass
class BaseModelConfig:
    """Marker base class for model configuration dataclasses (e.g. ClipModelConfig)."""
    pass
robomaster/reward/MPS/trainer/models/clip_model.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from transformers import CLIPModel as HFCLIPModel
3
+ from transformers import AutoTokenizer
4
+
5
+ from torch import nn, einsum
6
+
7
+ # Modified: import
8
+ # from trainer.models.base_model import BaseModelConfig
9
+ from .base_model import BaseModelConfig
10
+
11
+ from transformers import CLIPConfig
12
+ from typing import Any, Optional, Tuple, Union
13
+ import torch
14
+
15
+ # Modified: import
16
+ # from trainer.models.cross_modeling import Cross_model
17
+ from .cross_modeling import Cross_model
18
+
19
+ import gc
20
+
21
class XCLIPModel(HFCLIPModel):
    """CLIP variant that projects the full token sequence, not only the pooled output.

    Unlike the stock HF ``CLIPModel``, ``get_text_features`` returns the projected
    per-token hidden states together with the projected pooled (EOS) embedding,
    and ``get_image_features`` returns projected per-token image features. The
    MPS cross-attention scorer consumes these token-level features.
    """

    def __init__(self, config: "CLIPConfig"):
        super().__init__(config)

    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        """Return ``(per-token projected text features, projected EOS feature)``."""
        # Fall back to the model-level config for any flag that was not given.
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoded = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project every token's hidden state (index 0 = last_hidden_state) ...
        per_token_features = self.text_projection(encoded[0])
        # ... and the pooled output (index 1) separately for the EOS embedding.
        eos_feature = self.text_projection(encoded[1])

        return per_token_features, eos_feature

    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        """Return per-token projected image features (no pooling)."""
        # Fall back to the model-level config for any flag that was not given.
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoded = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project the full last_hidden_state instead of the pooled output.
        return self.visual_projection(encoded[0])
93
+
94
+
95
+
96
@dataclass
class ClipModelConfig(BaseModelConfig):
    """Configuration for the MPS CLIP scorer."""
    # Hydra-style import path used to instantiate the model class.
    _target_: str = "trainer.models.clip_model.CLIPModel"
    pretrained_model_name_or_path: str ="openai/clip-vit-base-patch32"
100
+
101
+
102
class CLIPModel(nn.Module):
    """MPS scorer: XCLIP backbone plus a cross-attention scoring head.

    ``forward`` encodes the prompt, the image(s) and a condition prompt, builds a
    text/condition relevance mask, and scores image tokens against prompt tokens
    with ``Cross_model``.
    """

    def __init__(self, config):
        super().__init__()
        # Modified: We convert the original ckpt (contains the entire model) to a `state_dict`.
        # self.model = XCLIPModel.from_pretrained(ckpt)
        self.model = XCLIPModel(config)
        self.cross_model = Cross_model(dim=1024, layer_num=4, heads=16)

    def get_text_features(self, *args, **kwargs):
        # Delegates to the backbone (returns per-token features and the EOS feature).
        return self.model.get_text_features(*args, **kwargs)

    def get_image_features(self, *args, **kwargs):
        # Delegates to the backbone (returns per-token image features).
        return self.model.get_image_features(*args, **kwargs)

    def forward(self, text_inputs=None, image_inputs=None, condition_inputs=None):
        """Return ``(text EOS embedding, cross-attention CLS-token score)`` as a tuple."""
        outputs = ()

        text_f, text_EOS = self.model.get_text_features(text_inputs) # B*77*1024
        outputs += text_EOS,

        # NOTE(review): images are always cast to fp16 here, while the mask below is
        # cast to the cross model's dtype — confirm this is intended when the cross
        # model runs in bf16.
        image_f = self.model.get_image_features(image_inputs.half()) # 2B*257*1024
        # [B, 77, 1024]
        condition_f, _ = self.model.get_text_features(condition_inputs) # B*5*1024

        # Per-token relevance of the prompt to the condition prompt; tokens whose
        # normalized similarity is <= 0.01 are masked out of the cross attention.
        sim_text_condition = einsum('b i d, b j d -> b j i', text_f, condition_f)
        sim_text_condition = torch.max(sim_text_condition, dim=1, keepdim=True)[0]
        sim_text_condition = sim_text_condition / sim_text_condition.max()
        mask = torch.where(sim_text_condition > 0.01, 0, float('-inf')) # B*1*77

        # Modified: Support both torch.float16 and torch.bfloat16
        # mask = mask.repeat(1,image_f.shape[1],1) # B*257*77
        model_dtype = next(self.cross_model.parameters()).dtype
        mask = mask.repeat(1,image_f.shape[1],1).to(model_dtype) # B*257*77
        # bc = int(image_f.shape[0]/2)

        # Modified: The original input consists of a (batch of) text and two (batches of) images,
        # primarily used to compute which (batch of) image is more consistent with the text.
        # The modified input consists of a (batch of) text and a (batch of) images.
        # sim0 = self.cross_model(image_f[:bc,:,:], text_f,mask.half())
        # sim1 = self.cross_model(image_f[bc:,:,:], text_f,mask.half())
        # outputs += sim0[:,0,:],
        # outputs += sim1[:,0,:],
        sim = self.cross_model(image_f, text_f,mask)
        outputs += sim[:,0,:],

        return outputs

    @property
    def logit_scale(self):
        # Expose the backbone's learned logit scale.
        return self.model.logit_scale

    def save(self, path):
        """Persist only the backbone weights (the cross model is not saved here)."""
        self.model.save_pretrained(path)
robomaster/reward/MPS/trainer/models/cross_modeling.py ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import einsum, nn
3
+ import torch.nn.functional as F
4
+ from einops import rearrange, repeat
5
+
6
+ # helper functions
7
+
8
def exists(val):
    """Return True when a value is present, i.e. not None."""
    if val is None:
        return False
    return True
10
+
11
def default(val, d):
    """Return *val* when it is present (not None), otherwise the fallback *d*."""
    return d if not exists(val) else val
13
+
14
+ # normalization
15
+ # they use layernorm without bias, something that pytorch does not offer
16
+
17
+
18
class LayerNorm(nn.Module):
    """Layer norm with a learnable scale but a frozen, all-zero bias.

    PyTorch's built-in LayerNorm always learns its bias; here the bias is a
    non-trainable buffer of zeros, so only the weight is optimized.
    """

    def __init__(self, dim):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(dim))
        self.register_buffer("bias", torch.zeros(dim))

    def forward(self, x):
        # Normalize over the last dimension only.
        normalized_shape = x.shape[-1:]
        return F.layer_norm(x, normalized_shape, self.weight, self.bias)
26
+
27
+ # residual
28
+
29
+
30
class Residual(nn.Module):
    """Wrap a callable so its output is added back onto its input (skip connection)."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, *args, **kwargs):
        out = self.fn(x, *args, **kwargs)
        return out + x
37
+
38
+
39
+ # rotary positional embedding
40
+ # https://arxiv.org/abs/2104.09864
41
+
42
+
43
class RotaryEmbedding(nn.Module):
    """Rotary positional embedding (RoPE), https://arxiv.org/abs/2104.09864.

    ``forward`` returns a ``(max_seq_len, dim)`` table of rotation angles, with
    the frequency block duplicated along the last dimension.
    """

    def __init__(self, dim):
        super().__init__()
        # One inverse frequency per pair of channels.
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, max_seq_len, *, device):
        positions = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
        # Outer product: angle[i, j] = position_i * inv_freq_j.
        angles = torch.outer(positions, self.inv_freq)
        return torch.cat((angles, angles), dim=-1)
53
+
54
+
55
def rotate_half(x):
    """Rotate the two halves of the last dimension: (x1, x2) -> (-x2, x1)."""
    first_half, second_half = x.chunk(2, dim=-1)
    return torch.cat((-second_half, first_half), dim=-1)
59
+
60
+
61
def apply_rotary_pos_emb(pos, t):
    """Apply rotary embedding angles ``pos`` to tensor ``t``."""
    cos, sin = pos.cos(), pos.sin()
    return t * cos + rotate_half(t) * sin
63
+
64
+
65
+ # classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
66
+ # https://arxiv.org/abs/2002.05202
67
+
68
+
69
class SwiGLU(nn.Module):
    """SwiGLU gating (https://arxiv.org/abs/2002.05202).

    Splits the last dimension in half and gates the first half with SiLU of
    the second half.
    """

    def forward(self, x):
        value, gate = x.chunk(2, dim=-1)
        return value * F.silu(gate)
73
+
74
+
75
+ # parallel attention and feedforward with residual
76
+ # discovered by Wang et al + EleutherAI from GPT-J fame
77
+
78
class ParallelTransformerBlock(nn.Module):
    """PaLM-style block: attention and feedforward computed in parallel from one
    fused projection, using multi-query attention (multi-head queries, single-head
    key/value) and rotary position embeddings.
    """

    def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
        super().__init__()
        self.norm = LayerNorm(dim)

        attn_inner_dim = dim_head * heads
        ff_inner_dim = dim * ff_mult
        # Split sizes of the fused projection: multi-head q, single-head k,
        # single-head v, and the doubled (for SwiGLU) feedforward input.
        self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))

        self.heads = heads
        self.scale = dim_head**-0.5
        self.rotary_emb = RotaryEmbedding(dim_head)

        self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
        self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)

        self.ff_out = nn.Sequential(
            SwiGLU(),
            nn.Linear(ff_inner_dim, dim, bias=False)
        )

        # Lazily-grown, non-persistent cache of rotary angle tables.
        self.register_buffer("pos_emb", None, persistent=False)


    def get_rotary_embedding(self, n, device):
        # Reuse the cached table when it already covers at least n positions.
        if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
            return self.pos_emb[:n]

        pos_emb = self.rotary_emb(n, device=device)
        self.register_buffer("pos_emb", pos_emb, persistent=False)
        return pos_emb

    def forward(self, x, attn_mask=None):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """

        n, device, h = x.shape[1], x.device, self.heads

        # pre layernorm

        x = self.norm(x)

        # attention queries, keys, values, and feedforward inner

        q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)

        # split heads
        # they use multi-query single-key-value attention, yet another Noam Shazeer paper
        # they found no performance loss past a certain scale, and more efficient decoding obviously
        # https://arxiv.org/abs/1911.02150

        q = rearrange(q, "b n (h d) -> b h n d", h=h)

        # rotary embeddings (applied to q per head, and to the shared single-head k)

        positions = self.get_rotary_embedding(n, device)
        q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))

        # scale

        q = q * self.scale

        # similarity (k has no head axis: shared across all query heads)

        sim = einsum("b h i d, b j d -> b h i j", q, k)


        # extra attention mask - for masking out attention from text CLS token to padding

        if exists(attn_mask):
            attn_mask = rearrange(attn_mask, 'b i j -> b 1 i j')
            sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)

        # attention (subtract row max for numerical stability before softmax)

        sim = sim - sim.amax(dim=-1, keepdim=True).detach()
        attn = sim.softmax(dim=-1)

        # aggregate values (v is also single-head, shared across query heads)

        out = einsum("b h i j, b j d -> b h i d", attn, v)

        # merge heads

        out = rearrange(out, "b h n d -> b n (h d)")
        return self.attn_out(out) + self.ff_out(ff)
169
+
170
+ # cross attention - using multi-query + one-headed key / values as in PaLM w/ optional parallel feedforward
171
+
172
class CrossAttention(nn.Module):
    """Cross attention with multi-head queries and single-head key/value (PaLM
    style), an additive context mask, and an optional parallel feedforward.
    """

    def __init__(
        self,
        dim,
        *,
        context_dim=None,
        dim_head=64,
        heads=12,
        parallel_ff=False,
        ff_mult=4,
        norm_context=False
    ):
        super().__init__()
        self.heads = heads
        self.scale = dim_head ** -0.5
        inner_dim = heads * dim_head
        # Context defaults to the query dimension when not given.
        context_dim = default(context_dim, dim)

        self.norm = LayerNorm(dim)
        self.context_norm = LayerNorm(context_dim) if norm_context else nn.Identity()

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        # Single shared key/value head (dim_head each), hence dim_head * 2.
        self.to_kv = nn.Linear(context_dim, dim_head * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

        # whether to have parallel feedforward

        ff_inner_dim = ff_mult * dim

        self.ff = nn.Sequential(
            nn.Linear(dim, ff_inner_dim * 2, bias=False),
            SwiGLU(),
            nn.Linear(ff_inner_dim, dim, bias=False)
        ) if parallel_ff else None

    def forward(self, x, context, mask):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """

        # pre-layernorm, for queries and context

        x = self.norm(x)
        context = self.context_norm(context)

        # get queries

        q = self.to_q(x)
        q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)

        # scale

        q = q * self.scale

        # get key / values (single head, shared across all query heads)

        k, v = self.to_kv(context).chunk(2, dim=-1)

        # query / key similarity

        sim = einsum('b h i d, b j d -> b h i j', q, k)

        # attention
        # `mask` is additive (0 for keep, -inf to drop), broadcast over heads.
        mask = mask.unsqueeze(1).repeat(1,self.heads,1,1)
        sim = sim + mask # context mask
        # Subtract the row max for numerical stability before softmax.
        sim = sim - sim.amax(dim=-1, keepdim=True)
        attn = sim.softmax(dim=-1)

        # aggregate

        out = einsum('b h i j, b j d -> b h i d', attn, v)

        # merge and combine heads

        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)

        # add parallel feedforward (for multimodal layers)

        if exists(self.ff):
            out = out + self.ff(x)

        return out
259
+
260
+
261
class Cross_model(nn.Module):
    """Stack of (residual cross-attention -> residual parallel self-attn/FF) layers.

    Query tokens attend to the context tokens under an additive mask, then
    self-attend, repeated ``layer_num`` times.
    """

    def __init__(
        self,
        dim=512,
        layer_num=4,
        dim_head=64,
        heads=8,
        ff_mult=4
    ):
        super().__init__()
        self.layers = nn.ModuleList(
            nn.ModuleList([
                Residual(CrossAttention(
                    dim=dim, dim_head=dim_head, heads=heads,
                    parallel_ff=True, ff_mult=ff_mult,
                )),
                Residual(ParallelTransformerBlock(
                    dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult,
                )),
            ])
            for _ in range(layer_num)
        )

    def forward(
        self,
        query_tokens,
        context_tokens,
        mask
    ):
        tokens = query_tokens
        for cross_attn, self_attn_ff in self.layers:
            tokens = cross_attn(tokens, context_tokens, mask)
            tokens = self_attn_ff(tokens)
        return tokens
robomaster/reward/aesthetic_predictor_v2_5/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .siglip_v2_5 import (
2
+ AestheticPredictorV2_5Head,
3
+ AestheticPredictorV2_5Model,
4
+ AestheticPredictorV2_5Processor,
5
+ convert_v2_5_from_siglip,
6
+ )
7
+
8
+ __all__ = [
9
+ "AestheticPredictorV2_5Head",
10
+ "AestheticPredictorV2_5Model",
11
+ "AestheticPredictorV2_5Processor",
12
+ "convert_v2_5_from_siglip",
13
+ ]
robomaster/reward/aesthetic_predictor_v2_5/siglip_v2_5.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Borrowed from https://github.com/discus0434/aesthetic-predictor-v2-5/blob/3125a9e/src/aesthetic_predictor_v2_5/siglip_v2_5.py
2
+ import os
3
+ from collections import OrderedDict
4
+ from os import PathLike
5
+ from typing import Final
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torchvision.transforms as transforms
10
+ from transformers import (
11
+ SiglipImageProcessor,
12
+ SiglipVisionConfig,
13
+ SiglipVisionModel,
14
+ logging,
15
+ )
16
+ from transformers.image_processing_utils import BatchFeature
17
+ from transformers.modeling_outputs import ImageClassifierOutputWithNoAttention
18
+
19
+ logging.set_verbosity_error()
20
+
21
+ URL: Final[str] = (
22
+ "https://github.com/discus0434/aesthetic-predictor-v2-5/raw/main/models/aesthetic_predictor_v2_5.pth"
23
+ )
24
+
25
+
26
class AestheticPredictorV2_5Head(nn.Module):
    """MLP scoring head mapping a SigLIP image embedding to one aesthetic score."""

    def __init__(self, config: "SiglipVisionConfig") -> None:
        super().__init__()
        # Funnel: hidden_size -> 1024 -> 128 -> 64 -> 16 -> 1, with dropout
        # between stages (0.5 early, 0.2 before the final linear).
        stages = [
            nn.Linear(config.hidden_size, 1024),
            nn.Dropout(0.5),
            nn.Linear(1024, 128),
            nn.Dropout(0.5),
            nn.Linear(128, 64),
            nn.Dropout(0.5),
            nn.Linear(64, 16),
            nn.Dropout(0.2),
            nn.Linear(16, 1),
        ]
        self.scoring_head = nn.Sequential(*stages)

    def forward(self, image_embeds: torch.Tensor) -> torch.Tensor:
        """Return the raw aesthetic score(s) for ``image_embeds``."""
        return self.scoring_head(image_embeds)
43
+
44
+
45
class AestheticPredictorV2_5Model(SiglipVisionModel):
    """SigLIP vision backbone with an MLP head regressing an aesthetic score.

    ``forward`` pools the image through the SigLIP encoder, L2-normalizes the
    pooled embedding, and scores it with ``AestheticPredictorV2_5Head``.
    """

    # Patch size of the underlying so400m-patch14 backbone.
    PATCH_SIZE = 14

    def __init__(self, config: SiglipVisionConfig, *args, **kwargs) -> None:
        super().__init__(config, *args, **kwargs)
        self.layers = AestheticPredictorV2_5Head(config)
        self.post_init()
        # 384x384 inputs normalized to [-1, 1] (mean/std 0.5), matching the
        # siglip-so400m-patch14-384 preprocessing.
        self.transforms = transforms.Compose([
            transforms.Resize((384, 384)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])

    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        labels: torch.Tensor | None = None,
        return_dict: bool | None = None,
    ) -> tuple | ImageClassifierOutputWithNoAttention:
        """Score ``pixel_values``; when ``labels`` is given, also compute MSE loss.

        Returns ``(loss, prediction, image_embeds)`` when ``return_dict`` is
        falsy, otherwise an ``ImageClassifierOutputWithNoAttention``.
        """
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        # NOTE(review): the attribute access below assumes the backbone returns
        # a ModelOutput even when return_dict is False — confirm callers never
        # pass return_dict=False expecting a tuple from the backbone.
        outputs = super().forward(
            pixel_values=pixel_values,
            return_dict=return_dict,
        )
        image_embeds = outputs.pooler_output
        # L2-normalize the pooled embedding before scoring.
        image_embeds_norm = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
        prediction = self.layers(image_embeds_norm)

        loss = None
        if labels is not None:
            # Bug fix: the original called `loss_fct()` with no arguments, which
            # raises a TypeError whenever labels are supplied. Compare the
            # flattened predictions against the labels instead.
            loss_fct = nn.MSELoss()
            loss = loss_fct(prediction.view(-1), labels.view(-1).to(prediction.dtype))

        if not return_dict:
            return (loss, prediction, image_embeds)

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=prediction,
            hidden_states=image_embeds,
        )
89
+
90
+
91
class AestheticPredictorV2_5Processor(SiglipImageProcessor):
    """Thin wrapper around ``SiglipImageProcessor`` with a SigLIP-so400m default
    checkpoint for ``from_pretrained``.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def __call__(self, *args, **kwargs) -> BatchFeature:
        return super().__call__(*args, **kwargs)

    @classmethod
    def from_pretrained(
        # Fix: the first parameter of a classmethod is the class, conventionally
        # named `cls` (the original named it `self`).
        cls,
        pretrained_model_name_or_path: str
        | PathLike = "google/siglip-so400m-patch14-384",
        *args,
        **kwargs,
    ) -> "AestheticPredictorV2_5Processor":
        return super().from_pretrained(pretrained_model_name_or_path, *args, **kwargs)
107
+
108
+
109
def convert_v2_5_from_siglip(
    predictor_name_or_path: str | PathLike | None = None,
    encoder_model_name: str = "google/siglip-so400m-patch14-384",
    *args,
    **kwargs,
) -> tuple[AestheticPredictorV2_5Model, AestheticPredictorV2_5Processor]:
    """Build the v2.5 aesthetic predictor from a SigLIP checkpoint.

    Loads the SigLIP backbone and processor from ``encoder_model_name``, then
    loads the scoring-head weights from ``predictor_name_or_path`` when that
    file exists, otherwise downloads the released weights from ``URL``.
    Returns the (eval-mode) model together with its processor.
    """
    model = AestheticPredictorV2_5Model.from_pretrained(
        encoder_model_name, *args, **kwargs
    )
    processor = AestheticPredictorV2_5Processor.from_pretrained(
        encoder_model_name, *args, **kwargs
    )

    # Prefer a local head checkpoint; fall back to the released weights.
    have_local_ckpt = (
        predictor_name_or_path is not None and os.path.exists(predictor_name_or_path)
    )
    if have_local_ckpt:
        state_dict = torch.load(predictor_name_or_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(URL, map_location="cpu")

    assert isinstance(state_dict, OrderedDict)

    model.layers.load_state_dict(state_dict)
    model.eval()

    return model, processor
robomaster/reward/improved_aesthetic_predictor.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ from transformers import CLIPModel
6
+ from torchvision.datasets.utils import download_url
7
+
8
# Mirror of the LAION "sac+logos+ava1" linear-MSE aesthetic head weights
# (trained on CLIP ViT-L/14 image features).
URL = "https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/easyanimate/Third_Party/sac%2Blogos%2Bava1-l14-linearMSE.pth"
# Local cache filename (under torch.hub.get_dir()) and its MD5 checksum.
FILENAME = "sac+logos+ava1-l14-linearMSE.pth"
MD5 = "b1047fd767a00134b8fd6529bf19521a"
11
+
12
+
13
class MLP(nn.Module):
    """Regression head mapping a 768-d CLIP image embedding to one aesthetic score.

    The exact layer order — including the parameter-free Dropout modules — is
    preserved so that published checkpoints keyed ``layers.<index>.*`` load
    without remapping.
    """

    def __init__(self):
        super().__init__()
        stack = [
            nn.Linear(768, 1024),
            nn.Dropout(0.2),
            nn.Linear(1024, 128),
            nn.Dropout(0.2),
            nn.Linear(128, 64),
            nn.Dropout(0.1),
            nn.Linear(64, 16),
            nn.Linear(16, 1),
        ]
        self.layers = nn.Sequential(*stack)

    def forward(self, embed):
        """Return the raw aesthetic score(s) for *embed* (last dim 768)."""
        return self.layers(embed)
30
+
31
+
32
class ImprovedAestheticPredictor(nn.Module):
    """CLIP ViT-L/14 encoder followed by the LAION linear-MSE aesthetic head.

    If *predictor_path* is missing (or None), the reference head weights are
    downloaded into the torch hub cache and loaded from there. The module is
    put into eval mode on construction.
    """

    def __init__(self, encoder_path="openai/clip-vit-large-patch14", predictor_path=None):
        super().__init__()
        self.encoder = CLIPModel.from_pretrained(encoder_path)
        self.predictor = MLP()
        if predictor_path is None or not os.path.exists(predictor_path):
            # Fall back to the cached (or freshly downloaded) reference weights;
            # download_url verifies the MD5 and skips the fetch when cached.
            download_url(URL, torch.hub.get_dir(), FILENAME, md5=MD5)
            predictor_path = os.path.join(torch.hub.get_dir(), FILENAME)
        self.predictor.load_state_dict(torch.load(predictor_path, map_location="cpu"))
        self.eval()

    def forward(self, pixel_values):
        """Score a batch of preprocessed images; returns a 1-D tensor of scores."""
        features = self.encoder.get_image_features(pixel_values=pixel_values)
        unit_features = features / torch.linalg.vector_norm(features, dim=-1, keepdim=True)
        return self.predictor(unit_features).squeeze(1)
robomaster/reward/reward_fn.py ADDED
@@ -0,0 +1,385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from abc import ABC, abstractmethod
3
+
4
+ import torch
5
+ import torchvision.transforms as transforms
6
+ from einops import rearrange
7
+ from torchvision.datasets.utils import download_url
8
+ from typing import Optional, Tuple
9
+
10
+
11
+ # All reward models.
12
+ __all__ = ["AestheticReward", "HPSReward", "PickScoreReward", "MPSReward"]
13
+
14
+
15
class BaseReward(ABC):
    """Abstract interface for reward models.

    Concrete subclasses build their scoring model (and optional image
    transforms) in ``__init__`` and implement ``__call__``.
    """
    def __init__(self):
        """Subclasses define their reward model and image transformations here."""
        pass

    @abstractmethod
    def __call__(self, batch_frames: torch.Tensor, batch_prompt: Optional[list[str]]=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score ``batch_frames`` of shape ``[B, C, T, H, W]`` (frames sampled
        from B videos), optionally with one prompt per video, and return the
        mean-reduced ``(loss, reward)`` pair."""
        pass
29
+
30
class AestheticReward(BaseReward):
    """Aesthetic Predictor [V2](https://github.com/christophschuhmann/improved-aesthetic-predictor)
    and [V2.5](https://github.com/discus0434/aesthetic-predictor-v2-5) reward model.

    V2 scores CLIP ViT-L/14 image embeddings with a linear head; V2.5 scores
    SigLIP SoViT-400M embeddings. The prompt argument is accepted for
    interface compatibility but ignored.
    """
    def __init__(
        self,
        encoder_path="openai/clip-vit-large-patch14",
        predictor_path=None,
        version="v2",
        device="cpu",
        dtype=torch.float16,
        max_reward=10,
        loss_scale=0.1,
    ):
        from .improved_aesthetic_predictor import ImprovedAestheticPredictor
        # Fixed import path: the V2.5 converter ships with this package under
        # reward/aesthetic_predictor_v2_5 (re-exported by its __init__); the old
        # path under video_caption.utils does not match the shipped layout.
        from .aesthetic_predictor_v2_5 import convert_v2_5_from_siglip

        self.encoder_path = encoder_path
        self.predictor_path = predictor_path
        self.version = version
        self.device = device
        self.dtype = dtype
        # Reward value treated as "perfect" when converting reward to loss;
        # None switches to a plain negated-reward loss.
        self.max_reward = max_reward
        self.loss_scale = loss_scale

        if self.version not in ("v2", "v2.5"):
            raise ValueError("Only v2 and v2.5 are supported.")
        if self.version == "v2":
            assert "clip-vit-large-patch14" in encoder_path.lower()
            self.model = ImprovedAestheticPredictor(encoder_path=self.encoder_path, predictor_path=self.predictor_path)
            # https://huggingface.co/openai/clip-vit-large-patch14/blob/main/preprocessor_config.json
            # TODO: [transforms.Resize(224), transforms.CenterCrop(224)] for any aspect ratio.
            self.transform = transforms.Compose([
                transforms.Resize((224, 224), interpolation=transforms.InterpolationMode.BICUBIC),
                transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
            ])
        elif self.version == "v2.5":
            assert "siglip-so400m-patch14-384" in encoder_path.lower()
            self.model, _ = convert_v2_5_from_siglip(encoder_model_name=self.encoder_path)
            # https://huggingface.co/google/siglip-so400m-patch14-384/blob/main/preprocessor_config.json
            self.transform = transforms.Compose([
                transforms.Resize((384, 384), interpolation=transforms.InterpolationMode.BICUBIC),
                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ])

        self.model.to(device=self.device, dtype=self.dtype)
        self.model.requires_grad_(False)


    def __call__(self, batch_frames: torch.Tensor, batch_prompt: Optional[list[str]]=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score ``batch_frames`` of shape ``[B, C, T, H, W]``.

        Returns ``(loss, reward)`` averaged over frames and batch; the
        per-frame loss is ``|reward - max_reward| * loss_scale`` (or
        ``-reward * loss_scale`` when ``max_reward`` is None).
        """
        # Iterate frame-wise: each slice holds the t-th frame of every video.
        batch_frames = rearrange(batch_frames, "b c t h w -> t b c h w")
        batch_loss, batch_reward = 0, 0
        for frames in batch_frames:
            pixel_values = torch.stack([self.transform(frame) for frame in frames])
            pixel_values = pixel_values.to(self.device, dtype=self.dtype)
            if self.version == "v2":
                reward = self.model(pixel_values)
            elif self.version == "v2.5":
                # squeeze(-1) (not squeeze()) so a batch of one frame keeps
                # its batch axis instead of collapsing to a 0-d tensor.
                reward = self.model(pixel_values).logits.squeeze(-1)
            # Convert reward to loss.
            if self.max_reward is None:
                loss = (-1 * reward) * self.loss_scale
            else:
                loss = abs(reward - self.max_reward) * self.loss_scale
            batch_loss, batch_reward = batch_loss + loss.mean(), batch_reward + reward.mean()

        # batch_frames.shape[0] is T here (frames), so this averages per frame.
        return batch_loss / batch_frames.shape[0], batch_reward / batch_frames.shape[0]
97
+
98
+
99
class HPSReward(BaseReward):
    """[HPS](https://github.com/tgxs002/HPSv2) v2 and v2.1 reward model.

    Scores image-text alignment with an HPS-finetuned OpenCLIP ViT-H/14;
    the reward is the diagonal of the (unnormalized-scale) image/text
    similarity matrix.
    """
    def __init__(
        self,
        model_path=None,
        version="v2.0",
        device="cpu",
        dtype=torch.float16,
        max_reward=1,
        loss_scale=1,
    ):
        from hpsv2.src.open_clip import create_model_and_transforms, get_tokenizer

        self.model_path = model_path
        self.version = version
        self.device = device
        self.dtype = dtype
        # Reward treated as "perfect" for the loss; None → negated-reward loss.
        self.max_reward = max_reward
        self.loss_scale = loss_scale

        # Build the OpenCLIP backbone exactly as the HPSv2 codebase does.
        self.model, _, _ = create_model_and_transforms(
            "ViT-H-14",
            "laion2B-s32B-b79K",
            precision=self.dtype,
            device=self.device,
            jit=False,
            force_quick_gelu=False,
            force_custom_text=False,
            force_patch_dropout=False,
            force_image_size=None,
            pretrained_image=False,
            image_mean=None,
            image_std=None,
            light_augmentation=True,
            aug_cfg={},
            output_dict=True,
            with_score_predictor=False,
            with_region_predictor=False,
        )
        self.tokenizer = get_tokenizer("ViT-H-14")

        # https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/blob/main/preprocessor_config.json
        # TODO: [transforms.Resize(224), transforms.CenterCrop(224)] for any aspect ratio.
        self.transform = transforms.Compose([
            transforms.Resize((224, 224), interpolation=transforms.InterpolationMode.BICUBIC),
            transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
        ])

        if version == "v2.0":
            url = "https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/easyanimate/Third_Party/HPS_v2_compressed.pt"
            filename = "HPS_v2_compressed.pt"
            md5 = "fd9180de357abf01fdb4eaad64631db4"
        elif version == "v2.1":
            url = "https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/easyanimate/Third_Party/HPS_v2.1_compressed.pt"
            filename = "HPS_v2.1_compressed.pt"
            md5 = "4067542e34ba2553a738c5ac6c1d75c0"
        else:
            raise ValueError("Only v2.0 and v2.1 are supported.")
        if self.model_path is None or not os.path.exists(self.model_path):
            # download_url derives the filename from the URL basename and
            # skips the fetch when the checksum-verified file is cached.
            download_url(url, torch.hub.get_dir(), md5=md5)
            model_path = os.path.join(torch.hub.get_dir(), filename)

        state_dict = torch.load(model_path, map_location="cpu")["state_dict"]
        self.model.load_state_dict(state_dict)
        self.model.to(device=self.device, dtype=self.dtype)
        self.model.requires_grad_(False)
        self.model.eval()

    def __call__(self, batch_frames: torch.Tensor, batch_prompt: list[str]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score ``batch_frames`` ``[B, C, T, H, W]`` against one prompt per video.

        Returns ``(loss, reward)`` averaged over frames and batch.
        """
        assert batch_frames.shape[0] == len(batch_prompt)
        # Tokenize once: the prompts are invariant across the frame loop.
        text_inputs = self.tokenizer(batch_prompt).to(device=self.device)
        # Compute batch reward and loss in frame-wise.
        batch_frames = rearrange(batch_frames, "b c t h w -> t b c h w")
        batch_loss, batch_reward = 0, 0
        for frames in batch_frames:
            image_inputs = torch.stack([self.transform(frame) for frame in frames])
            image_inputs = image_inputs.to(device=self.device, dtype=self.dtype)
            outputs = self.model(image_inputs, text_inputs)

            image_features, text_features = outputs["image_features"], outputs["text_features"]
            logits = image_features @ text_features.T
            # Diagonal pairs the i-th video's frame with the i-th prompt.
            reward = torch.diagonal(logits)
            # Convert reward to loss.
            if self.max_reward is None:
                loss = (-1 * reward) * self.loss_scale
            else:
                loss = abs(reward - self.max_reward) * self.loss_scale

            batch_loss, batch_reward = batch_loss + loss.mean(), batch_reward + reward.mean()

        # batch_frames.shape[0] is T here (frames), so this averages per frame.
        return batch_loss / batch_frames.shape[0], batch_reward / batch_frames.shape[0]
191
+
192
+
193
class PickScoreReward(BaseReward):
    """[PickScore](https://github.com/yuvalkirstain/PickScore) reward model.

    Scores image-text alignment with the PickScore CLIP-H model; the reward is
    the diagonal of the normalized image/text similarity matrix.
    """
    def __init__(
        self,
        model_path="yuvalkirstain/PickScore_v1",
        device="cpu",
        dtype=torch.float16,
        max_reward=1,
        loss_scale=1,
    ):
        from transformers import AutoProcessor, AutoModel

        self.model_path = model_path
        self.device = device
        self.dtype = dtype
        # Reward treated as "perfect" for the loss; None → negated-reward loss.
        self.max_reward = max_reward
        self.loss_scale = loss_scale

        # https://huggingface.co/yuvalkirstain/PickScore_v1/blob/main/preprocessor_config.json
        self.transform = transforms.Compose([
            transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC),
            transforms.CenterCrop(224),
            transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
        ])
        # The text tokenizer comes from the base CLIP-H checkpoint; the scoring
        # weights come from the PickScore checkpoint.
        self.processor = AutoProcessor.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K", torch_dtype=self.dtype)
        self.model = AutoModel.from_pretrained(model_path, torch_dtype=self.dtype).eval().to(device)
        self.model.requires_grad_(False)
        self.model.eval()

    def __call__(self, batch_frames: torch.Tensor, batch_prompt: list[str]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score ``batch_frames`` ``[B, C, T, H, W]`` against one prompt per video.

        Returns ``(loss, reward)`` averaged over frames and batch.
        """
        assert batch_frames.shape[0] == len(batch_prompt)
        # The text branch does not depend on the frame index, so tokenize and
        # embed the prompts once instead of once per frame.
        text_inputs = self.processor(
            text=batch_prompt,
            padding=True,
            truncation=True,
            max_length=77,
            return_tensors="pt",
        ).to(self.device)
        text_features = self.model.get_text_features(**text_inputs)
        text_features = text_features / torch.norm(text_features, dim=-1, keepdim=True)

        # Compute batch reward and loss in frame-wise.
        batch_frames = rearrange(batch_frames, "b c t h w -> t b c h w")
        batch_loss, batch_reward = 0, 0
        for frames in batch_frames:
            image_inputs = torch.stack([self.transform(frame) for frame in frames])
            image_inputs = image_inputs.to(device=self.device, dtype=self.dtype)
            image_features = self.model.get_image_features(pixel_values=image_inputs)
            image_features = image_features / torch.norm(image_features, dim=-1, keepdim=True)

            logits = image_features @ text_features.T
            # Diagonal pairs the i-th video's frame with the i-th prompt.
            reward = torch.diagonal(logits)
            # Convert reward to loss.
            if self.max_reward is None:
                loss = (-1 * reward) * self.loss_scale
            else:
                loss = abs(reward - self.max_reward) * self.loss_scale

            batch_loss, batch_reward = batch_loss + loss.mean(), batch_reward + reward.mean()

        # batch_frames.shape[0] is T here (frames), so this averages per frame.
        return batch_loss / batch_frames.shape[0], batch_reward / batch_frames.shape[0]
254
+
255
+
256
class MPSReward(BaseReward):
    """[MPS](https://github.com/Kwai-Kolors/MPS) reward model.

    Scores image-text alignment with the MPS "overall" CLIP-H model, which
    additionally conditions the text branch on a fixed list of quality aspects.
    """
    def __init__(
        self,
        model_path=None,
        device="cpu",
        dtype=torch.float16,
        max_reward=1,
        loss_scale=1,
    ):
        from transformers import AutoTokenizer, AutoConfig
        from .MPS.trainer.models.clip_model import CLIPModel

        self.model_path = model_path
        self.device = device
        self.dtype = dtype
        # Default "overall" condition string expected by the MPS checkpoint.
        self.condition = "light, color, clarity, tone, style, ambiance, artistry, shape, face, hair, hands, limbs, structure, instance, texture, quantity, attributes, position, number, location, word, things."
        # Reward treated as "perfect" for the loss; None → negated-reward loss.
        self.max_reward = max_reward
        self.loss_scale = loss_scale

        processor_name_or_path = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
        # https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/blob/main/preprocessor_config.json
        # TODO: [transforms.Resize(224), transforms.CenterCrop(224)] for any aspect ratio.
        self.transform = transforms.Compose([
            transforms.Resize((224, 224), interpolation=transforms.InterpolationMode.BICUBIC),
            transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
        ])

        # We convert the original [ckpt](http://drive.google.com/file/d/17qrK_aJkVNM75ZEvMEePpLj6L867MLkN/view?usp=sharing)
        # (contains the entire model) to a `state_dict`.
        url = "https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/easyanimate/Third_Party/MPS_overall.pth"
        filename = "MPS_overall.pth"
        md5 = "1491cbbbd20565747fe07e7572e2ac56"
        if self.model_path is None or not os.path.exists(self.model_path):
            download_url(url, torch.hub.get_dir(), md5=md5)
            model_path = os.path.join(torch.hub.get_dir(), filename)

        self.tokenizer = AutoTokenizer.from_pretrained(processor_name_or_path, trust_remote_code=True)
        config = AutoConfig.from_pretrained(processor_name_or_path)
        self.model = CLIPModel(config)
        state_dict = torch.load(model_path, map_location="cpu")
        # strict=False: the converted checkpoint may omit buffers the local
        # CLIPModel defines.
        self.model.load_state_dict(state_dict, strict=False)
        self.model.to(device=self.device, dtype=self.dtype)
        self.model.requires_grad_(False)
        self.model.eval()

    def _tokenize(self, caption):
        """Tokenize *caption* to fixed-length input ids for the CLIP tokenizer."""
        input_ids = self.tokenizer(
            caption,
            max_length=self.tokenizer.model_max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt"
        ).input_ids

        return input_ids

    def __call__(
        self,
        batch_frames: torch.Tensor,
        batch_prompt: list[str],
        batch_condition: Optional[list[str]] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score ``batch_frames`` ``[B, C, T, H, W]`` against one prompt per video.

        ``batch_condition`` optionally overrides the default quality-aspect
        condition per video. Returns ``(loss, reward)`` averaged over frames
        and batch.
        """
        if batch_condition is None:
            batch_condition = [self.condition] * len(batch_prompt)
        # Prompts/conditions are invariant across the frame loop: tokenize once.
        # (The model forward itself couples text and image, so it stays inside.)
        text_inputs = self._tokenize(batch_prompt).to(self.device)
        condition_inputs = self._tokenize(batch_condition).to(device=self.device)
        batch_frames = rearrange(batch_frames, "b c t h w -> t b c h w")
        batch_loss, batch_reward = 0, 0
        for frames in batch_frames:
            image_inputs = torch.stack([self.transform(frame) for frame in frames])
            image_inputs = image_inputs.to(device=self.device, dtype=self.dtype)
            text_features, image_features = self.model(text_inputs, image_inputs, condition_inputs)

            text_features = text_features / text_features.norm(dim=-1, keepdim=True)
            image_features = image_features / image_features.norm(dim=-1, keepdim=True)
            # reward = self.model.logit_scale.exp() * torch.diag(torch.einsum('bd,cd->bc', text_features, image_features))
            logits = image_features @ text_features.T
            # Diagonal pairs the i-th video's frame with the i-th prompt.
            reward = torch.diagonal(logits)
            # Convert reward to loss.
            if self.max_reward is None:
                loss = (-1 * reward) * self.loss_scale
            else:
                loss = abs(reward - self.max_reward) * self.loss_scale

            batch_loss, batch_reward = batch_loss + loss.mean(), batch_reward + reward.mean()

        # batch_frames.shape[0] is T here (frames), so this averages per frame.
        return batch_loss / batch_frames.shape[0], batch_reward / batch_frames.shape[0]
345
+
346
+
347
if __name__ == "__main__":
    # Smoke test: sample frames from two local videos and print every reward.
    # Requires CUDA, decord, and network access for the model downloads.
    import numpy as np
    from decord import VideoReader

    video_path_list = ["your_video_path_1.mp4", "your_video_path_2.mp4"]
    prompt_list = ["your_prompt_1", "your_prompt_2"]
    num_sampled_frames = 8

    to_tensor = transforms.ToTensor()

    sampled_frames_list = []
    for video_path in video_path_list:
        vr = VideoReader(video_path)
        # Uniformly pick num_sampled_frames indices in [0, len(vr)).
        sampled_frame_indices = np.linspace(0, len(vr), num_sampled_frames, endpoint=False, dtype=int)
        sampled_frames = vr.get_batch(sampled_frame_indices).asnumpy()
        sampled_frames = torch.stack([to_tensor(frame) for frame in sampled_frames])
        sampled_frames_list.append(sampled_frames)
    # [B, T, C, H, W] -> [B, C, T, H, W], the layout every reward expects.
    sampled_frames = torch.stack(sampled_frames_list)
    sampled_frames = rearrange(sampled_frames, "b t c h w -> b c t h w")

    aesthetic_reward_v2 = AestheticReward(device="cuda", dtype=torch.bfloat16)
    print(f"aesthetic_reward_v2: {aesthetic_reward_v2(sampled_frames)}")

    aesthetic_reward_v2_5 = AestheticReward(
        encoder_path="google/siglip-so400m-patch14-384", version="v2.5", device="cuda", dtype=torch.bfloat16
    )
    print(f"aesthetic_reward_v2_5: {aesthetic_reward_v2_5(sampled_frames)}")

    hps_reward_v2 = HPSReward(device="cuda", dtype=torch.bfloat16)
    print(f"hps_reward_v2: {hps_reward_v2(sampled_frames, prompt_list)}")

    hps_reward_v2_1 = HPSReward(version="v2.1", device="cuda", dtype=torch.bfloat16)
    print(f"hps_reward_v2_1: {hps_reward_v2_1(sampled_frames, prompt_list)}")

    pick_score = PickScoreReward(device="cuda", dtype=torch.bfloat16)
    print(f"pick_score_reward: {pick_score(sampled_frames, prompt_list)}")

    mps_score = MPSReward(device="cuda", dtype=torch.bfloat16)
    print(f"mps_reward: {mps_score(sampled_frames, prompt_list)}")
robomaster/ui/ui.py ADDED
@@ -0,0 +1,1634 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Modified from https://github.com/guoyww/AnimateDiff/blob/main/app.py
2
+ """
3
+ import base64
4
+ import gc
5
+ import json
6
+ import os
7
+ import random
8
+ from datetime import datetime
9
+ from glob import glob
10
+
11
+ import cv2
12
+ import gradio as gr
13
+ import numpy as np
14
+ import pkg_resources
15
+ import requests
16
+ import torch
17
+ from diffusers import (AutoencoderKL, AutoencoderKLCogVideoX,
18
+ CogVideoXDDIMScheduler, DDIMScheduler,
19
+ DPMSolverMultistepScheduler,
20
+ EulerAncestralDiscreteScheduler, EulerDiscreteScheduler,
21
+ PNDMScheduler)
22
+ from diffusers.utils.import_utils import is_xformers_available
23
+ from omegaconf import OmegaConf
24
+ from PIL import Image
25
+ from safetensors import safe_open
26
+ from transformers import (CLIPImageProcessor, CLIPVisionModelWithProjection,
27
+ T5EncoderModel, T5Tokenizer)
28
+
29
+ from cogvideox.data.bucket_sampler import ASPECT_RATIO_512, get_closest_ratio
30
+ from cogvideox.models.autoencoder_magvit import AutoencoderKLCogVideoX
31
+ from cogvideox.models.transformer3d import CogVideoXTransformer3DModel
32
+ from cogvideox.pipeline.pipeline_cogvideox import CogVideoX_Fun_Pipeline
33
+ from cogvideox.pipeline.pipeline_cogvideox_control import \
34
+ CogVideoX_Fun_Pipeline_Control
35
+ from cogvideox.pipeline.pipeline_cogvideox_inpaint import \
36
+ CogVideoX_Fun_Pipeline_Inpaint
37
+ from cogvideox.utils.lora_utils import merge_lora, unmerge_lora
38
+ from cogvideox.utils.utils import (
39
+ get_image_to_video_latent, get_video_to_video_latent,
40
+ get_width_and_height_from_image_and_base_resolution, save_videos_grid)
41
+
42
# Mapping from UI sampler names to diffusers scheduler classes.
scheduler_dict = {
    "Euler": EulerDiscreteScheduler,
    "Euler A": EulerAncestralDiscreteScheduler,
    "DPM++": DPMSolverMultistepScheduler,
    "PNDM": PNDMScheduler,
    "DDIM_Cog": CogVideoXDDIMScheduler,
    "DDIM_Origin": DDIMScheduler,
}

# Gradio changed several widget APIs in 4.x; branch on the installed major version.
gradio_version = pkg_resources.get_distribution("gradio").version
gradio_version_is_above_4 = True if int(gradio_version.split('.')[0]) >= 4 else False

# Custom CSS for the small icon-style toolbar buttons.
# NOTE(review): "margin-buttom" looks like a typo for "margin-bottom" — the rule
# is currently inert; confirm intent before changing the stylesheet.
css = """
.toolbutton {
    margin-buttom: 0em 0em 0em 0em;
    max-width: 2.5em;
    min-width: 2.5em !important;
    height: 2.5em;
}
"""
62
+
63
+ class CogVideoX_Fun_Controller:
64
    def __init__(self, low_gpu_memory_mode, weight_dtype):
        """Set up working directories, scan disk for available weights, and
        initialize empty model slots.

        Args:
            low_gpu_memory_mode: if True, pipelines use sequential CPU offload
                instead of model-level CPU offload.
            weight_dtype: torch dtype used when loading model weights.
        """
        # config dirs — all relative to the current working directory.
        self.basedir = os.getcwd()
        self.config_dir = os.path.join(self.basedir, "config")
        self.diffusion_transformer_dir = os.path.join(self.basedir, "models", "Diffusion_Transformer")
        self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module")
        self.personalized_model_dir = os.path.join(self.basedir, "models", "Personalized_Model")
        # Each Gradio session writes its outputs under a timestamped directory.
        self.savedir = os.path.join(self.basedir, "samples", datetime.now().strftime("Gradio-%Y-%m-%dT%H-%M-%S"))
        self.savedir_sample = os.path.join(self.savedir, "sample")
        self.model_type = "Inpaint"
        os.makedirs(self.savedir, exist_ok=True)

        self.diffusion_transformer_list = []
        self.motion_module_list = []
        self.personalized_model_list = []

        # Populate the dropdown candidate lists from disk.
        self.refresh_diffusion_transformer()
        self.refresh_motion_module()
        self.refresh_personalized_model()

        # config models — loaded lazily via update_diffusion_transformer().
        self.tokenizer = None
        self.text_encoder = None
        self.vae = None
        self.transformer = None
        self.pipeline = None
        self.motion_module_path = "none"
        self.base_model_path = "none"
        self.lora_model_path = "none"
        self.low_gpu_memory_mode = low_gpu_memory_mode

        self.weight_dtype = weight_dtype
96
+
97
    def refresh_diffusion_transformer(self):
        """Rescan the Diffusion_Transformer dir; candidates are subdirectories."""
        self.diffusion_transformer_list = sorted(glob(os.path.join(self.diffusion_transformer_dir, "*/")))
99
+
100
    def refresh_motion_module(self):
        """Rescan the Motion_Module dir; candidates are .safetensors basenames."""
        motion_module_list = sorted(glob(os.path.join(self.motion_module_dir, "*.safetensors")))
        self.motion_module_list = [os.path.basename(p) for p in motion_module_list]
103
+
104
    def refresh_personalized_model(self):
        """Rescan the Personalized_Model dir; candidates are .safetensors basenames."""
        personalized_model_list = sorted(glob(os.path.join(self.personalized_model_dir, "*.safetensors")))
        self.personalized_model_list = [os.path.basename(p) for p in personalized_model_list]
107
+
108
    def update_model_type(self, model_type):
        """Record the selected pipeline family ("Inpaint" or "Control")."""
        self.model_type = model_type
110
+
111
    def update_diffusion_transformer(self, diffusion_transformer_dropdown):
        """Load VAE + transformer from the selected pretrained directory and
        build the matching pipeline.

        Returns a no-op ``gr.update()`` so the method can be wired directly as
        a Gradio event handler.
        """
        print("Update diffusion transformer")
        if diffusion_transformer_dropdown == "none":
            return gr.update()
        # NOTE(review): this resolves to the locally defined
        # AutoencoderKLCogVideoX (the later import shadows the diffusers one) —
        # confirm that shadowing is intentional.
        self.vae = AutoencoderKLCogVideoX.from_pretrained(
            diffusion_transformer_dropdown,
            subfolder="vae",
        ).to(self.weight_dtype)

        # Get Transformer
        self.transformer = CogVideoXTransformer3DModel.from_pretrained_2d(
            diffusion_transformer_dropdown,
            subfolder="transformer",
            low_cpu_mem_usage=True,
        ).to(self.weight_dtype)

        # Get pipeline
        if self.model_type == "Inpaint":
            # NOTE(review): extra transformer input channels beyond the VAE's
            # latent channels presumably indicate an inpaint checkpoint that
            # takes mask/latent conditioning — confirm against the checkpoints.
            if self.transformer.config.in_channels != self.vae.config.latent_channels:
                self.pipeline = CogVideoX_Fun_Pipeline_Inpaint.from_pretrained(
                    diffusion_transformer_dropdown,
                    vae=self.vae,
                    transformer=self.transformer,
                    scheduler=scheduler_dict["Euler"].from_pretrained(diffusion_transformer_dropdown, subfolder="scheduler"),
                    torch_dtype=self.weight_dtype
                )
            else:
                self.pipeline = CogVideoX_Fun_Pipeline.from_pretrained(
                    diffusion_transformer_dropdown,
                    vae=self.vae,
                    transformer=self.transformer,
                    scheduler=scheduler_dict["Euler"].from_pretrained(diffusion_transformer_dropdown, subfolder="scheduler"),
                    torch_dtype=self.weight_dtype
                )
        else:
            self.pipeline = CogVideoX_Fun_Pipeline_Control.from_pretrained(
                diffusion_transformer_dropdown,
                vae=self.vae,
                transformer=self.transformer,
                scheduler=scheduler_dict["Euler"].from_pretrained(diffusion_transformer_dropdown, subfolder="scheduler"),
                torch_dtype=self.weight_dtype
            )

        # Trade speed for VRAM according to the configured offload mode.
        if self.low_gpu_memory_mode:
            self.pipeline.enable_sequential_cpu_offload()
        else:
            self.pipeline.enable_model_cpu_offload()
        print("Update diffusion transformer done")
        return gr.update()
160
+
161
    def update_base_model(self, base_model_dropdown):
        """Overlay a personalized base checkpoint (safetensors) onto the
        currently loaded transformer.

        Requires a transformer to be loaded first; returns a ``gr.update()``
        suitable as a Gradio event-handler result.
        """
        self.base_model_path = base_model_dropdown
        print("Update base model")
        if base_model_dropdown == "none":
            return gr.update()
        if self.transformer is None:
            gr.Info(f"Please select a pretrained model path.")
            return gr.update(value=None)
        else:
            base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown)
            base_model_state_dict = {}
            # Stream tensors from the safetensors file on CPU, one key at a time.
            with safe_open(base_model_dropdown, framework="pt", device="cpu") as f:
                for key in f.keys():
                    base_model_state_dict[key] = f.get_tensor(key)
            # strict=False: the personalized checkpoint may cover only a subset
            # of the transformer's weights.
            self.transformer.load_state_dict(base_model_state_dict, strict=False)
            print("Update base done")
            return gr.update()
178
+
179
    def update_lora_model(self, lora_model_dropdown):
        """Record the selected LoRA path; the weights are merged lazily at
        generation time (see merge_lora in generate)."""
        print("Update lora model")
        if lora_model_dropdown == "none":
            self.lora_model_path = "none"
            return gr.update()
        lora_model_dropdown = os.path.join(self.personalized_model_dir, lora_model_dropdown)
        self.lora_model_path = lora_model_dropdown
        return gr.update()
187
+
188
+ def generate(
189
+ self,
190
+ diffusion_transformer_dropdown,
191
+ base_model_dropdown,
192
+ lora_model_dropdown,
193
+ lora_alpha_slider,
194
+ prompt_textbox,
195
+ negative_prompt_textbox,
196
+ sampler_dropdown,
197
+ sample_step_slider,
198
+ resize_method,
199
+ width_slider,
200
+ height_slider,
201
+ base_resolution,
202
+ generation_method,
203
+ length_slider,
204
+ overlap_video_length,
205
+ partial_video_length,
206
+ cfg_scale_slider,
207
+ start_image,
208
+ end_image,
209
+ validation_video,
210
+ validation_video_mask,
211
+ control_video,
212
+ denoise_strength,
213
+ seed_textbox,
214
+ is_api = False,
215
+ ):
216
+ gc.collect()
217
+ torch.cuda.empty_cache()
218
+ torch.cuda.ipc_collect()
219
+
220
+ if self.transformer is None:
221
+ raise gr.Error(f"Please select a pretrained model path.")
222
+
223
+ if self.base_model_path != base_model_dropdown:
224
+ self.update_base_model(base_model_dropdown)
225
+
226
+ if self.lora_model_path != lora_model_dropdown:
227
+ print("Update lora model")
228
+ self.update_lora_model(lora_model_dropdown)
229
+
230
+ if control_video is not None and self.model_type == "Inpaint":
231
+ if is_api:
232
+ return "", f"If specifying the control video, please set the model_type == \"Control\". "
233
+ else:
234
+ raise gr.Error(f"If specifying the control video, please set the model_type == \"Control\". ")
235
+
236
+ if control_video is None and self.model_type == "Control":
237
+ if is_api:
238
+ return "", f"If set the model_type == \"Control\", please specifying the control video. "
239
+ else:
240
+ raise gr.Error(f"If set the model_type == \"Control\", please specifying the control video. ")
241
+
242
+ if resize_method == "Resize according to Reference":
243
+ if start_image is None and validation_video is None and control_video is None:
244
+ if is_api:
245
+ return "", f"Please upload an image when using \"Resize according to Reference\"."
246
+ else:
247
+ raise gr.Error(f"Please upload an image when using \"Resize according to Reference\".")
248
+
249
+ aspect_ratio_sample_size = {key : [x / 512 * base_resolution for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()}
250
+ if self.model_type == "Inpaint":
251
+ if validation_video is not None:
252
+ original_width, original_height = Image.fromarray(cv2.VideoCapture(validation_video).read()[1]).size
253
+ else:
254
+ original_width, original_height = start_image[0].size if type(start_image) is list else Image.open(start_image).size
255
+ else:
256
+ original_width, original_height = Image.fromarray(cv2.VideoCapture(control_video).read()[1]).size
257
+ closest_size, closest_ratio = get_closest_ratio(original_height, original_width, ratios=aspect_ratio_sample_size)
258
+ height_slider, width_slider = [int(x / 16) * 16 for x in closest_size]
259
+
260
+ if self.transformer.config.in_channels == self.vae.config.latent_channels and start_image is not None:
261
+ if is_api:
262
+ return "", f"Please select an image to video pretrained model while using image to video."
263
+ else:
264
+ raise gr.Error(f"Please select an image to video pretrained model while using image to video.")
265
+
266
+ if self.transformer.config.in_channels == self.vae.config.latent_channels and generation_method == "Long Video Generation":
267
+ if is_api:
268
+ return "", f"Please select an image to video pretrained model while using long video generation."
269
+ else:
270
+ raise gr.Error(f"Please select an image to video pretrained model while using long video generation.")
271
+
272
+ if start_image is None and end_image is not None:
273
+ if is_api:
274
+ return "", f"If specifying the ending image of the video, please specify a starting image of the video."
275
+ else:
276
+ raise gr.Error(f"If specifying the ending image of the video, please specify a starting image of the video.")
277
+
278
+ is_image = True if generation_method == "Image Generation" else False
279
+
280
+ self.pipeline.scheduler = scheduler_dict[sampler_dropdown].from_config(self.pipeline.scheduler.config)
281
+ if self.lora_model_path != "none":
282
+ # lora part
283
+ self.pipeline = merge_lora(self.pipeline, self.lora_model_path, multiplier=lora_alpha_slider)
284
+
285
+ if int(seed_textbox) != -1 and seed_textbox != "": torch.manual_seed(int(seed_textbox))
286
+ else: seed_textbox = np.random.randint(0, 1e10)
287
+ generator = torch.Generator(device="cuda").manual_seed(int(seed_textbox))
288
+
289
+ try:
290
+ if self.model_type == "Inpaint":
291
+ if self.transformer.config.in_channels != self.vae.config.latent_channels:
292
+ if generation_method == "Long Video Generation":
293
+ if validation_video is not None:
294
+ raise gr.Error(f"Video to Video is not Support Long Video Generation now.")
295
+ init_frames = 0
296
+ last_frames = init_frames + partial_video_length
297
+ while init_frames < length_slider:
298
+ if last_frames >= length_slider:
299
+ _partial_video_length = length_slider - init_frames
300
+ _partial_video_length = int((_partial_video_length - 1) // self.vae.config.temporal_compression_ratio * self.vae.config.temporal_compression_ratio) + 1
301
+
302
+ if _partial_video_length <= 0:
303
+ break
304
+ else:
305
+ _partial_video_length = partial_video_length
306
+
307
+ if last_frames >= length_slider:
308
+ input_video, input_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=_partial_video_length, sample_size=(height_slider, width_slider))
309
+ else:
310
+ input_video, input_video_mask, clip_image = get_image_to_video_latent(start_image, None, video_length=_partial_video_length, sample_size=(height_slider, width_slider))
311
+
312
+ with torch.no_grad():
313
+ sample = self.pipeline(
314
+ prompt_textbox,
315
+ negative_prompt = negative_prompt_textbox,
316
+ num_inference_steps = sample_step_slider,
317
+ guidance_scale = cfg_scale_slider,
318
+ width = width_slider,
319
+ height = height_slider,
320
+ num_frames = _partial_video_length,
321
+ generator = generator,
322
+
323
+ video = input_video,
324
+ mask_video = input_video_mask,
325
+ strength = 1,
326
+ ).videos
327
+
328
+ if init_frames != 0:
329
+ mix_ratio = torch.from_numpy(
330
+ np.array([float(_index) / float(overlap_video_length) for _index in range(overlap_video_length)], np.float32)
331
+ ).unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
332
+
333
+ new_sample[:, :, -overlap_video_length:] = new_sample[:, :, -overlap_video_length:] * (1 - mix_ratio) + \
334
+ sample[:, :, :overlap_video_length] * mix_ratio
335
+ new_sample = torch.cat([new_sample, sample[:, :, overlap_video_length:]], dim = 2)
336
+
337
+ sample = new_sample
338
+ else:
339
+ new_sample = sample
340
+
341
+ if last_frames >= length_slider:
342
+ break
343
+
344
+ start_image = [
345
+ Image.fromarray(
346
+ (sample[0, :, _index].transpose(0, 1).transpose(1, 2) * 255).numpy().astype(np.uint8)
347
+ ) for _index in range(-overlap_video_length, 0)
348
+ ]
349
+
350
+ init_frames = init_frames + _partial_video_length - overlap_video_length
351
+ last_frames = init_frames + _partial_video_length
352
+ else:
353
+ if validation_video is not None:
354
+ input_video, input_video_mask, clip_image = get_video_to_video_latent(validation_video, length_slider if not is_image else 1, sample_size=(height_slider, width_slider), validation_video_mask=validation_video_mask, fps=8)
355
+ strength = denoise_strength
356
+ else:
357
+ input_video, input_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, length_slider if not is_image else 1, sample_size=(height_slider, width_slider))
358
+ strength = 1
359
+
360
+ sample = self.pipeline(
361
+ prompt_textbox,
362
+ negative_prompt = negative_prompt_textbox,
363
+ num_inference_steps = sample_step_slider,
364
+ guidance_scale = cfg_scale_slider,
365
+ width = width_slider,
366
+ height = height_slider,
367
+ num_frames = length_slider if not is_image else 1,
368
+ generator = generator,
369
+
370
+ video = input_video,
371
+ mask_video = input_video_mask,
372
+ strength = strength,
373
+ ).videos
374
+ else:
375
+ sample = self.pipeline(
376
+ prompt_textbox,
377
+ negative_prompt = negative_prompt_textbox,
378
+ num_inference_steps = sample_step_slider,
379
+ guidance_scale = cfg_scale_slider,
380
+ width = width_slider,
381
+ height = height_slider,
382
+ num_frames = length_slider if not is_image else 1,
383
+ generator = generator
384
+ ).videos
385
+ else:
386
+ input_video, input_video_mask, clip_image = get_video_to_video_latent(control_video, length_slider if not is_image else 1, sample_size=(height_slider, width_slider), fps=8)
387
+
388
+ sample = self.pipeline(
389
+ prompt_textbox,
390
+ negative_prompt = negative_prompt_textbox,
391
+ num_inference_steps = sample_step_slider,
392
+ guidance_scale = cfg_scale_slider,
393
+ width = width_slider,
394
+ height = height_slider,
395
+ num_frames = length_slider if not is_image else 1,
396
+ generator = generator,
397
+
398
+ control_video = input_video,
399
+ ).videos
400
+ except Exception as e:
401
+ gc.collect()
402
+ torch.cuda.empty_cache()
403
+ torch.cuda.ipc_collect()
404
+ if self.lora_model_path != "none":
405
+ self.pipeline = unmerge_lora(self.pipeline, self.lora_model_path, multiplier=lora_alpha_slider)
406
+ if is_api:
407
+ return "", f"Error. error information is {str(e)}"
408
+ else:
409
+ return gr.update(), gr.update(), f"Error. error information is {str(e)}"
410
+
411
+ gc.collect()
412
+ torch.cuda.empty_cache()
413
+ torch.cuda.ipc_collect()
414
+
415
+ # lora part
416
+ if self.lora_model_path != "none":
417
+ self.pipeline = unmerge_lora(self.pipeline, self.lora_model_path, multiplier=lora_alpha_slider)
418
+
419
+ sample_config = {
420
+ "prompt": prompt_textbox,
421
+ "n_prompt": negative_prompt_textbox,
422
+ "sampler": sampler_dropdown,
423
+ "num_inference_steps": sample_step_slider,
424
+ "guidance_scale": cfg_scale_slider,
425
+ "width": width_slider,
426
+ "height": height_slider,
427
+ "video_length": length_slider,
428
+ "seed_textbox": seed_textbox
429
+ }
430
+ json_str = json.dumps(sample_config, indent=4)
431
+ with open(os.path.join(self.savedir, "logs.json"), "a") as f:
432
+ f.write(json_str)
433
+ f.write("\n\n")
434
+
435
+ if not os.path.exists(self.savedir_sample):
436
+ os.makedirs(self.savedir_sample, exist_ok=True)
437
+ index = len([path for path in os.listdir(self.savedir_sample)]) + 1
438
+ prefix = str(index).zfill(3)
439
+
440
+ gc.collect()
441
+ torch.cuda.empty_cache()
442
+ torch.cuda.ipc_collect()
443
+ if is_image or length_slider == 1:
444
+ save_sample_path = os.path.join(self.savedir_sample, prefix + f".png")
445
+
446
+ image = sample[0, :, 0]
447
+ image = image.transpose(0, 1).transpose(1, 2)
448
+ image = (image * 255).numpy().astype(np.uint8)
449
+ image = Image.fromarray(image)
450
+ image.save(save_sample_path)
451
+
452
+ if is_api:
453
+ return save_sample_path, "Success"
454
+ else:
455
+ if gradio_version_is_above_4:
456
+ return gr.Image(value=save_sample_path, visible=True), gr.Video(value=None, visible=False), "Success"
457
+ else:
458
+ return gr.Image.update(value=save_sample_path, visible=True), gr.Video.update(value=None, visible=False), "Success"
459
+ else:
460
+ save_sample_path = os.path.join(self.savedir_sample, prefix + f".mp4")
461
+ save_videos_grid(sample, save_sample_path, fps=8)
462
+
463
+ if is_api:
464
+ return save_sample_path, "Success"
465
+ else:
466
+ if gradio_version_is_above_4:
467
+ return gr.Image(visible=False, value=None), gr.Video(value=save_sample_path, visible=True), "Success"
468
+ else:
469
+ return gr.Image.update(visible=False, value=None), gr.Video.update(value=save_sample_path, visible=True), "Success"
470
+
471
+
472
def ui(low_gpu_memory_mode, weight_dtype):
    """Build the local Gradio demo for CogVideoX-Fun.

    Constructs a ``CogVideoX_Fun_Controller`` (which loads/selects models) and
    wires up the full Blocks layout: model selection, generation configs,
    source inputs (text/image/video/control), and result panes.

    Args:
        low_gpu_memory_mode: forwarded to the controller; presumably enables
            sequential CPU offload — TODO confirm against the controller class.
        weight_dtype: torch dtype used when loading model weights.

    Returns:
        (demo, controller): the ``gr.Blocks`` app and the backing controller.
    """
    controller = CogVideoX_Fun_Controller(low_gpu_memory_mode, weight_dtype)

    with gr.Blocks(css=css) as demo:
        gr.Markdown(
            """
            # CogVideoX-Fun:

            A CogVideoX with more flexible generation conditions, capable of producing videos of different resolutions, around 6 seconds, and fps 8 (frames 1 to 49), as well as image generated videos.

            [Github](https://github.com/aigc-apps/CogVideoX-Fun/)
            """
        )
        with gr.Column(variant="panel"):
            gr.Markdown(
                """
                ### 1. CogVideoX-Fun Model Type (CogVideoX-Fun模型的种类,正常模型还是控制模型).
                """
            )
            with gr.Row():
                # "Inpaint" = normal generation models; "Control" = control-signal models.
                model_type = gr.Dropdown(
                    label="The model type of CogVideoX-Fun (CogVideoX-Fun模型的种类,正常模型还是控制模型)",
                    choices=["Inpaint", "Control"],
                    value="Inpaint",
                    interactive=True,
                )

            gr.Markdown(
                """
                ### 2. Model checkpoints (模型路径).
                """
            )
            with gr.Row():
                diffusion_transformer_dropdown = gr.Dropdown(
                    label="Pretrained Model Path (预训练模型路径)",
                    choices=controller.diffusion_transformer_list,
                    value="none",
                    interactive=True,
                )
                # Selecting a model triggers a (possibly slow) load in the controller;
                # the dropdown itself is echoed back as the output.
                diffusion_transformer_dropdown.change(
                    fn=controller.update_diffusion_transformer,
                    inputs=[diffusion_transformer_dropdown],
                    outputs=[diffusion_transformer_dropdown]
                )

                diffusion_transformer_refresh_button = gr.Button(value="\U0001F503", elem_classes="toolbutton")
                def refresh_diffusion_transformer():
                    # Re-scan disk for available transformer checkpoints.
                    controller.refresh_diffusion_transformer()
                    return gr.update(choices=controller.diffusion_transformer_list)
                diffusion_transformer_refresh_button.click(fn=refresh_diffusion_transformer, inputs=[], outputs=[diffusion_transformer_dropdown])

            with gr.Row():
                base_model_dropdown = gr.Dropdown(
                    label="Select base Dreambooth model (选择基模型[非必需])",
                    choices=controller.personalized_model_list,
                    value="none",
                    interactive=True,
                )

                lora_model_dropdown = gr.Dropdown(
                    label="Select LoRA model (选择LoRA模型[非必需])",
                    choices=["none"] + controller.personalized_model_list,
                    value="none",
                    interactive=True,
                )

                lora_alpha_slider = gr.Slider(label="LoRA alpha (LoRA权重)", value=0.55, minimum=0, maximum=2, interactive=True)

                personalized_refresh_button = gr.Button(value="\U0001F503", elem_classes="toolbutton")
                def update_personalized_model():
                    # Refresh both dropdowns from the personalized-model folder.
                    controller.refresh_personalized_model()
                    return [
                        gr.update(choices=controller.personalized_model_list),
                        gr.update(choices=["none"] + controller.personalized_model_list)
                    ]
                personalized_refresh_button.click(fn=update_personalized_model, inputs=[], outputs=[base_model_dropdown, lora_model_dropdown])

        with gr.Column(variant="panel"):
            gr.Markdown(
                """
                ### 3. Configs for Generation (生成参数配置).
                """
            )

            prompt_textbox = gr.Textbox(label="Prompt (正向提示词)", lines=2, value="A young woman with beautiful and clear eyes and blonde hair standing and white dress in a forest wearing a crown. She seems to be lost in thought, and the camera focuses on her face. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.")
            negative_prompt_textbox = gr.Textbox(label="Negative prompt (负向提示词)", lines=2, value="The video is not of a high quality, it has a low resolution. Watermark present in each frame. The background is solid. Strange body and strange trajectory. Distortion. " )

            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        sampler_dropdown = gr.Dropdown(label="Sampling method (采样器种类)", choices=list(scheduler_dict.keys()), value=list(scheduler_dict.keys())[0])
                        sample_step_slider = gr.Slider(label="Sampling steps (生成步数)", value=50, minimum=10, maximum=100, step=1)

                    # "Generate by" shows explicit W/H sliders; "Resize according to
                    # Reference" derives the size from the uploaded image/video instead.
                    resize_method = gr.Radio(
                        ["Generate by", "Resize according to Reference"],
                        value="Generate by",
                        show_label=False,
                    )
                    width_slider = gr.Slider(label="Width (视频宽度)", value=672, minimum=128, maximum=1344, step=16)
                    height_slider = gr.Slider(label="Height (视频高度)", value=384, minimum=128, maximum=1344, step=16)
                    base_resolution = gr.Radio(label="Base Resolution of Pretrained Models", value=512, choices=[512, 768, 960], visible=False)

                    gr.Markdown(
                        """
                        V1.0 and V1.1 support up to 49 frames of video generation, while V1.5 supports up to 85 frames.
                        (V1.0和V1.1支持最大49帧视频生成,V1.5支持最大85帧视频生成。)
                        """
                    )
                    with gr.Group():
                        generation_method = gr.Radio(
                            ["Video Generation", "Image Generation", "Long Video Generation"],
                            value="Video Generation",
                            show_label=False,
                        )
                        with gr.Row():
                            length_slider = gr.Slider(label="Animation length (视频帧数)", value=49, minimum=1, maximum=85, step=4)
                            # Only shown for "Long Video Generation" (see upload_generation_method).
                            overlap_video_length = gr.Slider(label="Overlap length (视频续写的重叠帧数)", value=4, minimum=1, maximum=4, step=1, visible=False)
                            partial_video_length = gr.Slider(label="Partial video generation length (每个部分的视频生成帧数)", value=25, minimum=5, maximum=85, step=4, visible=False)

                    source_method = gr.Radio(
                        ["Text to Video (文本到视频)", "Image to Video (图片到视频)", "Video to Video (视频到视频)", "Video Control (视频控制)"],
                        value="Text to Video (文本到视频)",
                        show_label=False,
                    )
                    with gr.Column(visible = False) as image_to_video_col:
                        start_image = gr.Image(
                            label="The image at the beginning of the video (图片到视频的开始图片)", show_label=True,
                            elem_id="i2v_start", sources="upload", type="filepath",
                        )

                        template_gallery_path = ["asset/1.png", "asset/2.png", "asset/3.png", "asset/4.png", "asset/5.png"]
                        def select_template(evt: gr.SelectData):
                            # Map the clicked gallery item to its canned prompt.
                            text = {
                                "asset/1.png": "The dog is shaking head. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/2.png": "a sailboat sailing in rough seas with a dramatic sunset. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/3.png": "a beautiful woman with long hair and a dress blowing in the wind. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/4.png": "a man in an astronaut suit playing a guitar. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/5.png": "fireworks display over night city. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                            }[template_gallery_path[evt.index]]
                            return template_gallery_path[evt.index], text

                        template_gallery = gr.Gallery(
                            template_gallery_path,
                            columns=5, rows=1,
                            height=140,
                            allow_preview=False,
                            container=False,
                            label="Template Examples",
                        )
                        template_gallery.select(select_template, None, [start_image, prompt_textbox])

                        with gr.Accordion("The image at the ending of the video (图片到视频的结束图片[非必需, Optional])", open=False):
                            end_image = gr.Image(label="The image at the ending of the video (图片到视频的结束图片[非必需, Optional])", show_label=False, elem_id="i2v_end", sources="upload", type="filepath")

                    with gr.Column(visible = False) as video_to_video_col:
                        with gr.Row():
                            validation_video = gr.Video(
                                label="The video to convert (视频转视频的参考视频)", show_label=True,
                                elem_id="v2v", sources="upload",
                            )
                        with gr.Accordion("The mask of the video to inpaint (视频重新绘制的mask[非必需, Optional])", open=False):
                            gr.Markdown(
                                """
                                - Please set a larger denoise_strength when using validation_video_mask, such as 1.00 instead of 0.70
                                (请设置更大的denoise_strength,当使用validation_video_mask的时候,比如1而不是0.70)
                                """
                            )
                            validation_video_mask = gr.Image(
                                label="The mask of the video to inpaint (视频重新绘制的mask[非必需, Optional])",
                                show_label=False, elem_id="v2v_mask", sources="upload", type="filepath"
                            )
                        denoise_strength = gr.Slider(label="Denoise strength (重绘系数)", value=0.70, minimum=0.10, maximum=1.00, step=0.01)

                    with gr.Column(visible = False) as control_video_col:
                        gr.Markdown(
                            """
                            Demo pose control video can be downloaded here [URL](https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/cogvideox_fun/asset/v1.1/pose.mp4).
                            """
                        )
                        control_video = gr.Video(
                            label="The control video (用于提供控制信号的video)", show_label=True,
                            elem_id="v2v_control", sources="upload",
                        )

                    cfg_scale_slider = gr.Slider(label="CFG Scale (引导系数)", value=6.0, minimum=0, maximum=20)

                    with gr.Row():
                        seed_textbox = gr.Textbox(label="Seed (随机种子)", value=43)
                        seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
                        # Dice button: randomize the seed; API differs between gradio 3.x and 4.x.
                        seed_button.click(
                            fn=lambda: gr.Textbox(value=random.randint(1, 1e8)) if gradio_version_is_above_4 else gr.Textbox.update(value=random.randint(1, 1e8)),
                            inputs=[],
                            outputs=[seed_textbox]
                        )

                    generate_button = gr.Button(value="Generate (生成)", variant='primary')

                with gr.Column():
                    # Image pane is hidden until an image (single-frame) result is produced.
                    result_image = gr.Image(label="Generated Image (生成图片)", interactive=False, visible=False)
                    result_video = gr.Video(label="Generated Animation (生成视频)", interactive=False)
                    infer_progress = gr.Textbox(
                        label="Generation Info (生成信息)",
                        value="No task currently",
                        interactive=False
                    )

            model_type.change(
                fn=controller.update_model_type,
                inputs=[model_type],
                outputs=[]
            )

            def upload_generation_method(generation_method):
                # Toggle frame-length/overlap/partial sliders per generation mode.
                if generation_method == "Video Generation":
                    return [gr.update(visible=True, maximum=85, value=49, interactive=True), gr.update(visible=False), gr.update(visible=False)]
                elif generation_method == "Image Generation":
                    return [gr.update(minimum=1, maximum=1, value=1, interactive=False), gr.update(visible=False), gr.update(visible=False)]
                else:
                    return [gr.update(visible=True, maximum=1344), gr.update(visible=True), gr.update(visible=True)]
            generation_method.change(
                upload_generation_method, generation_method, [length_slider, overlap_video_length, partial_video_length]
            )

            def upload_source_method(source_method):
                # Show exactly one source column and clear inputs of the others.
                if source_method == "Text to Video (文本到视频)":
                    return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None)]
                elif source_method == "Image to Video (图片到视频)":
                    return [gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(), gr.update(), gr.update(value=None), gr.update(value=None), gr.update(value=None)]
                elif source_method == "Video to Video (视频到视频)":
                    return [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(value=None), gr.update(value=None), gr.update(), gr.update(), gr.update(value=None)]
                else:
                    return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update()]
            source_method.change(
                upload_source_method, source_method, [
                    image_to_video_col, video_to_video_col, control_video_col, start_image, end_image,
                    validation_video, validation_video_mask, control_video
                ]
            )

            def upload_resize_method(resize_method):
                # Manual sizing shows W/H sliders; reference sizing shows base_resolution.
                if resize_method == "Generate by":
                    return [gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)]
                else:
                    return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)]
            resize_method.change(
                upload_resize_method, resize_method, [width_slider, height_slider, base_resolution]
            )

            # Main generation entry point; argument order must match
            # CogVideoX_Fun_Controller.generate's positional parameters.
            generate_button.click(
                fn=controller.generate,
                inputs=[
                    diffusion_transformer_dropdown,
                    base_model_dropdown,
                    lora_model_dropdown,
                    lora_alpha_slider,
                    prompt_textbox,
                    negative_prompt_textbox,
                    sampler_dropdown,
                    sample_step_slider,
                    resize_method,
                    width_slider,
                    height_slider,
                    base_resolution,
                    generation_method,
                    length_slider,
                    overlap_video_length,
                    partial_video_length,
                    cfg_scale_slider,
                    start_image,
                    end_image,
                    validation_video,
                    validation_video_mask,
                    control_video,
                    denoise_strength,
                    seed_textbox,
                ],
                outputs=[result_image, result_video, infer_progress]
            )
    return demo, controller
751
+
752
+
753
class CogVideoX_Fun_Controller_Modelscope:
    """Controller for the ModelScope-hosted demo.

    Unlike the local controller, this variant loads a single fixed model
    (``model_name``/``model_type``) at construction time; only the LoRA can be
    swapped afterwards.
    """

    def __init__(self, model_name, model_type, savedir_sample, low_gpu_memory_mode, weight_dtype):
        """Load VAE, transformer and the matching pipeline for one model.

        Args:
            model_name: pretrained model path/id passed to ``from_pretrained``.
            model_type: "Inpaint" (normal) or "Control" (control-signal) pipeline.
            savedir_sample: directory where generated samples are written.
            low_gpu_memory_mode: if truthy, use sequential CPU offload instead
                of whole-model offload.
            weight_dtype: torch dtype the VAE/transformer weights are cast to.
        """
        # Basic dir
        self.basedir = os.getcwd()
        self.personalized_model_dir = os.path.join(self.basedir, "models", "Personalized_Model")
        self.lora_model_path = "none"
        self.savedir_sample = savedir_sample
        self.refresh_personalized_model()
        os.makedirs(self.savedir_sample, exist_ok=True)

        # model path
        self.model_type = model_type
        self.weight_dtype = weight_dtype

        self.vae = AutoencoderKLCogVideoX.from_pretrained(
            model_name,
            subfolder="vae",
        ).to(self.weight_dtype)

        # Get Transformer
        self.transformer = CogVideoXTransformer3DModel.from_pretrained_2d(
            model_name,
            subfolder="transformer",
            low_cpu_mem_usage=True,
        ).to(self.weight_dtype)

        # Get pipeline
        if model_type == "Inpaint":
            # in_channels != latent_channels indicates an inpaint-capable
            # (image-to-video) transformer; otherwise plain text-to-video.
            if self.transformer.config.in_channels != self.vae.config.latent_channels:
                self.pipeline = CogVideoX_Fun_Pipeline_Inpaint.from_pretrained(
                    model_name,
                    vae=self.vae,
                    transformer=self.transformer,
                    scheduler=scheduler_dict["Euler"].from_pretrained(model_name, subfolder="scheduler"),
                    torch_dtype=self.weight_dtype
                )
            else:
                self.pipeline = CogVideoX_Fun_Pipeline.from_pretrained(
                    model_name,
                    vae=self.vae,
                    transformer=self.transformer,
                    scheduler=scheduler_dict["Euler"].from_pretrained(model_name, subfolder="scheduler"),
                    torch_dtype=self.weight_dtype
                )
        else:
            self.pipeline = CogVideoX_Fun_Pipeline_Control.from_pretrained(
                model_name,
                vae=self.vae,
                transformer=self.transformer,
                scheduler=scheduler_dict["Euler"].from_pretrained(model_name, subfolder="scheduler"),
                torch_dtype=self.weight_dtype
            )

        if low_gpu_memory_mode:
            self.pipeline.enable_sequential_cpu_offload()
        else:
            self.pipeline.enable_model_cpu_offload()
        print("Update diffusion transformer done")

    def refresh_personalized_model(self):
        """Re-scan the personalized-model folder for .safetensors files."""
        personalized_model_list = sorted(glob(os.path.join(self.personalized_model_dir, "*.safetensors")))
        self.personalized_model_list = [os.path.basename(p) for p in personalized_model_list]

    def update_lora_model(self, lora_model_dropdown):
        """Record the selected LoRA path ("none" clears it).

        Note: this only stores the path; the actual merge into the pipeline
        happens in ``generate`` via ``merge_lora``.
        """
        print("Update lora model")
        if lora_model_dropdown == "none":
            self.lora_model_path = "none"
            return gr.update()
        lora_model_dropdown = os.path.join(self.personalized_model_dir, lora_model_dropdown)
        self.lora_model_path = lora_model_dropdown
        return gr.update()

    def generate(
        self,
        diffusion_transformer_dropdown,
        base_model_dropdown,
        lora_model_dropdown,
        lora_alpha_slider,
        prompt_textbox,
        negative_prompt_textbox,
        sampler_dropdown,
        sample_step_slider,
        resize_method,
        width_slider,
        height_slider,
        base_resolution,
        generation_method,
        length_slider,
        overlap_video_length,
        partial_video_length,
        cfg_scale_slider,
        start_image,
        end_image,
        validation_video,
        validation_video_mask,
        control_video,
        denoise_strength,
        seed_textbox,
        is_api = False,
    ):
        """Run one generation and save the result.

        Validates the inputs, optionally merges the selected LoRA, dispatches
        to the inpaint/t2v/control pipeline, then saves a .png (single frame)
        or .mp4 (video) under ``self.savedir_sample``.

        Returns:
            On success/failure with ``is_api=True``: ``(path_or_empty, message)``.
            In UI mode: three values (image update, video update, info string);
            validation failures raise ``gr.Error`` instead.
        """
        # Free as much GPU memory as possible before a new run.
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

        if self.transformer is None:
            raise gr.Error(f"Please select a pretrained model path.")

        if self.lora_model_path != lora_model_dropdown:
            print("Update lora model")
            self.update_lora_model(lora_model_dropdown)

        # --- Input validation: model type vs. provided media -------------
        if control_video is not None and self.model_type == "Inpaint":
            if is_api:
                return "", f"If specifying the control video, please set the model_type == \"Control\". "
            else:
                raise gr.Error(f"If specifying the control video, please set the model_type == \"Control\". ")

        if control_video is None and self.model_type == "Control":
            if is_api:
                return "", f"If set the model_type == \"Control\", please specifying the control video. "
            else:
                raise gr.Error(f"If set the model_type == \"Control\", please specifying the control video. ")

        if resize_method == "Resize according to Reference":
            if start_image is None and validation_video is None and control_video is None:
                if is_api:
                    return "", f"Please upload an image when using \"Resize according to Reference\"."
                else:
                    raise gr.Error(f"Please upload an image when using \"Resize according to Reference\".")

            # Derive output W/H from the reference media: pick the closest
            # aspect-ratio bucket (scaled from the 512 table) and snap to /16.
            aspect_ratio_sample_size = {key : [x / 512 * base_resolution for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()}
            if self.model_type == "Inpaint":
                if validation_video is not None:
                    # NOTE(review): the cv2.VideoCapture opened here is never
                    # released — confirm whether that matters for this app.
                    original_width, original_height = Image.fromarray(cv2.VideoCapture(validation_video).read()[1]).size
                else:
                    original_width, original_height = start_image[0].size if type(start_image) is list else Image.open(start_image).size
            else:
                original_width, original_height = Image.fromarray(cv2.VideoCapture(control_video).read()[1]).size
            closest_size, closest_ratio = get_closest_ratio(original_height, original_width, ratios=aspect_ratio_sample_size)
            height_slider, width_slider = [int(x / 16) * 16 for x in closest_size]

        # in_channels == latent_channels means a text-to-video-only model,
        # which cannot consume a start image.
        if self.transformer.config.in_channels == self.vae.config.latent_channels and start_image is not None:
            if is_api:
                return "", f"Please select an image to video pretrained model while using image to video."
            else:
                raise gr.Error(f"Please select an image to video pretrained model while using image to video.")

        if start_image is None and end_image is not None:
            if is_api:
                return "", f"If specifying the ending image of the video, please specify a starting image of the video."
            else:
                raise gr.Error(f"If specifying the ending image of the video, please specify a starting image of the video.")

        is_image = True if generation_method == "Image Generation" else False

        # Swap in the user-selected scheduler, keeping the existing config.
        self.pipeline.scheduler = scheduler_dict[sampler_dropdown].from_config(self.pipeline.scheduler.config)
        if self.lora_model_path != "none":
            # lora part
            self.pipeline = merge_lora(self.pipeline, self.lora_model_path, multiplier=lora_alpha_slider)

        # Seed handling: -1 or empty -> random seed.
        # NOTE(review): int(seed_textbox) is evaluated before the != "" check,
        # so an empty string would raise ValueError here — confirm intended.
        if int(seed_textbox) != -1 and seed_textbox != "": torch.manual_seed(int(seed_textbox))
        else: seed_textbox = np.random.randint(0, 1e10)
        generator = torch.Generator(device="cuda").manual_seed(int(seed_textbox))

        try:
            if self.model_type == "Inpaint":
                if self.transformer.config.in_channels != self.vae.config.latent_channels:
                    # Inpaint-capable model: video-to-video or image-to-video.
                    if validation_video is not None:
                        input_video, input_video_mask, clip_image = get_video_to_video_latent(validation_video, length_slider if not is_image else 1, sample_size=(height_slider, width_slider), validation_video_mask=validation_video_mask, fps=8)
                        strength = denoise_strength
                    else:
                        input_video, input_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, length_slider if not is_image else 1, sample_size=(height_slider, width_slider))
                        strength = 1

                    sample = self.pipeline(
                        prompt_textbox,
                        negative_prompt = negative_prompt_textbox,
                        num_inference_steps = sample_step_slider,
                        guidance_scale = cfg_scale_slider,
                        width = width_slider,
                        height = height_slider,
                        num_frames = length_slider if not is_image else 1,
                        generator = generator,

                        video = input_video,
                        mask_video = input_video_mask,
                        strength = strength,
                    ).videos
                else:
                    # Plain text-to-video model.
                    sample = self.pipeline(
                        prompt_textbox,
                        negative_prompt = negative_prompt_textbox,
                        num_inference_steps = sample_step_slider,
                        guidance_scale = cfg_scale_slider,
                        width = width_slider,
                        height = height_slider,
                        num_frames = length_slider if not is_image else 1,
                        generator = generator
                    ).videos
            else:
                # Control model: condition on the uploaded control video.
                input_video, input_video_mask, clip_image = get_video_to_video_latent(control_video, length_slider if not is_image else 1, sample_size=(height_slider, width_slider), fps=8)

                sample = self.pipeline(
                    prompt_textbox,
                    negative_prompt = negative_prompt_textbox,
                    num_inference_steps = sample_step_slider,
                    guidance_scale = cfg_scale_slider,
                    width = width_slider,
                    height = height_slider,
                    num_frames = length_slider if not is_image else 1,
                    generator = generator,

                    control_video = input_video,
                ).videos
        except Exception as e:
            # Best-effort cleanup (including unmerging the LoRA) so the app
            # stays usable; the error text is surfaced instead of re-raised.
            gc.collect()
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            if self.lora_model_path != "none":
                self.pipeline = unmerge_lora(self.pipeline, self.lora_model_path, multiplier=lora_alpha_slider)
            if is_api:
                return "", f"Error. error information is {str(e)}"
            else:
                return gr.update(), gr.update(), f"Error. error information is {str(e)}"

        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

        # lora part
        if self.lora_model_path != "none":
            self.pipeline = unmerge_lora(self.pipeline, self.lora_model_path, multiplier=lora_alpha_slider)

        # Next zero-padded sample index, derived from the directory file count.
        if not os.path.exists(self.savedir_sample):
            os.makedirs(self.savedir_sample, exist_ok=True)
        index = len([path for path in os.listdir(self.savedir_sample)]) + 1
        prefix = str(index).zfill(3)

        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        if is_image or length_slider == 1:
            save_sample_path = os.path.join(self.savedir_sample, prefix + f".png")

            # sample is indexed [batch, channel, frame, H, W]; take frame 0
            # and move channels last before converting to uint8.
            image = sample[0, :, 0]
            image = image.transpose(0, 1).transpose(1, 2)
            image = (image * 255).numpy().astype(np.uint8)
            image = Image.fromarray(image)
            image.save(save_sample_path)
            if is_api:
                return save_sample_path, "Success"
            else:
                if gradio_version_is_above_4:
                    return gr.Image(value=save_sample_path, visible=True), gr.Video(value=None, visible=False), "Success"
                else:
                    return gr.Image.update(value=save_sample_path, visible=True), gr.Video.update(value=None, visible=False), "Success"
        else:
            save_sample_path = os.path.join(self.savedir_sample, prefix + f".mp4")
            save_videos_grid(sample, save_sample_path, fps=8)
            if is_api:
                return save_sample_path, "Success"
            else:
                if gradio_version_is_above_4:
                    return gr.Image(visible=False, value=None), gr.Video(value=save_sample_path, visible=True), "Success"
                else:
                    return gr.Image.update(visible=False, value=None), gr.Video.update(value=save_sample_path, visible=True), "Success"
1022
+
1023
+
1024
def ui_modelscope(model_name, model_type, savedir_sample, low_gpu_memory_mode, weight_dtype):
    """Build the Gradio UI for the Modelscope-hosted demo.

    The model/type dropdowns are fixed (non-interactive) to the single
    ``model_name`` / ``model_type`` passed in; all generation work is
    delegated to a ``CogVideoX_Fun_Controller_Modelscope`` instance.

    Returns:
        (demo, controller): the ``gr.Blocks`` app and the controller backing it.
    """
    controller = CogVideoX_Fun_Controller_Modelscope(model_name, model_type, savedir_sample, low_gpu_memory_mode, weight_dtype)

    with gr.Blocks(css=css) as demo:
        gr.Markdown(
            """
            # CogVideoX-Fun

            A CogVideoX with more flexible generation conditions, capable of producing videos of different resolutions, around 6 seconds, and fps 8 (frames 1 to 49), as well as image generated videos.

            [Github](https://github.com/aigc-apps/CogVideoX-Fun/)
            """
        )
        with gr.Column(variant="panel"):
            gr.Markdown(
                """
                ### 1. CogVideoX-Fun Model Type (CogVideoX-Fun model type: normal model or control model).
                """
            )
            with gr.Row():
                # Locked to the single deployed model type; shown for information only.
                model_type = gr.Dropdown(
                    label="The model type of CogVideoX-Fun (CogVideoX-Fun模型的种类,正常模型还是控制模型)",
                    choices=[model_type],
                    value=model_type,
                    interactive=False,
                )

            gr.Markdown(
                """
                ### 2. Model checkpoints (model paths).
                """
            )
            with gr.Row():
                diffusion_transformer_dropdown = gr.Dropdown(
                    label="Pretrained Model Path (预训练模型路径)",
                    choices=[model_name],
                    value=model_name,
                    interactive=False,
                )
            with gr.Row():
                # Hidden/disabled here: base-model swapping is not offered on Modelscope.
                base_model_dropdown = gr.Dropdown(
                    label="Select base Dreambooth model (选择基模型[非必需])",
                    choices=["none"],
                    value="none",
                    interactive=False,
                    visible=False
                )
            with gr.Column(visible=False):
                gr.Markdown(
                    """
                    ### Minimalism is an example portrait of Lora, triggered by specific prompt words. More details can be found on [Wiki](https://github.com/aigc-apps/CogVideoX-Fun/wiki/Training-Lora).
                    """
                )
                with gr.Row():
                    lora_model_dropdown = gr.Dropdown(
                        label="Select LoRA model",
                        choices=["none"],
                        value="none",
                        interactive=True,
                    )

                    lora_alpha_slider = gr.Slider(label="LoRA alpha (LoRA权重)", value=0.55, minimum=0, maximum=2, interactive=True)

        with gr.Column(variant="panel"):
            gr.Markdown(
                """
                ### 3. Configs for Generation (generation settings).
                """
            )

            prompt_textbox = gr.Textbox(label="Prompt (正向提示词)", lines=2, value="A young woman with beautiful and clear eyes and blonde hair standing and white dress in a forest wearing a crown. She seems to be lost in thought, and the camera focuses on her face. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.")
            negative_prompt_textbox = gr.Textbox(label="Negative prompt (负向提示词)", lines=2, value="The video is not of a high quality, it has a low resolution. Watermark present in each frame. The background is solid. Strange body and strange trajectory. Distortion. " )

            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        sampler_dropdown = gr.Dropdown(label="Sampling method (采样器种类)", choices=list(scheduler_dict.keys()), value=list(scheduler_dict.keys())[0])
                        # Step count is fixed at 50 for the hosted demo.
                        sample_step_slider = gr.Slider(label="Sampling steps (生成步数)", value=50, minimum=10, maximum=50, step=1, interactive=False)

                    resize_method = gr.Radio(
                        ["Generate by", "Resize according to Reference"],
                        value="Generate by",
                        show_label=False,
                    )
                    width_slider = gr.Slider(label="Width (视频宽度)", value=672, minimum=128, maximum=1280, step=16, interactive=False)
                    height_slider = gr.Slider(label="Height (视频高度)", value=384, minimum=128, maximum=1280, step=16, interactive=False)
                    base_resolution = gr.Radio(label="Base Resolution of Pretrained Models", value=512, choices=[512, 768, 960], interactive=False, visible=False)

                    gr.Markdown(
                        """
                        V1.0 and V1.1 support up to 49 frames of video generation, while V1.5 supports up to 85 frames.
                        (V1.0和V1.1支持最大49帧视频生成,V1.5支持最大85帧视频生成。)
                        """
                    )
                    with gr.Group():
                        generation_method = gr.Radio(
                            ["Video Generation", "Image Generation"],
                            value="Video Generation",
                            show_label=False,
                            visible=True,
                        )
                        length_slider = gr.Slider(label="Animation length (视频帧数)", value=49, minimum=5, maximum=85, step=4)
                        # Long-video continuation controls: kept hidden in this UI.
                        overlap_video_length = gr.Slider(label="Overlap length (视频续写的重叠帧数)", value=4, minimum=1, maximum=4, step=1, visible=False)
                        partial_video_length = gr.Slider(label="Partial video generation length (每个部分的视频生成帧数)", value=25, minimum=5, maximum=85, step=4, visible=False)

                    source_method = gr.Radio(
                        ["Text to Video (文本到视频)", "Image to Video (图片到视频)", "Video to Video (视频到视频)", "Video Control (视频控制)"],
                        value="Text to Video (文本到视频)",
                        show_label=False,
                    )
                    with gr.Column(visible = False) as image_to_video_col:
                        with gr.Row():
                            start_image = gr.Image(label="The image at the beginning of the video (图片到视频的开始图片)", show_label=True, elem_id="i2v_start", sources="upload", type="filepath")

                        template_gallery_path = ["asset/1.png", "asset/2.png", "asset/3.png", "asset/4.png", "asset/5.png"]
                        def select_template(evt: gr.SelectData):
                            # Map the clicked gallery item to its canned prompt; returns
                            # (image_path, prompt_text) to fill start_image and prompt_textbox.
                            text = {
                                "asset/1.png": "The dog is shaking head. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/2.png": "a sailboat sailing in rough seas with a dramatic sunset. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/3.png": "a beautiful woman with long hair and a dress blowing in the wind. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/4.png": "a man in an astronaut suit playing a guitar. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/5.png": "fireworks display over night city. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                            }[template_gallery_path[evt.index]]
                            return template_gallery_path[evt.index], text

                        template_gallery = gr.Gallery(
                            template_gallery_path,
                            columns=5, rows=1,
                            height=140,
                            allow_preview=False,
                            container=False,
                            label="Template Examples",
                        )
                        template_gallery.select(select_template, None, [start_image, prompt_textbox])

                        with gr.Accordion("The image at the ending of the video (图片到视频的结束图片[非必需, Optional])", open=False):
                            end_image = gr.Image(label="The image at the ending of the video (图片到视频的结束图片[非必需, Optional])", show_label=False, elem_id="i2v_end", sources="upload", type="filepath")

                    with gr.Column(visible = False) as video_to_video_col:
                        with gr.Row():
                            validation_video = gr.Video(
                                label="The video to convert (视频转视频的参考视频)", show_label=True,
                                elem_id="v2v", sources="upload",
                            )
                        with gr.Accordion("The mask of the video to inpaint (视频重新绘制的mask[非必需, Optional])", open=False):
                            gr.Markdown(
                                """
                                - Please set a larger denoise_strength when using validation_video_mask, such as 1.00 instead of 0.70
                                (请设置更大的denoise_strength,当使用validation_video_mask的时候,比如1而不是0.70)
                                """
                            )
                            validation_video_mask = gr.Image(
                                label="The mask of the video to inpaint (视频重新绘制的mask[非必需, Optional])",
                                show_label=False, elem_id="v2v_mask", sources="upload", type="filepath"
                            )
                        denoise_strength = gr.Slider(label="Denoise strength (重绘系数)", value=0.70, minimum=0.10, maximum=1.00, step=0.01)

                    with gr.Column(visible = False) as control_video_col:
                        gr.Markdown(
                            """
                            Demo pose control video can be downloaded here [URL](https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/cogvideox_fun/asset/v1.1/pose.mp4).
                            """
                        )
                        control_video = gr.Video(
                            label="The control video (用于提供控制信号的video)", show_label=True,
                            elem_id="v2v_control", sources="upload",
                        )

                    cfg_scale_slider = gr.Slider(label="CFG Scale (引导系数)", value=6.0, minimum=0, maximum=20)

                    with gr.Row():
                        seed_textbox = gr.Textbox(label="Seed (随机种子)", value=43)
                        seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
                        # NOTE(review): random.randint(1, 1e8) passes a float upper bound;
                        # this raises on Python >= 3.12 — should be int(1e8). Confirm target runtime.
                        seed_button.click(
                            fn=lambda: gr.Textbox(value=random.randint(1, 1e8)) if gradio_version_is_above_4 else gr.Textbox.update(value=random.randint(1, 1e8)),
                            inputs=[],
                            outputs=[seed_textbox]
                        )

                    generate_button = gr.Button(value="Generate (生成)", variant='primary')

                with gr.Column():
                    result_image = gr.Image(label="Generated Image (生成图片)", interactive=False, visible=False)
                    result_video = gr.Video(label="Generated Animation (生成视频)", interactive=False)
                    infer_progress = gr.Textbox(
                        label="Generation Info (生成信息)",
                        value="No task currently",
                        interactive=False
                    )

            def upload_generation_method(generation_method):
                # Toggle the frame-length slider between video and single-image mode.
                # NOTE(review): minimum=8 disagrees with the slider definition (minimum=5,
                # step=4; 8+4k never reaches 49) and with ui_eas which uses minimum=5 — verify.
                if generation_method == "Video Generation":
                    return gr.update(visible=True, minimum=8, maximum=85, value=49, interactive=True)
                elif generation_method == "Image Generation":
                    return gr.update(minimum=1, maximum=1, value=1, interactive=False)
            generation_method.change(
                upload_generation_method, generation_method, [length_slider]
            )

            def upload_source_method(source_method):
                # Show only the column matching the chosen source mode and clear the
                # inputs belonging to the other modes.
                # Output order: [image_to_video_col, video_to_video_col, control_video_col,
                #                start_image, end_image, validation_video,
                #                validation_video_mask, control_video]
                if source_method == "Text to Video (文本到视频)":
                    return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None)]
                elif source_method == "Image to Video (图片到视频)":
                    return [gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(), gr.update(), gr.update(value=None), gr.update(value=None), gr.update(value=None)]
                elif source_method == "Video to Video (视频到视频)":
                    return [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(value=None), gr.update(value=None), gr.update(), gr.update(), gr.update(value=None)]
                else:
                    return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update()]
            source_method.change(
                upload_source_method, source_method, [
                    image_to_video_col, video_to_video_col, control_video_col, start_image, end_image,
                    validation_video, validation_video_mask, control_video
                ]
            )

            def upload_resize_method(resize_method):
                # "Generate by" exposes explicit width/height; the other mode exposes
                # the base-resolution radio instead.
                if resize_method == "Generate by":
                    return [gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)]
                else:
                    return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)]
            resize_method.change(
                upload_resize_method, resize_method, [width_slider, height_slider, base_resolution]
            )

            # Argument order here must match controller.generate's signature.
            generate_button.click(
                fn=controller.generate,
                inputs=[
                    diffusion_transformer_dropdown,
                    base_model_dropdown,
                    lora_model_dropdown,
                    lora_alpha_slider,
                    prompt_textbox,
                    negative_prompt_textbox,
                    sampler_dropdown,
                    sample_step_slider,
                    resize_method,
                    width_slider,
                    height_slider,
                    base_resolution,
                    generation_method,
                    length_slider,
                    overlap_video_length,
                    partial_video_length,
                    cfg_scale_slider,
                    start_image,
                    end_image,
                    validation_video,
                    validation_video_mask,
                    control_video,
                    denoise_strength,
                    seed_textbox,
                ],
                outputs=[result_image, result_video, infer_progress]
            )
    return demo, controller
1279
+
1280
+
1281
def post_eas(
    diffusion_transformer_dropdown,
    base_model_dropdown, lora_model_dropdown, lora_alpha_slider,
    prompt_textbox, negative_prompt_textbox,
    sampler_dropdown, sample_step_slider, resize_method, width_slider, height_slider,
    base_resolution, generation_method, length_slider, cfg_scale_slider,
    start_image, end_image, validation_video, validation_video_mask, denoise_strength, seed_textbox,
):
    """Forward one generation request to the EAS inference endpoint.

    Media arguments given as local file paths (``start_image``, ``end_image``,
    ``validation_video``, ``validation_video_mask``) are read from disk and
    base64-encoded into the JSON payload; ``None`` values pass through as-is.
    The endpoint URL and auth token come from the ``EAS_URL`` / ``EAS_TOKEN``
    environment variables.

    Returns:
        dict: the decoded JSON response of ``/cogvideox_fun/infer_forward``.

    Raises:
        requests.RequestException: on connection failure or timeout (300 s).
    """
    def _b64_or_none(path):
        # Read a local file and return its content as base64 text;
        # keep None as None so the payload mirrors "not provided".
        if path is None:
            return None
        with open(path, 'rb') as file:
            return base64.b64encode(file.read()).decode('utf-8')

    start_image = _b64_or_none(start_image)
    end_image = _b64_or_none(end_image)
    validation_video = _b64_or_none(validation_video)
    validation_video_mask = _b64_or_none(validation_video_mask)

    # NOTE(review): diffusion_transformer_dropdown is accepted for signature
    # parity with the UI wiring but is not part of the payload — presumably the
    # server side fixes the transformer; confirm against the endpoint.
    datas = {
        "base_model_path": base_model_dropdown,
        "lora_model_path": lora_model_dropdown,
        "lora_alpha_slider": lora_alpha_slider,
        "prompt_textbox": prompt_textbox,
        "negative_prompt_textbox": negative_prompt_textbox,
        "sampler_dropdown": sampler_dropdown,
        "sample_step_slider": sample_step_slider,
        "resize_method": resize_method,
        "width_slider": width_slider,
        "height_slider": height_slider,
        "base_resolution": base_resolution,
        "generation_method": generation_method,
        "length_slider": length_slider,
        "cfg_scale_slider": cfg_scale_slider,
        "start_image": start_image,
        "end_image": end_image,
        "validation_video": validation_video,
        "validation_video_mask": validation_video_mask,
        "denoise_strength": denoise_strength,
        "seed_textbox": seed_textbox,
    }

    # Session as a context manager so pooled connections are released even if
    # the request raises (the original session was never closed).
    with requests.session() as session:
        session.headers.update({"Authorization": os.environ.get("EAS_TOKEN")})
        response = session.post(
            url=f'{os.environ.get("EAS_URL")}/cogvideox_fun/infer_forward',
            json=datas, timeout=300,
        )
        return response.json()
1343
+
1344
+
1345
class CogVideoX_Fun_Controller_EAS:
    """Client-side controller for a remote EAS deployment.

    Delegates generation to the remote service via ``post_eas`` and saves the
    returned base64-encoded image/video under ``savedir_sample``.
    """

    def __init__(self, model_name, savedir_sample):
        # model_name is kept for interface parity with the local controllers;
        # the remote endpoint selects the model server-side.
        self.savedir_sample = savedir_sample
        os.makedirs(self.savedir_sample, exist_ok=True)

    def generate(
        self,
        diffusion_transformer_dropdown,
        base_model_dropdown,
        lora_model_dropdown,
        lora_alpha_slider,
        prompt_textbox,
        negative_prompt_textbox,
        sampler_dropdown,
        sample_step_slider,
        resize_method,
        width_slider,
        height_slider,
        base_resolution,
        generation_method,
        length_slider,
        cfg_scale_slider,
        start_image,
        end_image,
        validation_video,
        validation_video_mask,
        denoise_strength,
        seed_textbox
    ):
        """Run one remote generation and persist the result.

        Returns a (gr.Image, gr.Video, status-string) triple suitable for the
        Gradio outputs [result_image, result_video, infer_progress].
        """
        is_image = True if generation_method == "Image Generation" else False

        outputs = post_eas(
            diffusion_transformer_dropdown,
            base_model_dropdown, lora_model_dropdown, lora_alpha_slider,
            prompt_textbox, negative_prompt_textbox,
            sampler_dropdown, sample_step_slider, resize_method, width_slider, height_slider,
            base_resolution, generation_method, length_slider, cfg_scale_slider,
            start_image, end_image, validation_video, validation_video_mask, denoise_strength,
            seed_textbox
        )
        # On success the endpoint returns {"base64_encoding": ...}; on failure
        # {"message": ...}. Only the missing key signals failure — narrowed
        # from a bare `except:` which also hid programming errors.
        try:
            base64_encoding = outputs["base64_encoding"]
        except KeyError:
            return gr.Image(visible=False, value=None), gr.Video(None, visible=True), outputs["message"]

        decoded_data = base64.b64decode(base64_encoding)

        if not os.path.exists(self.savedir_sample):
            os.makedirs(self.savedir_sample, exist_ok=True)
        # Next free numeric prefix, zero-padded to three digits (001, 002, ...).
        index = len([path for path in os.listdir(self.savedir_sample)]) + 1
        prefix = str(index).zfill(3)

        if is_image or length_slider == 1:
            save_sample_path = os.path.join(self.savedir_sample, prefix + ".png")
            with open(save_sample_path, "wb") as file:
                file.write(decoded_data)
            if gradio_version_is_above_4:
                return gr.Image(value=save_sample_path, visible=True), gr.Video(value=None, visible=False), "Success"
            else:
                return gr.Image.update(value=save_sample_path, visible=True), gr.Video.update(value=None, visible=False), "Success"
        else:
            save_sample_path = os.path.join(self.savedir_sample, prefix + ".mp4")
            with open(save_sample_path, "wb") as file:
                file.write(decoded_data)
            if gradio_version_is_above_4:
                return gr.Image(visible=False, value=None), gr.Video(value=save_sample_path, visible=True), "Success"
            else:
                return gr.Image.update(visible=False, value=None), gr.Video.update(value=save_sample_path, visible=True), "Success"
1413
+
1414
+
1415
def ui_eas(model_name, savedir_sample):
    """Build the Gradio UI for the EAS-hosted demo.

    Same layout as the Modelscope UI but without the "Video Control" source
    mode; generation is delegated to ``CogVideoX_Fun_Controller_EAS`` which
    forwards requests to the remote endpoint.

    Returns:
        (demo, controller): the ``gr.Blocks`` app and the controller backing it.
    """
    controller = CogVideoX_Fun_Controller_EAS(model_name, savedir_sample)

    with gr.Blocks(css=css) as demo:
        gr.Markdown(
            """
            # CogVideoX-Fun

            A CogVideoX with more flexible generation conditions, capable of producing videos of different resolutions, around 6 seconds, and fps 8 (frames 1 to 49), as well as image generated videos.

            [Github](https://github.com/aigc-apps/CogVideoX-Fun/)
            """
        )
        with gr.Column(variant="panel"):
            gr.Markdown(
                """
                ### 1. Model checkpoints (model paths).
                """
            )
            with gr.Row():
                diffusion_transformer_dropdown = gr.Dropdown(
                    label="Pretrained Model Path",
                    choices=[model_name],
                    value=model_name,
                    interactive=False,
                )
            with gr.Row():
                # Hidden/disabled: base-model swapping is not offered on EAS.
                base_model_dropdown = gr.Dropdown(
                    label="Select base Dreambooth model",
                    choices=["none"],
                    value="none",
                    interactive=False,
                    visible=False
                )
            with gr.Column(visible=False):
                gr.Markdown(
                    """
                    ### Minimalism is an example portrait of Lora, triggered by specific prompt words. More details can be found on [Wiki](https://github.com/aigc-apps/CogVideoX-Fun/wiki/Training-Lora).
                    """
                )
                with gr.Row():
                    lora_model_dropdown = gr.Dropdown(
                        label="Select LoRA model",
                        choices=["none"],
                        value="none",
                        interactive=True,
                    )

                    lora_alpha_slider = gr.Slider(label="LoRA alpha (LoRA权重)", value=0.55, minimum=0, maximum=2, interactive=True)

        with gr.Column(variant="panel"):
            gr.Markdown(
                """
                ### 2. Configs for Generation.
                """
            )

            prompt_textbox = gr.Textbox(label="Prompt", lines=2, value="A young woman with beautiful and clear eyes and blonde hair standing and white dress in a forest wearing a crown. She seems to be lost in thought, and the camera focuses on her face. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.")
            negative_prompt_textbox = gr.Textbox(label="Negative prompt", lines=2, value="The video is not of a high quality, it has a low resolution. Watermark present in each frame. The background is solid. Strange body and strange trajectory. Distortion. " )

            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        sampler_dropdown = gr.Dropdown(label="Sampling method", choices=list(scheduler_dict.keys()), value=list(scheduler_dict.keys())[0])
                        # Step count is fixed at 50 for the hosted demo.
                        sample_step_slider = gr.Slider(label="Sampling steps", value=50, minimum=10, maximum=50, step=1, interactive=False)

                    resize_method = gr.Radio(
                        ["Generate by", "Resize according to Reference"],
                        value="Generate by",
                        show_label=False,
                    )
                    width_slider = gr.Slider(label="Width (视频宽度)", value=672, minimum=128, maximum=1280, step=16, interactive=False)
                    height_slider = gr.Slider(label="Height (视频高度)", value=384, minimum=128, maximum=1280, step=16, interactive=False)
                    base_resolution = gr.Radio(label="Base Resolution of Pretrained Models", value=512, choices=[512, 768, 960], interactive=False, visible=False)

                    gr.Markdown(
                        """
                        V1.0 and V1.1 support up to 49 frames of video generation, while V1.5 supports up to 85 frames.
                        (V1.0和V1.1支持最大49帧视频生成,V1.5支持最大85帧视频生成。)
                        """
                    )
                    with gr.Group():
                        generation_method = gr.Radio(
                            ["Video Generation", "Image Generation"],
                            value="Video Generation",
                            show_label=False,
                            visible=True,
                        )
                        length_slider = gr.Slider(label="Animation length (视频帧数)", value=49, minimum=5, maximum=85, step=4)

                    source_method = gr.Radio(
                        ["Text to Video (文本到视频)", "Image to Video (图片到视频)", "Video to Video (视频到视频)"],
                        value="Text to Video (文本到视频)",
                        show_label=False,
                    )
                    with gr.Column(visible = False) as image_to_video_col:
                        start_image = gr.Image(label="The image at the beginning of the video", show_label=True, elem_id="i2v_start", sources="upload", type="filepath")

                        template_gallery_path = ["asset/1.png", "asset/2.png", "asset/3.png", "asset/4.png", "asset/5.png"]
                        def select_template(evt: gr.SelectData):
                            # Map the clicked gallery item to its canned prompt; returns
                            # (image_path, prompt_text) for start_image and prompt_textbox.
                            text = {
                                "asset/1.png": "The dog is shaking head. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/2.png": "a sailboat sailing in rough seas with a dramatic sunset. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/3.png": "a beautiful woman with long hair and a dress blowing in the wind. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/4.png": "a man in an astronaut suit playing a guitar. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                                "asset/5.png": "fireworks display over night city. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.",
                            }[template_gallery_path[evt.index]]
                            return template_gallery_path[evt.index], text

                        template_gallery = gr.Gallery(
                            template_gallery_path,
                            columns=5, rows=1,
                            height=140,
                            allow_preview=False,
                            container=False,
                            label="Template Examples",
                        )
                        template_gallery.select(select_template, None, [start_image, prompt_textbox])

                        with gr.Accordion("The image at the ending of the video (Optional)", open=False):
                            end_image = gr.Image(label="The image at the ending of the video (Optional)", show_label=True, elem_id="i2v_end", sources="upload", type="filepath")

                    with gr.Column(visible = False) as video_to_video_col:
                        with gr.Row():
                            validation_video = gr.Video(
                                label="The video to convert (视频转视频的参考视频)", show_label=True,
                                elem_id="v2v", sources="upload",
                            )
                        with gr.Accordion("The mask of the video to inpaint (视频重新绘制的mask[非必需, Optional])", open=False):
                            gr.Markdown(
                                """
                                - Please set a larger denoise_strength when using validation_video_mask, such as 1.00 instead of 0.70
                                (请设置更大的denoise_strength,当使用validation_video_mask的时候,比如1而不是0.70)
                                """
                            )
                            validation_video_mask = gr.Image(
                                label="The mask of the video to inpaint (视频重新绘制的mask[非必需, Optional])",
                                show_label=False, elem_id="v2v_mask", sources="upload", type="filepath"
                            )
                        denoise_strength = gr.Slider(label="Denoise strength (重绘系数)", value=0.70, minimum=0.10, maximum=1.00, step=0.01)

                    cfg_scale_slider = gr.Slider(label="CFG Scale (引导系数)", value=6.0, minimum=0, maximum=20)

                    with gr.Row():
                        seed_textbox = gr.Textbox(label="Seed", value=43)
                        seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
                        # NOTE(review): random.randint(1, 1e8) passes a float upper bound;
                        # this raises on Python >= 3.12 — should be int(1e8). Confirm target runtime.
                        seed_button.click(
                            fn=lambda: gr.Textbox(value=random.randint(1, 1e8)) if gradio_version_is_above_4 else gr.Textbox.update(value=random.randint(1, 1e8)),
                            inputs=[],
                            outputs=[seed_textbox]
                        )

                    generate_button = gr.Button(value="Generate", variant='primary')

                with gr.Column():
                    result_image = gr.Image(label="Generated Image", interactive=False, visible=False)
                    result_video = gr.Video(label="Generated Animation", interactive=False)
                    infer_progress = gr.Textbox(
                        label="Generation Info",
                        value="No task currently",
                        interactive=False
                    )

            def upload_generation_method(generation_method):
                # Toggle the frame-length slider between video and single-image mode.
                if generation_method == "Video Generation":
                    return gr.update(visible=True, minimum=5, maximum=85, value=49, interactive=True)
                elif generation_method == "Image Generation":
                    return gr.update(minimum=1, maximum=1, value=1, interactive=False)
            generation_method.change(
                upload_generation_method, generation_method, [length_slider]
            )

            def upload_source_method(source_method):
                # Show only the column matching the chosen source mode and clear
                # the inputs belonging to the other mode.
                # Output order: [image_to_video_col, video_to_video_col, start_image,
                #                end_image, validation_video, validation_video_mask]
                if source_method == "Text to Video (文本到视频)":
                    return [gr.update(visible=False), gr.update(visible=False), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None)]
                elif source_method == "Image to Video (图片到视频)":
                    return [gr.update(visible=True), gr.update(visible=False), gr.update(), gr.update(), gr.update(value=None), gr.update(value=None)]
                else:
                    return [gr.update(visible=False), gr.update(visible=True), gr.update(value=None), gr.update(value=None), gr.update(), gr.update()]
            source_method.change(
                upload_source_method, source_method, [image_to_video_col, video_to_video_col, start_image, end_image, validation_video, validation_video_mask]
            )

            def upload_resize_method(resize_method):
                # "Generate by" exposes explicit width/height; the other mode
                # exposes the base-resolution radio instead.
                if resize_method == "Generate by":
                    return [gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)]
                else:
                    return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)]
            resize_method.change(
                upload_resize_method, resize_method, [width_slider, height_slider, base_resolution]
            )

            # Argument order here must match controller.generate's signature.
            generate_button.click(
                fn=controller.generate,
                inputs=[
                    diffusion_transformer_dropdown,
                    base_model_dropdown,
                    lora_model_dropdown,
                    lora_alpha_slider,
                    prompt_textbox,
                    negative_prompt_textbox,
                    sampler_dropdown,
                    sample_step_slider,
                    resize_method,
                    width_slider,
                    height_slider,
                    base_resolution,
                    generation_method,
                    length_slider,
                    cfg_scale_slider,
                    start_image,
                    end_image,
                    validation_video,
                    validation_video_mask,
                    denoise_strength,
                    seed_textbox,
                ],
                outputs=[result_image, result_video, infer_progress]
            )
    return demo, controller
robomaster/utils/__init__.py ADDED
File without changes
robomaster/utils/discrete_sampler.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Modified from https://github.com/THUDM/CogVideo/blob/3710a612d8760f5cdb1741befeebb65b9e0f2fe0/sat/sgm/modules/diffusionmodules/sigma_sampling.py
2
+ """
3
+ import torch
4
+
5
class DiscreteSampling:
    """Uniformly sample discrete (timestep) indices from ``[0, num_idx)``.

    When ``uniform_sampling`` is on and ``torch.distributed`` is initialized,
    the ranks are partitioned into groups and each group draws only from its
    own contiguous sub-interval of the index range, so the indices sampled
    across the whole world cover the range evenly.
    """

    def __init__(self, num_idx, uniform_sampling=False):
        self.num_idx = num_idx
        self.uniform_sampling = uniform_sampling
        dist = torch.distributed
        self.is_distributed = dist.is_available() and dist.is_initialized()

        if self.is_distributed and self.uniform_sampling:
            world_size = dist.get_world_size()
            self.rank = dist.get_rank()

            # Find the smallest i with: i divides world_size and
            # world_size // i divides num_idx. Then group_num = world_size // i
            # is the largest group count that splits num_idx evenly.
            # Terminates at i = world_size at the latest (group_num = 1).
            i = 1
            while world_size % i != 0 or num_idx % (world_size // i) != 0:
                i += 1
            self.group_num = world_size // i

            assert self.group_num > 0
            assert world_size % self.group_num == 0
            # the number of rank in one group
            self.group_width = world_size // self.group_num
            self.sigma_interval = self.num_idx // self.group_num
            print('rank=%d world_size=%d group_num=%d group_width=%d sigma_interval=%s' % (
                self.rank, world_size, self.group_num,
                self.group_width, self.sigma_interval))

    def __call__(self, n_samples, generator=None, device=None):
        """Return a 1-D LongTensor of ``n_samples`` sampled indices."""
        if not (self.is_distributed and self.uniform_sampling):
            # Plain uniform sampling over the full index range.
            return torch.randint(
                0, self.num_idx, (n_samples,),
                generator=generator, device=device,
            )

        # Each group of ranks samples only from its own sub-interval.
        group_index = self.rank // self.group_width
        low = group_index * self.sigma_interval
        idx = torch.randint(
            low, low + self.sigma_interval, (n_samples,),
            generator=generator, device=device,
        )
        print('proc[%d] idx=%s' % (self.rank, idx))
        return idx
robomaster/utils/lora_utils.py ADDED
@@ -0,0 +1,477 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # LoRA network module
2
+ # reference:
3
+ # https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
4
+ # https://github.com/cloneofsimo/lora/blob/master/lora_diffusion/lora.py
5
+ # https://github.com/bmaltais/kohya_ss
6
+
7
+ import hashlib
8
+ import math
9
+ import os
10
+ from collections import defaultdict
11
+ from io import BytesIO
12
+ from typing import List, Optional, Type, Union
13
+
14
+ import safetensors.torch
15
+ import torch
16
+ import torch.utils.checkpoint
17
+ from diffusers.models.lora import LoRACompatibleConv, LoRACompatibleLinear
18
+ from safetensors.torch import load_file
19
+ from transformers import T5EncoderModel
20
+
21
+
22
class LoRAModule(torch.nn.Module):
    """LoRA adapter that hijacks the forward of an existing Linear/Conv2d.

    Instead of replacing the original module object, ``apply_to`` rebinds the
    wrapped module's ``forward`` to this module's ``forward``, which adds the
    low-rank update ``up(down(x)) * multiplier * scale`` to the original
    output.
    """

    def __init__(
        self,
        lora_name,
        org_module: torch.nn.Module,
        multiplier=1.0,
        lora_dim=4,
        alpha=1,
        dropout=None,
        rank_dropout=None,
        module_dropout=None,
    ):
        """if alpha == 0 or None, alpha is rank (no scaling)."""
        super().__init__()
        self.lora_name = lora_name

        wraps_conv = org_module.__class__.__name__ == "Conv2d"
        if wraps_conv:
            in_dim = org_module.in_channels
            out_dim = org_module.out_channels
        else:
            in_dim = org_module.in_features
            out_dim = org_module.out_features

        self.lora_dim = lora_dim
        if wraps_conv:
            # Down projection mirrors the original conv geometry; the up
            # projection is a 1x1 conv back to the output channels.
            self.lora_down = torch.nn.Conv2d(
                in_dim, self.lora_dim,
                org_module.kernel_size, org_module.stride, org_module.padding,
                bias=False,
            )
            self.lora_up = torch.nn.Conv2d(self.lora_dim, out_dim, (1, 1), (1, 1), bias=False)
        else:
            self.lora_down = torch.nn.Linear(in_dim, self.lora_dim, bias=False)
            self.lora_up = torch.nn.Linear(self.lora_dim, out_dim, bias=False)

        if type(alpha) == torch.Tensor:
            alpha = alpha.detach().float().numpy()  # without casting, bf16 causes error
        alpha = self.lora_dim if alpha is None or alpha == 0 else alpha
        self.scale = alpha / self.lora_dim
        self.register_buffer("alpha", torch.tensor(alpha))

        # Same initialization as microsoft/LoRA: random down, zero up, so the
        # adapter starts as an exact no-op.
        torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
        torch.nn.init.zeros_(self.lora_up.weight)

        self.multiplier = multiplier
        self.org_module = org_module  # removed once applied
        self.dropout = dropout
        self.rank_dropout = rank_dropout
        self.module_dropout = module_dropout

    def apply_to(self):
        """Redirect the wrapped module's forward through this LoRA adapter."""
        self.org_forward = self.org_module.forward
        self.org_module.forward = self.forward
        del self.org_module

    def forward(self, x, *args, **kwargs):
        weight_dtype = x.dtype
        org_forwarded = self.org_forward(x)

        # Module dropout: occasionally bypass the LoRA branch entirely.
        if self.module_dropout is not None and self.training:
            if torch.rand(1) < self.module_dropout:
                return org_forwarded

        lx = self.lora_down(x.to(self.lora_down.weight.dtype))

        # Standard (neuron) dropout on the low-rank activations.
        if self.dropout is not None and self.training:
            lx = torch.nn.functional.dropout(lx, p=self.dropout)

        # Rank dropout: zero whole rank channels, then rescale as if the
        # effective rank had shrunk.
        if self.rank_dropout is not None and self.training:
            mask = torch.rand((lx.size(0), self.lora_dim), device=lx.device) > self.rank_dropout
            if len(lx.size()) == 3:
                mask = mask.unsqueeze(1)  # for Text Encoder
            elif len(lx.size()) == 4:
                mask = mask.unsqueeze(-1).unsqueeze(-1)  # for Conv2d
            lx = lx * mask
            scale = self.scale * (1.0 / (1.0 - self.rank_dropout))
        else:
            scale = self.scale

        lx = self.lora_up(lx)

        return org_forwarded.to(weight_dtype) + lx.to(weight_dtype) * self.multiplier * scale
113
+
114
+
115
+ def addnet_hash_legacy(b):
116
+ """Old model hash used by sd-webui-additional-networks for .safetensors format files"""
117
+ m = hashlib.sha256()
118
+
119
+ b.seek(0x100000)
120
+ m.update(b.read(0x10000))
121
+ return m.hexdigest()[0:8]
122
+
123
+
124
+ def addnet_hash_safetensors(b):
125
+ """New model hash used by sd-webui-additional-networks for .safetensors format files"""
126
+ hash_sha256 = hashlib.sha256()
127
+ blksize = 1024 * 1024
128
+
129
+ b.seek(0)
130
+ header = b.read(8)
131
+ n = int.from_bytes(header, "little")
132
+
133
+ offset = n + 8
134
+ b.seek(offset)
135
+ for chunk in iter(lambda: b.read(blksize), b""):
136
+ hash_sha256.update(chunk)
137
+
138
+ return hash_sha256.hexdigest()
139
+
140
+
141
+ def precalculate_safetensors_hashes(tensors, metadata):
142
+ """Precalculate the model hashes needed by sd-webui-additional-networks to
143
+ save time on indexing the model later."""
144
+
145
+ # Because writing user metadata to the file can change the result of
146
+ # sd_models.model_hash(), only retain the training metadata for purposes of
147
+ # calculating the hash, as they are meant to be immutable
148
+ metadata = {k: v for k, v in metadata.items() if k.startswith("ss_")}
149
+
150
+ bytes = safetensors.torch.save(tensors, metadata)
151
+ b = BytesIO(bytes)
152
+
153
+ model_hash = addnet_hash_safetensors(b)
154
+ legacy_hash = addnet_hash_legacy(b)
155
+ return model_hash, legacy_hash
156
+
157
+
158
class LoRANetwork(torch.nn.Module):
    """Container holding one LoRAModule per targeted Linear / 1x1 Conv layer.

    Builds LoRA adapters for the transformer (and optionally text encoders),
    applies them by forward-hijacking, and provides save/load and optimizer
    helpers in the kohya-ss format.
    """

    TRANSFORMER_TARGET_REPLACE_MODULE = ["CogVideoXTransformer3DModel"]
    TEXT_ENCODER_TARGET_REPLACE_MODULE = ["T5LayerSelfAttention", "T5LayerFF", "BertEncoder"]
    LORA_PREFIX_TRANSFORMER = "lora_unet"
    LORA_PREFIX_TEXT_ENCODER = "lora_te"

    def __init__(
        self,
        text_encoder: Union[List[T5EncoderModel], T5EncoderModel],
        unet,
        multiplier: float = 1.0,
        lora_dim: int = 4,
        alpha: float = 1,
        dropout: Optional[float] = None,
        module_class: Type[object] = LoRAModule,
        add_lora_in_attn_temporal: bool = False,
        varbose: Optional[bool] = False,
    ) -> None:
        super().__init__()
        self.multiplier = multiplier
        self.lora_dim = lora_dim
        self.alpha = alpha
        self.dropout = dropout

        print(f"create LoRA network. base dim (rank): {lora_dim}, alpha: {alpha}")
        print(f"neuron dropout: p={self.dropout}")

        def create_modules(
            is_unet: bool,
            root_module: torch.nn.Module,
            target_replace_modules: List[torch.nn.Module],
        ) -> List[LoRAModule]:
            # One LoRA module per Linear / 1x1 Conv child of every targeted
            # block; names are flattened with "_" separators.
            prefix = (
                self.LORA_PREFIX_TRANSFORMER
                if is_unet
                else self.LORA_PREFIX_TEXT_ENCODER
            )
            loras = []
            skipped = []
            for name, module in root_module.named_modules():
                if module.__class__.__name__ not in target_replace_modules:
                    continue
                for child_name, child_module in module.named_modules():
                    cls_name = child_module.__class__.__name__
                    is_linear = cls_name == "Linear" or cls_name == "LoRACompatibleLinear"
                    is_conv2d = cls_name == "Conv2d" or cls_name == "LoRACompatibleConv"
                    is_conv2d_1x1 = is_conv2d and child_module.kernel_size == (1, 1)

                    if not add_lora_in_attn_temporal:
                        if "attn_temporal" in child_name:
                            continue

                    if not (is_linear or is_conv2d):
                        continue

                    lora_name = (prefix + "." + name + "." + child_name).replace(".", "_")

                    dim = None
                    alpha = None
                    if is_linear or is_conv2d_1x1:
                        dim = self.lora_dim
                        alpha = self.alpha

                    if dim is None or dim == 0:
                        # Non-1x1 convs are never adapted; record the others.
                        if is_linear or is_conv2d_1x1:
                            skipped.append(lora_name)
                        continue

                    loras.append(module_class(
                        lora_name,
                        child_module,
                        self.multiplier,
                        dim,
                        alpha,
                        dropout=dropout,
                    ))
            return loras, skipped

        text_encoders = text_encoder if type(text_encoder) == list else [text_encoder]

        self.text_encoder_loras = []
        skipped_te = []
        for i, text_encoder in enumerate(text_encoders):
            if text_encoder is not None:
                text_encoder_loras, skipped = create_modules(False, text_encoder, LoRANetwork.TEXT_ENCODER_TARGET_REPLACE_MODULE)
                self.text_encoder_loras.extend(text_encoder_loras)
                skipped_te += skipped
        print(f"create LoRA for Text Encoder: {len(self.text_encoder_loras)} modules.")

        self.unet_loras, skipped_un = create_modules(True, unet, LoRANetwork.TRANSFORMER_TARGET_REPLACE_MODULE)
        print(f"create LoRA for U-Net: {len(self.unet_loras)} modules.")

        # Every LoRA must have a unique flattened name.
        names = set()
        for lora in self.text_encoder_loras + self.unet_loras:
            assert lora.lora_name not in names, f"duplicated lora name: {lora.lora_name}"
            names.add(lora.lora_name)

    def apply_to(self, text_encoder, unet, apply_text_encoder=True, apply_unet=True):
        """Hijack the forwards of all targeted modules and register the LoRAs."""
        if apply_text_encoder:
            print("enable LoRA for text encoder")
        else:
            self.text_encoder_loras = []

        if apply_unet:
            print("enable LoRA for U-Net")
        else:
            self.unet_loras = []

        for lora in self.text_encoder_loras + self.unet_loras:
            lora.apply_to()
            self.add_module(lora.lora_name, lora)

    def set_multiplier(self, multiplier):
        """Propagate a new LoRA strength to every adapter."""
        self.multiplier = multiplier
        for lora in self.text_encoder_loras + self.unet_loras:
            lora.multiplier = self.multiplier

    def load_weights(self, file):
        """Load a LoRA state dict from .safetensors or a torch checkpoint."""
        if os.path.splitext(file)[1] == ".safetensors":
            from safetensors.torch import load_file

            weights_sd = load_file(file)
        else:
            weights_sd = torch.load(file, map_location="cpu")
        return self.load_state_dict(weights_sd, False)

    def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):
        """Build optimizer param groups with optional per-part learning rates."""
        self.requires_grad_(True)

        def collect(loras):
            params = []
            for lora in loras:
                params.extend(lora.parameters())
            return params

        all_params = []
        if self.text_encoder_loras:
            group = {"params": collect(self.text_encoder_loras)}
            if text_encoder_lr is not None:
                group["lr"] = text_encoder_lr
            all_params.append(group)

        if self.unet_loras:
            group = {"params": collect(self.unet_loras)}
            if unet_lr is not None:
                group["lr"] = unet_lr
            all_params.append(group)

        return all_params

    def enable_gradient_checkpointing(self):
        # LoRA adapters carry no checkpointable activations of their own.
        pass

    def get_trainable_params(self):
        return self.parameters()

    def save_weights(self, file, dtype, metadata):
        """Save the LoRA state dict, adding sd-webui hashes for .safetensors."""
        if metadata is not None and len(metadata) == 0:
            metadata = None

        state_dict = self.state_dict()

        if dtype is not None:
            for key in list(state_dict.keys()):
                state_dict[key] = state_dict[key].detach().clone().to("cpu").to(dtype)

        if os.path.splitext(file)[1] == ".safetensors":
            from safetensors.torch import save_file

            # Precalculate model hashes to save indexing time in the webui.
            if metadata is None:
                metadata = {}
            model_hash, legacy_hash = precalculate_safetensors_hashes(state_dict, metadata)
            metadata["sshs_model_hash"] = model_hash
            metadata["sshs_legacy_hash"] = legacy_hash

            save_file(state_dict, file, metadata)
        else:
            torch.save(state_dict, file)
+
342
+ def create_network(
343
+ multiplier: float,
344
+ network_dim: Optional[int],
345
+ network_alpha: Optional[float],
346
+ text_encoder: Union[T5EncoderModel, List[T5EncoderModel]],
347
+ transformer,
348
+ neuron_dropout: Optional[float] = None,
349
+ add_lora_in_attn_temporal: bool = False,
350
+ **kwargs,
351
+ ):
352
+ if network_dim is None:
353
+ network_dim = 4 # default
354
+ if network_alpha is None:
355
+ network_alpha = 1.0
356
+
357
+ network = LoRANetwork(
358
+ text_encoder,
359
+ transformer,
360
+ multiplier=multiplier,
361
+ lora_dim=network_dim,
362
+ alpha=network_alpha,
363
+ dropout=neuron_dropout,
364
+ add_lora_in_attn_temporal=add_lora_in_attn_temporal,
365
+ varbose=True,
366
+ )
367
+ return network
368
+
369
def merge_lora(pipeline, lora_path, multiplier, device='cpu', dtype=torch.float32, state_dict=None, transformer_only=False):
    """Merge kohya-format LoRA weights directly into a pipeline's modules.

    Each state-dict key is ``<layer>.<elem>``; layer names encode the module
    path with ``_`` separators, so the module tree is walked greedily, joining
    segments back together whenever an attribute lookup fails.
    """
    LORA_PREFIX_TRANSFORMER = "lora_unet"
    LORA_PREFIX_TEXT_ENCODER = "lora_te"
    if state_dict is None:
        state_dict = load_file(lora_path, device=device)

    # Group flat keys by layer: {layer_name: {elem_name: tensor}}.
    updates = defaultdict(dict)
    for key, value in state_dict.items():
        layer, elem = key.split('.', 1)
        updates[layer][elem] = value

    for layer, elems in updates.items():
        if "lora_te" in layer:
            if transformer_only:
                continue
            layer_infos = layer.split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = layer.split(LORA_PREFIX_TRANSFORMER + "_")[-1].split("_")
            curr_layer = pipeline.transformer

        # Resolve the target module: attribute names may themselves contain
        # underscores, so keep appending segments until a lookup succeeds.
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(layer_infos) == 0:
                    print('Error loading layer')
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        weight_up = elems['lora_up.weight'].to(dtype)
        weight_down = elems['lora_down.weight'].to(dtype)
        # kohya convention: effective scale is alpha / rank.
        if 'alpha' in elems:
            alpha = elems['alpha'].item() / weight_up.shape[1]
        else:
            alpha = 1.0

        curr_layer.weight.data = curr_layer.weight.data.to(device)
        if len(weight_up.shape) == 4:
            # Conv LoRA: contract over the rank on the squeezed 1x1 kernels.
            delta = torch.mm(
                weight_up.squeeze(3).squeeze(2),
                weight_down.squeeze(3).squeeze(2),
            ).unsqueeze(2).unsqueeze(3)
        else:
            delta = torch.mm(weight_up, weight_down)
        curr_layer.weight.data += multiplier * alpha * delta

    return pipeline
425
+
426
def unmerge_lora(pipeline, lora_path, multiplier=1, device="cpu", dtype=torch.float32):
    """Unmerge state_dict in LoRANetwork from the pipeline in diffusers.

    Unmerging subtracts ``multiplier * alpha * (up @ down)`` from each target
    weight, which is exactly merging with a negated multiplier — so this
    delegates to merge_lora (resolving the long-standing TODO) instead of
    duplicating the module-tree traversal.
    """
    return merge_lora(pipeline, lora_path, -multiplier, device=device, dtype=dtype)
robomaster/utils/utils.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gc
3
+ import imageio
4
+ import numpy as np
5
+ import torch
6
+ import torchvision
7
+ import cv2
8
+ from einops import rearrange
9
+ from PIL import Image
10
+
11
def get_width_and_height_from_image_and_base_resolution(image, base_resolution):
    """Scale an image's dimensions so their product matches base_resolution**2.

    Args:
        image: path (or file object) openable by PIL.
    Returns:
        (height, width) rounded to integers, preserving the aspect ratio.
    """
    pixel_budget = int(base_resolution) * int(base_resolution)
    original_width, original_height = Image.open(image).size
    scale = (pixel_budget / (original_width * original_height)) ** 0.5
    return round(original_height * scale), round(original_width * scale)
18
+
19
def color_transfer(sc, dc):
    """
    Transfer the color distribution of dc onto sc (per-channel, in LAB space).

    Args:
        sc (numpy.ndarray): input image to be transferred.
        dc (numpy.ndarray): reference image.

    Returns:
        numpy.ndarray: sc with dc's color statistics applied.
    """

    def channel_stats(img):
        mean, std = cv2.meanStdDev(img)
        return np.hstack(np.around(mean, 2)), np.hstack(np.around(std, 2))

    source_lab = cv2.cvtColor(sc, cv2.COLOR_RGB2LAB)
    s_mean, s_std = channel_stats(source_lab)
    reference_lab = cv2.cvtColor(dc, cv2.COLOR_RGB2LAB)
    r_mean, r_std = channel_stats(reference_lab)

    # Match per-channel mean/std of the source to the reference, then clamp
    # back into valid 8-bit range.
    transferred = ((source_lab - s_mean) * (r_std / s_std)) + r_mean
    np.putmask(transferred, transferred > 255, 255)
    np.putmask(transferred, transferred < 0, 0)
    return cv2.cvtColor(cv2.convertScaleAbs(transferred), cv2.COLOR_LAB2RGB)
46
+
47
def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=12, imageio_backend=True, color_transfer_post_process=False):
    """Save a batch of videos (b, c, t, h, w) as an mp4/gif grid at ``path``.

    Args:
        rescale: if True, map values from [-1, 1] to [0, 1] before export.
        imageio_backend: use imageio (mp4/gif by extension); otherwise save a
            GIF via PIL, replacing a .mp4 extension with .gif.
        color_transfer_post_process: align every frame's color statistics
            with the first frame to reduce flicker.
    """
    frames = []
    for frame_batch in rearrange(videos, "b c t h w -> t b c h w"):
        grid = torchvision.utils.make_grid(frame_batch, nrow=n_rows)
        grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
        if rescale:
            grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1
        grid = (grid * 255).numpy().astype(np.uint8)
        frames.append(Image.fromarray(grid))

    if color_transfer_post_process:
        for i in range(1, len(frames)):
            frames[i] = Image.fromarray(color_transfer(np.uint8(frames[i]), np.uint8(frames[0])))

    os.makedirs(os.path.dirname(path), exist_ok=True)
    if imageio_backend:
        if path.endswith("mp4"):
            imageio.mimsave(path, frames, fps=fps)
        else:
            imageio.mimsave(path, frames, duration=(1000 * 1/fps))
    else:
        if path.endswith("mp4"):
            path = path.replace('.mp4', '.gif')
        frames[0].save(path, format='GIF', append_images=frames, save_all=True, duration=100, loop=0)
72
+
73
def get_image_to_video_latent(validation_image_start, validation_image_end, video_length, sample_size):
    """Build the conditioning video tensor and inpaint mask for image-to-video.

    Args:
        validation_image_start: first-frame condition; a file path, a PIL
            image, or a list of PIL images. None means pure text-to-video.
        validation_image_end: optional last-frame condition (same forms).
        video_length: number of frames T of the target video.
        sample_size: (height, width) of the target video.

    Returns:
        Tuple (input_video, input_video_mask, clip_image): input_video is
        (1, 3, T, H, W) scaled to [0, 1]; input_video_mask is (1, 1, T, H, W)
        with 255 on frames to be generated and 0 on conditioned frames;
        clip_image is a single PIL image (or None).
    """
    if validation_image_start is not None and validation_image_end is not None:
        # Load and resize the start frame(s); clip_image mirrors the start.
        if type(validation_image_start) is str and os.path.isfile(validation_image_start):
            image_start = clip_image = Image.open(validation_image_start).convert("RGB")
            image_start = image_start.resize([sample_size[1], sample_size[0]])
            clip_image = clip_image.resize([sample_size[1], sample_size[0]])
        else:
            image_start = clip_image = validation_image_start
            image_start = [_image_start.resize([sample_size[1], sample_size[0]]) for _image_start in image_start]
            clip_image = [_clip_image.resize([sample_size[1], sample_size[0]]) for _clip_image in clip_image]

        # Load and resize the end frame(s).
        if type(validation_image_end) is str and os.path.isfile(validation_image_end):
            image_end = Image.open(validation_image_end).convert("RGB")
            image_end = image_end.resize([sample_size[1], sample_size[0]])
        else:
            image_end = validation_image_end
            image_end = [_image_end.resize([sample_size[1], sample_size[0]]) for _image_end in image_end]

        if type(image_start) is list:
            clip_image = clip_image[0]
            start_video = torch.cat(
                [torch.from_numpy(np.array(_image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) for _image_start in image_start],
                dim=2
            )
            # Repeat the first frame across the clip, then overwrite the
            # leading frames with the provided ones.
            input_video = torch.tile(start_video[:, :, :1], [1, 1, video_length, 1, 1])
            input_video[:, :, :len(image_start)] = start_video

            input_video_mask = torch.zeros_like(input_video[:, :1])
            input_video_mask[:, :, len(image_start):] = 255
        else:
            input_video = torch.tile(
                torch.from_numpy(np.array(image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0),
                [1, 1, video_length, 1, 1]
            )
            input_video_mask = torch.zeros_like(input_video[:, :1])
            input_video_mask[:, :, 1:] = 255

        if type(image_end) is list:
            image_end = [_image_end.resize(image_start[0].size if type(image_start) is list else image_start.size) for _image_end in image_end]
            end_video = torch.cat(
                [torch.from_numpy(np.array(_image_end)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) for _image_end in image_end],
                dim=2
            )
            # BUGFIX: slice by the number of end frames, not len(end_video)
            # (which is the batch dimension, always 1) — with several end
            # images the old code shape-mismatched against the frame axis.
            input_video[:, :, -len(image_end):] = end_video

            input_video_mask[:, :, -len(image_end):] = 0
        else:
            image_end = image_end.resize(image_start[0].size if type(image_start) is list else image_start.size)
            input_video[:, :, -1:] = torch.from_numpy(np.array(image_end)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0)
            input_video_mask[:, :, -1:] = 0

        input_video = input_video / 255

    elif validation_image_start is not None:
        # Start frame(s) only.
        if type(validation_image_start) is str and os.path.isfile(validation_image_start):
            image_start = clip_image = Image.open(validation_image_start).convert("RGB")
            image_start = image_start.resize([sample_size[1], sample_size[0]])
            clip_image = clip_image.resize([sample_size[1], sample_size[0]])
        else:
            image_start = clip_image = validation_image_start
            image_start = [_image_start.resize([sample_size[1], sample_size[0]]) for _image_start in image_start]
            clip_image = [_clip_image.resize([sample_size[1], sample_size[0]]) for _clip_image in clip_image]
        image_end = None

        if type(image_start) is list:
            clip_image = clip_image[0]
            start_video = torch.cat(
                [torch.from_numpy(np.array(_image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) for _image_start in image_start],
                dim=2
            )
            input_video = torch.tile(start_video[:, :, :1], [1, 1, video_length, 1, 1])
            input_video[:, :, :len(image_start)] = start_video
            input_video = input_video / 255

            input_video_mask = torch.zeros_like(input_video[:, :1])
            input_video_mask[:, :, len(image_start):] = 255
        else:
            input_video = torch.tile(
                torch.from_numpy(np.array(image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0),
                [1, 1, video_length, 1, 1]
            ) / 255
            input_video_mask = torch.zeros_like(input_video[:, :1])
            input_video_mask[:, :, 1:] = 255
    else:
        # No image conditioning: all-zero video, full-generation mask.
        image_start = None
        image_end = None
        input_video = torch.zeros([1, 3, video_length, sample_size[0], sample_size[1]])
        input_video_mask = torch.ones([1, 1, video_length, sample_size[0], sample_size[1]]) * 255
        clip_image = None

    del image_start
    del image_end
    gc.collect()

    return input_video, input_video_mask, clip_image
168
+
169
def get_video_to_video_latent(input_video_path, video_length, sample_size, fps=None, validation_video_mask=None):
    """Load a source video as the conditioning tensor for video-to-video.

    Args:
        input_video_path: path to a video file, or a pre-decoded sequence of
            HxWx3 RGB frames (list/array).
        video_length: maximum number of frames to keep.
        sample_size: (height, width) each frame is resized to (file path only).
        fps: optional target frame rate used to subsample a video file.
        validation_video_mask: optional path to a grayscale mask image;
            pixels >= 240 mark regions to generate (255), others are kept (0).

    Returns:
        (input_video, input_video_mask, None): input_video is (1, 3, T, H, W)
        in [0, 1]; the mask is (1, 1, T, H, W) with 255 on content to generate.
    """
    if isinstance(input_video_path, str):
        cap = cv2.VideoCapture(input_video_path)
        input_video = []

        original_fps = cap.get(cv2.CAP_PROP_FPS)
        # BUGFIX: clamp to >= 1 — if the requested fps exceeds the source fps
        # (or the fps probe returns 0), int(original_fps // fps) is 0 and the
        # modulo below raises ZeroDivisionError.
        frame_skip = 1 if fps is None else max(1, int(original_fps // fps))

        frame_count = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if frame_count % frame_skip == 0:
                frame = cv2.resize(frame, (sample_size[1], sample_size[0]))
                input_video.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            frame_count += 1

        cap.release()
    else:
        input_video = input_video_path

    input_video = torch.from_numpy(np.array(input_video))[:video_length]
    input_video = input_video.permute([3, 0, 1, 2]).unsqueeze(0) / 255

    if validation_video_mask is not None:
        validation_video_mask = Image.open(validation_video_mask).convert('L').resize((sample_size[1], sample_size[0]))
        input_video_mask = np.where(np.array(validation_video_mask) < 240, 0, 255)

        input_video_mask = torch.from_numpy(np.array(input_video_mask)).unsqueeze(0).unsqueeze(-1).permute([3, 0, 1, 2]).unsqueeze(0)
        input_video_mask = torch.tile(input_video_mask, [1, 1, input_video.size()[2], 1, 1])
        input_video_mask = input_video_mask.to(input_video.device, input_video.dtype)
    else:
        input_video_mask = torch.zeros_like(input_video[:, :1])
        input_video_mask[:, :, :] = 255

    return input_video, input_video_mask, None
robomaster/video_caption/README.md ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Video Caption
2
+ English | [简体中文](./README_zh-CN.md)
3
+
4
+ The folder contains codes for dataset preprocessing (i.e., video splitting, filtering, and recaptioning), and beautiful prompt used by CogVideoX-Fun.
5
+ The entire process supports distributed parallel processing, capable of handling large-scale datasets.
6
+
7
+ Meanwhile, we are collaborating with [Data-Juicer](https://github.com/modelscope/data-juicer/blob/main/docs/DJ_SORA.md),
8
+ allowing you to easily perform video data processing on [Aliyun PAI-DLC](https://help.aliyun.com/zh/pai/user-guide/video-preprocessing/).
9
+
10
+ # Table of Content
11
+ - [Video Caption](#video-caption)
12
+ - [Table of Content](#table-of-content)
13
+ - [Quick Start](#quick-start)
14
+ - [Setup](#setup)
15
+ - [Data Preprocessing](#data-preprocessing)
16
+ - [Data Preparation](#data-preparation)
17
+ - [Video Splitting](#video-splitting)
18
+ - [Video Filtering](#video-filtering)
19
+ - [Video Recaptioning](#video-recaptioning)
20
+ - [Beautiful Prompt (For CogVideoX-Fun Inference)](#beautiful-prompt-for-cogvideox-inference)
21
+ - [Batched Inference](#batched-inference)
22
+ - [OpenAI Server](#openai-server)
23
+
24
+ ## Quick Start
25
+
26
+ ### Setup
27
+ AliyunDSW or Docker is recommended to set up the environment; please refer to [Quick Start](../../README.md#quick-start).
28
+ You can also refer to the image build process in the [Dockerfile](../../Dockerfile.ds) to configure the conda environment and other dependencies locally.
29
+
30
+ Since the video recaptioning depends on [llm-awq](https://github.com/mit-han-lab/llm-awq) for faster and memory efficient inference,
31
+ the minimum GPU requirement should be RTX 3060 or A2 (CUDA Compute Capability >= 8.0).
32
+
33
+ ```shell
34
+ # pull image
35
+ docker pull mybigpai-public-registry.cn-beijing.cr.aliyuncs.com/easycv/torch_cuda:cogvideox_fun
36
+
37
+ # enter image
38
+ docker run -it -p 7860:7860 --network host --gpus all --security-opt seccomp:unconfined --shm-size 200g mybigpai-public-registry.cn-beijing.cr.aliyuncs.com/easycv/torch_cuda:cogvideox_fun
39
+
40
+ # clone code
41
+ git clone https://github.com/aigc-apps/CogVideoX-Fun.git
42
+
43
+ # enter video_caption
44
+ cd CogVideoX-Fun/cogvideox/video_caption
45
+ ```
46
+
47
+ ### Data Preprocessing
48
+ #### Data Preparation
49
+ Place the downloaded videos into a folder under [datasets](./datasets/) (preferably without nested structures, as the video names are used as unique IDs in subsequent processes).
50
+ Taking Panda-70M as an example, the entire dataset directory structure is shown as follows:
51
+ ```
52
+ 📦 datasets/
53
+ ├── 📂 panda_70m/
54
+ │ ├── 📂 videos/
55
+ │ │ ├── 📂 data/
56
+ │ │ │ └── 📄 --C66yU3LjM_2.mp4
57
+ │ │ │ └── 📄 ...
58
+ ```
59
+
60
+ #### Video Splitting
61
+ CogVideoX-Fun utilizes [PySceneDetect](https://github.com/Breakthrough/PySceneDetect) to identify scene changes within the video
62
+ and performs video splitting via FFmpeg based on certain threshold values to ensure consistency of the video clip.
63
+ Video clips shorter than 3 seconds will be discarded, and those longer than 10 seconds will be split recursively.
64
+
65
+ The entire workflow of video splitting is in the [stage_1_video_splitting.sh](./scripts/stage_1_video_splitting.sh).
66
+ After running
67
+ ```shell
68
+ sh scripts/stage_1_video_splitting.sh
69
+ ```
70
+ the video clips are obtained in `cogvideox/video_caption/datasets/panda_70m/videos_clips/data/`.
71
+
72
+ #### Video Filtering
73
+ Based on the videos obtained in the previous step, CogVideoX-Fun provides a simple yet effective pipeline to filter out high-quality videos for recaptioning.
74
+ The overall process is as follows:
75
+
76
+ - Aesthetic filtering: Filter out videos with poor content (blurry, dim, etc.) by calculating the average aesthetic score of uniformly sampled 4 frames via [aesthetic-predictor-v2-5](https://github.com/discus0434/aesthetic-predictor-v2-5).
77
+ - Text filtering: Use [EasyOCR](https://github.com/JaidedAI/EasyOCR) to calculate the text area proportion of the middle frame to filter out videos with a large area of text.
78
+ - Motion filtering: Calculate interframe optical flow differences to filter out videos that move too slowly or too quickly.
79
+
80
+ The entire workflow of video filtering is in the [stage_2_video_filtering.sh](./scripts/stage_2_video_filtering.sh).
81
+ After running
82
+ ```shell
83
+ sh scripts/stage_2_video_filtering.sh
84
+ ```
85
+ the aesthetic score, text score, and motion score of videos will be saved in the corresponding meta files in the folder `cogvideox/video_caption/datasets/panda_70m/videos_clips/`.
86
+
87
+ > [!NOTE]
88
+ > The computation of the aesthetic score depends on the [google/siglip-so400m-patch14-384 model](https://huggingface.co/google/siglip-so400m-patch14-384).
89
+ Please run `HF_ENDPOINT=https://hf-mirror.com sh scripts/stage_2_video_filtering.sh` if you cannot access to huggingface.com.
90
+
91
+
92
+ #### Video Recaptioning
93
+ After obtaining the above high-quality filtered videos, CogVideoX-Fun utilizes [VILA1.5](https://github.com/NVlabs/VILA) to perform video recaptioning.
94
+ Subsequently, the recaptioning results are rewritten by LLMs to better meet the requirements of video generation tasks.
95
+ Finally, an advanced VideoCLIPXL model is developed to filter out video-caption pairs with poor alignment, resulting in the final training dataset.
96
+
97
+ Please download the video caption model from [VILA1.5](https://huggingface.co/collections/Efficient-Large-Model/vila-on-pre-training-for-visual-language-models-65d8022a3a52cd9bcd62698e) of the appropriate size based on the GPU memory of your machine.
98
+ For A100 with 40G VRAM, you can download [VILA1.5-40b-AWQ](https://huggingface.co/Efficient-Large-Model/VILA1.5-40b-AWQ) by running
99
+ ```shell
100
+ # Add HF_ENDPOINT=https://hf-mirror.com before the command if you cannot access to huggingface.com
101
+ huggingface-cli download Efficient-Large-Model/VILA1.5-40b-AWQ --local-dir-use-symlinks False --local-dir /PATH/TO/VILA_MODEL
102
+ ```
103
+
104
+ Optionally, you can prepare local LLMs to rewrite the recaption results.
105
+ For example, you can download [Meta-Llama-3-8B-Instruct](https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct) by running
106
+ ```shell
107
+ # Add HF_ENDPOINT=https://hf-mirror.com before the command if you cannot access to huggingface.com
108
+ huggingface-cli download NousResearch/Meta-Llama-3-8B-Instruct --local-dir-use-symlinks False --local-dir /PATH/TO/REWRITE_MODEL
109
+ ```
110
+
111
+ The entire workflow of video recaption is in the [stage_3_video_recaptioning.sh](./scripts/stage_3_video_recaptioning.sh).
112
+ After running
113
+ ```shell
114
+ VILA_MODEL_PATH=/PATH/TO/VILA_MODEL REWRITE_MODEL_PATH=/PATH/TO/REWRITE_MODEL sh scripts/stage_3_video_recaptioning.sh
115
+ ```
116
+ the final train file is obtained in `cogvideox/video_caption/datasets/panda_70m/videos_clips/meta_train_info.json`.
117
+
118
+
119
+ ### Beautiful Prompt (For CogVideoX-Fun Inference)
120
+ Beautiful Prompt aims to rewrite and beautify the user-uploaded prompt via LLMs, mapping it to the style of CogVideoX-Fun's training captions,
121
+ making it more suitable as the inference prompt and thus improving the quality of the generated videos.
122
+ We support batched inference with local LLMs or OpenAI compatible server based on [vLLM](https://github.com/vllm-project/vllm) for beautiful prompt.
123
+
124
+ #### Batched Inference
125
+ 1. Prepare original prompts in a jsonl file `cogvideox/video_caption/datasets/original_prompt.jsonl` with the following format:
126
+ ```json
127
+ {"prompt": "A stylish woman in a black leather jacket, red dress, and boots walks confidently down a damp Tokyo street."}
128
+ {"prompt": "An underwater world with realistic fish and other creatures of the sea."}
129
+ {"prompt": "a monarch butterfly perched on a tree trunk in the forest."}
130
+ {"prompt": "a child in a room with a bottle of wine and a lamp."}
131
+ {"prompt": "two men in suits walking down a hallway."}
132
+ ```
133
+
134
+ 2. Then you can perform beautiful prompt by running
135
+ ```shell
136
+ # Meta-Llama-3-8B-Instruct is sufficient for this task.
137
+ # Download it from https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct or https://www.modelscope.cn/models/LLM-Research/Meta-Llama-3-8B-Instruct to /path/to/your_llm
138
+
139
+ python caption_rewrite.py \
140
+ --video_metadata_path datasets/original_prompt.jsonl \
141
+ --caption_column "prompt" \
142
+ --batch_size 1 \
143
+ --model_name /path/to/your_llm \
144
+ --prompt prompt/beautiful_prompt.txt \
145
+ --prefix '"detailed description": ' \
146
+ --saved_path datasets/beautiful_prompt.jsonl \
147
+ --saved_freq 1
148
+ ```
149
+
150
+ #### OpenAI Server
151
+ + You can request OpenAI compatible server to perform beautiful prompt by running
152
+ ```shell
153
+ OPENAI_API_KEY="your_openai_api_key" OPENAI_BASE_URL="your_openai_base_url" python beautiful_prompt.py \
154
+ --model "your_model_name" \
155
+ --prompt "your_prompt"
156
+ ```
157
+
158
+ + You can also deploy the OpenAI Compatible Server locally using vLLM. For example:
159
+ ```shell
160
+ # Meta-Llama-3-8B-Instruct is sufficient for this task.
161
+ # Download it from https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct or https://www.modelscope.cn/models/LLM-Research/Meta-Llama-3-8B-Instruct to /path/to/your_llm
162
+
163
+ # deploy the OpenAI compatible server
164
+ python -m vllm.entrypoints.openai.api_server --model /path/to/your_llm --dtype auto --api-key "your_api_key"
165
+ ```
166
+
167
+ Then you can perform beautiful prompt by running
168
+ ```shell
169
+ python beautiful_prompt.py \
170
+ --model /path/to/your_llm \
171
+ --prompt "your_prompt" \
172
+ --base_url "http://localhost:8000/v1" \
173
+ --api_key "your_api_key"
174
+ ```
robomaster/video_caption/README_zh-CN.md ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 数据预处理
2
+ [English](./README.md) | 简体中文
3
+
4
+ 该文件夹包含 CogVideoX-Fun 使用的数据集预处理(即视频切分、过滤和生成描述)和提示词美化的代码。整个过程支持分布式并行处理,能够处理大规模数据集。
5
+
6
+ 此外,我们和 [Data-Juicer](https://github.com/modelscope/data-juicer/blob/main/docs/DJ_SORA.md) 合作,能让你在 [Aliyun PAI-DLC](https://help.aliyun.com/zh/pai/user-guide/video-preprocessing/) 轻松进行视频数据的处理。
7
+
8
+ # 目录
9
+ - [数据预处理](#数据预处理)
10
+ - [目录](#目录)
11
+ - [快速开始](#快速开始)
12
+ - [安装](#安装)
13
+ - [数据集预处理](#数据集预处理)
14
+ - [数据准备](#数据准备)
15
+ - [视频切分](#视频切分)
16
+ - [视频过滤](#视频过滤)
17
+ - [视频描述](#视频描述)
18
+ - [提示词美化](#提示词美化)
19
+ - [批量推理](#批量推理)
20
+ - [OpenAI 服务器](#openai-服务器)
21
+
22
+
23
+ ## 快速开始
24
+ ### 安装
25
+ 推荐使用阿里云 DSW 和 Docker 来安装环境,请参考 [快速开始](../../README_zh-CN.md#1-云使用-aliyundswdocker). 你也可以参考 [Dockerfile](../../Dockerfile.ds) 中的镜像构建流程在本地安装对应的 conda 环境和其余依赖。
26
+
27
+ 为了提高推理速度和节省推理的显存,生成视频描述依赖于 [llm-awq](https://github.com/mit-han-lab/llm-awq)。因此,需要 RTX 3060 或者 A2 及以上的显卡 (CUDA Compute Capability >= 8.0)。
28
+
29
+ ```shell
30
+ # pull image
31
+ docker pull mybigpai-public-registry.cn-beijing.cr.aliyuncs.com/easycv/torch_cuda:cogvideox_fun
32
+
33
+ # enter image
34
+ docker run -it -p 7860:7860 --network host --gpus all --security-opt seccomp:unconfined --shm-size 200g mybigpai-public-registry.cn-beijing.cr.aliyuncs.com/easycv/torch_cuda:cogvideox_fun
35
+
36
+ # clone code
37
+ git clone https://github.com/aigc-apps/CogVideoX-Fun.git
38
+
39
+ # enter video_caption
40
+ cd CogVideoX-Fun/cogvideox/video_caption
41
+ ```
42
+
43
+ ### 数据集预处理
44
+ #### 数据准备
45
+ 将下载的视频准备到文件夹 [datasets](./datasets/)(最好不使用嵌套结构,因为视频名称在后续处理中用作唯一 ID)。以 Panda-70M 为例,完整的数据集目录结构如下所示:
46
+ ```
47
+ 📦 datasets/
48
+ ├── 📂 panda_70m/
49
+ │ ├── 📂 videos/
50
+ │ │ ├── 📂 data/
51
+ │ │ │ └── 📄 --C66yU3LjM_2.mp4
52
+ │ │ │ └── 📄 ...
53
+ ```
54
+
55
+ #### 视频切分
56
+ CogVideoX-Fun 使用 [PySceneDetect](https://github.com/Breakthrough/PySceneDetect) 来识别视频中的场景变化
57
+ 并根据某些阈值通过 FFmpeg 执行视频分割,以确保视频片段的一致性。
58
+ 短于 3 秒的视频片段将被丢弃,长于 10 秒的视频片段将被递归切分。
59
+
60
+ 视频切分的完整流程在 [stage_1_video_splitting.sh](./scripts/stage_1_video_splitting.sh)。执行
61
+ ```shell
62
+ sh scripts/stage_1_video_splitting.sh
63
+ ```
64
+ 后,切分后的视频位于 `cogvideox/video_caption/datasets/panda_70m/videos_clips/data/`。
65
+
66
+ #### 视频过滤
67
+ 基于上一步获得的视频,CogVideoX-Fun 提供了一个简单而有效的流程来过滤出高质量的视频。总体流程如下:
68
+
69
+ - 美学过滤:通过 [aesthetic-predictor-v2-5](https://github.com/discus0434/aesthetic-predictor-v2-5) 计算均匀采样的 4 帧视频的平均美学分数,从而筛选出内容不佳(模糊、昏暗等)的视频。
70
+ - 文本过滤:使用 [EasyOCR](https://github.com/JaidedAI/EasyOCR) 计算中间帧的文本区域比例,过滤掉含有大面积文本的视频。
71
+ - 运动过滤:计算帧间光流差,过滤掉移动太慢或太快的视频。
72
+
73
+ 视频过滤的完整流程在 [stage_2_video_filtering.sh](./scripts/stage_2_video_filtering.sh)。执行
74
+ ```shell
75
+ sh scripts/stage_2_video_filtering.sh
76
+ ```
77
+ 后,视频的美学得分、文本得分和运动得分对应的元文件保存在 `cogvideox/video_caption/datasets/panda_70m/videos_clips/`。
78
+
79
+ > [!NOTE]
80
+ > 美学得分的计算依赖于 [google/siglip-so400m-patch14-384 model](https://huggingface.co/google/siglip-so400m-patch14-384).
81
+ 请执行 `HF_ENDPOINT=https://hf-mirror.com sh scripts/stage_2_video_filtering.sh` 如果你无法访问 huggingface.com.
82
+
83
+ #### 视频描述
84
+ 在获得上述高质量的过滤视频后,CogVideoX-Fun 利用 [VILA1.5](https://github.com/NVlabs/VILA) 来生成视频描述。随后,使用 LLMs 对生成的视频描述进行重写,以更好地满足视频生成任务的要求。最后,使用自研的 VideoCLIPXL 模型来过滤掉描述和视频内容不一致的数据,从而得到最终的训练数据集。
85
+
86
+ 请根据机器的显存从 [VILA1.5](https://huggingface.co/collections/Efficient-Large-Model/vila-on-pre-training-for-visual-language-models-65d8022a3a52cd9bcd62698e) 下载合适大小的模型。对于 A100 40G,你可以执行下面的命令来下载 [VILA1.5-40b-AWQ](https://huggingface.co/Efficient-Large-Model/VILA1.5-40b-AWQ)
87
+ ```shell
88
+ # Add HF_ENDPOINT=https://hf-mirror.com before the command if you cannot access to huggingface.com
89
+ huggingface-cli download Efficient-Large-Model/VILA1.5-40b-AWQ --local-dir-use-symlinks False --local-dir /PATH/TO/VILA_MODEL
90
+ ```
91
+
92
+ 你可以选择性地准备 LLMs 来改写上述视频描述的结果。例如,你执行下面的命令来下载 [Meta-Llama-3-8B-Instruct](https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct)
93
+ ```shell
94
+ # Add HF_ENDPOINT=https://hf-mirror.com before the command if you cannot access to huggingface.com
95
+ huggingface-cli download NousResearch/Meta-Llama-3-8B-Instruct --local-dir-use-symlinks False --local-dir /PATH/TO/REWRITE_MODEL
96
+ ```
97
+
98
+ 视频描述的完整流程在 [stage_3_video_recaptioning.sh](./scripts/stage_3_video_recaptioning.sh).
99
+ 执行
100
+ ```shell
101
+ VILA_MODEL_PATH=/PATH/TO/VILA_MODEL REWRITE_MODEL_PATH=/PATH/TO/REWRITE_MODEL sh scripts/stage_3_video_recaptioning.sh
102
+ ```
103
+ 后,最后的训练文件会保存在 `cogvideox/video_caption/datasets/panda_70m/videos_clips/meta_train_info.json`。
104
+
105
+ ### 提示词美化
106
+ 提示词美化旨在通过 LLMs 重写和美化用户上传的提示,将其映射为 CogVideoX-Fun 训练所使用的视频描述风格、
107
+ 使其更适合用作推理提示词,从而提高生成视频的质量。
108
+
109
+ 基于 [vLLM](https://github.com/vllm-project/vllm),我们支持使用本地 LLM 进行批量推理或请求 OpenAI 服务器的方式,以进行提示词美化。
110
+
111
+ #### 批量推理
112
+ 1. 将原始的提示词以下面的格式准备在文件 `cogvideox/video_caption/datasets/original_prompt.jsonl` 中:
113
+ ```json
114
+ {"prompt": "A stylish woman in a black leather jacket, red dress, and boots walks confidently down a damp Tokyo street."}
115
+ {"prompt": "An underwater world with realistic fish and other creatures of the sea."}
116
+ {"prompt": "a monarch butterfly perched on a tree trunk in the forest."}
117
+ {"prompt": "a child in a room with a bottle of wine and a lamp."}
118
+ {"prompt": "two men in suits walking down a hallway."}
119
+ ```
120
+
121
+ 2. 随后你可以通过执行以下的命令进行提示词美化
122
+ ```shell
123
+ # Meta-Llama-3-8B-Instruct is sufficient for this task.
124
+ # Download it from https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct or https://www.modelscope.cn/models/LLM-Research/Meta-Llama-3-8B-Instruct to /path/to/your_llm
125
+
126
+ python caption_rewrite.py \
127
+ --video_metadata_path datasets/original_prompt.jsonl \
128
+ --caption_column "prompt" \
129
+ --batch_size 1 \
130
+ --model_name /path/to/your_llm \
131
+ --prompt prompt/beautiful_prompt.txt \
132
+ --prefix '"detailed description": ' \
133
+ --saved_path datasets/beautiful_prompt.jsonl \
134
+ --saved_freq 1
135
+ ```
136
+
137
+ #### OpenAI 服务器
138
+ + 你可以通过请求 OpenAI 服务器的方式来进行提示词美化
139
+ ```shell
140
+ OPENAI_API_KEY="your_openai_api_key" OPENAI_BASE_URL="your_openai_base_url" python beautiful_prompt.py \
141
+ --model "your_model_name" \
142
+ --prompt "your_prompt"
143
+ ```
144
+
145
+ + 你也可以执行以下命令,通过 vLLM 将本地 LLMs 部署成兼容 OpenAI 的服务器
146
+ ```shell
147
+ # 通过 vLLM 将本地 LLM 部署成兼容 OpenAI 的服务器
148
+ python -m vllm.entrypoints.openai.api_server --model /path/to/your_llm \
149
+ --dtype auto --api-key "your_api_key"
150
+ ```
151
+
152
+ 然后再执行下面的命令来进行提示词美化
153
+ ```shell
154
+ python beautiful_prompt.py \
155
+ --model /path/to/your_llm \
156
+ --prompt "your_prompt" \
157
+ --base_url "http://localhost:8000/v1" \
158
+ --api_key "your_api_key"
159
+ ```
robomaster/video_caption/beautiful_prompt.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This script (optional) can rewrite and beautify the user-uploaded prompt via LLMs, mapping it to the style of cogvideox's training captions,
3
+ making it more suitable as the inference prompt and thus improving the quality of the generated videos.
4
+
5
+ Usage:
6
+ + You can request OpenAI compatible server to perform beautiful prompt by running
7
+ ```shell
8
+ OPENAI_API_KEY="your_openai_api_key" OPENAI_BASE_URL="your_openai_base_url" python beautiful_prompt.py \
9
+ --model "your_model_name" \
10
+ --prompt "your_prompt"
11
+ ```
12
+ + You can also deploy the OpenAI Compatible Server locally using vLLM. For example:
13
+ ```shell
14
+ # Meta-Llama-3-8B-Instruct is sufficient for this task.
15
+ # Download it from https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct or https://www.modelscope.cn/models/LLM-Research/Meta-Llama-3-8B-Instruct to /path/to/your_llm
16
+
17
+ # deploy the OpenAI compatible server
18
+ python -m vllm.entrypoints.openai.api_server --model /path/to/your_llm --dtype auto --api-key "your_api_key"
19
+ ```
20
+
21
+ Then you can perform beautiful prompt by running
22
+ ```shell
23
+ python beautiful_prompt.py \
24
+ --model /path/to/your_llm \
25
+ --prompt "your_prompt" \
26
+ --base_url "http://localhost:8000/v1" \
27
+ --api_key "your_api_key"
28
+ ```
29
+ """
30
+ import argparse
31
+ import os
32
+
33
+ from openai import OpenAI
34
+
35
+ from cogvideox.video_caption.caption_rewrite import extract_output
36
+
37
+
38
+ def parse_args():
39
+ parser = argparse.ArgumentParser(description="Beautiful prompt.")
40
+ parser.add_argument("--model", type=str, required=True, help="The OpenAI model or the path to your local LLM.")
41
+ parser.add_argument("--prompt", type=str, required=True, help="The user-uploaded prompt.")
42
+ parser.add_argument(
43
+ "--template",
44
+ type=str,
45
+ default="cogvideox/video_caption/prompt/beautiful_prompt.txt",
46
+ help="A string or a txt file contains the template for beautiful prompt."
47
+ )
48
+ parser.add_argument(
49
+ "--max_retry_nums",
50
+ type=int,
51
+ default=5,
52
+ help="Maximum number of retries to obtain an output that meets the JSON format."
53
+ )
54
+ parser.add_argument(
55
+ "--base_url",
56
+ type=str,
57
+ default=None,
58
+ help="OpenAI API server url. If it is None, the OPENAI_BASE_URL from the environment variables will be used.",
59
+ )
60
+ parser.add_argument(
61
+ "--api_key",
62
+ type=str,
63
+ default=None,
64
+ help="OpenAI API key. If it is None, the OPENAI_API_KEY from the environment variables will be used.",
65
+ )
66
+
67
+ args = parser.parse_args()
68
+ return args
69
+
70
+
71
+ def main():
72
+ args = parse_args()
73
+
74
+ client = OpenAI(
75
+ base_url=os.getenv("OPENAI_BASE_URL", args.base_url),
76
+ api_key=os.environ.get("OPENAI_API_KEY", args.api_key),
77
+ )
78
+ if args.template.endswith(".txt") and os.path.exists(args.template):
79
+ with open(args.template, "r") as f:
80
+ args.template = "".join(f.readlines())
81
+ # print(f"Beautiful prompt template: {args.template}")
82
+
83
+ for _ in range(args.max_retry_nums):
84
+ completion = client.chat.completions.create(
85
+ model=args.model,
86
+ messages=[
87
+ # {"role": "system", "content": "You are a helpful assistant."},
88
+ {"role": "user", "content": args.template + "\n" + str(args.prompt)}
89
+ ],
90
+ temperature=0.7,
91
+ top_p=1,
92
+ max_tokens=1024,
93
+ )
94
+
95
+ output = completion.choices[0].message.content
96
+ output = extract_output(output, prefix='"detailed description": ')
97
+ if output is not None:
98
+ break
99
+ print(f"Beautiful prompt: {output}")
100
+
101
+
102
+ if __name__ == "__main__":
103
+ main()
robomaster/video_caption/caption_rewrite.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import re
3
+ import os
4
+ from tqdm import tqdm
5
+
6
+ import pandas as pd
7
+ import torch
8
+ from natsort import index_natsorted
9
+ from vllm import LLM, SamplingParams
10
+ from transformers import AutoTokenizer
11
+
12
+ from utils.logger import logger
13
+
14
+
15
+ def extract_output(s, prefix='"rewritten description": '):
16
+ """Customize the function according to the prompt."""
17
+ # Since some LLMs struggles to output strictly formatted JSON strings as specified by the prompt,
18
+ # thus manually parse the output string `{"rewritten description": "your rewritten description here"}`.
19
+ match = re.search(r"{(.+?)}", s, re.DOTALL)
20
+ if not match:
21
+ logger.warning(f"{s} is not in the json format. Return None.")
22
+ return None
23
+ output = match.group(1).strip()
24
+ if output.startswith(prefix):
25
+ output = output[len(prefix) :]
26
+ if output[0] == '"' and output[-1] == '"':
27
+ return output[1:-1]
28
+ else:
29
+ logger.warning(f"{output} does not start and end with the double quote. Return None.")
30
+ return None
31
+ else:
32
+ logger.warning(f"{output} does not start with {prefix}. Return None.")
33
+ return None
34
+
35
+
36
+ def parse_args():
37
+ parser = argparse.ArgumentParser(description="Rewrite the video caption by LLMs.")
38
+ parser.add_argument(
39
+ "--video_metadata_path", type=str, required=True, help="The path to the video dataset metadata (csv/jsonl)."
40
+ )
41
+ parser.add_argument(
42
+ "--video_path_column",
43
+ type=str,
44
+ default=None,
45
+ help="The column contains the video path (an absolute path or a relative path w.r.t the video_folder).",
46
+ )
47
+ parser.add_argument(
48
+ "--caption_column",
49
+ type=str,
50
+ default="caption",
51
+ help="The column contains the video caption.",
52
+ )
53
+ parser.add_argument(
54
+ "--batch_size",
55
+ type=int,
56
+ default=128,
57
+ required=False,
58
+ help="The batch size for vllm inference. Adjust according to the number of GPUs to maximize inference throughput.",
59
+ )
60
+ parser.add_argument(
61
+ "--model_name",
62
+ type=str,
63
+ default="NousResearch/Meta-Llama-3-8B-Instruct",
64
+ )
65
+ parser.add_argument(
66
+ "--prompt",
67
+ type=str,
68
+ required=True,
69
+ help="A string or a txt file contains the prompt.",
70
+ )
71
+ parser.add_argument(
72
+ "--prefix",
73
+ type=str,
74
+ required=True,
75
+ help="The prefix to extract the output from LLMs.",
76
+ )
77
+ parser.add_argument("--saved_path", type=str, required=True, help="The save path to the output results (csv/jsonl).")
78
+ parser.add_argument("--saved_freq", type=int, default=1, help="The frequency to save the output results.")
79
+
80
+ args = parser.parse_args()
81
+ return args
82
+
83
+
84
+ def main():
85
+ args = parse_args()
86
+
87
+ if args.video_metadata_path.endswith(".csv"):
88
+ video_metadata_df = pd.read_csv(args.video_metadata_path)
89
+ elif args.video_metadata_path.endswith(".jsonl"):
90
+ video_metadata_df = pd.read_json(args.video_metadata_path, lines=True)
91
+ elif args.video_metadata_path.endswith(".json"):
92
+ video_metadata_df = pd.read_json(args.video_metadata_path)
93
+ else:
94
+ raise ValueError(f"The {args.video_metadata_path} must end with .csv, .jsonl or .json.")
95
+
96
+ saved_suffix = os.path.splitext(args.saved_path)[1]
97
+ if saved_suffix not in set([".csv", ".jsonl", ".json"]):
98
+ raise ValueError(f"The saved_path must end with .csv, .jsonl or .json.")
99
+
100
+ if os.path.exists(args.saved_path) and args.video_path_column is not None:
101
+ if args.saved_path.endswith(".csv"):
102
+ saved_metadata_df = pd.read_csv(args.saved_path)
103
+ elif args.saved_path.endswith(".jsonl"):
104
+ saved_metadata_df = pd.read_json(args.saved_path, lines=True)
105
+
106
+ # Filter out the unprocessed video-caption pairs by setting the indicator=True.
107
+ merged_df = video_metadata_df.merge(saved_metadata_df, on=args.video_path_column, how="outer", indicator=True)
108
+ video_metadata_df = merged_df[merged_df["_merge"] == "left_only"]
109
+ # Sorting to guarantee the same result for each process.
110
+ video_metadata_df = video_metadata_df.iloc[index_natsorted(video_metadata_df[args.video_path_column])].reset_index(
111
+ drop=True
112
+ )
113
+ logger.info(
114
+ f"Resume from {args.saved_path}: {len(saved_metadata_df)} processed and {len(video_metadata_df)} to be processed."
115
+ )
116
+
117
+ if args.prompt.endswith(".txt") and os.path.exists(args.prompt):
118
+ with open(args.prompt, "r") as f:
119
+ args.prompt = "".join(f.readlines())
120
+ logger.info(f"Prompt: {args.prompt}")
121
+
122
+ if args.video_path_column is not None:
123
+ video_path_list = video_metadata_df[args.video_path_column].tolist()
124
+ if args.caption_column in video_metadata_df.columns:
125
+ sampled_frame_caption_list = video_metadata_df[args.caption_column].tolist()
126
+ else:
127
+ # When two columns with the same name, the dataframe merge operation on will distinguish them by adding 'x' and 'y'.
128
+ sampled_frame_caption_list = video_metadata_df[args.caption_column + "_x"].tolist()
129
+
130
+ CUDA_VISIBLE_DEVICES = os.getenv("CUDA_VISIBLE_DEVICES", None)
131
+ tensor_parallel_size = torch.cuda.device_count() if CUDA_VISIBLE_DEVICES is None else len(CUDA_VISIBLE_DEVICES.split(","))
132
+ logger.info(f"Automatically set tensor_parallel_size={tensor_parallel_size} based on the available devices.")
133
+
134
+ llm = LLM(model=args.model_name, trust_remote_code=True, tensor_parallel_size=tensor_parallel_size)
135
+ if "Meta-Llama-3" in args.model_name:
136
+ if "Meta-Llama-3-70B" in args.model_name:
137
+ # Llama-3-70B should use the tokenizer from Llama-3-8B
138
+ # https://github.com/vllm-project/vllm/issues/4180#issuecomment-2068292942
139
+ tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3-8B-Instruct")
140
+ else:
141
+ tokenizer = AutoTokenizer.from_pretrained(args.model_name)
142
+ stop_token_ids = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")]
143
+ sampling_params = SamplingParams(temperature=0.7, top_p=1, max_tokens=1024, stop_token_ids=stop_token_ids)
144
+ else:
145
+ tokenizer = AutoTokenizer.from_pretrained(args.model_name)
146
+ sampling_params = SamplingParams(temperature=0.7, top_p=1, max_tokens=1024)
147
+
148
+ result_dict = {args.caption_column: []}
149
+ if args.video_path_column is not None:
150
+ result_dict = {args.video_path_column: [], args.caption_column: []}
151
+
152
+ for i in tqdm(range(0, len(sampled_frame_caption_list), args.batch_size)):
153
+ if args.video_path_column is not None:
154
+ batch_video_path = video_path_list[i : i + args.batch_size]
155
+ batch_caption = sampled_frame_caption_list[i : i + args.batch_size]
156
+ batch_prompt = []
157
+ for caption in batch_caption:
158
+ # batch_prompt.append("user:" + args.prompt + str(caption) + "\n assistant:")
159
+ messages = [
160
+ {"role": "system", "content": "You are a helpful assistant."},
161
+ {"role": "user", "content": args.prompt + "\n" + str(caption)},
162
+ ]
163
+ text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
164
+ batch_prompt.append(text)
165
+
166
+ batch_output = llm.generate(batch_prompt, sampling_params)
167
+ batch_output = [output.outputs[0].text.rstrip() for output in batch_output]
168
+ batch_output = [extract_output(output, prefix=args.prefix) for output in batch_output]
169
+
170
+ # Filter out data that does not meet the output format.
171
+ batch_result = []
172
+ if args.video_path_column is not None:
173
+ for video_path, output in zip(batch_video_path, batch_output):
174
+ if output is not None:
175
+ batch_result.append((video_path, output))
176
+ batch_video_path, batch_output = zip(*batch_result)
177
+
178
+ result_dict[args.video_path_column].extend(batch_video_path)
179
+ else:
180
+ for output in batch_output:
181
+ if output is not None:
182
+ batch_result.append(output)
183
+
184
+ result_dict[args.caption_column].extend(batch_result)
185
+
186
+ # Save the metadata every args.saved_freq.
187
+ if i != 0 and ((i // args.batch_size) % args.saved_freq) == 0:
188
+ if len(result_dict[args.caption_column]) > 0:
189
+ result_df = pd.DataFrame(result_dict)
190
+ if args.saved_path.endswith(".csv"):
191
+ header = True if not os.path.exists(args.saved_path) else False
192
+ result_df.to_csv(args.saved_path, header=header, index=False, mode="a")
193
+ elif args.saved_path.endswith(".jsonl"):
194
+ result_df.to_json(args.saved_path, orient="records", lines=True, mode="a", force_ascii=False)
195
+ elif args.saved_path.endswith(".json"):
196
+ # Append is not supported.
197
+ if os.path.exists(args.saved_path):
198
+ saved_df = pd.read_json(args.saved_path, orient="records")
199
+ result_df = pd.concat([saved_df, result_df], ignore_index=True)
200
+ result_df.to_json(args.saved_path, orient="records", indent=4, force_ascii=False)
201
+ logger.info(f"Save result to {args.saved_path}.")
202
+
203
+ result_dict = {args.caption_column: []}
204
+ if args.video_path_column is not None:
205
+ result_dict = {args.video_path_column: [], args.caption_column: []}
206
+
207
+ if len(result_dict[args.caption_column]) > 0:
208
+ result_df = pd.DataFrame(result_dict)
209
+ if args.saved_path.endswith(".csv"):
210
+ header = True if not os.path.exists(args.saved_path) else False
211
+ result_df.to_csv(args.saved_path, header=header, index=False, mode="a")
212
+ elif args.saved_path.endswith(".jsonl"):
213
+ result_df.to_json(args.saved_path, orient="records", lines=True, mode="a")
214
+ elif args.saved_path.endswith(".json"):
215
+ # Append is not supported.
216
+ if os.path.exists(args.saved_path):
217
+ saved_df = pd.read_json(args.saved_path, orient="records")
218
+ result_df = pd.concat([saved_df, result_df], ignore_index=True)
219
+ result_df.to_json(args.saved_path, orient="records", indent=4, force_ascii=False)
220
+ logger.info(f"Save the final result to {args.saved_path}.")
221
+
222
+
223
+ if __name__ == "__main__":
224
+ main()
robomaster/video_caption/compute_motion_score.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import argparse
3
+ import gc
4
+ import os
5
+ from contextlib import contextmanager
6
+ from pathlib import Path
7
+
8
+ import cv2
9
+ import numpy as np
10
+ import pandas as pd
11
+ from joblib import Parallel, delayed
12
+ from natsort import natsorted
13
+ from tqdm import tqdm
14
+
15
+ from utils.logger import logger
16
+ from utils.filter import filter
17
+
18
+
19
+ @contextmanager
20
+ def VideoCapture(video_path):
21
+ cap = cv2.VideoCapture(video_path)
22
+ try:
23
+ yield cap
24
+ finally:
25
+ cap.release()
26
+ del cap
27
+ gc.collect()
28
+
29
+
30
+ def compute_motion_score(video_path):
31
+ video_motion_scores = []
32
+ sampling_fps = 2
33
+
34
+ try:
35
+ with VideoCapture(video_path) as cap:
36
+ fps = cap.get(cv2.CAP_PROP_FPS)
37
+ valid_fps = min(max(sampling_fps, 1), fps)
38
+ frame_interval = int(fps / valid_fps)
39
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
40
+
41
+ # if cannot get the second frame, use the last one
42
+ frame_interval = min(frame_interval, total_frames - 1)
43
+
44
+ prev_frame = None
45
+ frame_count = -1
46
+ while cap.isOpened():
47
+ ret, frame = cap.read()
48
+ frame_count += 1
49
+
50
+ if not ret:
51
+ break
52
+
53
+ # skip middle frames
54
+ if frame_count % frame_interval != 0:
55
+ continue
56
+
57
+ gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
58
+ if prev_frame is None:
59
+ prev_frame = gray_frame
60
+ continue
61
+
62
+ flow = cv2.calcOpticalFlowFarneback(
63
+ prev_frame,
64
+ gray_frame,
65
+ None,
66
+ pyr_scale=0.5,
67
+ levels=3,
68
+ winsize=15,
69
+ iterations=3,
70
+ poly_n=5,
71
+ poly_sigma=1.2,
72
+ flags=0,
73
+ )
74
+ mag, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])
75
+ frame_motion_score = np.mean(mag)
76
+ video_motion_scores.append(frame_motion_score)
77
+ prev_frame = gray_frame
78
+
79
+ video_meta_info = {
80
+ "video_path": Path(video_path).name,
81
+ "motion_score": round(float(np.mean(video_motion_scores)), 5),
82
+ }
83
+ return video_meta_info
84
+
85
+ except Exception as e:
86
+ print(f"Compute motion score for video {video_path} with error: {e}.")
87
+
88
+
89
+ def parse_args():
90
+ parser = argparse.ArgumentParser(description="Compute the motion score of the videos.")
91
+ parser.add_argument("--video_folder", type=str, default="", help="The video folder.")
92
+ parser.add_argument(
93
+ "--video_metadata_path", type=str, default=None, help="The path to the video dataset metadata (csv/jsonl)."
94
+ )
95
+ parser.add_argument(
96
+ "--video_path_column",
97
+ type=str,
98
+ default="video_path",
99
+ help="The column contains the video path (an absolute path or a relative path w.r.t the video_folder).",
100
+ )
101
+ parser.add_argument("--saved_path", type=str, required=True, help="The save path to the output results (csv/jsonl).")
102
+ parser.add_argument("--saved_freq", type=int, default=100, help="The frequency to save the output results.")
103
+ parser.add_argument("--n_jobs", type=int, default=1, help="The number of concurrent processes.")
104
+
105
+ parser.add_argument(
106
+ "--basic_metadata_path", type=str, default=None, help="The path to the basic metadata (csv/jsonl)."
107
+ )
108
+ parser.add_argument("--min_resolution", type=float, default=0, help="The resolution threshold.")
109
+ parser.add_argument("--min_duration", type=float, default=-1, help="The minimum duration.")
110
+ parser.add_argument("--max_duration", type=float, default=-1, help="The maximum duration.")
111
+ parser.add_argument(
112
+ "--asethetic_score_metadata_path", type=str, default=None, help="The path to the video quality metadata (csv/jsonl)."
113
+ )
114
+ parser.add_argument("--min_asethetic_score", type=float, default=4.0, help="The asethetic score threshold.")
115
+ parser.add_argument(
116
+ "--asethetic_score_siglip_metadata_path", type=str, default=None, help="The path to the video quality metadata (csv/jsonl)."
117
+ )
118
+ parser.add_argument("--min_asethetic_score_siglip", type=float, default=4.0, help="The asethetic score (SigLIP) threshold.")
119
+ parser.add_argument(
120
+ "--text_score_metadata_path", type=str, default=None, help="The path to the video text score metadata (csv/jsonl)."
121
+ )
122
+ parser.add_argument("--min_text_score", type=float, default=0.02, help="The text threshold.")
123
+
124
+ args = parser.parse_args()
125
+ return args
126
+
127
+
128
+ def main():
129
+ args = parse_args()
130
+
131
+ if args.video_metadata_path.endswith(".csv"):
132
+ video_metadata_df = pd.read_csv(args.video_metadata_path)
133
+ elif args.video_metadata_path.endswith(".jsonl"):
134
+ video_metadata_df = pd.read_json(args.video_metadata_path, lines=True)
135
+ else:
136
+ raise ValueError("The video_metadata_path must end with .csv or .jsonl.")
137
+ video_path_list = video_metadata_df[args.video_path_column].tolist()
138
+
139
+ if not (args.saved_path.endswith(".csv") or args.saved_path.endswith(".jsonl")):
140
+ raise ValueError("The saved_path must end with .csv or .jsonl.")
141
+
142
+ if os.path.exists(args.saved_path):
143
+ if args.saved_path.endswith(".csv"):
144
+ saved_metadata_df = pd.read_csv(args.saved_path)
145
+ elif args.saved_path.endswith(".jsonl"):
146
+ saved_metadata_df = pd.read_json(args.saved_path, lines=True)
147
+ saved_video_path_list = saved_metadata_df[args.video_path_column].tolist()
148
+ video_path_list = list(set(video_path_list).difference(set(saved_video_path_list)))
149
+ logger.info(f"Resume from {args.saved_path}: {len(saved_video_path_list)} processed and {len(video_path_list)} to be processed.")
150
+
151
+ video_path_list = filter(
152
+ video_path_list,
153
+ basic_metadata_path=args.basic_metadata_path,
154
+ min_resolution=args.min_resolution,
155
+ min_duration=args.min_duration,
156
+ max_duration=args.max_duration,
157
+ asethetic_score_metadata_path=args.asethetic_score_metadata_path,
158
+ min_asethetic_score=args.min_asethetic_score,
159
+ asethetic_score_siglip_metadata_path=args.asethetic_score_siglip_metadata_path,
160
+ min_asethetic_score_siglip=args.min_asethetic_score_siglip,
161
+ text_score_metadata_path=args.text_score_metadata_path,
162
+ min_text_score=args.min_text_score,
163
+ )
164
+ video_path_list = [os.path.join(args.video_folder, video_path) for video_path in video_path_list]
165
+ # Sorting to guarantee the same result for each process.
166
+ video_path_list = natsorted(video_path_list)
167
+
168
+ for i in tqdm(range(0, len(video_path_list), args.saved_freq)):
169
+ result_list = Parallel(n_jobs=args.n_jobs)(
170
+ delayed(compute_motion_score)(video_path) for video_path in tqdm(video_path_list[i: i + args.saved_freq])
171
+ )
172
+ result_list = [result for result in result_list if result is not None]
173
+ if len(result_list) == 0:
174
+ continue
175
+
176
+ result_df = pd.DataFrame(result_list)
177
+ if args.saved_path.endswith(".csv"):
178
+ header = False if os.path.exists(args.saved_path) else True
179
+ result_df.to_csv(args.saved_path, header=header, index=False, mode="a")
180
+ elif args.saved_path.endswith(".jsonl"):
181
+ result_df.to_json(args.saved_path, orient="records", lines=True, mode="a", force_ascii=False)
182
+ logger.info(f"Save result to {args.saved_path}.")
183
+
184
+
185
+ if __name__ == "__main__":
186
+ main()
robomaster/video_caption/compute_text_score.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ from pathlib import Path
4
+
5
+ import easyocr
6
+ import numpy as np
7
+ import pandas as pd
8
+ from accelerate import PartialState
9
+ from accelerate.utils import gather_object
10
+ from natsort import natsorted
11
+ from tqdm import tqdm
12
+ from torchvision.datasets.utils import download_url
13
+
14
+ from utils.logger import logger
15
+ from utils.video_utils import extract_frames
16
+ from utils.filter import filter
17
+
18
+
19
def init_ocr_reader(root: str = "~/.cache/easyocr", device: str = "gpu"):
    """Download the CRAFT detection weights (if needed) and build an easyocr Reader.

    Args:
        root: Directory used to cache the model weights (``~`` is expanded).
        device: Value forwarded to ``easyocr.Reader``'s ``gpu`` argument
            (e.g. "cpu" or a device string).

    Returns:
        An ``easyocr.Reader`` configured for text *detection* only
        (``recognizer=False``) supporting English and simplified Chinese.
    """
    root = os.path.expanduser(root)
    # exist_ok avoids a race when several processes initialize concurrently.
    os.makedirs(root, exist_ok=True)
    # BUGFIX: the download used to run only when ``root`` did not exist, so a
    # pre-existing cache dir with a missing/partial weight file was never
    # repaired. ``download_url`` verifies the md5 and skips when already valid.
    download_url(
        "https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/easyanimate/video_caption/easyocr/craft_mlt_25k.pth",
        root,
        filename="craft_mlt_25k.pth",
        md5="2f8227d2def4037cdb3b34389dcf9ec1",
    )
    ocr_reader = easyocr.Reader(
        lang_list=["en", "ch_sim"],
        gpu=device,
        recognizer=False,
        verbose=False,
        model_storage_directory=root,
    )
    return ocr_reader
38
+
39
+
40
def triangle_area(p1, p2, p3):
    """Return the area of the triangle spanned by three 2-D points.

    Uses the cross-product (shoelace) formula; the absolute value makes the
    result independent of vertex orientation.
    """
    (ax, ay), (bx, by), (cx, cy) = p1, p2, p3
    cross = (bx - ax) * (cy - ay) - (cx - ax) * (by - ay)
    return 0.5 * np.abs(cross)
48
+
49
+
50
def compute_text_score(video_path, ocr_reader):
    """Compute the mean OCR-text area ratio over sampled frames of a video.

    The middle frame(s) are extracted and passed through the easyocr detector;
    the score for each frame is the fraction of its area covered by detected
    text regions (axis-aligned boxes plus free-form quadrilaterals), and the
    returned score is the mean over frames.

    Args:
        video_path: Path to the video file.
        ocr_reader: An ``easyocr.Reader`` created with ``recognizer=False``.

    Returns:
        A dict with the video file name and the rounded ``text_score``.
    """
    _, images = extract_frames(video_path, sample_method="mid")
    images = [np.array(image) for image in images]

    frame_ocr_area_ratios = []
    for image in images:
        # horizontal detected results and free-form detected results.
        horizontal_list, free_list = ocr_reader.detect(np.asarray(image))
        # image is H x W x C; only the H*W product matters for the ratio,
        # so the exact axis order is irrelevant here.
        total_area = image.shape[0] * image.shape[1]

        # Accumulate the axis-aligned rectangle area.
        rect_area = 0
        for xmin, xmax, ymin, ymax in horizontal_list[0]:
            if xmax < xmin or ymax < ymin:
                continue  # skip degenerate boxes
            rect_area += (xmax - xmin) * (ymax - ymin)

        # Accumulate the quadrilateral area by splitting each quad into two
        # triangles; malformed detector output falls back to zero area.
        quad_area = 0
        try:
            for points in free_list[0]:
                quad_area += triangle_area(*points[:3])
                quad_area += triangle_area(*(points[3:] + [points[0]]))
        # BUGFIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort fallback is preserved.
        except Exception:
            quad_area = 0

        frame_ocr_area_ratios.append((rect_area + quad_area) / total_area)

    video_meta_info = {
        "video_path": Path(video_path).name,
        "text_score": round(np.mean(frame_ocr_area_ratios), 5),
    }
    return video_meta_info
87
+
88
+
89
def parse_args():
    """Build the CLI parser for the text-score computation and parse argv."""
    parser = argparse.ArgumentParser(description="Compute the text score of the middle frame in the videos.")
    add = parser.add_argument
    add("--video_folder", type=str, default="", help="The video folder.")
    add("--video_metadata_path", type=str, default=None, help="The path to the video dataset metadata (csv/jsonl).")
    add(
        "--video_path_column",
        type=str,
        default="video_path",
        help="The column contains the video path (an absolute path or a relative path w.r.t the video_folder).",
    )
    add("--saved_path", type=str, required=True, help="The save path to the output results (csv/jsonl).")
    add("--saved_freq", type=int, default=100, help="The frequency to save the output results.")

    # Optional metadata files used to pre-filter the video list.
    add("--basic_metadata_path", type=str, default=None, help="The path to the basic metadata (csv/jsonl).")
    add("--min_resolution", type=float, default=0, help="The resolution threshold.")
    add("--min_duration", type=float, default=-1, help="The minimum duration.")
    add("--max_duration", type=float, default=-1, help="The maximum duration.")
    add("--asethetic_score_metadata_path", type=str, default=None, help="The path to the video quality metadata (csv/jsonl).")
    add("--min_asethetic_score", type=float, default=4.0, help="The asethetic score threshold.")
    add("--asethetic_score_siglip_metadata_path", type=str, default=None, help="The path to the video quality metadata (csv/jsonl).")
    add("--min_asethetic_score_siglip", type=float, default=4.0, help="The asethetic score (SigLIP) threshold.")
    add("--motion_score_metadata_path", type=str, default=None, help="The path to the video motion score metadata (csv/jsonl).")
    add("--min_motion_score", type=float, default=2, help="The motion threshold.")

    return parser.parse_args()
125
+
126
+
127
def _append_results(result_list, saved_path):
    """Append a list of result dicts to ``saved_path`` (csv or jsonl).

    Returns True when rows were written, False for an empty list.
    """
    if len(result_list) == 0:
        return False
    result_df = pd.DataFrame(result_list)
    if saved_path.endswith(".csv"):
        # Only write the header when the file is being created.
        header = not os.path.exists(saved_path)
        result_df.to_csv(saved_path, header=header, index=False, mode="a")
    elif saved_path.endswith(".jsonl"):
        result_df.to_json(saved_path, orient="records", lines=True, mode="a", force_ascii=False)
    return True


def main():
    """Compute OCR text scores for all unprocessed videos and append them to disk.

    Workflow: load the metadata, drop already-processed videos (resume),
    apply the optional quality filters, then split the remaining videos
    across accelerate processes. Gathered results are appended to
    ``args.saved_path`` every ``args.saved_freq`` videos and once at the end.
    """
    args = parse_args()

    if args.video_metadata_path.endswith(".csv"):
        video_metadata_df = pd.read_csv(args.video_metadata_path)
    elif args.video_metadata_path.endswith(".jsonl"):
        video_metadata_df = pd.read_json(args.video_metadata_path, lines=True)
    else:
        raise ValueError("The video_metadata_path must end with .csv or .jsonl.")
    video_path_list = video_metadata_df[args.video_path_column].tolist()

    if not (args.saved_path.endswith(".csv") or args.saved_path.endswith(".jsonl")):
        raise ValueError("The saved_path must end with .csv or .jsonl.")

    if os.path.exists(args.saved_path):
        if args.saved_path.endswith(".csv"):
            saved_metadata_df = pd.read_csv(args.saved_path)
        elif args.saved_path.endswith(".jsonl"):
            saved_metadata_df = pd.read_json(args.saved_path, lines=True)
        saved_video_path_list = saved_metadata_df[args.video_path_column].tolist()
        video_path_list = list(set(video_path_list).difference(set(saved_video_path_list)))
        logger.info(f"Resume from {args.saved_path}: {len(saved_video_path_list)} processed and {len(video_path_list)} to be processed.")

    # NOTE: ``filter`` is the project-local utils.filter.filter (it shadows the builtin).
    video_path_list = filter(
        video_path_list,
        basic_metadata_path=args.basic_metadata_path,
        min_resolution=args.min_resolution,
        min_duration=args.min_duration,
        max_duration=args.max_duration,
        asethetic_score_metadata_path=args.asethetic_score_metadata_path,
        min_asethetic_score=args.min_asethetic_score,
        asethetic_score_siglip_metadata_path=args.asethetic_score_siglip_metadata_path,
        min_asethetic_score_siglip=args.min_asethetic_score_siglip,
        motion_score_metadata_path=args.motion_score_metadata_path,
        min_motion_score=args.min_motion_score,
    )
    video_path_list = [os.path.join(args.video_folder, video_path) for video_path in video_path_list]
    # Sorting to guarantee the same result for each process.
    video_path_list = natsorted(video_path_list)

    state = PartialState()
    if state.is_main_process:
        # Check if the model is downloaded in the main process.
        ocr_reader = init_ocr_reader(device="cpu")
    state.wait_for_everyone()
    ocr_reader = init_ocr_reader(device=state.device)

    # Drop the tail so every process handles the same number of videos;
    # this avoids the NCCL timeout in the final gather operation.
    index = len(video_path_list) - len(video_path_list) % state.num_processes
    logger.info(f"Drop {len(video_path_list) % state.num_processes} videos to ensure each process handles the same number of videos.")
    video_path_list = video_path_list[:index]
    logger.info(f"{len(video_path_list)} videos are to be processed.")

    result_list = []
    with state.split_between_processes(video_path_list) as splitted_video_path_list:
        for i, video_path in enumerate(tqdm(splitted_video_path_list)):
            try:
                result_list.append(compute_text_score(video_path, ocr_reader))
            except Exception as e:
                logger.warning(f"Compute text score for video {video_path} with error: {e}.")
            # Periodically gather and persist partial results.
            if i != 0 and i % args.saved_freq == 0:
                state.wait_for_everyone()
                gathered_result_list = gather_object(result_list)
                if state.is_main_process and _append_results(gathered_result_list, args.saved_path):
                    logger.info(f"Save result to {args.saved_path}.")
                result_list = []

    # Final gather for whatever is left on each process.
    state.wait_for_everyone()
    gathered_result_list = gather_object(result_list)
    if state.is_main_process and _append_results(gathered_result_list, args.saved_path):
        logger.info(f"Save the final result to {args.saved_path}.")
211
+
212
+
213
+ if __name__ == "__main__":
214
+ main()
robomaster/video_caption/compute_video_quality.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+
4
+ import pandas as pd
5
+ from accelerate import PartialState
6
+ from accelerate.utils import gather_object
7
+ from natsort import index_natsorted
8
+ from tqdm import tqdm
9
+ from torch.utils.data import DataLoader
10
+
11
+ import utils.image_evaluator as image_evaluator
12
+ import utils.video_evaluator as video_evaluator
13
+ from utils.logger import logger
14
+ from utils.video_dataset import VideoDataset, collate_fn
15
+
16
+
17
def parse_args():
    """Build the CLI parser for the video-quality computation and parse argv."""
    parser = argparse.ArgumentParser(description="Compute scores of uniform sampled frames from videos.")
    add = parser.add_argument
    add("--video_metadata_path", type=str, default=None, help="The path to the video dataset metadata (csv/jsonl).")
    add(
        "--video_path_column",
        type=str,
        default="video_path",
        help="The column contains the video path (an absolute path or a relative path w.r.t the video_folder).",
    )
    add("--video_folder", type=str, default="", help="The video folder.")
    add("--caption_column", type=str, default=None, help="The column contains the caption.")
    add("--frame_sample_method", type=str, choices=["mid", "uniform", "image"], default="uniform")
    add("--num_sampled_frames", type=int, default=8, help="num_sampled_frames")
    add("--metrics", nargs="+", type=str, required=True, help="The evaluation metric(s) for generated images.")
    add("--batch_size", type=int, default=10, required=False, help="The batch size for the video dataset.")
    add("--num_workers", type=int, default=4, required=False, help="The number of workers for the video dataset.")
    add("--saved_path", type=str, required=True, help="The save path to the output results (csv/jsonl).")
    add("--saved_freq", type=int, default=1000, help="The frequency to save the output results.")

    return parser.parse_args()
67
+
68
+
69
def _save_result_dict(gathered_result_dict, saved_path):
    """Append a gathered result dict (column -> list) to ``saved_path`` (csv/jsonl)."""
    result_df = pd.DataFrame(gathered_result_dict)
    if saved_path.endswith(".csv"):
        # Only write the header when the file is being created.
        header = not os.path.exists(saved_path)
        result_df.to_csv(saved_path, header=header, index=False, mode="a")
    elif saved_path.endswith(".jsonl"):
        result_df.to_json(saved_path, orient="records", lines=True, mode="a", force_ascii=False)


def main():
    """Compute frame-/video-level quality metrics for videos and append them to disk.

    Loads the metadata, resumes from previous runs, initializes the requested
    metric evaluators (frame-wise from ``image_evaluator`` when available,
    otherwise video-wise from ``video_evaluator``), then streams batches
    through a DataLoader, gathering scores across accelerate processes and
    appending them to ``args.saved_path``.
    """
    args = parse_args()

    if args.video_metadata_path.endswith(".csv"):
        video_metadata_df = pd.read_csv(args.video_metadata_path)
    elif args.video_metadata_path.endswith(".jsonl"):
        video_metadata_df = pd.read_json(args.video_metadata_path, lines=True)
    else:
        raise ValueError("The video_metadata_path must end with .csv or .jsonl.")

    if not (args.saved_path.endswith(".csv") or args.saved_path.endswith(".jsonl")):
        raise ValueError("The saved_path must end with .csv or .jsonl.")

    if os.path.exists(args.saved_path):
        if args.saved_path.endswith(".csv"):
            saved_metadata_df = pd.read_csv(args.saved_path)
        elif args.saved_path.endswith(".jsonl"):
            saved_metadata_df = pd.read_json(args.saved_path, lines=True)

        # Filter out the unprocessed video-caption pairs by setting the indicator=True.
        merged_df = video_metadata_df.merge(saved_metadata_df, on="video_path", how="outer", indicator=True)
        video_metadata_df = merged_df[merged_df["_merge"] == "left_only"]
        # Sorting to guarantee the same result for each process.
        video_metadata_df = video_metadata_df.iloc[index_natsorted(video_metadata_df["video_path"])].reset_index(drop=True)
        if args.caption_column is None:
            video_metadata_df = video_metadata_df[[args.video_path_column]]
        else:
            # The outer merge suffixes duplicated columns with _x/_y; keep the input side.
            video_metadata_df = video_metadata_df[[args.video_path_column, args.caption_column + "_x"]]
            video_metadata_df.rename(columns={args.caption_column + "_x": args.caption_column}, inplace=True)
        logger.info(f"Resume from {args.saved_path}: {len(saved_metadata_df)} processed and {len(video_metadata_df)} to be processed.")

    state = PartialState()
    metric_fns = []
    for metric in args.metrics:
        # Frame-wise metrics live in image_evaluator; everything else is video-wise.
        if hasattr(image_evaluator, metric):
            evaluator_module = image_evaluator
            init_message = "Initializing frame-wise evaluator metrics..."
        else:
            evaluator_module = video_evaluator
            init_message = "Initializing video-wise evaluator metrics..."
        if state.is_main_process:
            logger.info(init_message)
            # Check if the model is downloaded in the main process.
            getattr(evaluator_module, metric)(device="cpu")
        state.wait_for_everyone()
        metric_fns.append(getattr(evaluator_module, metric)(device=state.device))

    result_dict = {args.video_path_column: [], "sample_frame_idx": []}
    for metric in metric_fns:
        result_dict[str(metric)] = []
    if args.caption_column is not None:
        result_dict[args.caption_column] = []

    if args.frame_sample_method == "image":
        logger.warning("Set args.num_sampled_frames to 1 since args.frame_sample_method is image.")
        args.num_sampled_frames = 1

    # Drop the tail so each process handles the same number of videos;
    # this avoids the NCCL timeout in the final gather operation.
    index = len(video_metadata_df) - len(video_metadata_df) % state.num_processes
    logger.info(f"Drop {len(video_metadata_df) % state.num_processes} videos to ensure each process handles the same number of videos.")
    video_metadata_df = video_metadata_df.iloc[:index]
    logger.info(f"{len(video_metadata_df)} videos are to be processed.")

    video_metadata_list = video_metadata_df.to_dict(orient='list')
    with state.split_between_processes(video_metadata_list) as splitted_video_metadata:
        video_dataset = VideoDataset(
            dataset_inputs=splitted_video_metadata,
            video_folder=args.video_folder,
            text_column=args.caption_column,
            sample_method=args.frame_sample_method,
            num_sampled_frames=args.num_sampled_frames
        )
        video_loader = DataLoader(video_dataset, batch_size=args.batch_size, num_workers=args.num_workers, collate_fn=collate_fn)

        for idx, batch in enumerate(tqdm(video_loader)):
            if len(batch) > 0:
                batch_video_path = batch["path"]
                result_dict["sample_frame_idx"].extend(batch["sampled_frame_idx"])
                batch_frame = batch["sampled_frame"]  # [batch_size, num_sampled_frames, H, W, C]
                batch_caption = None
                if args.caption_column is not None:
                    batch_caption = batch["text"]
                    # BUGFIX: captions were appended under the hard-coded key
                    # "caption", which raises KeyError whenever
                    # args.caption_column != "caption" (the dict above is
                    # keyed by args.caption_column).
                    result_dict[args.caption_column].extend(batch_caption)
                # Compute the quality.
                for i, metric in enumerate(args.metrics):
                    quality_scores = metric_fns[i](batch_frame, batch_caption)
                    if isinstance(quality_scores[0], list):  # frame-wise
                        quality_scores = [
                            [round(score, 5) for score in inner_list]
                            for inner_list in quality_scores
                        ]
                    else:  # video-wise
                        quality_scores = [round(score, 5) for score in quality_scores]
                    result_dict[str(metric_fns[i])].extend(quality_scores)

                if args.video_folder == "":
                    saved_video_path_list = batch_video_path
                else:
                    saved_video_path_list = [os.path.relpath(video_path, args.video_folder) for video_path in batch_video_path]
                result_dict[args.video_path_column].extend(saved_video_path_list)

            # Save the metadata in the main process every saved_freq.
            if (idx != 0) and (idx % args.saved_freq == 0):
                state.wait_for_everyone()
                gathered_result_dict = {k: gather_object(v) for k, v in result_dict.items()}
                if state.is_main_process and len(gathered_result_dict[args.video_path_column]) != 0:
                    _save_result_dict(gathered_result_dict, args.saved_path)
                    logger.info(f"Save result to {args.saved_path}.")
                for k in result_dict.keys():
                    result_dict[k] = []

    # Wait for all processes to finish and gather the final result.
    state.wait_for_everyone()
    gathered_result_dict = {k: gather_object(v) for k, v in result_dict.items()}
    # Save the metadata in the main process.
    if state.is_main_process and len(gathered_result_dict[args.video_path_column]) != 0:
        _save_result_dict(gathered_result_dict, args.saved_path)
        logger.info(f"Save the final result to {args.saved_path}.")
199
+
200
+ if __name__ == "__main__":
201
+ main()
robomaster/video_caption/cutscene_detect.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ from copy import deepcopy
4
+ from pathlib import Path
5
+ from multiprocessing import Pool
6
+
7
+ import pandas as pd
8
+ from scenedetect import open_video, SceneManager
9
+ from scenedetect.detectors import ContentDetector
10
+ from tqdm import tqdm
11
+
12
+ from utils.logger import logger
13
+
14
+
15
def cutscene_detection_star(args):
    """Unpack a (video_path, saved_path) tuple for ``Pool.imap`` dispatch."""
    video_path, saved_path = args
    return cutscene_detection(video_path, saved_path)
17
+
18
+
19
def cutscene_detection(video_path, saved_path, cutscene_threshold=27, min_scene_len=15):
    """Detect cut scenes in one video and dump the scene timecodes to jsonl.

    Skips work when ``saved_path`` already exists; failures are logged and
    swallowed so a batch run keeps going.
    """
    try:
        if os.path.exists(saved_path):
            logger.info(f"{video_path} has been processed.")
            return
        # Use PyAV as the backend to avoid (to some exent) containing the last frame of the previous scene.
        # https://github.com/Breakthrough/PySceneDetect/issues/279#issuecomment-2152596761.
        video = open_video(video_path, backend="pyav")
        frame_rate, frame_size = video.frame_rate, video.frame_size
        duration = deepcopy(video.duration)

        scene_manager = SceneManager()
        # [ContentDetector, ThresholdDetector, AdaptiveDetector]
        scene_manager.add_detector(ContentDetector(threshold=cutscene_threshold, min_scene_len=min_scene_len))
        scene_manager.detect_scenes(video, show_progress=False)

        # Collect every unique scene-boundary frame together with its timecode.
        boundary_frames, timecode_by_frame = [], {}
        for scene in scene_manager.get_scene_list():
            for frame_time_code in scene:
                frame_index = frame_time_code.get_frames()
                if frame_index not in boundary_frames:
                    boundary_frames.append(frame_index)
                    timecode_by_frame[frame_index] = frame_time_code

        del video, scene_manager

        # Adjacent sorted boundaries delimit the output scenes.
        boundary_frames = sorted(boundary_frames)
        output_scene_list = [
            (timecode_by_frame[start], timecode_by_frame[end])
            for start, end in zip(boundary_frames, boundary_frames[1:])
        ]

        timecode_list = [(start.get_timecode(), end.get_timecode()) for start, end in output_scene_list]
        meta_scene = [{
            "video_path": Path(video_path).name,
            "timecode_list": timecode_list,
            # NOTE(review): key is spelled "fram_rate" upstream; kept byte-identical
            # so downstream consumers of these jsonl files keep working.
            "fram_rate": frame_rate,
            "frame_size": frame_size,
            "duration": str(duration)  # __repr__
        }]
        pd.DataFrame(meta_scene).to_json(saved_path, orient="records", lines=True)
    except Exception as e:
        logger.warning(f"Cutscene detection with {video_path} failed. Error is: {e}.")
63
+
64
+
65
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Cutscene Detection")
    parser.add_argument(
        "--video_metadata_path", type=str, required=True, help="The path to the video dataset metadata (csv/jsonl)."
    )
    parser.add_argument(
        "--video_path_column",
        type=str,
        default="video_path",
        help="The column contains the video path (an absolute path or a relative path w.r.t the video_folder).",
    )
    parser.add_argument("--video_folder", type=str, default="", help="The video folder.")
    parser.add_argument("--saved_folder", type=str, required=True, help="The save path to the output results (csv/jsonl).")
    parser.add_argument("--n_jobs", type=int, default=1, help="The number of processes.")
    args = parser.parse_args()

    metadata_df = pd.read_json(args.video_metadata_path, lines=True)
    video_path_list = [
        os.path.join(args.video_folder, video_path)
        for video_path in metadata_df[args.video_path_column].tolist()
    ]

    os.makedirs(args.saved_folder, exist_ok=True)
    # The glob can be slow when there are many small jsonl files.
    # One output jsonl per video, named after the video stem.
    args_list = [
        (video_path, os.path.join(args.saved_folder, Path(video_path).stem + ".jsonl"))
        for video_path in video_path_list
    ]
    # Since the length of the video is not uniform, the gather operation is not performed.
    # We need to run easyanimate/video_caption/utils/gather_jsonl.py after the program finised.
    with Pool(args.n_jobs) as pool:
        results = list(tqdm(pool.imap(cutscene_detection_star, args_list), total=len(video_path_list)))
robomaster/video_caption/filter_meta_train.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+
4
+ import pandas as pd
5
+ from natsort import natsorted
6
+
7
+ from utils.logger import logger
8
+ from utils.filter import filter
9
+
10
+
11
def parse_args():
    """Build the CLI parser for assembling the filtered training metadata."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--caption_metadata_path", type=str, default=None, help="The path to the video quality metadata (csv/jsonl).")
    add(
        "--video_path_column",
        type=str,
        default="video_path",
        help="The column contains the video path (an absolute path or a relative path w.r.t the video_folder).",
    )
    add("--video_folder", type=str, default="", help="The video folder.")

    # Thresholds for each optional score file; -1 disables duration bounds.
    add("--basic_metadata_path", type=str, default=None, help="The path to the basic metadata (csv/jsonl).")
    add("--min_resolution", type=float, default=720*1280, help="The resolution threshold.")
    add("--min_duration", type=float, default=-1, help="The minimum duration.")
    add("--max_duration", type=float, default=-1, help="The maximum duration.")
    add("--asethetic_score_metadata_path", type=str, default=None, help="The path to the video quality metadata (csv/jsonl).")
    add("--min_asethetic_score", type=float, default=4.0, help="The asethetic score threshold.")
    add("--asethetic_score_siglip_metadata_path", type=str, default=None, help="The path to the video quality (SigLIP) metadata (csv/jsonl).")
    add("--min_asethetic_score_siglip", type=float, default=4.0, help="The asethetic score (SigLIP) threshold.")
    add("--text_score_metadata_path", type=str, default=None, help="The path to the video text score metadata (csv/jsonl).")
    add("--min_text_score", type=float, default=0.02, help="The text threshold.")
    add("--motion_score_metadata_path", type=str, default=None, help="The path to the video motion score metadata (csv/jsonl).")
    add("--min_motion_score", type=float, default=2, help="The motion threshold.")
    add("--videoclipxl_score_metadata_path", type=str, default=None, help="The path to the video-caption VideoCLIPXL score metadata (csv/jsonl).")
    add("--min_videoclipxl_score", type=float, default=0.20, help="The VideoCLIPXL score threshold.")
    add("--saved_path", type=str, required=True)

    return parser.parse_args()
+ return args
53
+
54
+
55
def main():
    """Filter captioned videos by quality metadata and emit the training json.

    Reads the caption metadata, keeps only videos passing all configured
    score thresholds, and writes a train file with ``file_path``/``text``/
    ``type`` columns (video paths joined with ``args.video_folder``).
    """
    args = parse_args()

    raw_caption_df = pd.read_json(args.caption_metadata_path, lines=True)
    video_path_list = raw_caption_df[args.video_path_column].to_list()
    # NOTE: ``filter`` is the project-local utils.filter.filter (it shadows the builtin).
    filtered_video_path_list = filter(
        video_path_list,
        basic_metadata_path=args.basic_metadata_path,
        min_resolution=args.min_resolution,
        min_duration=args.min_duration,
        max_duration=args.max_duration,
        asethetic_score_metadata_path=args.asethetic_score_metadata_path,
        min_asethetic_score=args.min_asethetic_score,
        asethetic_score_siglip_metadata_path=args.asethetic_score_siglip_metadata_path,
        min_asethetic_score_siglip=args.min_asethetic_score_siglip,
        text_score_metadata_path=args.text_score_metadata_path,
        min_text_score=args.min_text_score,
        motion_score_metadata_path=args.motion_score_metadata_path,
        min_motion_score=args.min_motion_score,
        videoclipxl_score_metadata_path=args.videoclipxl_score_metadata_path,
        min_videoclipxl_score=args.min_videoclipxl_score,
        video_path_column=args.video_path_column
    )
    filtered_video_path_list = natsorted(filtered_video_path_list)
    filtered_caption_df = raw_caption_df[raw_caption_df[args.video_path_column].isin(filtered_video_path_list)]
    # BUGFIX: rename by args.video_path_column instead of the hard-coded
    # "video_path", so a custom column name still yields "file_path".
    train_df = filtered_caption_df.rename(columns={args.video_path_column: "file_path", "caption": "text"})
    train_df["file_path"] = train_df["file_path"].map(lambda x: os.path.join(args.video_folder, x))
    train_df["type"] = "video"
    train_df.to_json(args.saved_path, orient="records", force_ascii=False, indent=2)
    logger.info(f"The final train file with {len(train_df)} videos are saved to {args.saved_path}.")
85
+
86
+
87
+ if __name__ == "__main__":
88
+ main()
robomaster/video_caption/package_patches/easyocr_detection_patched.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Modified from https://github.com/JaidedAI/EasyOCR/blob/803b907/easyocr/detection.py.
2
+ 1. Disable DataParallel.
3
+ """
4
+ import torch
5
+ import torch.backends.cudnn as cudnn
6
+ from torch.autograd import Variable
7
+ from PIL import Image
8
+ from collections import OrderedDict
9
+
10
+ import cv2
11
+ import numpy as np
12
+ from .craft_utils import getDetBoxes, adjustResultCoordinates
13
+ from .imgproc import resize_aspect_ratio, normalizeMeanVariance
14
+ from .craft import CRAFT
15
+
16
def copyStateDict(state_dict):
    """Return an OrderedDict copy of ``state_dict`` with any leading
    ``module`` component (added by ``DataParallel``) stripped from the keys.

    Whether to strip is decided from the first key only, matching checkpoints
    that were saved entirely with or without the wrapper.
    """
    start_idx = 1 if list(state_dict)[0].startswith("module") else 0
    new_state_dict = OrderedDict(
        (".".join(key.split(".")[start_idx:]), value)
        for key, value in state_dict.items()
    )
    return new_state_dict
26
+
27
def test_net(canvas_size, mag_ratio, net, image, text_threshold, link_threshold, low_text, poly, device, estimate_num_chars=False):
    """Run the CRAFT detector on one image (or a 4-D batch) and post-process.

    Returns ``(boxes_list, polys_list)`` with one entry per input image; when
    ``estimate_num_chars`` is set each box is paired with its mapper value.
    """
    # Normalize the input to a list of numpy images.
    if isinstance(image, np.ndarray) and len(image.shape) == 4:
        image_arrs = image
    else:
        image_arrs = [image]

    # Resize each image with the shared canvas/magnification settings.
    img_resized_list = []
    for img in image_arrs:
        img_resized, target_ratio, size_heatmap = resize_aspect_ratio(
            img, canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=mag_ratio
        )
        img_resized_list.append(img_resized)
    ratio_h = ratio_w = 1 / target_ratio

    # Preprocess: normalize, move channels first, batch, and move to device.
    x = [np.transpose(normalizeMeanVariance(n_img), (2, 0, 1)) for n_img in img_resized_list]
    x = torch.from_numpy(np.array(x)).to(device)

    # Forward pass (inference only).
    with torch.no_grad():
        y, feature = net(x)

    boxes_list, polys_list = [], []
    for out in y:
        # Split the score map into text confidence and link confidence.
        score_text = out[:, :, 0].cpu().data.numpy()
        score_link = out[:, :, 1].cpu().data.numpy()

        # Post-processing: extract boxes/polygons from the score maps.
        boxes, polys, mapper = getDetBoxes(
            score_text, score_link, text_threshold, link_threshold, low_text, poly, estimate_num_chars
        )

        # Map coordinates back to the original image resolution.
        boxes = adjustResultCoordinates(boxes, ratio_w, ratio_h)
        polys = adjustResultCoordinates(polys, ratio_w, ratio_h)
        if estimate_num_chars:
            boxes = list(boxes)
            polys = list(polys)
        for k in range(len(polys)):
            if estimate_num_chars:
                boxes[k] = (boxes[k], mapper[k])
            if polys[k] is None:
                # Fall back to the rectangular box when no polygon was found.
                polys[k] = boxes[k]
        boxes_list.append(boxes)
        polys_list.append(polys)

    return boxes_list, polys_list
76
+
77
def get_detector(trained_model, device='cpu', quantize=True, cudnn_benchmark=False):
    """Load the CRAFT text-detection network from a checkpoint.

    Args:
        trained_model: Path to the CRAFT state-dict checkpoint.
        device: 'cpu' or a CUDA device.
        quantize: On CPU only, attempt dynamic int8 quantization (best effort).
        cudnn_benchmark: Value assigned to ``cudnn.benchmark`` on GPU.

    Returns:
        The network in eval mode on the requested device. DataParallel is
        intentionally not used (this is the patched single-device version).
    """
    net = CRAFT()
    # ``module.``-prefixed keys from DataParallel checkpoints are stripped.
    net.load_state_dict(copyStateDict(torch.load(trained_model, map_location=device)))

    if device == 'cpu':
        if quantize:
            try:
                torch.quantization.quantize_dynamic(net, dtype=torch.qint8, inplace=True)
            except Exception:
                # BUGFIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit; quantization stays best-effort.
                pass
    else:
        # net = torch.nn.DataParallel(net).to(device)
        net = net.to(device)
        cudnn.benchmark = cudnn_benchmark

    net.eval()
    return net
95
+
96
def get_textbox(detector, image, canvas_size, mag_ratio, text_threshold, link_threshold, low_text, poly, device, optimal_num_chars=None, **kwargs):
    """Detect text regions and return flattened int32 polygons per image."""
    estimate_num_chars = optimal_num_chars is not None
    bboxes_list, polys_list = test_net(
        canvas_size, mag_ratio, detector, image, text_threshold,
        link_threshold, low_text, poly, device, estimate_num_chars
    )
    if estimate_num_chars:
        # Order polygons by how close their char-count estimate is to the target.
        polys_list = [
            [p for p, _ in sorted(polys, key=lambda x: abs(optimal_num_chars - x[1]))]
            for polys in polys_list
        ]

    result = []
    for polys in polys_list:
        single_img_result = [
            np.array(box).astype(np.int32).reshape((-1)) for box in polys
        ]
        result.append(single_img_result)

    return result
robomaster/video_caption/package_patches/vila_siglip_encoder_patched.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Modified from https://github.com/NVlabs/VILA/blob/1c88211/llava/model/multimodal_encoder/siglip_encoder.py
2
+ # 1. Support transformers >= 4.36.2.
3
+ import torch
4
+ import transformers
5
+ from packaging import version
6
+ from transformers import AutoConfig, AutoModel, PretrainedConfig
7
+
8
+ from llava.model.multimodal_encoder.vision_encoder import VisionTower, VisionTowerS2
9
+
10
+ if version.parse(transformers.__version__) > version.parse("4.36.2"):
11
+ from transformers import SiglipImageProcessor, SiglipVisionConfig, SiglipVisionModel
12
+ else:
13
+ from .siglip import SiglipImageProcessor, SiglipVisionConfig, SiglipVisionModel
14
+
15
+
16
class SiglipVisionTower(VisionTower):
    """VILA vision tower backed by a pretrained SigLIP vision encoder."""

    def __init__(self, model_name_or_path: str, config: PretrainedConfig, state_dict=None):
        super().__init__(model_name_or_path, config)
        self.image_processor = SiglipImageProcessor.from_pretrained(model_name_or_path)
        # NOTE(review): eval() executes config.model_dtype as Python code —
        # presumably a string like "torch.float16"; confirm the config always
        # comes from a trusted source before loading.
        self.vision_tower = SiglipVisionModel.from_pretrained(
            # TODO(ligeng): why pass config here leading to errors?
            model_name_or_path, torch_dtype=eval(config.model_dtype), state_dict=state_dict
        )
        # Mark the tower as ready so the wrapper does not lazily re-load it.
        self.is_loaded = True
25
+
26
+
27
class SiglipVisionTowerS2(VisionTowerS2):
    """Multi-scale (S2) variant of the SigLIP vision tower."""

    def __init__(self, model_name_or_path: str, config: PretrainedConfig):
        super().__init__(model_name_or_path, config)
        self.image_processor = SiglipImageProcessor.from_pretrained(model_name_or_path)
        # NOTE(review): eval() executes config.model_dtype as Python code —
        # confirm the config source is trusted.
        self.vision_tower = SiglipVisionModel.from_pretrained(
            model_name_or_path, torch_dtype=eval(config.model_dtype)
        )

        # Make sure it crops/resizes the image to the largest scale in self.scales to maintain high-res information
        self.image_processor.size['height'] = self.image_processor.size['width'] = self.scales[-1]

        self.is_loaded = True
39
+
40
# transformers <= 4.36.2 has no built-in SigLIP support, so register the
# vendored classes with the Auto* factories; newer versions ship their own.
if version.parse(transformers.__version__) <= version.parse("4.36.2"):
    AutoConfig.register("siglip_vision_model", SiglipVisionConfig)
    AutoModel.register(SiglipVisionConfig, SiglipVisionModel)
robomaster/video_caption/prompt/beautiful_prompt.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ I will upload some brief prompt words to be used for AI-generated videos. Please expand these brief prompt words into a more detailed description to enhance the quality of the generated videos. The detailed description should include the main subject (person, object, animal, or none), its actions and their attributes or status sequence, the background (the objects, location, weather, and time), the view shot, and the camera movement.
2
+ The final detailed description must not exceed 200 words. Output with the following json format:
3
+ {"detailed description": "your detailed description here"}
4
+
5
+ Here is an example:
6
+ brief prompt words: "A stylish woman in a black leather jacket, red dress, and boots walks confidently down a damp Tokyo street."
7
+ {"detailed description": "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse. She wears sunglasses and red lipstick. She walks confidently and casually. The street is damp and reflective, creating a mirror effect of the colorful lights. Many pedestrians walk about."}
8
+
9
+ Here are the brief prompt words:
robomaster/video_caption/prompt/rewrite.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ Please rewrite the video description to be useful for AI to re-generate the video, according to the following requirements
2
+ 1. Do not start with something similar to 'The video/scene/frame shows' or "In this video/scene/frame".
3
+ 2. Remove the subjective content that deviates from describing the visual content of the video. For instance, a sentence like "It gives a feeling of ease and tranquility and makes people feel comfortable" is considered subjective.
4
+ 3. Remove any description of content that does not exist in the visual content of the video. For instance, a sentence like "There is no visible detail that could be used to identify the individual beyond what is shown." is considered a non-existent description.
5
+ 4. Here are some examples of good descriptions: 1) A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse. She wears sunglasses and red lipstick. She walks confidently and casually. The street is damp and reflective, creating a mirror effect of the colorful lights. Many pedestrians walk about. 2) A large orange octopus is seen resting on the bottom of the ocean floor, blending in with the sandy and rocky terrain. Its tentacles are spread out around its body, and its eyes are closed. The octopus is unaware of a king crab that is crawling towards it from behind a rock, its claws raised and ready to attack. The crab is brown and spiny, with long legs and antennae. The scene is captured from a wide angle, showing the vastness and depth of the ocean. The water is clear and blue, with rays of sunlight filtering through. The shot is sharp and crisp, with a high dynamic range. The octopus and the crab are in focus, while the background is slightly blurred, creating a depth of field effect.
6
+ 5. Output with the following json format:
7
+ {"rewritten description": "your rewritten description here"}
8
+
9
+ Here is the video description:
robomaster/video_caption/requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pandas>=2.0.0
2
+ easyocr==1.7.1
3
+ git+https://github.com/openai/CLIP.git
4
+ natsort
5
+ joblib
6
+ scenedetect
7
+ av
8
+ # https://github.com/NVlabs/VILA/issues/78#issuecomment-2195568292
9
+ numpy<2.0.0
robomaster/video_caption/scripts/stage_1_video_splitting.sh ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Stage 1: detect scene cuts in raw videos and split them into short clips.
# Abort on the first failing step so later stages never consume partial output.
set -e

VIDEO_FOLDER="datasets/panda_70m/videos/data/"
META_FILE_PATH="datasets/panda_70m/videos/meta_file_info.jsonl"
SCENE_FOLDER="datasets/panda_70m/videos/meta_scene_info/"
SCENE_SAVED_PATH="datasets/panda_70m/videos/meta_scene_info.jsonl"
OUTPUT_FOLDER="datasets/panda_70m/videos_clips/data/"
RESOLUTION_THRESHOLD=$((512*512))

# Set the duration range of video clips.
export MIN_SECONDS=3
export MAX_SECONDS=10

# Save all video names in a video folder as a meta file.
python -m utils.get_meta_file \
    --video_folder $VIDEO_FOLDER \
    --saved_path $META_FILE_PATH

# Perform scene detection on the video dataset.
# Adjust the n_jobs parameter based on the actual number of CPU cores in the machine.
python cutscene_detect.py \
    --video_metadata_path $META_FILE_PATH \
    --video_folder $VIDEO_FOLDER \
    --saved_folder $SCENE_FOLDER \
    --n_jobs 32

# Gather all scene jsonl files to a single scene jsonl file.
# Adjust the n_jobs parameter based on the actual I/O speed in the machine.
python -m utils.gather_jsonl \
    --meta_folder $SCENE_FOLDER \
    --meta_file_path $SCENE_SAVED_PATH \
    --n_jobs 64

# Perform video splitting filtered by the RESOLUTION_THRESHOLD.
# It consumes more CPU computing resources compared to the above operations.
python video_splitting.py \
    --video_metadata_path $SCENE_SAVED_PATH \
    --video_folder $VIDEO_FOLDER \
    --output_folder $OUTPUT_FOLDER \
    --n_jobs 16 \
    --resolution_threshold $RESOLUTION_THRESHOLD
robomaster/video_caption/scripts/stage_2_video_filtering.sh ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Stage 2: score the video clips (aesthetics, OCR text area, motion) so that
# low-quality clips can be filtered out before recaptioning.
# Abort on the first failing step so later stages never consume partial output.
set -e

META_FILE_PATH="datasets/panda_70m/videos_clips/data/meta_file_info.jsonl"
VIDEO_FOLDER="datasets/panda_70m/videos_clips/data/"
VIDEO_QUALITY_SAVED_PATH="datasets/panda_70m/videos_clips/meta_quality_info_siglip.jsonl"
MIN_ASETHETIC_SCORE_SIGLIP=4.0
TEXT_SAVED_PATH="datasets/panda_70m/videos_clips/meta_text_info.jsonl"
MIN_TEXT_SCORE=0.02
MOTION_SAVED_PATH="datasets/panda_70m/videos_clips/meta_motion_info.jsonl"

python -m utils.get_meta_file \
    --video_folder $VIDEO_FOLDER \
    --saved_path $META_FILE_PATH

# Get the aesthetic score (SigLIP) of all videos.
# (The flag spellings "asethetic" below match the Python argparse definitions.)
accelerate launch compute_video_quality.py \
    --video_metadata_path $META_FILE_PATH \
    --video_folder $VIDEO_FOLDER \
    --metrics "AestheticScoreSigLIP" \
    --frame_sample_method uniform \
    --num_sampled_frames 4 \
    --saved_freq 10 \
    --saved_path $VIDEO_QUALITY_SAVED_PATH \
    --batch_size 4

# Get the text score of all videos filtered by the video quality score.
accelerate launch compute_text_score.py \
    --video_metadata_path $META_FILE_PATH \
    --video_folder $VIDEO_FOLDER \
    --saved_freq 10 \
    --saved_path $TEXT_SAVED_PATH \
    --asethetic_score_siglip_metadata_path $VIDEO_QUALITY_SAVED_PATH \
    --min_asethetic_score_siglip $MIN_ASETHETIC_SCORE_SIGLIP

# Get the motion score of all videos filtered by the video quality score and text score.
python compute_motion_score.py \
    --video_metadata_path $META_FILE_PATH \
    --video_folder $VIDEO_FOLDER \
    --saved_freq 10 \
    --saved_path $MOTION_SAVED_PATH \
    --n_jobs 8 \
    --text_score_metadata_path $TEXT_SAVED_PATH \
    --min_text_score $MIN_TEXT_SCORE
robomaster/video_caption/scripts/stage_3_video_recaptioning.sh ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Stage 3: recaption the filtered clips with VILA, optionally rewrite the
# captions, score caption-video alignment, and emit the final train file.
# Abort on the first failing step so later stages never consume partial output.
set -e

META_FILE_PATH="datasets/panda_70m/videos_clips/data/meta_file_info.jsonl"
VIDEO_FOLDER="datasets/panda_70m/videos_clips/data/"
MOTION_SAVED_PATH="datasets/panda_70m/videos_clips/meta_motion_info.jsonl"
MIN_MOTION_SCORE=2
VIDEO_CAPTION_SAVED_PATH="datasets/panda_70m/meta_caption_info_vila_8b.jsonl"
REWRITTEN_VIDEO_CAPTION_SAVED_PATH="datasets/panda_70m/meta_caption_info_vila_8b_rewritten.jsonl"
VIDEOCLIPXL_SCORE_SAVED_PATH="datasets/panda_70m/meta_caption_info_vila_8b_rewritten_videoclipxl.jsonl"
MIN_VIDEOCLIPXL_SCORE=0.20
TRAIN_SAVED_PATH="datasets/panda_70m/train_panda_70m.json"
# Manually download Efficient-Large-Model/Llama-3-VILA1.5-8b-AWQ to VILA_MODEL_PATH.
# Manually download meta-llama/Meta-Llama-3-8B-Instruct to REWRITE_MODEL_PATH.
# Fail early with a clear message if the required model paths are not set.
: "${VILA_MODEL_PATH:?Set VILA_MODEL_PATH to the local Llama-3-VILA1.5-8b-AWQ directory}"
: "${REWRITE_MODEL_PATH:?Set REWRITE_MODEL_PATH to the local Meta-Llama-3-8B-Instruct directory}"

# Use VILA1.5-AWQ to perform recaptioning.
accelerate launch vila_video_recaptioning.py \
    --video_metadata_path ${META_FILE_PATH} \
    --video_folder ${VIDEO_FOLDER} \
    --model_path ${VILA_MODEL_PATH} \
    --precision "W4A16" \
    --saved_path $VIDEO_CAPTION_SAVED_PATH \
    --saved_freq 1 \
    --motion_score_metadata_path $MOTION_SAVED_PATH \
    --min_motion_score $MIN_MOTION_SCORE

# Rewrite video captions (optional).
python caption_rewrite.py \
    --video_metadata_path $VIDEO_CAPTION_SAVED_PATH \
    --batch_size 4096 \
    --model_name $REWRITE_MODEL_PATH \
    --prompt prompt/rewrite.txt \
    --prefix '"rewritten description": ' \
    --saved_path $REWRITTEN_VIDEO_CAPTION_SAVED_PATH \
    --saved_freq 1

# Compute caption-video alignment (optional).
accelerate launch compute_video_quality.py \
    --video_metadata_path $REWRITTEN_VIDEO_CAPTION_SAVED_PATH \
    --caption_column caption \
    --video_folder $VIDEO_FOLDER \
    --frame_sample_method uniform \
    --num_sampled_frames 8 \
    --metrics VideoCLIPXLScore \
    --batch_size 4 \
    --saved_path $VIDEOCLIPXL_SCORE_SAVED_PATH \
    --saved_freq 10

# Get the final train file.
python filter_meta_train.py \
    --caption_metadata_path $REWRITTEN_VIDEO_CAPTION_SAVED_PATH \
    --video_folder=$VIDEO_FOLDER \
    --videoclipxl_score_metadata_path $VIDEOCLIPXL_SCORE_SAVED_PATH \
    --min_videoclipxl_score $MIN_VIDEOCLIPXL_SCORE \
    --saved_path=$TRAIN_SAVED_PATH
robomaster/video_caption/utils/filter.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import os
3
+
4
+ import pandas as pd
5
+
6
+ from .logger import logger
7
+
8
+
9
def _load_metadata_df(metadata_path):
    """Load a .csv or .jsonl metadata file into a DataFrame.

    Raises:
        ValueError: for any other extension. Previously an unknown extension
            left the DataFrame variable unbound and crashed later with a
            confusing NameError.
    """
    if metadata_path.endswith(".csv"):
        return pd.read_csv(metadata_path)
    if metadata_path.endswith(".jsonl"):
        return pd.read_json(metadata_path, lines=True)
    raise ValueError(f"The metadata file should end with .csv or .jsonl, got {metadata_path}.")


def filter(
    video_path_list,
    basic_metadata_path=None,
    min_resolution=0,
    min_duration=-1,
    max_duration=-1,
    asethetic_score_metadata_path=None,
    min_asethetic_score=4,
    asethetic_score_siglip_metadata_path=None,
    min_asethetic_score_siglip=4,
    text_score_metadata_path=None,
    min_text_score=0.02,
    motion_score_metadata_path=None,
    min_motion_score=2,
    videoclipxl_score_metadata_path=None,
    min_videoclipxl_score=0.20,
    video_path_column="video_path",
):
    """Filter video file names by the optional per-metric metadata files.

    Every metadata path that is not None removes the videos failing the
    corresponding threshold: resolution/duration (basic), aesthetic scores
    (CLIP and SigLIP variants), OCR text-area score, motion score, and
    VideoCLIP-XL caption-alignment score. Videos are matched by base name.

    Note: the function name shadows the builtin ``filter``; it is kept for
    backward compatibility with existing callers.

    Returns:
        The remaining video base names; order is not preserved (set difference).
    """
    video_path_list = [os.path.basename(video_path) for video_path in video_path_list]

    if basic_metadata_path is not None:
        basic_df = _load_metadata_df(basic_metadata_path)

        basic_df["resolution"] = basic_df["frame_size"].apply(lambda x: x[0] * x[1])
        filtered_basic_df = basic_df[basic_df["resolution"] < min_resolution]
        filtered_video_path_list = filtered_basic_df[video_path_column].tolist()
        filtered_video_path_list = [os.path.basename(video_path) for video_path in filtered_video_path_list]

        video_path_list = list(set(video_path_list).difference(set(filtered_video_path_list)))
        logger.info(
            f"Load {basic_metadata_path} ({len(basic_df)}) and filter {len(filtered_video_path_list)} videos "
            f"with resolution less than {min_resolution}."
        )

        # Duration filtering needs basic_df, so it only applies when the
        # basic metadata file is provided; -1 disables each bound.
        if min_duration != -1:
            filtered_basic_df = basic_df[basic_df["duration"] < min_duration]
            filtered_video_path_list = filtered_basic_df[video_path_column].tolist()
            filtered_video_path_list = [os.path.basename(video_path) for video_path in filtered_video_path_list]

            video_path_list = list(set(video_path_list).difference(set(filtered_video_path_list)))
            logger.info(
                f"Load {basic_metadata_path} and filter {len(filtered_video_path_list)} videos "
                f"with duration less than {min_duration}."
            )

        if max_duration != -1:
            filtered_basic_df = basic_df[basic_df["duration"] > max_duration]
            filtered_video_path_list = filtered_basic_df[video_path_column].tolist()
            filtered_video_path_list = [os.path.basename(video_path) for video_path in filtered_video_path_list]

            video_path_list = list(set(video_path_list).difference(set(filtered_video_path_list)))
            logger.info(
                f"Load {basic_metadata_path} and filter {len(filtered_video_path_list)} videos "
                f"with duration greater than {max_duration}."
            )

    if asethetic_score_metadata_path is not None:
        asethetic_score_df = _load_metadata_df(asethetic_score_metadata_path)

        # In pandas, csv will save lists as strings, whereas jsonl will not.
        asethetic_score_df["aesthetic_score"] = asethetic_score_df["aesthetic_score"].apply(
            lambda x: ast.literal_eval(x) if isinstance(x, str) else x
        )
        asethetic_score_df["aesthetic_score_mean"] = asethetic_score_df["aesthetic_score"].apply(lambda x: sum(x) / len(x))
        filtered_asethetic_score_df = asethetic_score_df[asethetic_score_df["aesthetic_score_mean"] < min_asethetic_score]
        filtered_video_path_list = filtered_asethetic_score_df[video_path_column].tolist()
        filtered_video_path_list = [os.path.basename(video_path) for video_path in filtered_video_path_list]

        video_path_list = list(set(video_path_list).difference(set(filtered_video_path_list)))
        logger.info(
            f"Load {asethetic_score_metadata_path} ({len(asethetic_score_df)}) and filter {len(filtered_video_path_list)} videos "
            f"with aesthetic score less than {min_asethetic_score}."
        )

    if asethetic_score_siglip_metadata_path is not None:
        asethetic_score_siglip_df = _load_metadata_df(asethetic_score_siglip_metadata_path)

        # In pandas, csv will save lists as strings, whereas jsonl will not.
        asethetic_score_siglip_df["aesthetic_score_siglip"] = asethetic_score_siglip_df["aesthetic_score_siglip"].apply(
            lambda x: ast.literal_eval(x) if isinstance(x, str) else x
        )
        asethetic_score_siglip_df["aesthetic_score_siglip_mean"] = asethetic_score_siglip_df["aesthetic_score_siglip"].apply(
            lambda x: sum(x) / len(x)
        )
        filtered_asethetic_score_siglip_df = asethetic_score_siglip_df[
            asethetic_score_siglip_df["aesthetic_score_siglip_mean"] < min_asethetic_score_siglip
        ]
        filtered_video_path_list = filtered_asethetic_score_siglip_df[video_path_column].tolist()
        filtered_video_path_list = [os.path.basename(video_path) for video_path in filtered_video_path_list]

        video_path_list = list(set(video_path_list).difference(set(filtered_video_path_list)))
        logger.info(
            f"Load {asethetic_score_siglip_metadata_path} ({len(asethetic_score_siglip_df)}) and filter {len(filtered_video_path_list)} videos "
            f"with aesthetic score (SigLIP) less than {min_asethetic_score_siglip}."
        )

    if text_score_metadata_path is not None:
        text_score_df = _load_metadata_df(text_score_metadata_path)

        # High text score means a large OCR-detected text area, so videos
        # ABOVE the threshold are removed here (the inequality is inverted
        # compared to the other metrics on purpose).
        filtered_text_score_df = text_score_df[text_score_df["text_score"] > min_text_score]
        filtered_video_path_list = filtered_text_score_df[video_path_column].tolist()
        filtered_video_path_list = [os.path.basename(video_path) for video_path in filtered_video_path_list]

        video_path_list = list(set(video_path_list).difference(set(filtered_video_path_list)))
        logger.info(
            f"Load {text_score_metadata_path} ({len(text_score_df)}) and filter {len(filtered_video_path_list)} videos "
            f"with text score greater than {min_text_score}."
        )

    if motion_score_metadata_path is not None:
        motion_score_df = _load_metadata_df(motion_score_metadata_path)

        filtered_motion_score_df = motion_score_df[motion_score_df["motion_score"] < min_motion_score]
        filtered_video_path_list = filtered_motion_score_df[video_path_column].tolist()
        filtered_video_path_list = [os.path.basename(video_path) for video_path in filtered_video_path_list]

        video_path_list = list(set(video_path_list).difference(set(filtered_video_path_list)))
        logger.info(
            f"Load {motion_score_metadata_path} ({len(motion_score_df)}) and filter {len(filtered_video_path_list)} videos "
            f"with motion score smaller than {min_motion_score}."
        )

    if videoclipxl_score_metadata_path is not None:
        videoclipxl_score_df = _load_metadata_df(videoclipxl_score_metadata_path)

        filtered_videoclipxl_score_df = videoclipxl_score_df[videoclipxl_score_df["videoclipxl_score"] < min_videoclipxl_score]
        filtered_video_path_list = filtered_videoclipxl_score_df[video_path_column].tolist()
        filtered_video_path_list = [os.path.basename(video_path) for video_path in filtered_video_path_list]

        video_path_list = list(set(video_path_list).difference(set(filtered_video_path_list)))
        # Fixed message: the metric is VideoCLIP-XL, not "mixclip".
        logger.info(
            f"Load {videoclipxl_score_metadata_path} ({len(videoclipxl_score_df)}) and "
            f"filter {len(filtered_video_path_list)} videos with VideoCLIP-XL score smaller than {min_videoclipxl_score}."
        )

    return video_path_list
robomaster/video_caption/utils/gather_jsonl.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import glob
4
+ import json
5
+ from multiprocessing import Pool, Manager
6
+
7
+ import pandas as pd
8
+ from natsort import index_natsorted
9
+
10
+ from .logger import logger
11
+
12
+
13
def process_file(file_path, shared_list):
    """Parse each line of a jsonl file and append the records to shared_list."""
    with open(file_path, "r") as handle:
        shared_list.extend(json.loads(record) for record in handle)
18
+
19
+
20
def parse_args():
    """Build and parse the command-line arguments for the gathering script."""
    arg_parser = argparse.ArgumentParser(
        description="Gather all jsonl files in a folder (meta_folder) to a single jsonl file (meta_file_path)."
    )
    arg_parser.add_argument("--meta_folder", type=str, required=True)
    arg_parser.add_argument("--meta_file_path", type=str, required=True)
    arg_parser.add_argument("--video_path_column", type=str, default="video_path")
    arg_parser.add_argument("--n_jobs", type=int, default=1)
    return arg_parser.parse_args()
29
+
30
+
31
def main():
    """Gather every per-video jsonl file under meta_folder into a single,
    naturally-sorted jsonl file at meta_file_path."""
    args = parse_args()

    jsonl_files = glob.glob(os.path.join(args.meta_folder, "*.jsonl"))

    with Manager() as manager:
        shared_list = manager.list()
        with Pool(processes=args.n_jobs) as pool:
            async_results = [
                pool.apply_async(process_file, args=(file_path, shared_list))
                for file_path in jsonl_files
            ]
            pool.close()
            pool.join()
        # Re-raise any exception raised in a worker; previously these were
        # silently dropped and the gathered file could be incomplete.
        for async_result in async_results:
            async_result.get()

        with open(args.meta_file_path, "w") as f:
            for item in shared_list:
                f.write(json.dumps(item) + '\n')

        # Re-read and natural-sort by video path so the output is deterministic
        # regardless of worker completion order.
        df = pd.read_json(args.meta_file_path, lines=True)
        df = df.iloc[index_natsorted(df[args.video_path_column])].reset_index(drop=True)
        logger.info(f"Save the gathered single jsonl file to {args.meta_file_path}.")
        df.to_json(args.meta_file_path, orient="records", lines=True, force_ascii=False)
52
+
53
+
54
# CLI entry point.
if __name__ == '__main__':
    main()
robomaster/video_caption/utils/get_meta_file.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ from pathlib import Path
3
+
4
+ import pandas as pd
5
+ from natsort import natsorted
6
+ from tqdm import tqdm
7
+
8
+ from .logger import logger
9
+
10
+
11
# Recognized file extensions (lower-case, without the leading dot).
ALL_VIDEO_EXT = set(["mp4", "webm", "mkv", "avi", "flv", "mov"])
# NOTE(review): "IMGAE" is a typo for "IMAGE"; the name is kept because other
# modules may import it.
ALL_IMGAE_EXT = set(["png", "webp", "jpg", "jpeg", "bmp", "gif"])
13
+
14
+
15
def parse_args():
    """Parse command-line arguments for building a video/image meta file.

    The original description ("Compute scores of uniform sampled frames from
    videos.") was copy-pasted from another script and did not describe this
    one; the --image_folder help text wrongly said "The video folder.".
    """
    parser = argparse.ArgumentParser(
        description="Save all video/image file paths under a folder as a meta file (csv/jsonl)."
    )
    parser.add_argument(
        "--image_path_column",
        type=str,
        default="image_path",
        help="The column contains the image path (an absolute path or a relative path w.r.t the image_folder).",
    )
    parser.add_argument("--image_folder", type=str, default=None, help="The image folder.")
    parser.add_argument(
        "--video_path_column",
        type=str,
        default="video_path",
        help="The column contains the video path (an absolute path or a relative path w.r.t the video_folder).",
    )
    parser.add_argument("--video_folder", type=str, default=None, help="The video folder.")
    parser.add_argument("--saved_path", type=str, required=True, help="The save path to the output results (csv/jsonl).")
    parser.add_argument("--recursive", action="store_true", help="Whether to search sub-folders recursively.")

    args = parser.parse_args()
    return args
36
+
37
+
38
def _collect_relative_paths(folder, extensions, recursive):
    """Return naturally-sorted file paths (relative to folder) whose lower-case
    extension is in `extensions`; searches sub-folders when recursive=True."""
    root = Path(folder)
    paths = []
    for ext in tqdm(list(extensions)):
        matches = root.rglob(f"*.{ext}") if recursive else root.glob(f"*.{ext}")
        paths += [str(file.relative_to(root)) for file in matches]
    return natsorted(paths)


def main():
    """Collect all video OR image paths under the given folder and save them
    as a one-column jsonl meta file (the path is used as the unique ID)."""
    args = parse_args()

    if args.video_folder is None and args.image_folder is None:
        raise ValueError("Either video_folder or image_folder should be specified in the arguments.")
    if args.video_folder is not None and args.image_folder is not None:
        raise ValueError("Both video_folder and image_folder can not be specified in the arguments at the same time.")

    # Use the path name instead of the file name as video_path/image_path (unique ID).
    # The duplicated video/image scan loops are now shared via _collect_relative_paths.
    if args.video_folder is not None:
        video_path_list = _collect_relative_paths(args.video_folder, ALL_VIDEO_EXT, args.recursive)
        meta_file_df = pd.DataFrame({args.video_path_column: video_path_list})
    else:
        image_path_list = _collect_relative_paths(args.image_folder, ALL_IMGAE_EXT, args.recursive)
        meta_file_df = pd.DataFrame({args.image_path_column: image_path_list})

    logger.info(f"{len(meta_file_df)} files in total. Save the result to {args.saved_path}.")
    meta_file_df.to_json(args.saved_path, orient="records", lines=True)
71
+
72
+
73
# CLI entry point.
if __name__ == "__main__":
    main()
robomaster/video_caption/utils/image_evaluator.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Union
3
+
4
+ import clip
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from PIL import Image
9
+ from torchvision.datasets.utils import download_url
10
+ from transformers import AutoModel, AutoProcessor
11
+
12
+ from .siglip_v2_5 import convert_v2_5_from_siglip
13
+
14
# All metric classes exported by this module.
__all__ = ["AestheticScore", "AestheticScoreSigLIP", "CLIPScore"]

# Download URLs for the pretrained weights used by the metrics below.
_MODELS = {
    "CLIP_ViT-L/14": "https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/easyanimate/video_caption/clip/ViT-L-14.pt",
    "Aesthetics_V2": "https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/easyanimate/video_caption/clip/sac%2Blogos%2Bava1-l14-linearMSE.pth",
    "aesthetic_predictor_v2_5": "https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/easyanimate/video_caption/clip/aesthetic_predictor_v2_5.pth",
}
# MD5 checksums of the files above, verified by torchvision's download_url.
_MD5 = {
    "CLIP_ViT-L/14": "096db1af569b284eb76b3881534822d9",
    "Aesthetics_V2": "b1047fd767a00134b8fd6529bf19521a",
    "aesthetic_predictor_v2_5": "c46eb8c29f714c9231dc630b8226842a",
}
27
+
28
+
29
def get_list_depth(lst):
    """Return the nesting depth of lst: 0 for a non-list, 1 for a flat list, etc.

    The empty list now has depth 1; previously max() received no items and
    raised ValueError for [].
    """
    if not isinstance(lst, list):
        return 0
    return 1 + max((get_list_depth(item) for item in lst), default=0)
34
+
35
+
36
def reshape_images(images: Union[list[list[Image.Image]], list[Image.Image]]):
    """Flatten batched video frames into a flat list of PIL images.

    Returns:
        (flat_images, num_sampled_frames) where num_sampled_frames is None for
        a 1-D image batch, and the per-video frame count for a 2-D video batch.

    Raises:
        ValueError: for inputs that are not 1/2-D lists of PIL images, or for
            videos with differing frame counts.
    """
    depth = get_list_depth(images)

    if depth == 1:
        # Batch of standalone images: nothing to flatten.
        if not isinstance(images[0], Image.Image):
            raise ValueError("The item in 1D images should be Image.Image.")
        return images, None

    if depth == 2:
        # Batch of videos: each item is a list of sampled frames.
        if not isinstance(images[0][0], Image.Image):
            raise ValueError("The item in 2D images (videos) should be Image.Image.")
        num_sampled_frames = len(images[0])
        if any(len(video_frames) != num_sampled_frames for video_frames in images):
            raise ValueError("All item in 2D images should be with the same length.")
        # [batch_size, num_sampled_frames] -> flat [batch_size * num_sampled_frames].
        flat_images = []
        for video_frames in images:
            flat_images.extend(video_frames)
        return flat_images, num_sampled_frames

    raise ValueError("The input images should be in 1/2D list.")
58
+
59
+
60
def reshape_scores(scores: list[float], num_sampled_frames: int) -> list[float]:
    """Regroup a flat per-frame score list back into per-video chunks.

    A scalar (non-list) score is wrapped into a single-element list; when
    num_sampled_frames is None the list is returned unchanged.
    """
    if not isinstance(scores, list):
        # A single image produced a scalar score after .tolist().
        return [scores]
    if num_sampled_frames is None:
        # Batch image input: already one score per image.
        return scores
    # Batch video input: split into consecutive chunks, one per video.
    chunk = num_sampled_frames
    total = (len(scores) // chunk) * chunk
    return [scores[start:start + chunk] for start in range(0, total, chunk)]
71
+
72
+
73
# if you changed the MLP architecture during training, change it also here:
class _MLP(nn.Module):
    """Regression head mapping a CLIP image embedding to one aesthetic score.

    The layer sizes (and the deliberately commented-out ReLUs) must match the
    released linear-MSE checkpoint exactly; do not change them without
    retraining the predictor.
    """

    def __init__(self, input_size):
        super().__init__()
        # Width of the incoming embedding (768 for CLIP ViT-L/14).
        self.input_size = input_size
        self.layers = nn.Sequential(
            nn.Linear(self.input_size, 1024),
            # nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(1024, 128),
            # nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(128, 64),
            # nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(64, 16),
            # nn.ReLU(),
            nn.Linear(16, 1),
        )

    def forward(self, x):
        # x: [N, input_size] embeddings -> [N, 1] raw aesthetic scores.
        return self.layers(x)
95
+
96
+
97
class AestheticScore:
    """Compute LAION Aesthetics Score V2 based on openai/clip. Note that the default
    inference dtype with GPUs is fp16 in openai/clip.

    Ref:
        1. https://github.com/christophschuhmann/improved-aesthetic-predictor/blob/main/simple_inference.py.
        2. https://github.com/openai/CLIP/issues/30.
    """

    def __init__(self, root: str = "~/.cache/clip", device: str = "cpu"):
        # The CLIP model is loaded in the evaluation mode.
        self.root = os.path.expanduser(root)
        if not os.path.exists(self.root):
            os.makedirs(self.root)
        filename = "ViT-L-14.pt"
        # download_url verifies the MD5 checksum and skips already-cached files.
        download_url(_MODELS["CLIP_ViT-L/14"], self.root, filename=filename, md5=_MD5["CLIP_ViT-L/14"])
        self.clip_model, self.preprocess = clip.load(os.path.join(self.root, filename), device=device)
        self.device = device
        self._load_mlp()

    def _load_mlp(self):
        # Load the linear-MSE regression head applied on top of CLIP embeddings.
        filename = "sac+logos+ava1-l14-linearMSE.pth"
        download_url(_MODELS["Aesthetics_V2"], self.root, filename=filename, md5=_MD5["Aesthetics_V2"])
        state_dict = torch.load(os.path.join(self.root, filename))
        self.mlp = _MLP(768)  # 768 = embedding width of CLIP ViT-L/14.
        self.mlp.load_state_dict(state_dict)
        self.mlp.to(self.device)
        self.mlp.eval()

    def __call__(self, images: Union[list[list[Image.Image]], list[Image.Image]], texts=None) -> list[float]:
        # `texts` is accepted but unused so every metric shares one call signature.
        images, num_sampled_frames = reshape_images(images)

        with torch.no_grad():
            images = torch.stack([self.preprocess(image) for image in images]).to(self.device)
            # L2-normalize the embeddings, matching the predictor's training setup.
            image_embs = F.normalize(self.clip_model.encode_image(images))
            scores = self.mlp(image_embs.float())  # torch.float16 -> torch.float32, [N, 1]

        scores = scores.squeeze().tolist()  # scalar or list
        # Regroup per-frame scores into per-video lists for 2D (video) input.
        return reshape_scores(scores, num_sampled_frames)

    def __repr__(self) -> str:
        return "aesthetic_score"
139
+
140
+
141
class AestheticScoreSigLIP:
    """Compute Aesthetics Score V2.5 based on google/siglip-so400m-patch14-384.

    Ref:
        1. https://github.com/discus0434/aesthetic-predictor-v2-5.
        2. https://github.com/discus0434/aesthetic-predictor-v2-5/issues/2.
    """

    def __init__(
        self,
        root: str = "~/.cache/clip",
        device: str = "cpu",
        torch_dtype=torch.float16
    ):
        # Local cache directory for the downloaded predictor weights.
        self.root = os.path.expanduser(root)
        if not os.path.exists(self.root):
            os.makedirs(self.root)
        filename = "aesthetic_predictor_v2_5.pth"
        # download_url verifies the MD5 checksum and skips already-cached files.
        download_url(_MODELS["aesthetic_predictor_v2_5"], self.root, filename=filename, md5=_MD5["aesthetic_predictor_v2_5"])
        self.model, self.preprocessor = convert_v2_5_from_siglip(
            predictor_name_or_path=os.path.join(self.root, filename),
            low_cpu_mem_usage=True,
            trust_remote_code=True,
        )
        self.model = self.model.to(device=device, dtype=torch_dtype)
        self.device = device
        self.torch_dtype = torch_dtype

    def __call__(self, images: Union[list[list[Image.Image]], list[Image.Image]], texts=None) -> list[float]:
        # `texts` is accepted but unused so every metric shares one call signature.
        images, num_sampled_frames = reshape_images(images)

        pixel_values = self.preprocessor(images, return_tensors="pt").pixel_values
        pixel_values = pixel_values.to(self.device, self.torch_dtype)
        with torch.no_grad():
            scores = self.model(pixel_values).logits.squeeze().float().cpu().numpy()

        scores = scores.squeeze().tolist()  # scalar or list
        # Regroup per-frame scores into per-video lists for 2D (video) input.
        return reshape_scores(scores, num_sampled_frames)

    def __repr__(self) -> str:
        return "aesthetic_score_siglip"
182
+
183
+
184
class CLIPScore:
    """Compute CLIP scores for image-text pairs based on huggingface/transformers.

    The score for pair ``i`` is the cosine similarity between the normalized
    embeddings of ``images[i]`` and ``texts[i]``.
    """

    def __init__(
        self,
        model_name_or_path: str = "openai/clip-vit-large-patch14",
        torch_dtype=torch.float16,
        device: str = "cpu",
    ):
        """Load the CLIP model (eval mode) and its processor onto ``device``."""
        self.model = AutoModel.from_pretrained(model_name_or_path, torch_dtype=torch_dtype).eval().to(device)
        self.processor = AutoProcessor.from_pretrained(model_name_or_path)
        self.torch_dtype = torch_dtype
        self.device = device

    def __call__(self, images: Union[list[list[Image.Image]], list[Image.Image]], texts: list[str]) -> list[float]:
        """Return the cosine similarity for each aligned (image_i, text_i) pair."""
        assert len(images) == len(texts)
        images, num_sampled_frames = reshape_images(images)
        # Expand texts in the batch video input case so each sampled frame is
        # paired with its video's caption.
        if num_sampled_frames is not None:
            texts = [[text] * num_sampled_frames for text in texts]
            texts = [item for sublist in texts for item in sublist]

        image_inputs = self.processor(images=images, return_tensors="pt")  # {"pixel_values": }
        # Cast pixel values to the model dtype (generalizes the fp16-only cast
        # so bf16/fp32 configurations also work).
        image_inputs["pixel_values"] = image_inputs["pixel_values"].to(self.torch_dtype)
        text_inputs = self.processor(text=texts, return_tensors="pt", padding=True, truncation=True)  # {"input_ids": }
        image_inputs, text_inputs = image_inputs.to(self.device), text_inputs.to(self.device)
        with torch.no_grad():
            image_embs = F.normalize(self.model.get_image_features(**image_inputs))
            text_embs = F.normalize(self.model.get_text_features(**text_inputs))
            # Fix: score each aligned pair via the row-wise dot product (the
            # diagonal of text_embs @ image_embs.T). The previous full N x N
            # matrix produced a nested list that reshape_scores cannot handle.
            scores = (text_embs * image_embs).sum(dim=-1)  # [N]

        scores = scores.squeeze().tolist()  # scalar or list
        return reshape_scores(scores, num_sampled_frames)

    def __repr__(self) -> str:
        return "clip_score"
221
+
222
+
223
if __name__ == "__main__":
    # Smoke test: run with placeholder paths replaced by real media files
    # (requires `python -m ...` so the relative import resolves).
    from torch.utils.data import DataLoader
    from tqdm import tqdm

    from .video_dataset import VideoDataset, collate_fn

    aesthetic_score = AestheticScore(device="cuda")
    aesthetic_score_siglip = AestheticScoreSigLIP(device="cuda")
    # clip_score = CLIPScore(device="cuda")

    image_paths = ["your_image_path"] * 3
    # texts = ["a joker", "a woman", "a man"]
    pil_images = [Image.open(p).convert("RGB") for p in image_paths]

    print(aesthetic_score(pil_images))
    # print(clip_score(pil_images, texts))

    video_dataset = VideoDataset(
        dataset_inputs={"video_path": ["your_video_path"] * 3},
        sample_method="mid",
        num_sampled_frames=2,
    )
    loader = DataLoader(video_dataset, batch_size=1, num_workers=1, collate_fn=collate_fn)

    for batch in tqdm(loader):
        batch_frame = batch["sampled_frame"]
        print(aesthetic_score_siglip(batch_frame))
robomaster/video_caption/utils/logger.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Borrowed from sd-webui-controlnet/scripts/logging.py
2
+ import copy
3
+ import logging
4
+ import sys
5
+
6
+
7
class ColoredFormatter(logging.Formatter):
    """Formatter that wraps the record's level name in ANSI color codes."""

    COLORS = {
        "DEBUG": "\033[0;36m",  # CYAN
        "INFO": "\033[0;32m",  # GREEN
        "WARNING": "\033[0;33m",  # YELLOW
        "ERROR": "\033[0;31m",  # RED
        "CRITICAL": "\033[0;37;41m",  # WHITE ON RED
        "RESET": "\033[0m",  # RESET COLOR
    }

    def format(self, record):
        """Format a shallow copy of ``record`` with a colorized level name."""
        # Mutate a copy so the original record stays intact for other handlers.
        clone = copy.copy(record)
        color = self.COLORS.get(clone.levelname, self.COLORS["RESET"])
        clone.levelname = f"{color}{clone.levelname}{self.COLORS['RESET']}"
        return super().format(clone)
23
+
24
+
25
# Package-wide logger; propagate=False keeps records out of the root logger
# so messages are not emitted twice.
logger = logging.getLogger("VideoCaption")
logger.propagate = False

# Attach a stdout handler exactly once (the module may be imported repeatedly).
if not logger.handlers:
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(ColoredFormatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    logger.addHandler(stream_handler)

# Level name strings are accepted by setLevel.
logger.setLevel("INFO")
robomaster/video_caption/utils/longclip/README.md ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Long-CLIP
2
+ The code in this directory is borrowed from https://github.com/beichenzbc/Long-CLIP/tree/4e6f5da/model.
3
+
4
+ We only modify the following code in [model_longclip.py](model_longclip.py) from
5
+ ```python
6
+ @property
7
+ def dtype(self):
8
+ return self.visual.conv1.weight.dtype
9
+ ```
10
+ to
11
+ ```python
12
+ @property
13
+ def dtype(self):
14
+ # Fix: the VideoCLIP-XL inference.
15
+ if hasattr(self, "visual"):
16
+ return self.visual.conv1.weight.dtype
17
+ else:
18
+ return self.token_embedding.weight.dtype
19
+ ```
robomaster/video_caption/utils/longclip/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .longclip import *
robomaster/video_caption/utils/longclip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
3
+ size 1356917