# python preprocess.py --data_path data/1.mp4  --inversion_prompt 'cartoon'

# python preprocess.py --data_path data/1.mp4  --inversion_prompt ''

# python preprocess.py --data_path data/1.mp4 --H 1024 --W  576 --n_frames 220 --inversion_prompt ''


# python preprocess.py --data_path data/1_1.mp4 --H 512 --W  288 --n_frames 220 --inversion_prompt ''


# python preprocess.py --data_path data/1_1.mp4 --H 512 --W 288 --sd_version 3 --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/5.mp4 --H 360 --W   640 --n_frames 120 --inversion_prompt ''

# python preprocess.py --data_path data/1.mp4 --H 512 --W 288 --sd_version 3 --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/7.mp4 --H 360 --W   512 --n_frames 120 --inversion_prompt ''

# python preprocess.py --data_path data/9.mp4 --H 640 --W   368 --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/10.mp4 --H 512 --W 288 --sd_version 2.1 --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/7.mp4 --H 360 --W   512 --sd_version 1.5  --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/7.mp4 --H 360 --W   512 --sd_version sdxl_1.0  --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/7_sdxl_1.mp4 --H 360 --W 512 --sd_version sdxl_1.0  --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/12.mp4 --H 360 --W 512 --sd_version 1.5  --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/12.mp4 --H 360 --W 512 --sd_version 2.1  --n_frames 120 --inversion_prompt ''

# python preprocess.py --data_path data/12a.mp4 --H 360 --W 512 --sd_version 2.1  --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/12c.mp4 --H 360 --W 512 --sd_version 2.1  --n_frames 120 --inversion_prompt 'Grassland, clouds, long green grass, gentle breeze'

# python preprocess.py --data_path data/12d.mp4 --H 960 --W 540 --sd_version 1.5  --n_frames 120 --inversion_prompt ''


# python preprocess.py --data_path data/12d.mp4 --H 512 --W 288 --sd_version 1.5  --n_frames 120 --inversion_prompt ''


#  python preprocess.py --data_path data/13.mp4 --H 512 --W 288 --sd_version 2.1  --n_frames 120 --inversion_prompt 'A little Shiba dog'

# python preprocess.py --data_path data/13.mp4 --H 1024 --W 576 --sd_version 2.1  --n_frames 120 --inversion_prompt 'A little Shiba dog'

# python preprocess.py --data_path data/13a.mp4 --H 512 --W 288 --sd_version chinese-style-2.1  --n_frames 120 --inversion_prompt 'A little Shiba dog'


# python preprocess.py --data_path data/13c.mp4 --H 512 --W 288 --sd_version chinese-style-2.1  --n_frames 10 --inversion_prompt ''

# python preprocess.py --data_path data/13e.mp4 --H 512 --W 288 --sd_version chinese-style-2.1  --n_frames 10 --inversion_prompt ''

#  python preprocess.py --data_path data/13e.mp4 --H 512 --W 288 --sd_version 2.1  --n_frames 10 --inversion_prompt ''


#  python preprocess.py --data_path data/13e.mp4 --H 512 --W 288 --sd_version 1.5 --lora_safetensors_path 'lora/1.5/add_detail.safetensors'  --n_frames 10 --inversion_prompt ''



#  python preprocess.py --data_path data/12.mp4 --H 960 --W 540 --sd_version 1.5 --lora_safetensors_path 'lora/1.5/add_detail.safetensors'  --n_frames 10 --inversion_prompt ''


#  python preprocess.py --data_path data/12e.mp4 --H 960 --W 540 --sd_version 1.5 --lora_safetensors_path 'lora/1.5/add_detail.safetensors'  --n_frames 10 --inversion_prompt ''


# python preprocess.py --data_path data/13h.mp4 --H 960 --W 540 --sd_version chinese-style-2.1  --n_frames 10 --inversion_prompt ''


# python preprocess.py --data_path data/13h.mp4 --H 960 --W 540 --sd_version ffusion-2.1-base-alpha  --n_frames 10 --inversion_prompt ''


# if self.sd_version == FFUSION_2_1_BASE_ALPHA:
#     latents_path = os.path.join('latents/sd_2.1/13l/steps_100/nframes_10/latents/noisy_latents_991.pt')
#     noisy_latent = torch.load(latents_path)
#     latent_reconstruction = self.ddim_sample(noisy_latent, cond, batch_size=batch_size, save_path=save_path)
#     self.latents_to_frames_and_save(latent_reconstruction, save_path=save_path, frames_path='frames_2_1', mp4_save_name="inverted_2_1")
#
# if self.sd_version == '2.1':
#     latents_path = os.path.join('latents/sd_ffusion-2.1-base-alpha/13l/steps_100/nframes_10/latents/noisy_latents_361.pt')
#     noisy_latent = torch.load(latents_path)
#     latent_reconstruction = self.ddim_sample(noisy_latent, cond, batch_size=batch_size, save_path=save_path)
#     self.latents_to_frames_and_save(latent_reconstruction, save_path=save_path, frames_path='frames_2_1', mp4_save_name="inverted_2_1")

# if self.sd_version == FFUSION_2_1_BASE_ALPHA:
#     model_2_1 = self.create_2_1_model()
#     text_input = model_2_1.tokenizer(inversion_prompt, padding='max_length', max_length=self.tokenizer.model_max_length,
#                                 truncation=True, return_tensors='pt')
#     text_embeddings = model_2_1.text_encoder(text_input.input_ids.to(device))[0]
#     uncond_input = model_2_1.tokenizer(negative_prompt, padding='max_length',
#                                   max_length=self.tokenizer.model_max_length,
#                                   return_tensors='pt')
#     uncond_embeddings = model_2_1.text_encoder(uncond_input.input_ids.to(device))[0]
#     cond = torch.cat([uncond_embeddings, text_embeddings])[1].unsqueeze(0)


# if i == 0:
#     if self.sd_version == FFUSION_2_1_BASE_ALPHA:
#         model_input = torch.load(
#             "./model_input.pt")  # model_input -3.1348e-01, -4.4751e-01, -8.1558e-03,  ...,  2.5439e-01  (label said "cond_batch.pt" — presumably a paste error; verify which tensor these values came from)
#         cond_batch = torch.load(
#             "./cond_batch.pt")  # cond_batch      -0.3342, -0.4460, -0.0225,  ...,  0.2649, -0.0413, -0.2842
#     if self.sd_version == '2.1':
#         torch.save(model_input,
#                    "./model_input.pt")  # model_input 2.8516e-01,  1.0117e+00,  8.2275e-01,  ...,  2.4036e-01,
#         torch.save(cond_batch,
#                    "./cond_batch.pt")  # NOTE(review): annotation said "model_input" with values identical to the line above — presumably copy-pasted; re-record the actual cond_batch values


