import math
import os
import pickle
import numpy as np
import pandas as pd
import torch
from copy import deepcopy
from matplotlib import pyplot as plt
from tqdm.auto import tqdm
from diffusers import get_cosine_schedule_with_warmup
from accelerate import Accelerator
from torch.optim import *
from PIL import Image
from yldiffusers.utils import LOGGER, numpy_to_pil, colorstr, time_sync
from yldiffusers.pipeline import DDPMPipeline, LDMPipeline
class DDPM:
    """Trainer and sampler for a (optionally class-conditional) DDPM.

    The ``config`` object supplies the model, dataloader, optimizer class,
    noise scheduler and every hyper-parameter; see ``init_base``/``init_model``.
    """
    def __init__(self, config=None):
        # config may be omitted so that a bare instance can be re-hydrated
        # later through load().
        if config:
            self.init_base(config)
            self.init_dataloader(config)
            self.init_model(config)

    def init_base(self, config):
        """Keep the config, derive the conditional flag and reset loss bookkeeping."""
        self.config = config
        # task_type is e.g. 'conditional_xxx' / 'unconditional_xxx'.
        self.condition = self.config.task_type.split('_')[0] == 'conditional'
        self.logger = dict(
            train_noise_loss=0, train_total_num=0, train_noise_loss_list=[]
        )

    def init_model(self, config):
        """Clone the model, build optimizer + LR scheduler and wrap everything with Accelerate."""
        self.model = deepcopy(config.model)
        self.get_optimizer(config)
        self.loss_f = config.loss_f
        lr_warmup_steps = len(self.train_dataloader) * config.warm_epochs
        self.noise_scheduler = config.noise_scheduler
        self.lr_scheduler = get_cosine_schedule_with_warmup(
            optimizer=self.optimizer,
            num_warmup_steps=lr_warmup_steps,
            num_training_steps=len(self.train_dataloader) * config.num_epochs)
        self.accelerator = Accelerator(mixed_precision=config.mixed_precision)
        self.device = self.accelerator.device
        self.model = self.model.to(self.device)
        self.model, self.optimizer, self.train_dataloader, self.lr_scheduler = self.accelerator.prepare(
            self.model,
            self.optimizer,
            self.train_dataloader,
            self.lr_scheduler)

    def init_dataloader(self, config):
        # The dataloader is built by the caller and handed over via config.
        self.train_dataloader = config.train_dataloader

    def get_optimizer(self, config):
        """Instantiate ``config.optimizer`` (an optimizer *class*) over the model parameters.

        Bug fix: the original tested ``isinstance(config.optimizer, SGD)``, but
        ``config.optimizer`` is a class (it is called below), so the SGD branch
        was unreachable and ``config.momentum`` was silently ignored.
        """
        if isinstance(config.optimizer, type) and issubclass(config.optimizer, SGD):
            self.optimizer = config.optimizer(self.model.parameters(), lr=config.learning_rate,
                                              momentum=config.momentum, weight_decay=config.weight_decay)
        else:
            self.optimizer = config.optimizer(self.model.parameters(), lr=config.learning_rate,
                                              weight_decay=config.weight_decay)

    def _train_step(self, clean_images, labels=None):
        """Run one denoising-objective optimization step and return the loss tensor.

        ``labels`` is None for unconditional training, else the class-label batch.
        """
        # Sample noise to add to the images.
        noise = torch.randn(clean_images.shape).to(clean_images.device)
        bs = clean_images.shape[0]
        # Sample a random timestep for each image.
        timesteps = torch.randint(0, self.noise_scheduler.num_train_timesteps, (bs,),
                                  device=clean_images.device).long()
        # Add noise to the clean images according to the noise magnitude at
        # each timestep (this is the forward diffusion process).
        noisy_images = self.noise_scheduler.add_noise(clean_images, noise, timesteps)
        # Predict the noise residual.
        if labels is None:
            noise_pred = self.model(noisy_images, timesteps, return_dict=False)[0]
        else:
            noise_pred = self.model(noisy_images, timesteps, class_labels=labels, return_dict=False)[0]
        loss = self.loss_f(noise_pred, noise)
        self.accelerator.backward(loss)
        self.accelerator.clip_grad_norm_(self.model.parameters(), 1.0)
        self.optimizer.step()
        self.lr_scheduler.step()
        self.optimizer.zero_grad()
        return loss

    def train(self):
        """Train for config.num_epochs, periodically sampling images and pickling the pipeline.

        The conditional and unconditional branches of the original loop were
        line-for-line duplicates; they now share ``_train_step``.
        """
        if not os.path.exists(self.config.output_dir):
            os.makedirs(self.config.output_dir)
        prefix = colorstr('train: ')
        self.model.train()
        LOGGER.info("🚀yl-diffusion-DDPM training starts!")
        global_step = 0
        t1 = time_sync()
        for epoch in range(self.config.num_epochs):
            self.logger['train_noise_loss'] = 0
            self.logger['train_total_num'] = 0
            with tqdm(total=len(self.train_dataloader), desc=f'train : Epoch [{epoch + 1}/{self.config.num_epochs}]',
                      postfix=dict, mininterval=0.3) as pbar:
                for batch in self.train_dataloader:
                    clean_images, labels = batch if self.condition else (batch, None)
                    loss = self._train_step(clean_images, labels)
                    loss_value = loss.detach().item()
                    self.logger['train_noise_loss'] += loss_value
                    self.logger['train_total_num'] += clean_images.shape[0]
                    pbar.update(1)
                    mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                    logs = {'mem': mem,
                            "loss": loss_value,
                            "lr": self.lr_scheduler.get_last_lr()[0],
                            "step": global_step}
                    pbar.set_postfix(**logs)
                    global_step += 1
                self.pipeline = DDPMPipeline(unet=self.accelerator.unwrap_model(self.model), scheduler=self.noise_scheduler)

                if (epoch + 1) % self.config.save_image_epochs == 0 or epoch == self.config.num_epochs - 1:
                    self.evaluate(self.config, epoch, self.pipeline)
                if (epoch + 1) % self.config.save_model_epochs == 0 or epoch == self.config.num_epochs - 1:
                    self.save_pipeline()
                    LOGGER.info('🚀yl-diffusion-DDPM best model has saved!')
                # NOTE(review): loss is averaged over images even though loss_f
                # already returns a per-batch mean; kept for curve compatibility.
                train_mean_loss = round(self.logger['train_noise_loss'] / self.logger['train_total_num'], 4)
                self.logger['train_noise_loss_list'].append(train_mean_loss)
                LOGGER.info(f'{prefix}Epoch [{epoch + 1}/{self.config.num_epochs}] total_mean_loss:{train_mean_loss}')
        self.save_config()
        self.save_loss_csv()
        self.save_loss_curve()
        t2 = time_sync()
        LOGGER.info(f"🚀yl-diffusion-DDPM training ends! total time:{(t2 - t1) / 3600:.3f} hours!")
        torch.cuda.empty_cache()

    def make_grid(self, images, rows, cols):
        """Paste a list of equal-size PIL images into a rows x cols grid image."""
        w, h = images[0].size
        grid = Image.new('RGB', size=(cols * w, rows * h))
        for i, image in enumerate(images):
            grid.paste(image, box=(i % cols * w, i // cols * h))
        return grid

    def _grid_dims(self, bn):
        """Return (rows, cols): the most-square factorization with rows * cols == bn."""
        rows = int(math.sqrt(bn))
        while bn % rows != 0:
            rows -= 1
        return rows, bn // rows

    def evaluate(self, config, epoch, pipeline):
        """Sample eval_batch_size images (reverse diffusion) and save them as one grid.

        The default pipeline output type is `List[PIL.Image]`.
        """
        if self.condition:
            images = pipeline(
                batch_size=config.eval_batch_size,
                generator=torch.manual_seed(config.seed),
                num_class=self.config.nc,
                num_inference_steps=self.config.num_inference_steps
            ).images
        else:
            images = pipeline(
                batch_size=config.eval_batch_size,
                generator=torch.manual_seed(config.seed),
                num_inference_steps=self.config.num_inference_steps
            ).images

        # Make a grid out of the images.
        rows, cols = self._grid_dims(len(images))
        image_grid = self.make_grid(images, rows=rows, cols=cols)

        # Save the grid under <output_dir>/samples/<epoch>.png.
        test_dir = os.path.join(config.output_dir, "samples")
        os.makedirs(test_dir, exist_ok=True)
        image_grid.save(f"{test_dir}/{epoch:04d}.png")

    def save_loss_csv(self):
        """Dump the per-epoch mean noise loss to <output_dir>/loss.csv."""
        loss_df = pd.DataFrame({'train_noise_loss': self.logger['train_noise_loss_list']})
        loss_df.to_csv(os.path.join(self.config.output_dir, 'loss.csv'), index=False)

    def save_loss_curve(self):
        """Plot the training loss curve and save it to <output_dir>/loss_curve.png."""
        plt.figure(figsize=(6, 6))
        plt.title('loss_curve', fontsize=15, fontweight='bold')
        plt.grid()
        plt.xlabel('epochs', fontsize=15, fontweight='bold')
        plt.ylabel('loss', fontsize=15, fontweight='bold')
        x = np.arange(0, self.config.num_epochs)
        plt.plot(x, self.logger['train_noise_loss_list'], color='blue', label='train loss')
        plt.legend(loc='upper right')  # legend in the upper-right corner
        plt.savefig(os.path.join(self.config.output_dir, 'loss_curve.png'), bbox_inches='tight', dpi=300)
        plt.show()

    def save_pipeline(self):
        """Pickle the current diffusion pipeline into the output dir."""
        with open(os.path.join(self.config.output_dir, f'{self.config.task_type}_pipeline.pickle'), "wb") as f:
            pickle.dump(self.pipeline, f)

    def save_config(self):
        """Pickle the config with heavy / unpicklable members stripped first."""
        self.config.train_dataset = None
        self.config.train_dataloader = None
        self.config.model = None
        with open(os.path.join(self.config.output_dir, f'{self.config.task_type}_config.pickle'), "wb") as f:
            pickle.dump(self.config, f)

    def load(self, pipeline_pickle_path, config_pickle_path):
        """Restore a pickled config + pipeline (inverse of save_config/save_pipeline).

        SECURITY: pickle.load executes arbitrary code; only load trusted files.
        """
        with open(config_pickle_path, 'rb') as f:
            self.config = pickle.load(f)
        with open(pipeline_pickle_path, 'rb') as f:
            self.pipeline = pickle.load(f)
        self.init_base(self.config)

    def generate(self, label_index=None, batch_size=1, is_show=True):
        """Sample batch_size images from the loaded pipeline and optionally show them."""
        self.label_index = label_index  # kept as a plain int for save_image()
        if label_index is not None:
            label_index = torch.tensor([label_index])
        LOGGER.info(f'🚀yl-diffusion generation starts!')
        t1 = time_sync()
        if self.condition:
            self.images = self.pipeline(batch_size=batch_size, num_class=self.config.nc, label_index=label_index,
                                        num_inference_steps=self.config.num_inference_steps).images
        else:
            self.images = self.pipeline(batch_size=batch_size,
                                        num_inference_steps=self.config.num_inference_steps).images
        t = time_sync() - t1
        # Make a grid out of the images.
        rows, cols = self._grid_dims(len(self.images))
        image_grid = self.make_grid(self.images, rows=rows, cols=cols)
        if is_show:
            plt.imshow(image_grid)
            plt.axis("off")
            plt.show()
        # The original mixed an f-string with %-formatting; same message, one f-string.
        LOGGER.info(f'Speed {t * 1E3:.1f}ms generate one image')

    def save_image(self, image_type='jpg'):
        """Save self.images into <output_dir>/generate_images[/<label>], auto-numbering files."""
        bs = len(self.images)
        if self.condition:
            images_label = self.config.index2label[self.label_index]
            image_save_dir_path = os.path.join(self.config.output_dir,
                                               'generate_images',
                                               images_label)
        else:
            image_save_dir_path = os.path.join(self.config.output_dir,
                                               'generate_images')
        if not os.path.exists(image_save_dir_path):
            os.makedirs(image_save_dir_path)

        # Continue numbering after the highest existing index in the directory.
        file_list = os.listdir(image_save_dir_path)
        if file_list:
            image_index_list = [int(file_name.split('.')[0].split('_')[-1]) for file_name in file_list]
            image_index = max(image_index_list) + 1
        else:
            image_index = 0
        if self.condition:
            image_file_name_list = [os.path.join(image_save_dir_path, f'{images_label}_{image_index + i}.{image_type}')
                                    for i in range(bs)]
        else:
            image_file_name_list = [os.path.join(image_save_dir_path, f'generation_{image_index + i}.{image_type}')
                                    for i in range(bs)]
        for i in range(bs):
            self.images[i].save(image_file_name_list[i])
        LOGGER.info(f'🚀yl-diffusion generation images has been saved!')

class LDM(DDPM):
    """Latent-diffusion trainer: runs the diffusion objective in a VQ-VAE latent space."""
    def __init__(self, config=None):
        super(LDM, self).__init__()
        if config:
            self.init_base(config)
            self.init_dataloader(config)
            self.init_model(config)

    def init_model(self, config):
        """Clone UNet + VAE, build optimizer/schedulers and wrap with Accelerate."""
        self.model = deepcopy(config.model)
        self.vae = deepcopy(config.vae)
        self.get_optimizer(config)
        lr_warmup_steps = len(self.train_dataloader) * config.warm_epochs
        self.noise_scheduler = config.noise_scheduler
        self.loss_f = config.loss_f
        self.lr_scheduler = get_cosine_schedule_with_warmup(
            optimizer=self.optimizer,
            num_warmup_steps=lr_warmup_steps,
            num_training_steps=len(self.train_dataloader) * config.num_epochs)
        self.accelerator = Accelerator(mixed_precision=config.mixed_precision)
        self.device = self.accelerator.device
        self.model = self.model.to(self.device)
        self.vae = self.vae.to(self.device)
        self.model, self.vae, self.optimizer, self.train_dataloader, self.lr_scheduler = self.accelerator.prepare(
            self.model,
            self.vae,
            self.optimizer,
            self.train_dataloader,
            self.lr_scheduler)

    def _ldm_train_step(self, clean_images, labels=None):
        """One denoising optimization step in latent space; returns the loss tensor.

        ``labels`` is None for unconditional training, else the class-label batch.
        """
        bs = clean_images.shape[0]
        # Sample a random timestep for each image.
        timesteps = torch.randint(0, self.noise_scheduler.num_train_timesteps, (bs,),
                                  device=clean_images.device).long()
        # Only the UNet is optimized (get_optimizer uses model.parameters()), so
        # encode under no_grad: the original built VAE gradients on every step
        # that were never applied or cleared, wasting memory and compute.
        with torch.no_grad():
            latent_z = self.vae.encode(clean_images).latents
        # Sample noise to add to the latent z.
        noise = torch.randn(latent_z.shape).to(latent_z.device)
        noisy_latent_zt = self.noise_scheduler.add_noise(latent_z, noise, timesteps)
        if labels is None:
            noise_pred = self.model(noisy_latent_zt, timesteps).sample
        else:
            noise_pred = self.model(noisy_latent_zt, timesteps, class_labels=labels).sample
        noise_loss = self.loss_f(noise_pred, noise)
        self.accelerator.backward(noise_loss)
        self.accelerator.clip_grad_norm_(self.model.parameters(), 1.0)
        self.optimizer.step()
        self.lr_scheduler.step()
        self.optimizer.zero_grad()
        return noise_loss

    def train(self):
        """Train the latent diffusion model.

        The conditional and unconditional branches of the original loop were
        duplicated; they now share ``_ldm_train_step``.
        """
        if not os.path.exists(self.config.output_dir):
            os.makedirs(self.config.output_dir)
        prefix = colorstr('train: ')
        self.model.train()
        LOGGER.info("🚀yl-diffusion-LDM training starts!")
        global_step = 0
        t1 = time_sync()
        for epoch in range(self.config.num_epochs):
            self.logger['train_noise_loss'] = 0
            self.logger['train_total_num'] = 0
            with tqdm(total=len(self.train_dataloader), desc=f'train : Epoch [{epoch + 1}/{self.config.num_epochs}]',
                      postfix=dict, mininterval=0.3) as pbar:
                for batch in self.train_dataloader:
                    clean_images, labels = batch if self.condition else (batch, None)
                    noise_loss = self._ldm_train_step(clean_images, labels)
                    loss_value = noise_loss.detach().item()
                    self.logger['train_noise_loss'] += loss_value
                    self.logger['train_total_num'] += clean_images.shape[0]
                    pbar.update(1)
                    mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                    logs = {"mem": mem,
                            "noise_loss": loss_value,
                            "lr": self.lr_scheduler.get_last_lr()[0],
                            "step": global_step}
                    pbar.set_postfix(**logs)
                    global_step += 1
                self.pipeline = LDMPipeline(unet=self.accelerator.unwrap_model(self.model),
                                            vqvae=self.accelerator.unwrap_model(self.vae),
                                            scheduler=self.noise_scheduler)
                if (epoch + 1) % self.config.save_image_epochs == 0 or epoch == self.config.num_epochs - 1:
                    self.evaluate(self.config, epoch, self.pipeline)
                if (epoch + 1) % self.config.save_model_epochs == 0 or epoch == self.config.num_epochs - 1:
                    self.save_pipeline()
                    LOGGER.info('🚀yl-diffusion-LDM best model has saved!')
                train_mean_noise_loss = round(self.logger['train_noise_loss'] / self.logger['train_total_num'], 4)
                self.logger['train_noise_loss_list'].append(train_mean_noise_loss)
                LOGGER.info(f'{prefix}Epoch [{epoch + 1}/{self.config.num_epochs}] mean_noise_loss:{train_mean_noise_loss}')
        self.save_config()
        self.save_loss_csv()
        self.save_loss_curve()
        t2 = time_sync()
        LOGGER.info(f"🚀yl-diffusion-LDM training ends! total time:{(t2 - t1) / 3600:.3f} hours!")
        torch.cuda.empty_cache()

    def save_config(self):
        """Pickle the config with heavy members (datasets, models, vae) stripped."""
        self.config.train_dataset = None
        self.config.train_dataloader = None
        self.config.model = None
        self.config.vae = None
        with open(os.path.join(self.config.output_dir, f'{self.config.task_type}_config.pickle'), "wb") as f:
            pickle.dump(self.config, f)
class VQVAE(DDPM):
    """Trainer for the VQ-VAE autoencoder (codebook loss + reconstruction loss)."""
    def __init__(self, config=None):
        super(VQVAE, self).__init__()
        if config:
            self.init_base(config)
            self.init_dataloader(config)
            self.init_model(config)

    def init_base(self, config):
        """Store config and reset loss bookkeeping; train_loss tracks the best epoch."""
        self.config = config
        self.condition = self.config.task_type.split('_')[0] == 'conditional'
        self.logger = dict(
            train_loss=1e9, train_vqvae_loss=0, train_decode_loss=0,
            train_total_num=0, train_vqvae_loss_list=[], train_decode_loss_list=[]
        )

    def init_model(self, config):
        """Clone the VAE, build optimizer/schedulers and wrap with Accelerate."""
        self.vae = deepcopy(config.vae)
        self.get_optimizer(config)
        lr_warmup_steps = len(self.train_dataloader) * config.warm_epochs
        self.loss_f = config.loss_f
        self.lr_scheduler = get_cosine_schedule_with_warmup(
            optimizer=self.optimizer,
            num_warmup_steps=lr_warmup_steps,
            num_training_steps=len(self.train_dataloader) * config.num_epochs)
        self.accelerator = Accelerator(mixed_precision=config.mixed_precision)
        self.device = self.accelerator.device
        self.vae = self.vae.to(self.device)
        self.vae, self.optimizer, self.train_dataloader, self.lr_scheduler = self.accelerator.prepare(
            self.vae,
            self.optimizer,
            self.train_dataloader,
            self.lr_scheduler)

    def get_optimizer(self, config):
        """Instantiate ``config.optimizer`` (an optimizer *class*) over the VAE parameters.

        Bug fix: the original tested ``isinstance(config.optimizer, SGD)``, but
        ``config.optimizer`` is a class, so the SGD branch was unreachable and
        ``config.momentum`` was silently ignored.
        """
        if isinstance(config.optimizer, type) and issubclass(config.optimizer, SGD):
            self.optimizer = config.optimizer(self.vae.parameters(), lr=config.learning_rate,
                                              momentum=config.momentum, weight_decay=config.weight_decay)
        else:
            self.optimizer = config.optimizer(self.vae.parameters(), lr=config.learning_rate,
                                              weight_decay=config.weight_decay)

    def train(self):
        """Train the VQ-VAE, checkpointing whenever the epoch-mean total loss improves."""
        if not os.path.exists(self.config.output_dir):
            os.makedirs(self.config.output_dir)
        prefix = colorstr('train: ')
        self.vae.train()
        LOGGER.info("🚀yl-diffusion-VQVAE training starts!")
        global_step = 0
        t1 = time_sync()
        for epoch in range(self.config.num_epochs):
            self.logger['train_vqvae_loss'] = 0
            self.logger['train_decode_loss'] = 0
            self.logger['train_total_num'] = 0
            with tqdm(total=len(self.train_dataloader), desc=f'train Epoch [{epoch + 1}/{self.config.num_epochs}]',
                      postfix=dict, mininterval=0.3) as pbar:
                for clean_images in self.train_dataloader:
                    latent_z = self.vae.encode(clean_images).latents
                    vae_decode_output = self.vae.decode(latent_z)
                    # .loss is the codebook (vq) loss; .sample the reconstruction.
                    image_prev, vqvae_loss = vae_decode_output.sample, vae_decode_output.loss
                    decode_loss = self.loss_f(clean_images, image_prev)
                    total_loss = vqvae_loss + decode_loss
                    self.accelerator.backward(total_loss)
                    vqvae_loss_value = vqvae_loss.detach().item()
                    decode_loss_value = decode_loss.detach().item()
                    self.logger['train_vqvae_loss'] += vqvae_loss_value
                    self.logger['train_decode_loss'] += decode_loss_value
                    self.logger['train_total_num'] += clean_images.shape[0]
                    self.accelerator.clip_grad_norm_(self.vae.parameters(), 1.0)
                    self.optimizer.step()
                    self.lr_scheduler.step()
                    self.optimizer.zero_grad()
                    pbar.update(1)
                    mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                    logs = {"mem": mem,
                            "vqvae_loss": vqvae_loss_value,
                            "decode_loss": decode_loss_value,
                            "lr": self.lr_scheduler.get_last_lr()[0],
                            "step": global_step}
                    pbar.set_postfix(**logs)
                    global_step += 1
                if (epoch + 1) % self.config.save_image_epochs == 0 or epoch == self.config.num_epochs - 1:
                    self.eval(epoch)
                train_mean_vqvae_loss = round(self.logger['train_vqvae_loss'] / self.logger['train_total_num'], 4)
                train_mean_decode_loss = round(self.logger['train_decode_loss'] / self.logger['train_total_num'], 4)
                train_mean_total_loss = train_mean_vqvae_loss + train_mean_decode_loss
                if train_mean_total_loss < self.logger['train_loss']:
                    # Bug fix: remember the new best loss. Previously train_loss
                    # was never updated, so every epoch below the 1e9 sentinel
                    # overwrote the "best" checkpoint.
                    self.logger['train_loss'] = train_mean_total_loss
                    torch.save(self.accelerator.unwrap_model(self.vae).state_dict(),
                               os.path.join(self.config.output_dir, f'{self.vae.__class__.__name__}.pt'))
                    LOGGER.info('🚀yl-diffusion-VQVAE best model has saved!')
                self.logger['train_vqvae_loss_list'].append(train_mean_vqvae_loss)
                self.logger['train_decode_loss_list'].append(train_mean_decode_loss)
                LOGGER.info(f'{prefix}Epoch [{epoch + 1}/{self.config.num_epochs}]  mean_vqvae_loss:{train_mean_vqvae_loss} mean_decode_loss:{train_mean_decode_loss}')
        self.save_config()
        self.save_loss_csv()
        self.save_loss_curve()
        t2 = time_sync()
        LOGGER.info(f"🚀yl-diffusion-VQVAE training ends! total time:{(t2 - t1) / 3600:.3f} hours!")
        torch.cuda.empty_cache()

    def save_loss_csv(self):
        """Dump the per-epoch mean vq and reconstruction losses to <output_dir>/loss.csv."""
        loss_df = pd.DataFrame({'train_vqvae_loss': self.logger['train_vqvae_loss_list'],
                                'train_decode_loss': self.logger['train_decode_loss_list']})
        loss_df.to_csv(os.path.join(self.config.output_dir, 'loss.csv'), index=False)

    def save_loss_curve(self):
        """Plot both loss curves and save them to <output_dir>/loss_curve.png."""
        plt.figure(figsize=(6, 6))
        plt.title('loss_curve', fontsize=15, fontweight='bold')
        plt.grid()
        plt.xlabel('epochs', fontsize=15, fontweight='bold')
        plt.ylabel('loss', fontsize=15, fontweight='bold')
        x = np.arange(0, self.config.num_epochs)
        plt.plot(x, self.logger['train_vqvae_loss_list'], color='green', label='train vqvae loss')
        plt.plot(x, self.logger['train_decode_loss_list'], color='yellow', label='train decode loss')
        plt.legend(loc='upper right')  # legend in the upper-right corner
        plt.savefig(os.path.join(self.config.output_dir, 'loss_curve.png'), bbox_inches='tight', dpi=300)
        plt.show()

    def eval(self, epoch=None):
        """Reconstruct a random test batch and save original/reconstruction grids."""
        if epoch is None:
            # NOTE(review): standalone calls will log epoch+1 == num_epochs + 1.
            epoch = self.config.num_epochs
        test_dataset_len = len(self.config.test_dataset)
        bn = self.config.eval_batch_size
        # Most-square grid factorization of the batch size.
        rows = int(math.sqrt(bn))
        while bn % rows != 0:
            rows -= 1
        cols = bn // rows
        sample_index = np.random.randint(0, test_dataset_len, bn)
        images_list = [self.config.test_dataset[i] for i in sample_index]

        images = torch.stack(images_list, dim=0).to(self.device)
        # Pure visualization — no gradients needed.
        with torch.no_grad():
            reconstruction_images = self.vae(images).sample
        # Un-normalize from [-1, 1] to [0, 1] (assumes that normalization upstream),
        # then convert NCHW tensors to PIL images.
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).detach().numpy()
        images = numpy_to_pil(images)
        reconstruction_images = (reconstruction_images / 2 + 0.5).clamp(0, 1)
        reconstruction_images = reconstruction_images.cpu().permute(0, 2, 3, 1).detach().numpy()
        reconstruction_images = numpy_to_pil(reconstruction_images)
        # Make a grid out of the images.
        sample_images_grid = self.make_grid(images, rows=rows, cols=cols)
        reconstruction_images_grid = self.make_grid(reconstruction_images, rows=rows, cols=cols)
        # Save the grids under <output_dir>/samples/.
        test_dir = os.path.join(self.config.output_dir, "samples")
        os.makedirs(test_dir, exist_ok=True)
        sample_images_grid.save(f"{test_dir}/sample_images_grid_{epoch:04d}.png")
        reconstruction_images_grid.save(f"{test_dir}/reconstruction_images_grid_{epoch:04d}.png")
        LOGGER.info(f'🚀yl-diffusion-VQVAE {epoch+1} epoch eval has done!')

    def save_config(self):
        """Pickle the config minus the training data (the vae itself is kept for load())."""
        self.config.train_dataset = None
        self.config.train_dataloader = None
        with open(os.path.join(self.config.output_dir, f'{self.config.task_type}_config.pickle'), "wb") as f:
            pickle.dump(self.config, f)

    def load(self, checkpoint_path, config_pickle_path):
        """Rebuild the trainer from a state_dict checkpoint + pickled config.

        SECURITY: torch.load / pickle.load execute arbitrary code; trusted files only.
        NOTE(review): save_config() strips train_dataloader, so init_model's
        len(self.train_dataloader) will fail unless a dataloader is re-attached
        to the config before calling load — TODO confirm intended usage.
        """
        ckpt = torch.load(checkpoint_path)
        with open(config_pickle_path, 'rb') as f:
            config = pickle.load(f)
        config.vae.load_state_dict(ckpt)
        self.init_base(config)
        self.init_dataloader(config)
        self.init_model(config)