from dataclasses import dataclass
from typing import Any

import torch.nn as nn
from torch.optim import *
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Compose

from diffusers import DDPMScheduler, DDIMScheduler

from yldiffusers.models.unet import UNet2DModel
from yldiffusers.models.vae import VQModel
@dataclass(repr=True)
class DDPMConfig:
    """Configuration for a DDPM training/sampling run.

    Instances are built from the ``*_ddpm_config_dict`` dictionaries in this
    module, e.g. ``DDPMConfig(**unconditional_flower_ddpm_config_dict)``.
    Fields typed with model/dataset classes are ``None`` in the config dicts
    and are populated at runtime.
    """
    image_size : int  # the generated image resolution
    nc : int  # number of classes (e.g. 102 for Oxford-102 flowers)
    train_batch_size : int
    eval_batch_size : int  # how many images to sample during evaluation
    num_epochs : int
    learning_rate : float
    weight_decay : float
    warm_epochs : int  # warm-up epochs (presumably for LR scheduling — confirm in trainer)
    task_type : str  # e.g. 'unconditional_ddpm' or 'conditional_ddpm'
    model_args : dict  # {'model_type': ..., 'model_config': ...} used to build the UNet
    model : UNet2DModel  # None in the config dicts; instantiated from model_args at runtime
    noise_scheduler_args : dict  # {'noise_scheduler_type': ..., 'noise_scheduler_config': ...}
    num_inference_steps : int
    loss_f : Any  # loss callable, e.g. nn.MSELoss()
    optimizer : Optimizer  # optimizer class (not an instance), e.g. AdamW
    save_image_epochs : int
    save_model_epochs : int
    mixed_precision : str  # `no` for float32, `fp16` for automatic mixed precision
    output_dir : str  # the model name locally and on the HF Hub
    seed : int
    train_txt : str
    valid_txt : str
    test_txt : str
    train_images_file_list : list
    index2label_file_path : str
    dataset_path : str
    dataset_name : str  # dataset class name, e.g. 'UnconditionalDDPMDataset'
    train_dataset : Dataset  # None in the config dicts; built at runtime
    train_dataloader : DataLoader  # None in the config dicts; built at runtime
    workers : int  # DataLoader worker count
    transforms : Compose  # None in the config dicts; image transforms set at runtime
    max_nums : int  # cap on number of images (None = no cap)
# Unconditional DDPM on Oxford-102 flowers at 128x128.
unconditional_flower_ddpm_config_dict = {
    'image_size': 128,  # the generated image resolution
    'nc': 102,
    'train_batch_size': 2,
    'eval_batch_size': 2,  # how many images to sample during evaluation
    'num_epochs': 4,
    'learning_rate': 1e-4,
    'weight_decay': 0.,
    'warm_epochs': 3,
    'task_type': 'unconditional_ddpm',
    'model_args': {
        'model_type': UNet2DModel,
        'model_config': {
            'sample_size': 128,  # the target image resolution
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per UNet block
            'block_out_channels': (128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
            'down_block_types': (
                "DownBlock2D",  # a regular ResNet downsampling block
                "DownBlock2D",
                "DownBlock2D",
                "DownBlock2D",
                "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
                "DownBlock2D",
            ),
            'up_block_types': (
                "UpBlock2D",  # a regular ResNet upsampling block
                "AttnUpBlock2D",  # a ResNet upsampling block with spatial self-attention
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
            ),
        },
    },
    'model': None,  # instantiated from model_args at runtime
    'noise_scheduler_args': {
        'noise_scheduler_type': DDPMScheduler,
        'noise_scheduler_config': {
            'num_train_timesteps': 500,
        },
    },
    'num_inference_steps': 500,
    'loss_f': nn.MSELoss(),
    'optimizer': AdamW,
    'save_image_epochs': 2,
    'save_model_epochs': 2,
    'mixed_precision': 'fp16',  # `no` for float32, `fp16` for automatic mixed precision
    'output_dir': 'ddpm',  # the model name locally and on the HF Hub
    'seed': 213,
    'train_txt': 'train.txt',
    'valid_txt': 'valid.txt',
    'test_txt': 'test.txt',
    'train_images_file_list': None,
    'index2label_file_path': '',
    'dataset_path': "oxford-102-flowers",
    'dataset_name': 'UnconditionalDDPMDataset',
    'train_dataset': None,
    'train_dataloader': None,
    'workers': 0,
    'transforms': None,
    'max_nums': None,
}
# Class-conditional DDPM on Oxford-102 flowers at 64x64.
conditional_flower_ddpm_config_dict = {
    'image_size': 64,  # the generated image resolution
    'nc': 102,
    'train_batch_size': 1,
    'eval_batch_size': 1,  # how many images to sample during evaluation
    'num_epochs': 4,
    'learning_rate': 1e-4,
    'weight_decay': 0.,
    'warm_epochs': 3,
    'task_type': 'conditional_ddpm',
    'model_args': {
        'model_type': UNet2DModel,
        'model_config': {
            'sample_size': 64,  # the target image resolution
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per UNet block
            'block_out_channels': (128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
            'down_block_types': (
                "DownBlock2D",  # a regular ResNet downsampling block
                "DownBlock2D",
                "DownBlock2D",
                "DownBlock2D",
                "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
                "DownBlock2D",
            ),
            'up_block_types': (
                "UpBlock2D",  # a regular ResNet upsampling block
                "AttnUpBlock2D",  # a ResNet upsampling block with spatial self-attention
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
            ),
            'num_class_embeds': 102,  # class-conditioning embedding table size
        },
    },
    'model': None,  # instantiated from model_args at runtime
    'noise_scheduler_args': {
        'noise_scheduler_type': DDPMScheduler,
        'noise_scheduler_config': {
            'num_train_timesteps': 500,
        },
    },
    'num_inference_steps': 500,
    'loss_f': nn.MSELoss(),
    'optimizer': AdamW,
    'save_image_epochs': 2,
    'save_model_epochs': 2,
    'mixed_precision': 'fp16',  # `no` for float32, `fp16` for automatic mixed precision
    'output_dir': 'ddpm',  # the model name locally and on the HF Hub
    'seed': 213,
    'train_txt': 'train.txt',
    'valid_txt': 'valid.txt',
    'test_txt': 'test.txt',
    'train_images_file_list': None,
    'index2label_file_path': 'oxford-102-flowers/index2label.json',
    'dataset_path': "oxford-102-flowers",
    'dataset_name': 'ConditionalDDPMDataset',
    'train_dataset': None,
    'train_dataloader': None,
    'workers': 0,
    'transforms': None,
    'max_nums': 16,
}
@dataclass(repr=True)
class LDMConfig:
    """Configuration for a latent-diffusion (LDM) training/sampling run.

    Extends the DDPM settings with a VQ-VAE used as the latent autoencoder.
    Instances are built from the ``*_ldm_config_dict`` dictionaries in this
    module; fields typed with model/dataset classes are ``None`` in the dicts
    and are populated at runtime.
    """
    image_size : int  # the generated image resolution
    nc : int  # number of classes (e.g. 102 for Oxford-102 flowers)
    train_batch_size : int
    eval_batch_size : int  # how many images to sample during evaluation
    num_epochs : int
    learning_rate : float
    weight_decay : float
    warm_epochs : int  # warm-up epochs (presumably for LR scheduling — confirm in trainer)
    task_type : str  # e.g. 'unconditional_ldm' or 'conditional_ldm'
    model_args : dict  # {'model_type': ..., 'model_config': ...} used to build the UNet
    model : UNet2DModel  # None in the config dicts; instantiated from model_args at runtime
    vae_args : dict  # {'vae_type': ..., 'vae_config': ...} used to build the VQ-VAE
    vae : VQModel  # None in the config dicts; instantiated/loaded at runtime
    pretrained_vae_path : str  # checkpoint path for the pretrained VQ-VAE
    noise_scheduler_args : dict  # {'noise_scheduler_type': ..., 'noise_scheduler_config': ...}
    num_inference_steps : int
    loss_f : Any  # loss callable, e.g. nn.MSELoss()
    optimizer : Optimizer  # optimizer class (not an instance), e.g. AdamW
    save_image_epochs : int
    save_model_epochs : int
    mixed_precision : str  # `no` for float32, `fp16` for automatic mixed precision
    output_dir : str  # the model name locally and on the HF Hub
    seed : int
    train_txt : str
    valid_txt : str
    test_txt : str
    train_images_file_list : list
    index2label_file_path : str
    dataset_path : str
    dataset_name : str  # dataset class name, e.g. 'ConditionalDDPMDataset'
    train_dataset : Dataset  # None in the config dicts; built at runtime
    train_dataloader : DataLoader  # None in the config dicts; built at runtime
    workers : int  # DataLoader worker count
    transforms : Compose  # None in the config dicts; image transforms set at runtime
    max_nums : int  # cap on number of images (None = no cap)
# Class-conditional LDM on Oxford-102 flowers (128x128 images, 32x32 latents).
conditional_flower_ldm_config_dict = {
    'image_size': 128,  # the generated image resolution
    'nc': 102,
    'train_batch_size': 1,
    'eval_batch_size': 1,  # how many images to sample during evaluation
    'num_epochs': 4,
    'learning_rate': 1e-4,
    'weight_decay': 0.,
    'warm_epochs': 3,
    'task_type': 'conditional_ldm',
    'model_args': {
        'model_type': UNet2DModel,
        'model_config': {
            'sample_size': 32,  # the target (latent) resolution
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per UNet block
            'block_out_channels': (128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
            'down_block_types': (
                "DownBlock2D",  # a regular ResNet downsampling block
                "DownBlock2D",
                "DownBlock2D",
                "DownBlock2D",
                "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
                "DownBlock2D",
            ),
            'up_block_types': (
                "UpBlock2D",  # a regular ResNet upsampling block
                "AttnUpBlock2D",  # a ResNet upsampling block with spatial self-attention
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
            ),
            'num_class_embeds': 102,  # class-conditioning embedding table size
        },
    },
    'model': None,  # instantiated from model_args at runtime
    'vae_args': {
        'vae_type': VQModel,
        'vae_config': {
            'sample_size': 16,
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per block
            'block_out_channels': (128, 256, 512),  # the number of output channels for each block
            'down_block_types': (
                "DownEncoderBlock2D",  # a regular ResNet downsampling block
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ),
            'up_block_types': (
                "UpDecoderBlock2D",  # a regular ResNet upsampling block
                "UpDecoderBlock2D",
                "UpDecoderBlock2D",
            ),
            'num_vq_embeddings': 8192,
        },
    },
    'vae': None,  # loaded from pretrained_vae_path at runtime
    'pretrained_vae_path': 'pretrained_model/oxford-102-flowers_VQModel.pt',
    'noise_scheduler_args': {
        'noise_scheduler_type': DDIMScheduler,
        'noise_scheduler_config': {
            'num_train_timesteps': 500,
        },
    },
    'num_inference_steps': 500,
    'loss_f': nn.MSELoss(),
    'optimizer': AdamW,
    'save_image_epochs': 2,
    'save_model_epochs': 2,
    'mixed_precision': 'fp16',  # `no` for float32, `fp16` for automatic mixed precision
    'output_dir': 'ldm',  # the model name locally and on the HF Hub
    'seed': 213,
    'train_txt': 'train.txt',
    'valid_txt': 'valid.txt',
    'test_txt': 'test.txt',
    'train_images_file_list': None,
    'index2label_file_path': 'oxford-102-flowers/index2label.json',
    'dataset_path': "oxford-102-flowers",
    'dataset_name': 'ConditionalDDPMDataset',
    'train_dataset': None,
    'train_dataloader': None,
    'workers': 0,
    'transforms': None,
    'max_nums': None,
}
# Class-conditional LDM on the garbage dataset (40 classes).
conditional_garbage_ldm_config_dict = {
    'image_size': 128,  # the generated image resolution
    'nc': 40,
    'train_batch_size': 1,
    'eval_batch_size': 1,  # how many images to sample during evaluation
    'num_epochs': 4,
    'learning_rate': 1e-4,
    'weight_decay': 0.,
    'warm_epochs': 3,
    'task_type': 'conditional_ldm',
    'model_args': {
        'model_type': UNet2DModel,
        'model_config': {
            'sample_size': 32,  # the target (latent) resolution
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per UNet block
            'block_out_channels': (128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
            'down_block_types': (
                "DownBlock2D",  # a regular ResNet downsampling block
                "DownBlock2D",
                "DownBlock2D",
                "DownBlock2D",
                "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
                "DownBlock2D",
            ),
            'up_block_types': (
                "UpBlock2D",  # a regular ResNet upsampling block
                "AttnUpBlock2D",  # a ResNet upsampling block with spatial self-attention
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
            ),
            'num_class_embeds': 40,  # class-conditioning embedding table size
        },
    },
    'model': None,  # instantiated from model_args at runtime
    'vae_args': {
        'vae_type': VQModel,
        'vae_config': {
            'sample_size': 16,
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per block
            'block_out_channels': (128, 256, 512),  # the number of output channels for each block
            'down_block_types': (
                "DownEncoderBlock2D",  # a regular ResNet downsampling block
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ),
            'up_block_types': (
                "UpDecoderBlock2D",  # a regular ResNet upsampling block
                "UpDecoderBlock2D",
                "UpDecoderBlock2D",
            ),
            'num_vq_embeddings': 8192,
        },
    },
    'vae': None,  # loaded from pretrained_vae_path at runtime
    'pretrained_vae_path': 'VQModel.pt',
    'noise_scheduler_args': {
        'noise_scheduler_type': DDIMScheduler,
        'noise_scheduler_config': {
            'num_train_timesteps': 500,
        },
    },
    'num_inference_steps': 500,
    'loss_f': nn.MSELoss(),
    'optimizer': AdamW,
    'save_image_epochs': 2,
    'save_model_epochs': 2,
    'mixed_precision': 'fp16',  # `no` for float32, `fp16` for automatic mixed precision
    'output_dir': 'ldm',  # the model name locally and on the HF Hub
    'seed': 213,
    'train_txt': 'train.txt',
    'valid_txt': 'valid.txt',
    'test_txt': 'test.txt',
    'train_images_file_list': None,
    'index2label_file_path': 'garbage/index2label.json',
    'dataset_path': "garbage",
    'dataset_name': 'ConditionalDDPMDataset',
    'train_dataset': None,
    'train_dataloader': None,
    'workers': 0,
    'transforms': None,
    'max_nums': None,
}
# Unconditional LDM (256x256 images, 64x64 latents).
unconditional_ldm_config_dict = {
    'image_size': 256,  # the generated image resolution
    'nc': 102,
    'train_batch_size': 8,
    'eval_batch_size': 8,  # how many images to sample during evaluation
    'num_epochs': 20,
    'learning_rate': 1e-4,
    'weight_decay': 0.,
    'warm_epochs': 3,
    'task_type': 'unconditional_ldm',
    'model_args': {
        'model_type': UNet2DModel,
        'model_config': {
            'sample_size': 64,  # the target (latent) resolution
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per UNet block
            'block_out_channels': (128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
            'down_block_types': (
                "DownBlock2D",  # a regular ResNet downsampling block
                "DownBlock2D",
                "DownBlock2D",
                "DownBlock2D",
                "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
                "DownBlock2D",
            ),
            'up_block_types': (
                "UpBlock2D",  # a regular ResNet upsampling block
                "AttnUpBlock2D",  # a ResNet upsampling block with spatial self-attention
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
                "UpBlock2D",
            ),
        },
    },
    'model': None,  # instantiated from model_args at runtime
    'vae_args': {
        'vae_type': VQModel,
        'vae_config': {
            'sample_size': 64,
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per block
            'block_out_channels': (128, 256, 512),  # the number of output channels for each block
            'down_block_types': (
                "DownEncoderBlock2D",  # a regular ResNet downsampling block
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ),
            'up_block_types': (
                "UpDecoderBlock2D",  # a regular ResNet upsampling block
                "UpDecoderBlock2D",
                "UpDecoderBlock2D",
            ),
            'num_vq_embeddings': 8192,
        },
    },
    'vae': None,  # loaded from pretrained_vae_path at runtime
    'pretrained_vae_path': 'vqvae/loli_dataset_256_unconditional_vqvae_1/VQModel.pt',
    'noise_scheduler_args': {
        'noise_scheduler_type': DDIMScheduler,
        'noise_scheduler_config': {
            'num_train_timesteps': 500,
        },
    },
    'num_inference_steps': 500,
    'loss_f': nn.MSELoss(),
    'optimizer': AdamW,
    'save_image_epochs': 2,
    'save_model_epochs': 2,
    'mixed_precision': 'fp16',  # `no` for float32, `fp16` for automatic mixed precision
    'output_dir': 'ldm',  # the model name locally and on the HF Hub
    'seed': 213,
    'train_txt': None,
    'valid_txt': None,
    'test_txt': None,
    'train_images_file_list': None,
    'index2label_file_path': '',
    'dataset_path': "loli_dataset",
    'dataset_name': 'UnconditionalDDPMDataset',
    'train_dataset': None,
    'train_dataloader': None,
    'workers': 15,
    'transforms': None,
    'max_nums': None,
}
@dataclass(repr=True)
class VAEConfig:
    """Configuration for training a VQ-VAE (the LDM latent autoencoder).

    Instances are built from the ``*_vqvae_config_dict`` dictionaries in this
    module; fields typed with model/dataset classes are ``None`` in the dicts
    and are populated at runtime.
    """
    image_size : int  # the generated image resolution
    train_batch_size : int
    eval_batch_size : int  # how many images to sample during evaluation
    num_epochs : int
    learning_rate : float
    weight_decay : float
    warm_epochs : int  # warm-up epochs (presumably for LR scheduling — confirm in trainer)
    task_type : str  # e.g. 'unconditional_vqvae'
    vae_args : dict  # {'vae_type': ..., 'vae_config': ...} used to build the VQ-VAE
    vae : VQModel  # None in the config dicts; instantiated from vae_args at runtime
    loss_f : Any  # loss callable, e.g. nn.MSELoss()
    optimizer : Optimizer  # optimizer class (not an instance), e.g. AdamW
    save_image_epochs : int
    save_model_epochs : int
    mixed_precision : str  # `no` for float32, `fp16` for automatic mixed precision
    output_dir : str  # the model name locally and on the HF Hub
    seed : int
    train_txt : str
    valid_txt : str
    test_txt : str
    train_images_file_list : list
    test_images_file_list : list
    index2label_file_path : str
    dataset_path : str
    dataset_name : str  # dataset class name, e.g. 'UnconditionalDDPMDataset'
    train_dataset : Dataset  # None in the config dicts; built at runtime
    train_dataloader : DataLoader  # None in the config dicts; built at runtime
    workers : int  # DataLoader worker count
    test_dataset : Dataset  # None in the config dicts; built at runtime
    transforms : Compose  # None in the config dicts; image transforms set at runtime
    max_nums : int  # cap on number of images (None = no cap)
# VQ-VAE pretraining on Oxford-102 flowers (128x128).
unconditional_flower_vqvae_config_dict = {
    'image_size': 128,  # the generated image resolution
    'train_batch_size': 2,
    'eval_batch_size': 2,  # how many images to sample during evaluation
    'num_epochs': 4,
    'learning_rate': 1e-3,
    'weight_decay': 0.,
    'warm_epochs': 3,
    'task_type': 'unconditional_vqvae',
    'vae_args': {
        'vae_type': VQModel,
        'vae_config': {
            'sample_size': 16,
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per block
            'block_out_channels': (128, 256, 512),  # the number of output channels for each block
            'down_block_types': (
                "DownEncoderBlock2D",  # a regular ResNet downsampling block
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ),
            'up_block_types': (
                "UpDecoderBlock2D",  # a regular ResNet upsampling block
                "UpDecoderBlock2D",
                "UpDecoderBlock2D",
            ),
            'num_vq_embeddings': 8192,
        },
    },
    'vae': None,  # instantiated from vae_args at runtime
    'loss_f': nn.MSELoss(),
    'optimizer': AdamW,
    'save_image_epochs': 2,
    'save_model_epochs': 2,
    'mixed_precision': 'fp16',  # `no` for float32, `fp16` for automatic mixed precision
    'output_dir': 'vqvae',  # the model name locally and on the HF Hub
    'seed': 213,
    'train_txt': 'train.txt',
    'valid_txt': 'valid.txt',
    'test_txt': 'test.txt',
    'train_images_file_list': None,
    'test_images_file_list': None,
    'index2label_file_path': '',
    'dataset_path': "oxford-102-flowers",
    'dataset_name': 'UnconditionalDDPMDataset',
    'train_dataset': None,
    'train_dataloader': None,
    'workers': 0,
    'test_dataset': None,
    'transforms': None,
    'max_nums': None,
}

# VQ-VAE pretraining on the loli dataset (no train/valid/test split files).
unconditional_vqvae_config_dict = {
    'image_size': 128,  # the generated image resolution
    'train_batch_size': 2,
    'eval_batch_size': 2,  # how many images to sample during evaluation
    'num_epochs': 20,
    'learning_rate': 1e-3,
    'weight_decay': 0.,
    'warm_epochs': 3,
    'task_type': 'unconditional_vqvae',
    'vae_args': {
        'vae_type': VQModel,
        'vae_config': {
            'sample_size': 16,
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per block
            'block_out_channels': (128, 256, 512),  # the number of output channels for each block
            'down_block_types': (
                "DownEncoderBlock2D",  # a regular ResNet downsampling block
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ),
            'up_block_types': (
                "UpDecoderBlock2D",  # a regular ResNet upsampling block
                "UpDecoderBlock2D",
                "UpDecoderBlock2D",
            ),
            'num_vq_embeddings': 8192,
        },
    },
    'vae': None,  # instantiated from vae_args at runtime
    'loss_f': nn.MSELoss(),
    'optimizer': AdamW,
    'save_image_epochs': 2,
    'save_model_epochs': 2,
    'mixed_precision': 'fp16',  # `no` for float32, `fp16` for automatic mixed precision
    'output_dir': 'vqvae',  # the model name locally and on the HF Hub
    'seed': 213,
    'train_txt': None,
    'valid_txt': None,
    'test_txt': None,
    'train_images_file_list': None,
    'test_images_file_list': None,
    'index2label_file_path': '',
    'dataset_path': "loli",
    'dataset_name': 'UnconditionalDDPMDataset',
    'train_dataset': None,
    'train_dataloader': None,
    'workers': 0,
    'test_dataset': None,
    'transforms': None,
    'max_nums': None,
}

# VQ-VAE pretraining on the garbage dataset.
unconditional_garbage_vqvae_config_dict = {
    'image_size': 128,  # the generated image resolution
    'train_batch_size': 2,
    'eval_batch_size': 2,  # how many images to sample during evaluation
    'num_epochs': 4,
    'learning_rate': 1e-3,
    'weight_decay': 0.,
    'warm_epochs': 3,
    'task_type': 'unconditional_vqvae',
    'vae_args': {
        'vae_type': VQModel,
        'vae_config': {
            'sample_size': 16,
            'in_channels': 3,  # the number of input channels, 3 for RGB images
            'out_channels': 3,  # the number of output channels
            'layers_per_block': 2,  # how many ResNet layers to use per block
            'block_out_channels': (128, 256, 512),  # the number of output channels for each block
            'down_block_types': (
                "DownEncoderBlock2D",  # a regular ResNet downsampling block
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ),
            'up_block_types': (
                "UpDecoderBlock2D",  # a regular ResNet upsampling block
                "UpDecoderBlock2D",
                "UpDecoderBlock2D",
            ),
            'num_vq_embeddings': 8192,
        },
    },
    'vae': None,  # instantiated from vae_args at runtime
    'loss_f': nn.MSELoss(),
    'optimizer': AdamW,
    'save_image_epochs': 2,
    'save_model_epochs': 2,
    'mixed_precision': 'fp16',  # `no` for float32, `fp16` for automatic mixed precision
    'output_dir': 'vqvae',  # the model name locally and on the HF Hub
    'seed': 213,
    'train_txt': 'train.txt',
    'valid_txt': 'valid.txt',
    'test_txt': 'test.txt',
    'train_images_file_list': None,
    'test_images_file_list': None,
    'index2label_file_path': '',
    'dataset_path': "garbage",
    'dataset_name': 'UnconditionalDDPMDataset',
    'train_dataset': None,
    'train_dataloader': None,
    'workers': 0,
    'test_dataset': None,
    'transforms': None,
    'max_nums': None,
}



if __name__ == '__main__':
    # Manual smoke test: build a config object from one of the dicts above and
    # inspect it, e.g.:
    #     config = DDPMConfig(**conditional_flower_ddpm_config_dict)
    #     print(config)
    # (The previous body only held dead commented-out code plus an unused
    # `from diffusers import VQModel` that shadowed the module-level VQModel.)
    pass