import os
import json
import random

from torch.utils.data import DataLoader
from diffusers import DDPMScheduler, DDIMScheduler
import torch.nn as nn
from torchvision.transforms import Compose
from torch.optim import *
from yldiffusers.utils import *
from yldiffusers.dataset import UnconditionalDDPMDataset, ConditionalDDPMDataset
from yldiffusers.models.unet import UNet2DModel
from yldiffusers.models.vae import VQModel

# Global pin_memory flag for every DataLoader; override with env var PIN_MEMORY ('true'/'false').
PIN_MEMORY = os.getenv('PIN_MEMORY', 'true').lower() == 'true'
def get_save_path(config):
    """Resolve a unique experiment output directory and store it on config.

    The directory name is '<dataset_path>_<image_size>_<task_type>_<n>' where
    n is one greater than the highest run index already present for the same
    task, so repeated runs never overwrite earlier results.

    Mutates ``config.output_dir`` in place (the directory itself is NOT
    created here; only the parent ``output_dir`` is ensured to exist).
    """
    output_dir = config.output_dir
    task_type = f'{config.dataset_path}_{config.image_size}_{config.task_type}'
    os.makedirs(output_dir, exist_ok=True)  # race-free vs. exists() + makedirs()
    run_indices = []
    for entry in os.listdir(output_dir):  # 'entry' avoids shadowing builtin 'dir'
        if task_type in entry:
            suffix = entry.split('_')[-1]
            if suffix.isdigit():  # skip matching dirs whose suffix is not a run index
                run_indices.append(int(suffix))
    cur = max(run_indices) + 1 if run_indices else 0
    config.output_dir = os.path.join(output_dir, f'{task_type}_{cur}')
def get_data_list(data_path, data_list_file_path):
    """Read an annotation file of '<relative_image_path> <label>' lines.

    Args:
        data_path: dataset root; joined with both the list file and each image path.
        data_list_file_path: annotation file path, relative to ``data_path``.

    Returns:
        ``(images, labels)``: absolute image paths and their integer labels,
        in file order. Blank or whitespace-only lines are skipped (the
        original crashed with IndexError on them).
    """
    labels, images = [], []
    file_path = os.path.join(data_path, data_list_file_path)
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:  # iterate lazily instead of readlines()
            parts = line.split()
            if not parts:  # tolerate blank lines in the annotation file
                continue
            images.append(os.path.join(data_path, parts[0]))
            labels.append(int(parts[1]))
    return images, labels
def get_images_file_list(data_path):
    """Return the full path of every entry directly under ``data_path``.

    Order follows ``os.listdir`` (arbitrary, filesystem-dependent).
    """
    full_paths = []
    for entry in os.listdir(data_path):
        full_paths.append(os.path.join(data_path, entry))
    return full_paths
def get_index2label(config):
    """Load the JSON index->label file and attach both mapping directions to config.

    Sets ``config.index2label`` (int index -> label) and ``config.label2index``
    (label -> int index). Mutates config in place.
    """
    with open(config.index2label_file_path, "r", encoding="utf-8") as f:
        raw_mapping = json.load(f)
    # Invert once with int keys, then derive the forward map from the inverse
    # so duplicate labels collapse exactly as in the inverse mapping.
    label2index = {label: int(index) for index, label in raw_mapping.items()}
    index2label = {index: label for label, index in label2index.items()}
    config.index2label = index2label
    config.label2index = label2index
def get_data(config):
    """Build train (and, for vqvae tasks, test) datasets and the train dataloader.

    Populates ``config.train_images_file_list`` / ``test_images_file_list``
    (plus label lists when an index2label file is configured), instantiates
    the dataset class named by ``config.dataset_name``, and wraps the train
    split in a seeded, shuffled DataLoader. Mutates config in place.
    """
    same_seeds(config.seed)  # fix random seeds so experiments are reproducible
    generator = torch.Generator()
    generator.manual_seed(6148914691236517205 + config.seed)
    task_suffix = config.task_type.split('_')[-1]  # renamed: don't shadow builtin 'type'
    if config.train_txt is None and config.valid_txt is None and config.test_txt is None:
        # No annotation files: scan the dataset directory directly.
        images_file_list = get_images_file_list(config.dataset_path)
        if task_suffix == 'vqvae':
            # vqvae training holds out 20% of the images for reconstruction eval
            random.shuffle(images_file_list)
            split = int(len(images_file_list) * 0.8)
            config.train_images_file_list = images_file_list[0:split]
            config.test_images_file_list = images_file_list[split:]
        else:
            config.train_images_file_list = images_file_list
    else:
        train_images_file_list, train_labels_list = get_data_list(config.dataset_path, config.train_txt)
        valid_images_file_list, valid_labels_list = get_data_list(config.dataset_path, config.valid_txt)
        test_images_file_list, test_labels_list = get_data_list(config.dataset_path, config.test_txt)
        # train and valid splits are merged for training
        config.train_images_file_list = train_images_file_list + valid_images_file_list
        if task_suffix == 'vqvae':
            config.test_images_file_list = test_images_file_list
        if config.index2label_file_path:
            config.train_labels_list = train_labels_list + valid_labels_list
    # NOTE(review): eval() resolves the dataset class from a config string —
    # acceptable for trusted configs only; never feed untrusted input here.
    if task_suffix == 'vqvae':
        config.test_dataset = eval(config.dataset_name)(config,
                                                        transforms=config.transforms,
                                                        mode='test',
                                                        max_nums=config.max_nums,
                                                        )
    config.train_dataset = eval(config.dataset_name)(config,
                                                     transforms=config.transforms,
                                                     mode='train',
                                                     max_nums=config.max_nums)
    config.train_dataloader = DataLoader(config.train_dataset,
                                         batch_size=config.train_batch_size,
                                         shuffle=True,
                                         drop_last=True,
                                         pin_memory=PIN_MEMORY,
                                         num_workers=config.workers,
                                         generator=generator
                                         )
def get_model_vae_noise_scheduler(config):
    """Instantiate the VAE (vqvae/ldm tasks), diffusion model, and noise scheduler.

    For 'vqvae' only the VAE is built; for 'ldm' the VAE is additionally
    restored from ``config.pretrained_vae_path`` before the diffusion model
    and scheduler are created. All objects come from the factory callables
    and kwargs stored in ``config.*_args``. Mutates config in place.
    """
    task_suffix = config.task_type.split('_')[-1]  # hoist the repeated split
    if task_suffix in ('vqvae', 'ldm'):
        config.vae = config.vae_args['vae_type'](**config.vae_args['vae_config'])
        if task_suffix == 'vqvae':
            return  # vqvae training needs no diffusion model or scheduler
        # ldm: start from a pretrained VAE checkpoint; map to CPU so loading
        # a GPU-saved checkpoint also works on CPU-only machines
        ckpt = torch.load(config.pretrained_vae_path, map_location='cpu')
        config.vae.load_state_dict(ckpt)
    config.model = config.model_args['model_type'](**config.model_args['model_config'])
    config.noise_scheduler = config.noise_scheduler_args['noise_scheduler_type'](**config.noise_scheduler_args['noise_scheduler_config'])
def get_config(config_dict, config_type):
    """Construct the run config and fully initialize it.

    Builds a ``config_type`` instance from ``config_dict``, then resolves the
    unique output directory, the label mappings (when configured), the
    model/VAE/scheduler objects, and — when a dataset is configured — the
    datasets and dataloaders.

    Returns:
        The fully initialized config object.
    """
    config = config_type(**config_dict)
    get_save_path(config)
    if config.index2label_file_path:
        get_index2label(config)
    get_model_vae_noise_scheduler(config)
    # 'is not None' (PEP 8) instead of '!= None'; note an empty string still
    # counts as "configured" here, matching the original behavior.
    if config_dict['dataset_path'] is not None and config_dict['dataset_name'] is not None:
        get_data(config)
    return config