import mindspore.numpy as mnp
import mindspore.dataset as ds
import mindspore as ms
import numpy as np
import os

# from .transforms import *
# from .datasets_dummy import *
# from .datasets_real import *
from .datasets_mm import MultiModalDataset, MultiModalMAEMask
from .datasets_mm_guide import MultiModalDatasetGuide, MultiModalMAEMaskGuide
from .data_config import DataManager

MAX_ROWSIZE = 256 + 16

# def build_dataset(args):
#     if args.mask_guide:
#         print('Use Mask Guide!!!!')
#         return build_dataset_guide(args)
#     manager = DataManager(tuple(args.modality_list), total_dim=args.total_dim, seq_len=args.seq_len)
#     dataset_generator = MultiModalDataset(
#         manager,
#         balance=args.data_balance,
#         fix_batches=args.fix_batches
#     )

#     dataset = ds.GeneratorDataset(
#         source=dataset_generator, 
#         column_names=dataset_generator.column_names, 
#         # column_types=dataset_generator.column_types,
#         num_parallel_workers=args.num_workers, 
#         shuffle=True,  
#         max_rowsize=MAX_ROWSIZE,
#         )
#     train_sampler = ds.DistributedSampler(args.world_size, args.rank)
#     dataset.use_sampler(train_sampler)
#     mask = MultiModalMAEMask(manager, mask_ratio=args.mask_ratio)
#     dataset = dataset.map(
#         operations=mask,
#         input_columns=['image', 'modality_idx'],
#         output_columns=mask.column_names,
#         num_parallel_workers=args.num_workers,
#         max_rowsize=MAX_ROWSIZE,
#         )
#     dataset = dataset.batch(args.batch_size, drop_remainder=True)

#     return dataset


def build_dataset(args):
    """Build the sharded, masked multi-modal training dataset.

    Pipeline: guide-aware generator -> GeneratorDataset -> distributed
    sampler -> MAE-guide mask map -> batch (drop remainder).

    Args:
        args: namespace carrying modality_list, total_dim, seq_len,
            data_balance, fix_batches, mask_guide, num_workers,
            world_size, rank, guide_mode, guide_scale, mask_ratio,
            num_prefix_tokens and batch_size.

    Returns:
        A batched ``mindspore.dataset`` pipeline ready for training.
    """
    data_manager = DataManager(
        tuple(args.modality_list),
        total_dim=args.total_dim,
        seq_len=args.seq_len,
    )
    generator = MultiModalDatasetGuide(
        data_manager=data_manager,
        balance=args.data_balance,
        fix_batches=args.fix_batches,
        mask_guide=args.mask_guide,
    )

    train_ds = ds.GeneratorDataset(
        source=generator,
        column_names=generator.column_names,
        num_parallel_workers=args.num_workers,
        shuffle=True,
        max_rowsize=MAX_ROWSIZE,
    )
    # Shard the dataset across distributed ranks.
    train_ds.use_sampler(ds.DistributedSampler(args.world_size, args.rank))

    mask_op = MultiModalMAEMaskGuide(
        guide_mode=args.guide_mode,
        guide_scale=args.guide_scale,
        data_manager=data_manager,
        mask_ratio=args.mask_ratio,
        mask_guide=args.mask_guide,
        num_prefix_tokens=args.num_prefix_tokens,
    )
    train_ds = train_ds.map(
        operations=mask_op,
        input_columns=generator.column_names,
        output_columns=mask_op.column_names,
        num_parallel_workers=args.num_workers,
        max_rowsize=MAX_ROWSIZE,
    )
    return train_ds.batch(args.batch_size, drop_remainder=True)

# def build_dataset(args):
#     manager = DataManager(tuple(args.modality_list), total_dim=args.total_dim, seq_len=args.seq_len) if args.exp_type == 'mm' else None
#     if args.exp_type in ['mae', 'ijepa']:
#         # if args.data_path == '':
#         #     dataset_generator = DummyDataset3D(args)
#         # else:
#         #     dataset_generator = RealDataset(args)
#         dataset_generator = DummyDatasetMAE(args)
#     elif args.exp_type == 'clip':
#         dataset_generator = DummyDatasetMM(args)
#     elif args.exp_type == 'cls':
#         dataset_generator = DummyDataset3D(args, label=True)
#     elif args.exp_type == 'mm':
#         dataset_generator = MultiModalDataset(
#             manager,
#             balance=args.data_balance,
#             fix_batches=args.fix_batches
#         )
#     else:
#         raise NotImplementedError
#     dataset = ds.GeneratorDataset(
#         source=dataset_generator, 
#         column_names=dataset_generator.column_names, 
#         # column_types=dataset_generator.column_types,
#         num_parallel_workers=args.num_workers, 
#         shuffle=True,  
#         max_rowsize=MAX_ROWSIZE,
#         )
#     train_sampler = ds.DistributedSampler(args.world_size, args.rank)
#     dataset.use_sampler(train_sampler)
    
#     if args.exp_type != 'mm':
#         trans = [
#             # GammaAdjust(p=0.5),
#             # BrightnessContrast(p=0.5),
#             # SpatialAug(img_size=tuple(args.img_size), rot_degree=10, p_scale=0.3, p_rot=0.3),
#             WindowTransform(),
#         ]
#         dataset = dataset.map(
#             input_columns="image", 
#             num_parallel_workers=args.num_workers, 
#             operations=trans,
#             max_rowsize=MAX_ROWSIZE
#             )
    
    
#     if args.exp_type == 'mm':
#         mask = MultiModalMAEMask(manager, mask_ratio=args.mask_ratio)
#         dataset = dataset.map(
#             operations=mask,
#             input_columns=['image', 'modality_idx'],
#             output_columns=mask.column_names,
#             num_parallel_workers=args.num_workers,
#             max_rowsize=MAX_ROWSIZE,
#             )
#     # elif args.exp_type == 'mae':
#     #     mask = MaeMask(            
#     #         image_size=args.img_size,
#     #         patch_size=args.patch_size,
#     #         mask_ratio=args.mask_ratio
#     #     )
#     #     dataset = dataset.map(
#     #                 operations=mask,
#     #                 input_columns=['image'],
#     #                 output_columns=mask.column_names,
#     #                 num_parallel_workers=args.num_workers,
#     #                 max_rowsize=MAX_ROWSIZE
#     #                 )   

#     dataset = dataset.batch(args.batch_size, drop_remainder=True)
#     # ds.config.set_enable_autotune(True)
#     # ds.config.set_enable_shared_mem(False)
#     return dataset


def build_eval_dataset(args):
    """Build one evaluation dataset per modality.

    Each modality gets its own single-modality pipeline: generator ->
    GeneratorDataset (no shuffle) -> distributed sampler -> seeded MAE
    mask map -> batch (keep remainder).

    Args:
        args: namespace carrying modality_list, total_dim, seq_len,
            num_workers, world_size, rank, mask_ratio and batch_size.

    Returns:
        dict mapping each modality name to its batched eval pipeline.
    """
    modalities = tuple(args.modality_list)
    data_manager = DataManager(
        modalities, total_dim=args.total_dim, seq_len=args.seq_len
    )

    def _make_eval_pipeline(modality_name):
        # Restrict the generator to a single modality so each one is
        # evaluated independently.
        generator = MultiModalDataset(
            data_manager,
            single_modality=modality_name,
        )
        pipeline = ds.GeneratorDataset(
            source=generator,
            column_names=generator.column_names,
            num_parallel_workers=args.num_workers,
            shuffle=False,
            max_rowsize=MAX_ROWSIZE,
        )
        pipeline.use_sampler(ds.DistributedSampler(args.world_size, args.rank))
        # Fixed seed keeps the mask deterministic across eval runs.
        mask_op = MultiModalMAEMask(data_manager, mask_ratio=args.mask_ratio, seed=42)
        pipeline = pipeline.map(
            operations=mask_op,
            input_columns=['image', 'modality_idx'],
            output_columns=mask_op.column_names,
            num_parallel_workers=args.num_workers,
            max_rowsize=MAX_ROWSIZE,
        )
        return pipeline.batch(args.batch_size, drop_remainder=False)

    return {name: _make_eval_pipeline(name) for name in modalities}