from mindspore import context
import mindspore as ms
import mindspore.dataset as ds
from data_utils.datasets_mm import MultiModalDataset, MultiModalMAEMask
from data_utils.data_config import DataManager
from models.mae_mm_native import ViTMAEConfigMM, ViTMAEForPreTrainingMM

# Smoke-test: wire the multi-modal MAE data pipeline to the model and run a
# single forward pass (one batch, one step) to verify end-to-end plumbing.

# Debug switch: swap to eager mode on CPU when Ascend/graph mode is unavailable.
# context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",)

# Hyperparameters shared between the data pipeline and the model config.
# Defined once so the two cannot drift apart.
MASK_RATIO = 0.75   # fraction of patches masked, used by both masker and model
TOTAL_DIM = 8192    # feature dimension; must equal the model's num_channels
SEQ_LEN = 2048      # sequence length; must match between data and model

modality_list = ['3d_thin', '3d_mid', '3d_thick']
manager = DataManager(modality_list, total_dim=TOTAL_DIM, seq_len=SEQ_LEN)
generator = MultiModalDataset(data_manager=manager)
dataset = ds.GeneratorDataset(
    source=generator, 
    column_names=generator.column_names, 
    shuffle=True,  
    )
# Apply MAE masking on the fly; it consumes (image, modality_idx) and emits
# the masked training columns declared by the masker itself.
mask = MultiModalMAEMask(manager, MASK_RATIO)
dataset = dataset.map(
    operations=mask,
    input_columns=['image', 'modality_idx'],
    output_columns=mask.column_names,
    )
dataset = dataset.batch(2)  # tiny batch — this is only a wiring check

config = ViTMAEConfigMM(
    mask_ratio=MASK_RATIO,
    modality_list=modality_list,
    norm_pix_loss=True,
    num_channels=TOTAL_DIM,
    seq_len=SEQ_LEN,
    embed_dim=384,
    depth=12,
    num_heads=6,
    decoder_embed_dim=384,
    decoder_depth=4,
    decoder_num_heads=6,
    )

model = ViTMAEForPreTrainingMM(config)
model.set_train(True)

# One forward pass only: print the loss and stop.
for inputs in dataset.create_tuple_iterator():
    # columns: image, modality_idx, instance_ids, target, rand_indices, unmask_index
    loss = model(*inputs)
    print(loss)
    break