import os
import sys
import time
from typing import *
import functools
import itertools
import argparse
from dataclasses import dataclass, field

import mindspore as ms
import mindspore
import mindspore.nn as mnn
import mindspore.ops as mops
import mindspore.train as mtrain
import mindspore.dataset as mds

import numpy as np

from models.mae_mm_native_guide import ViTMAEConfigMM, ViTMAEForPreTrainingMM
from models.vit_mm_native import ViTConfigMM, ViTModelMM
from data_utils.data_config import DataConfig, DataManager

from omegaconf import OmegaConf
import omegaconf


def get_default_config(root: str):
    """Load the base configuration from ``default.yaml`` under *root*."""
    return OmegaConf.load(os.path.join(root, 'default.yaml'))


def get_extra_config(root: str):
    """Load the experiment-specific overrides from ``ttvitbase.yaml`` under *root*.

    Fix: the original bound the loaded config to a local named
    ``get_default_config``, shadowing the sibling function of that name and
    inviting confusion; the local is renamed.
    """
    extra_cfg = OmegaConf.load(os.path.join(root, 'ttvitbase.yaml'))
    return extra_cfg


def get_predefined_config(root: str, config: str):
    """Load the YAML file named *config* from the directory *root*."""
    config_path = os.path.join(root, config)
    return OmegaConf.load(config_path)


def get_cli_config(args_list: List[str]):
    """Build an OmegaConf config from command-line ``key=value`` tokens.

    Each element of *args_list* is a ``Key=Value`` string (no leading ``--``
    or ``-``). A convenient way to collect them is argparse's REMAINDER::

        parser.add_argument(
            "opts",
            default=None,
            nargs=argparse.REMAINDER,
        )
        args = parser.parse(sys.argv[1:])

    ``args.opts`` is then the list of key-value strings to pass here.

    Example::

        >>> omegaconf.OmegaConf.from_cli(["dml=12", "skip-connection=True"])
        {'dml': 12, 'skip-connection': True}
    """
    return OmegaConf.from_cli(args_list)


def calculate_patch_ranges(patch_dict: Dict[str, Sequence[int]]) -> Dict[str, Tuple[int, int]]:
    """Assign each token group a contiguous half-open slice ``[lo, hi)``.

    Groups are laid out back to back in the iteration order of *patch_dict*;
    each group's width is the product of its patch shape.

    Fix: ``np.prod`` returns a numpy scalar, so the upper bounds used to be
    ``np.int64`` while the lower bounds were plain ``int``. Both are now cast
    to Python ``int`` for consistent, serializable output.

    :param patch_dict: mapping of group name -> patch shape (sequence of ints).
    :return: mapping of group name -> ``(lo, hi)`` with plain ``int`` bounds.
    """
    patch_ranges: Dict[str, Tuple[int, int]] = {}
    lo = 0
    for name, patch_shape in patch_dict.items():
        hi = lo + int(np.prod(patch_shape))
        patch_ranges[name] = (lo, hi)
        lo = hi
    return patch_ranges


@dataclass
class DataConfigMk2(object):
    """Configuration for a single data modality.

    Derived fields (``n_dim``, ``grid_size``, ``token_dim``, ``seq_len``) are
    computed in ``__post_init__`` from the image/patch geometry. ``lo``/``hi``
    and ``modality_idx`` are filled in later by the data manager.

    Fix: ``token_dim`` and ``seq_len`` were numpy scalars (``np.prod``) despite
    their ``int`` annotations; they are now cast to plain Python ``int``.
    """
    name: str
    img_size: Tuple[int, ...]
    patch_size: Tuple[int, ...]
    token_group: str
    num_channels: int = 1
    data_path: str = ''
    modality_idx: Optional[int] = None  # assigned by DataManagerMk2
    lo: Optional[int] = None  # start of this modality's token-dim slice; set by the manager
    hi: Optional[int] = None  # end (exclusive) of the token-dim slice; set by the manager
    n_dim: int = field(init=False)  # spatial rank, e.g. 2 for 2D, 3 for 3D
    grid_size: Tuple[int, ...] = field(init=False)  # patches per axis (floor division)
    token_dim: int = field(init=False)  # flattened patch size * channels
    seq_len: int = field(init=False)  # number of patches (tokens) per sample

    def __post_init__(self):
        assert len(self.img_size) == len(
            self.patch_size), f"The dimension of patch_size ({self.patch_size}) and img_size ({self.img_size}) should be the same."
        # Coerce to canonical types; callers sometimes pass strings/omegaconf ints.
        self.img_size = tuple(int(x) for x in self.img_size)
        self.patch_size = tuple(int(x) for x in self.patch_size)
        self.num_channels = int(self.num_channels)
        self.n_dim = len(self.img_size)
        # Floor division: any remainder pixels at the edge are dropped.
        self.grid_size = tuple(s // p for s, p in zip(self.img_size, self.patch_size))
        self.token_dim = self.num_channels * int(np.prod(self.patch_size))
        self.seq_len = int(np.prod(self.grid_size))

    def get_range(self) -> Tuple[int, int]:
        """Return the ``(lo, hi)`` token-dim slice; ``(None, None)`` until the manager assigns it."""
        return self.lo, self.hi


class DataManagerMk2(DataManager):
    """Builds per-modality ``DataConfigMk2`` objects and assigns each modality a
    token-dimension slice and sequence padding within a shared layout.

    :param data_config_dict: name -> raw config with "input_size", "patch_size",
        "token_group", "num_channels" and "data_path" entries.
    :param patch_aggregate_dict: token-group name -> patch shape used to lay out
        the shared token dimension.
    :param total_dim: width of the shared token dimension; must cover all groups.
    :param seq_len: shared sequence length; must cover the longest modality.
    """

    def __init__(self,
                 data_config_dict: OrderedDict[str, Dict[str, Any]],
                 patch_aggregate_dict: Dict[str, Sequence[int]],
                 total_dim: int = 6144, seq_len: int = 2048):
        # NOTE(review): intentionally mirrors the original in NOT calling
        # super().__init__() — confirm DataManager needs no base-class setup.
        self.data_config_dict = data_config_dict
        self.patch_aggregate_dict = patch_aggregate_dict
        self.n_modality = len(data_config_dict)
        self.modality_list = list(patch_aggregate_dict.keys())
        self.total_dim = total_dim

        assert self.n_modality > 0, "There is no modality to begin with. Please check the modality_dict."
        t = []
        for k, v in data_config_dict.items():
            dc = DataConfigMk2(
                name=k,
                img_size=tuple(v["input_size"]),
                patch_size=tuple(v["patch_size"]),
                token_group=str(v["token_group"]),
                # Fix: was str(v["num_channels"]) — only worked because
                # DataConfigMk2.__post_init__ coerces back to int.
                num_channels=int(v["num_channels"]),
                data_path=v["data_path"]
            )
            t.append(dc)
        self.modalities = t

        # Lay groups out back to back along the token dimension.
        patch_ranges: Dict[str, Tuple[int, int]] = calculate_patch_ranges(patch_aggregate_dict)
        max_token_dim_pos = max(x[1] for x in patch_ranges.values())

        if max_token_dim_pos > total_dim:
            raise ValueError(
                f"Total dimension ({total_dim}) is smaller than the maximum value ({max_token_dim_pos}) in patch_ranges.")
        # NOTE(review): despite the name, this stores patch_size tuples, not
        # token dims (m.token_dim exists) — kept as-is; verify against callers.
        self.token_dims = [m.patch_size for m in self.modalities]

        # Simplified layout: every modality of a group shares that group's slice.
        for m_idx, m in enumerate(self.modalities):
            m.modality_idx = m_idx
            m.lo, m.hi = patch_ranges[m.token_group]

        self.seq_len = seq_len
        self.m_seq_lens = [m.seq_len for m in self.modalities]
        assert seq_len >= np.max(
            self.m_seq_lens), f"The total sequence length ({seq_len}) is smaller than the maximum sequence length ({np.max(self.m_seq_lens)}) in the modalities."
        # Right-padding needed to bring each modality up to the shared seq_len.
        self.pad_lens = [seq_len - m.seq_len for m in self.modalities]

    def __getitem__(self, idx) -> DataConfigMk2:
        """Return the modality config at position *idx*."""
        return self.modalities[idx]

    def get_modality_by_name(self, name) -> DataConfigMk2:
        """Return the modality config whose token-group name is *name*."""
        assert name in self.modality_list
        return self.modalities[self.modality_list.index(name)]

    def __len__(self) -> int:
        return len(self.modalities)


@dataclass
class ViTMAEConfigMMMk2:
    """Configuration for the multi-modal MAE pre-training model.

    ``__post_init__`` derives a ``DataManagerMk2`` from the raw data/patch
    settings and mirrors a couple of fields under the alias names the model
    implementation expects (``in_chans``, ``norm_pixel_loss``).
    """
    mask_ratio: float = 0.75  # fraction of tokens masked during pre-training
    modality_list: Tuple[str, ...] = ('3d_thick', '2d')
    data_config_list: Dict[str, Dict[str, Any]] = field(default_factory=dict)  # raw per-modality configs
    patch_aggregate_settings: Dict[str, Sequence[int]] = field(default_factory=dict)  # token-group -> patch shape
    modality_embed: bool = False
    num_channels: int = 4096  # shared token dimension (aliased as in_chans)
    seq_len: int = 2048  # shared sequence length across modalities
    initializer_range: float = 0.02
    # Encoder hyper-parameters.
    embed_dim: int = 384
    depth: int = 12
    num_heads: int = 6
    mlp_ratio: float = 4.0
    qkv_bias: bool = True
    attn_drop_rate: float = 0.0
    drop_rate: float = 0.0
    drop_path_rate: float = 0.0
    # Decoder hyper-parameters.
    decoder_embed_dim: int = 216
    decoder_depth: int = 8
    decoder_num_heads: int = 8
    norm_pix_loss: bool = True  # aliased as norm_pixel_loss in __post_init__
    loss_type: str = 'l2'
    recompute: bool = False
    guide_mode: str = 'patch'
    guide_scale: float = 2.0
    num_prefix_tokens: int = 1  # e.g. CLS token count
    use_flash_attn: bool = True
    in_chans: int = field(init=False)  # alias of num_channels
    norm_pixel_loss: bool = field(init=False)  # alias of norm_pix_loss
    manager: DataManager = field(init=False)  # built from the data/patch settings

    def __post_init__(self):
        self.manager = DataManagerMk2(
            data_config_dict=self.data_config_list,
            patch_aggregate_dict=self.patch_aggregate_settings,
            total_dim=self.num_channels,
            seq_len=self.seq_len)
        # Alias fields under the names the model implementation reads.
        self.in_chans = self.num_channels
        self.norm_pixel_loss = self.norm_pix_loss


def build_model(args: Union[argparse.Namespace, omegaconf.dictconfig.DictConfig]):
    """Build the multi-modal MAE model for pre-training.

    *args* may be an instance of Mapping[str, Any], argparse.Namespace, or
    omegaconf.dictconfig.DictConfig. Remember to add "exp_type" (set to mae
    for pretraining), "exp_name" and "output_dir" to *args*.

    Fix: the original constructed a DataManagerMk2 manually and then
    overwrote ``cfg.manager`` with it, even though
    ``ViTMAEConfigMMMk2.__post_init__`` already builds an equivalent manager
    from the very same arguments — the duplicate construction is removed.
    """
    modality_list = tuple(args.data_config_list.keys())

    cfg = ViTMAEConfigMMMk2(
        mask_ratio=args.mask_ratio,
        modality_list=modality_list,
        data_config_list=args.data_config_list,
        patch_aggregate_settings=args.patch_aggregate_settings,
        num_channels=args.total_dim,
        seq_len=args.seq_len,
        embed_dim=args.embed_dim,
        depth=args.depth,
        num_heads=args.num_heads,
        decoder_embed_dim=args.decoder_embed_dim,
        decoder_depth=args.decoder_depth,
        decoder_num_heads=args.decoder_num_heads,
        norm_pix_loss=args.norm_pix_loss,
        loss_type=args.loss_type,
        guide_mode=args.guide_mode,
        guide_scale=args.guide_scale,
        num_prefix_tokens=args.num_prefix_tokens,
        use_flash_attn=args.use_flash_attn
    )
    print("build_model, cfg.data_config_list: {0}, args.modality_list: {1}".format(
        cfg.data_config_list, modality_list), flush=True
    )
    model = ViTMAEForPreTrainingMM(config=cfg)
    return model


def build_dataset(args):
    """Assemble the multi-modal MAE training pipeline.

    Pipeline: generator -> distributed sampler -> mask-guide map -> batch.
    Returns the batched MindSpore dataset.
    """
    from data_utils.datasets_mm_guide import MultiModalDatasetGuide, MultiModalMAEMaskGuide

    manager = DataManagerMk2(
        args.data_config_list,
        args.patch_aggregate_settings,
        total_dim=args.total_dim,
        seq_len=args.seq_len)

    generator = MultiModalDatasetGuide(
        data_manager=manager,
        balance=args.data_balance,
        fix_batches=args.fix_batches,
        mask_guide=args.mask_guide
    )

    # Wrap the Python generator into a MindSpore dataset.
    ds = mds.GeneratorDataset(
        source=generator,
        column_names=generator.column_names,
        num_parallel_workers=args.num_workers,
        shuffle=True,
        max_rowsize=args.max_rowsize,
    )
    # Shard across ranks for distributed training.
    ds.use_sampler(mds.DistributedSampler(args.world_size, args.rank))

    mask_op = MultiModalMAEMaskGuide(
        guide_mode=args.guide_mode,
        guide_scale=args.guide_scale,
        data_manager=manager,
        mask_ratio=args.mask_ratio,
        mask_guide=args.mask_guide,
        num_prefix_tokens=args.num_prefix_tokens
    )
    ds = ds.map(
        operations=mask_op,
        input_columns=generator.column_names,
        output_columns=mask_op.column_names,
        num_parallel_workers=args.num_workers,
        max_rowsize=args.max_rowsize,
    )
    return ds.batch(args.batch_size, drop_remainder=True)


if __name__ == "__main__":
    # Layered configuration: defaults < experiment yaml < CLI "key=value"
    # pairs. In OmegaConf.merge the rightmost source wins.
    config_root = r"C:\Users\ly004\Documents\code\wpgeneralpurposemedicalfoundationmodels\simplified\config"
    base_conf = get_default_config(config_root)
    extra_conf = get_extra_config(config_root)
    cli_conf = get_cli_config(sys.argv[1:])  # use "key=value" to pass the argument
    cfg = OmegaConf.merge(base_conf, extra_conf, cli_conf)
    print(cfg)

    # Smoke-test the newly modified data manager.
    data_config = DataManagerMk2(
        cfg.data_config_list,
        cfg.patch_aggregate_settings,
        total_dim=cfg.total_dim,
        seq_len=cfg.seq_len)
    print(f"data_config.total_dim={data_config.total_dim}, data_config.pad_lens={data_config.pad_lens}")

    # Instantiate the model to check the configuration.
    model = build_model(cfg)
    print(model)

    build_dataset(cfg)
