import os
import time
from datetime import datetime
import numpy as np
from einops import rearrange
import json
from .data_config import DataManager, DataConfig


def rseq(file_path: str) -> dict:
    """
    Load a JSON file and return its content.

    Parameters:
    - file_path (str): The path to the JSON file. A missing '.json'
      suffix is appended automatically.

    Returns:
    - dict: The parsed content of the JSON file.

    Raises:
    - FileNotFoundError: if the file does not exist.
    - json.JSONDecodeError: if the file is not valid JSON.
    """
    if not file_path.endswith('.json'):
        file_path += '.json'

    # Fix: read with an explicit encoding — JSON is defined as UTF-8, and the
    # platform default (e.g. cp1252 on Windows) can mis-decode non-ASCII text.
    with open(file_path, 'r', encoding='utf-8') as f:
        content = json.load(f)

    return content


class ModalityData:
    def __init__(self, modality: DataConfig):
        root = modality.data_path
        self.img_size = modality.img_size
        self.patch_size = modality.patch_size
        self.grid_size = modality.grid_size
        self.token_dim = modality.token_dim
        self.num_channels = modality.num_channels
        self.is_tab = (modality.name == 'tab')
        if root == '':
            self.data_list = [None for _ in range(50)]  # for testing
        else:
            self.data_list = []
            for i in [os.path.join(root, p) for p in os.listdir(root) if '.json' in p]:
                self.data_list += rseq(i).get('files')
            # self.data_list = [os.path.join(root, p) for p in os.listdir(root)]
        print(f'Modality {modality.name}: {len(self.data_list)} instances')

    def patchify(self, d):
        p, g, c = self.patch_size, self.grid_size, self.num_channels
        if len(p) == 3:
            d = rearrange(
                d,
                'c (g0 p0) (g1 p1) (g2 p2) -> (g0 g1 g2) (c p0 p1 p2)',
                c=c, g0=g[0], g1=g[1], g2=g[2], p0=p[0], p1=p[1], p2=p[2]
            )
        elif len(p) == 2:
            d = rearrange(
                d,
                'c (g0 p0) (g1 p1) -> (g0 g1) (c p0 p1)',
                c=c, g0=g[0], g1=g[1], p0=p[0], p1=p[1],
            )
        elif len(p) == 1:
            d = rearrange(
                d,
                'c (g0 p0) -> g0 (c p0)',
                c=c, g0=g[0], p0=p[0],
            )
        return d

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        data = self.data_list[idx]
        if isinstance(data, str):
            data_path = os.path.join(data, 'img.npy')
            if self.num_channels == 3:
                data = np.load(data_path)
            else:
                data = np.load(data_path)[None, :]
        else:
            data = np.random.rand(self.num_channels, *self.img_size).astype(np.float32)
        data = self.patchify(data)
        return data


class MultiModalDataset:
    column_names = [
        'image', 'modality_idx',
    ]

    def __init__(self, data_manager: DataManager, single_modality=None, balance=False, fix_batches=-1, dataset_type=ModalityData):
        self.data_manager = data_manager
        self.n_modality = self.data_manager.n_modality
        if single_modality is not None:
            self.datasets = []
            for m in data_manager:
                if m.name == single_modality:
                    self.datasets.append(dataset_type(m))
                else:
                    self.datasets.append([])
        else:
            self.datasets = [dataset_type(m) for m in data_manager]
        self.datasets_len = [len(d) for d in self.datasets]

        self.balance = balance
        self.fix_batches = fix_batches

        max_datasets_len = max(self.datasets_len)
        if balance or self.fix_batches > 0:
            if self.fix_batches > 0 and self.fix_batches < max_datasets_len:
                print(
                    f'Warning: Fix batches ({self.fix_batches}) is smaller than max modality batches ({max_datasets_len})!')
            target_len = fix_batches if fix_batches > 0 else max_datasets_len
            data_list_c1 = np.concatenate([
                np.ones(target_len) * d_idx for d_idx, d in enumerate(self.datasets)
            ])
            data_list_c2 = np.concatenate([
                np.arange(target_len) % len(d) for d in self.datasets
            ])
        else:
            data_list_c1 = np.concatenate([np.ones(len(d)) * d_idx for d_idx, d in enumerate(self.datasets)])
            data_list_c2 = np.concatenate([np.arange(len(d)) for d in self.datasets])

        self.data_list = np.concatenate([data_list_c1.reshape(-1, 1), data_list_c2.reshape(-1, 1)], axis=1).astype(int)

        self.total_dim = data_manager.total_dim
        self.seq_len = data_manager.seq_len

    def __len__(self):
        return self.data_list.shape[0]

    def __getitem__(self, idx):
        m_idx, data_idx = self.data_list[idx]
        m = self.data_manager[m_idx]
        m_seq_len = m.seq_len
        lo, hi = m.get_range()
        ## load data
        data = np.zeros((self.seq_len, self.total_dim), dtype=np.float32)
        data[:m_seq_len, lo:hi] = self.datasets[m_idx][data_idx]
        return data, m_idx


class MultiModalMAEMask:
    column_names = [
        'image', 'modality_idx', 'instance_ids',
        'target_mask', 'rand_indices', 'unmask_idx',
    ]

    # column_types = [mstype.float16, mstype.int32, mstype.float16, mstype.int32, mstype.int32]
    def __init__(self, data_manager: DataManager, mask_ratio=0.75, seed=None):
        self.data_manager = data_manager
        self.n_modality = self.data_manager.n_modality

        self.total_dim = data_manager.total_dim
        self.seq_len = data_manager.seq_len
        self.pad_lens = data_manager.pad_lens
        self.m_seq_lens = data_manager.m_seq_lens

        self.keep_num = int((1 - mask_ratio) * self.seq_len)
        # precompute instance ids
        # plus one for cls token
        self.m_instance_ids = np.zeros((self.n_modality, 1 + self.seq_len), dtype=np.int)
        for m_idx in range(self.n_modality):
            self.m_instance_ids[m_idx, :self.m_seq_lens[m_idx] + 1] = 1
        self.seed = seed

    def __call__(self, data, m_idx):
        is_tab = False

        m = self.data_manager[m_idx]
        m_seq_len = m.seq_len
        lo, hi = m.get_range()
        pad_len = self.pad_lens[m_idx]

        ## add random mask
        if self.seed is not None:
            state = np.random.RandomState(seed=self.seed)
            noise = state.uniform(size=(self.seq_len,))
        else:
            noise = np.random.uniform(size=(self.seq_len,))

        rand_indices = np.argsort(noise, axis=0).astype(np.int32)
        # ids_restore = np.argsort(rand_indices, axis=0).astype(np.int)

        unmask_index = rand_indices[:self.keep_num]
        target_token_index = rand_indices[self.keep_num:]

        target_mask = np.zeros((self.seq_len, self.total_dim), dtype=bool)

        # instance id, 0 for pad and invalid tokens
        instance_ids = self.m_instance_ids[m_idx]

        if not is_tab:
            # for other modalities, the target mask is a tube over channels
            target_mask[target_token_index, lo:hi] = 1
        else:
            # invalid tokens are missing, takes value -1
            invalid_tokens = np.any((data < -0.5), axis=-1)
            instance_ids[invalid_tokens] = 0
            # tab diagonal mask
            target_token_channel_id = lo + (target_token_index % m_seq_len)
            target_mask[target_token_index, target_token_channel_id] = 1
            target_mask[invalid_tokens, :] = 0

        if pad_len > 0:
            target_mask[-pad_len:, :] = 0
        # blockwise attention mask
        # attention_mask = (instance_ids[None, :] == instance_ids[:, None]).astype(np.float32)

        return data, m_idx, instance_ids, target_mask, rand_indices, unmask_index
