# -*- coding: utf-8 -*-
import math
import pathlib
import random
from typing import Callable, List, Optional

import h5py
import numpy as np
import torch
import torch.utils.data as Data
from torch.nn.utils.rnn import pad_sequence
import os

"""create dataset"""


class MCFDataSet(Data.IterableDataset):
    """A universal iterable dataset for MCF research.

    Each item corresponds to one HDF5 file: the signals named in ``nodes``
    are read, zero-filled when absent, and stacked column-wise into a
    single ``[seq_len, node_size]`` array.
    """

    def __init__(
        self,
        h5_list: List[pathlib.Path],
        nodes: List[str],
        dtype: np.dtype = np.float32,
        sample_num: Optional[int] = None,
        std_callback: Optional[Callable] = None,
    ) -> None:
        """
        Args:
            h5_list: list of h5 files to read.
            nodes: keys of the h5 files to retrieve.
            dtype: the dtype of the arrays returned by this dataset.
            sample_num: if given and smaller than ``len(h5_list)``, keep only a
                random subset of that size (useful for model debugging).
            std_callback: optional function applied to each data array for
                standardization before it is yielded.
        Returns:
            None
        """
        if (sample_num is not None) and (sample_num < len(h5_list)):
            # Debug mode: keep a random subset of the files.
            h5_list = random.sample(h5_list, k=sample_num)
        self.h5_count = len(h5_list)
        self.h5_list = h5_list
        self.nodes = nodes
        self.dtype = dtype
        self.std_callback = std_callback

    def __iter__(self):
        """Shard ``h5_list`` across DataLoader workers; see genH5Data.

        Each worker receives a contiguous, non-overlapping slice of the file
        list so that no file is yielded twice per epoch.
        """
        worker_info = Data.get_worker_info()
        if worker_info is None:
            # Single-process loading: iterate over everything.
            iter_h5s = self.h5_list
        else:
            per_worker = int(math.ceil(self.h5_count / float(worker_info.num_workers)))
            iter_start = per_worker * worker_info.id
            iter_end = min(iter_start + per_worker, self.h5_count)
            iter_h5s = self.h5_list[iter_start:iter_end]
        return self.genH5Data(iter_h5s)

    def __len__(self):
        # Number of files (i.e. number of items yielded per epoch in total).
        return self.h5_count

    def genH5Data(self, iter_h5s):
        """
        Args:
            iter_h5s (Iterable[Path]): the h5 files to retrieve.
        Yields:
            data, time_len, node_flags: see getH5Data returns
            (``data`` is standardized by ``std_callback`` when one is set).
            h5_file: the path of the retrieved h5 file.
        """
        for h5_file in iter_h5s:
            data, time_len, node_flags = self.getH5Data(h5_file)
            if self.std_callback is not None:
                data = self.std_callback(data)
            yield data, time_len, node_flags, h5_file

    def getH5Data(self, h5_file):
        """
        Args:
            h5_file (Path): the h5 file to read.
        Returns:
            data (ndarray): [seq_len, node_size], seq_len: sequence length.
            timeLen (int): sequence length.
            node_flags (list): length node_size; 1 if the node exists in the
                h5 file, 0 if it was zero-filled.
        """
        with h5py.File(h5_file, mode="r") as hf:
            # Read the time axis once and reuse it for both the length and
            # the existence checks.
            times = hf["time"][()]
            timeLen = len(times)
            node_flags = []
            columns = []
            for node in self.nodes:
                # A null dataset (shape is None) means the signal is missing
                # from this shot: substitute zeros and flag it as absent.
                if hf[node].shape is None:
                    node_flags.append(0)
                    nodeData = np.zeros(timeLen, dtype=self.dtype)
                else:
                    node_flags.append(1)
                    nodeData = np.nan_to_num(hf[node][()])
                    # Guard against a node sampled on a different time base.
                    assert len(times) == len(nodeData), "h5_file: %s, Node:%s" % (
                        h5_file,
                        node,
                    )
                columns.append(np.asarray(nodeData, dtype=self.dtype).reshape(-1, 1))
            # Concatenate once instead of np.append per node (avoids
            # re-copying the growing array on every iteration).
            if columns:
                data = np.concatenate(columns, axis=1)
            else:
                data = np.empty((timeLen, 0), dtype=self.dtype)
        return (data, timeLen, node_flags)


def pad_collate(batch):
    """Collate ``(data, time_len, node_flags, h5_file)`` tuples into a padded batch.

    Replaces the previous two-branch implementation: ``pad_sequence`` already
    handles a single-element list, and the old single-element branch produced
    int64 ``val_channels`` while the multi-element branch produced int32 —
    now both dtypes are int32 regardless of batch size.

    Args:
        batch: (list of tuples) items yielded by ``MCFDataSet``:
            (data ndarray [seq_len, channels], time_len, node_flags, h5_file).
    Returns:
        batch_seq (Tensor): [batch, max_seq_len, channels]; zero-padded sequences.
        valid_len (Tensor): [batch] int32; the valid length of each padded sequence.
        val_channels (Tensor): [batch, channel_size] int32; the valid channels of
            each padded sequence.
        batch_files (list): the source file of each batch element.
    """
    seqs = [torch.from_numpy(item[0]) for item in batch]
    valid_len = torch.tensor([item[1] for item in batch], dtype=torch.int32)
    val_channels = torch.tensor([item[2] for item in batch], dtype=torch.int32)
    batch_files = [item[3] for item in batch]
    # batch_first=True gives [batch, seq, channel]; dtype follows the inputs.
    batch_seq = pad_sequence(seqs, batch_first=True)
    return batch_seq, valid_len, val_channels, batch_files



def create_datasets(config, is_debug=False):
    """Build train/validation datasets from the configuration.

    Args:
        config: the configuration dict; uses ``config['nodes']`` (node group
            names under ``input_list``/``output_list``) and ``config['data']``
            (``root_dir``, ``split_ratio``).
        is_debug: if True, keep only the first 10 files for a quick run.
    Returns:
        train_set (IterableDataset),
        val_set (IterableDataset)
    """
    node_cfg = config['nodes']
    # Input nodes first, then output nodes — column order in the dataset
    # follows this list.
    input_nodes = [n for group in node_cfg['input_list'] for n in node_cfg[group]]
    output_nodes = [n for group in node_cfg['output_list'] for n in node_cfg[group]]
    nodes = input_nodes + output_nodes

    data_dir = pathlib.Path(os.path.expandvars(config['data']['root_dir']))
    # iterdir() order is filesystem-dependent; sort so the train/val split is
    # reproducible across runs and machines.
    h5s = sorted(data_dir.iterdir())
    if is_debug:
        h5s = h5s[:10]

    data_delimiter = int(len(h5s) * config['data']['split_ratio'])
    train_set = MCFDataSet(h5s[:data_delimiter], nodes)
    val_set = MCFDataSet(h5s[data_delimiter:], nodes)
    return train_set, val_set