from typing import Mapping, TypeVar, Generic, Optional, Sized, Iterator, List, Any

import numpy as np
import dgl
import pandas as pd
from dgl import NID, EID, heterograph, transforms, backend as F
from dgl._dataloading import utils
from dgl._dataloading.dataloader import _find_exclude_eids
# from dgl._dataloading.dataloader import EdgeCollator
import mindspore as ms

class DataFrameDataset:
    """Map-style dataset over a pandas DataFrame.

    Each item is the list of one row's column values, in column order.
    """

    def __init__(self, df):
        """
        Args:
            df (pandas.DataFrame): source table; rows are addressed
                positionally via ``iloc``.
        """
        self.data = df

    def __getitem__(self, indice):
        """Return the values of row ``indice`` as a plain list (column order)."""
        row = self.data.iloc[indice].to_dict()
        # list(...) instead of a redundant [v for v in ...] comprehension.
        return list(row.values())

    def __len__(self):
        return len(self.data)


class DataFrameWithNegDataset:
    """Map-style dataset pairing DataFrame rows with pre-drawn negatives.

    Each item is the row's column values (in column order) followed by the
    row's negative samples: ``[col0, col1, ..., negs[indice]]``.
    """

    def __init__(self, df, negs, cast_cols=('user', 'item', 'neg'), cast_dtype=np.int32):
        """
        Args:
            df (pandas.DataFrame): positive interactions, addressed
                positionally via ``iloc``.
            negs: per-row negative samples, indexable by the same positions.
            cast_cols: currently unused; kept so existing callers that pass
                it keep working.
            cast_dtype: currently unused; kept for the same reason.
        """
        self.data = df
        self.negs = negs

    def __getitem__(self, indice):
        row = self.data.iloc[indice].to_dict()
        # Appended last: dict insertion order puts negatives after the columns.
        row['neg'] = self.negs[indice]
        return list(row.values())

    def __len__(self):
        return len(self.data)


def _get_id_tensor_from_mapping(indices, keys):
    """Flatten a ``{type_name: id_tensor}`` mapping into one (N, 2) tensor.

    Column 0 holds the position of the type name within ``keys`` and
    column 1 the original ID, so heterogeneous seeds can be batched as a
    single tensor and regrouped later.

    NOTE(review): dtype is taken from the first mapping value and handed to
    ``ms.Tensor`` — assumes the mapping values expose a MindSpore-compatible
    ``dtype``; confirm against the callers.
    """
    first_value = next(iter(indices.values()))
    dtype = first_value.dtype
    total = sum(v.shape[0] for v in indices.values())
    id_tensor = ms.Tensor(np.empty((total, 2)), dtype=dtype)

    cursor = 0
    for type_id, key in enumerate(keys):
        if key not in indices:
            continue
        ids = indices[key]
        count = ids.shape[0]
        id_tensor[cursor:cursor + count, 0] = type_id
        id_tensor[cursor:cursor + count, 1] = ids
        cursor += count
    return id_tensor


T_co = TypeVar('T_co', covariant=True)

class Sampler(Generic[T_co]):
    r"""Abstract base class for index samplers.

    Subclasses must implement :meth:`__iter__`, yielding indices into a
    dataset. Implementing ``__len__`` is optional but expected anywhere the
    surrounding loader machinery asks for the number of indices per pass.
    """

    def __init__(self, data_source: Optional[Sized]) -> None:
        # The base class keeps no state; the argument exists only so that
        # subclasses share a uniform constructor signature.
        pass

    def __iter__(self) -> Iterator[T_co]:
        raise NotImplementedError


class SequentialSampler(Sampler[int]):
    r"""Yields indices ``0, 1, ..., len(data_source) - 1`` in order.

    Args:
        data_source (Sized): dataset whose index range is iterated.
    """
    data_source: Sized

    def __init__(self, data_source: Sized) -> None:
        self.data_source = data_source

    def __iter__(self) -> Iterator[int]:
        yield from range(len(self.data_source))

    def __len__(self) -> int:
        return len(self.data_source)


class BatchSampler(Sampler[List[int]]):
    r"""Groups indices from a base sampler into lists of ``batch_size``.

    Args:
        sampler (Sampler or Iterable): base sampler; any iterable of indices.
        batch_size (int): number of indices per emitted batch.
        drop_last (bool): when ``True``, a trailing batch shorter than
            ``batch_size`` is discarded instead of yielded.

    Example:
        >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
        >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    """

    def __init__(self, sampler: Sampler[int], batch_size: int, drop_last: bool) -> None:
        # No isinstance check on `sampler`: an object may be iterable via
        # `__getitem__` alone, which collections.abc.Iterable cannot detect.
        # bool is a subclass of int, so reject it explicitly before the
        # int/positivity checks.
        if isinstance(batch_size, bool) or not isinstance(batch_size, int) \
                or batch_size <= 0:
            raise ValueError("batch_size should be a positive integer value, "
                             "but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got "
                             "drop_last={}".format(drop_last))
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last

    def __iter__(self) -> Iterator[List[int]]:
        batch: List[int] = []
        for index in self.sampler:
            batch.append(index)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        # Emit the short trailing batch unless the caller opted out.
        if batch and not self.drop_last:
            yield batch

    def __len__(self) -> int:
        # Valid only when self.sampler itself implements __len__; that
        # cannot be enforced statically, so typechecking is not applied here.
        full_batches, remainder = divmod(len(self.sampler), self.batch_size)  # type: ignore[arg-type]
        if self.drop_last or remainder == 0:
            return full_batches
        return full_batches + 1


class _TensorizedDatasetIter(object):
    """Iterator yielding fixed-size batches of seed IDs from a tensor.

    ``dataset`` is either a 1-D tensor of IDs (homogeneous case,
    ``mapping_keys is None``) or an (N, 2) tensor of ``(type_id, id)`` rows
    built by ``_get_id_tensor_from_mapping`` (heterogeneous case), in which
    case each batch is converted back into a ``{type_name: ids}`` dict.
    """

    def __init__(self, dataset, batch_size, drop_last, mapping_keys):
        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last
        # Position i in this list is the type name for type_id == i
        # (see _get_id_tensor_from_mapping); None for homogeneous input.
        self.mapping_keys = mapping_keys
        # Cursor of the next unread row in `dataset`.
        self.index = 0

    def __iter__(self):
        return self

    def _next_indices(self):
        """Slice the next batch of rows out of `dataset`, or raise StopIteration."""
        num_items = self.dataset.shape[0]
        if self.index >= num_items:
            raise StopIteration
        end_idx = self.index + self.batch_size
        if end_idx > num_items:
            # Trailing short batch: drop it only when drop_last is set.
            if self.drop_last:
                raise StopIteration
            end_idx = num_items
        batch = self.dataset[self.index:end_idx]
        self.index += self.batch_size

        return batch

    def __next__(self):
        batch = self._next_indices()
        if self.mapping_keys is None:
            # Homogeneous input: return a copy of the batch slice as-is.
            return batch.copy()

        # convert the type-ID pairs to dictionary
        type_ids = batch[:, 0]
        indices = batch[:, 1]
        # Sort by type so each type's IDs form one contiguous run.
        # NOTE(review): the float16 cast looks like a workaround for
        # ms.ops.sort's dtype support; float16 is only exact for integers
        # up to 2048, so this assumes a small number of types — confirm.
        _, type_ids_sortidx = ms.ops.sort(ms.Tensor(type_ids, dtype=ms.float16))
        type_ids = type_ids[type_ids_sortidx]
        indices = indices[type_ids_sortidx]
        # type_id_uniq, type_id_count = ms.ops.unique_consecutive(type_ids, return_counts=True)
        type_id_uniq, type_id_count = np.unique(type_ids.asnumpy(), return_counts=True)
        type_id_uniq = type_id_uniq.tolist()
        # Prefix sums give each type's [start, end) offsets within `indices`.
        type_id_offset = np.cumsum(type_id_count, 0).tolist()
        type_id_offset.insert(0, 0)
        id_dict = {
            self.mapping_keys[type_id_uniq[i]]:
                indices[type_id_offset[i]:type_id_offset[i+1]].copy()
            for i in range(len(type_id_uniq))}
        return id_dict


class TensorizedDataset:
    """Batched view over a flat tensor (or per-type mapping) of seed IDs.

    Keeps the IDs in one tensor plus a permutation array, so shuffling only
    permutes indices and each epoch re-gathers the IDs in the current order.
    """

    def __init__(self, indices, batch_size, drop_last, shuffle):
        if isinstance(indices, Mapping):
            # Heterogeneous seeds: flatten {type: ids} into (type_id, id) rows.
            self._mapping_keys = list(indices.keys())
            self._id_tensor = _get_id_tensor_from_mapping(
                indices, self._mapping_keys)
        else:
            self._mapping_keys = None
            self._id_tensor = indices
        self._indices = np.arange(self._id_tensor.shape[0])
        self.batch_size = batch_size
        self.drop_last = drop_last
        self._shuffle = shuffle

    def shuffle(self):
        """Shuffle the dataset (permutes the index array in place)."""
        np.random.shuffle(self._indices)

    def __iter__(self):
        # Gather IDs in the current (possibly shuffled) order, then batch.
        id_tensor = self._id_tensor[ms.Tensor(self._indices, dtype=ms.int32)]
        return _TensorizedDatasetIter(
            id_tensor, self.batch_size, self.drop_last, self._mapping_keys)

    def __len__(self):
        total = self._id_tensor.shape[0]
        if self.drop_last:
            return total // self.batch_size
        return (total + self.batch_size - 1) // self.batch_size


class DataLoader:
    """Minimal iterable loader that batches seed indices via TensorizedDataset.

    Iterating the loader yields raw index batches; subclasses override
    :meth:`_next_data` to post-process each batch.
    """

    def __init__(self, indices, batch_size=1, drop_last=False, shuffle=False):
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.shuffle = shuffle
        self.index_dataset = TensorizedDataset(
            indices, batch_size, drop_last, shuffle)

    def __iter__(self):
        # Re-shuffle at the start of every epoch when requested.
        if self.shuffle:
            self.index_dataset.shuffle()
        self.index_iter = iter(self.index_dataset)
        return self

    def __next__(self) -> Any:
        return self._next_data()

    def _next_data(self):
        # Hook for subclasses; may raise StopIteration to end the epoch.
        return next(self.index_iter)

    def __len__(self):
        return len(self.index_dataset)


class NodeCollator(dgl._dataloading.NodeCollator):
    """DGL NodeCollator variant whose ``collate`` also accepts tuple items.

    NOTE(review): the tuple branch presumably handles heterogeneous items
    emitted as (node_type, id) pairs — confirm against the producing loader.
    """

    def collate(self, items):
        if not isinstance(items, dict) and isinstance(items[0], tuple):
            # returns a list of pairs: group them by node types into a dict
            items = utils.group_as_dict(items)
        # Convert to the tensor (or per-type tensor dict) form DGL expects.
        items = utils.prepare_tensor_or_dict(self.g, items, 'items')
        input_nodes, output_nodes, blocks = self.graph_sampler.sample_blocks(self.g, items)
        return input_nodes, output_nodes, blocks


class EdgeCollator(dgl._dataloading.EdgeCollator):
    """DGL EdgeCollator variant whose collate paths also accept tuple items.

    Both entry points first regroup list-of-tuple minibatches into the
    per-type dict form DGL expects, then follow the upstream collate logic.
    """

    def _collate(self, items):
        """Collate seed edges into (input_nodes, pair_graph, blocks)."""
        if not isinstance(items, dict) and isinstance(items[0], tuple):
            # returns a list of pairs: group them by node types into a dict
            items = utils.group_as_dict(items)
        items = utils.prepare_tensor_or_dict(self.g_sampling, items, 'items')

        # Subgraph induced by the minibatch edges; its nodes seed sampling.
        pair_graph = self.g.edge_subgraph(items)
        seed_nodes = pair_graph.ndata[NID]

        # Edges to hide from neighbor sampling (e.g. the seed edges and/or
        # their reverses), per the configured `exclude` policy.
        exclude_eids = _find_exclude_eids(
            self.g_sampling,
            self.exclude,
            items,
            reverse_eid_map=self.reverse_eids,
            reverse_etype_map=self.reverse_etypes)

        input_nodes, _, blocks = self.graph_sampler.sample_blocks(
            self.g_sampling, seed_nodes, exclude_eids=exclude_eids)

        return input_nodes, pair_graph, blocks


    def _collate_with_negative_sampling(self, items):
        """Collate seed edges plus sampled negatives into
        (input_nodes, pair_graph, neg_pair_graph, blocks)."""
        if not isinstance(items, dict) and isinstance(items[0], tuple):
            # returns a list of pairs: group them by node types into a dict
            items = utils.group_as_dict(items)
        items = utils.prepare_tensor_or_dict(self.g_sampling, items, 'items')

        # Keep original node IDs (relabel_nodes=False) so negatives drawn
        # against the full graph share the same ID space.
        pair_graph = self.g.edge_subgraph(items, relabel_nodes=False)
        induced_edges = pair_graph.edata[EID]

        neg_srcdst = self.negative_sampler(self.g, items)
        if not isinstance(neg_srcdst, Mapping):
            assert len(self.g.etypes) == 1, \
                'graph has multiple or no edge types; ' \
                'please return a dict in negative sampler.'
            neg_srcdst = {self.g.canonical_etypes[0]: neg_srcdst}
        # Get dtype from a tuple of tensors
        dtype = F.dtype(list(neg_srcdst.values())[0][0])
        ctx = F.context(pair_graph)
        # Fill empty (src, dst) pairs for etypes the sampler skipped so the
        # negative graph covers every canonical edge type.
        neg_edges = {
            etype: neg_srcdst.get(etype, (F.copy_to(F.tensor([], dtype), ctx),
                                          F.copy_to(F.tensor([], dtype), ctx)))
            for etype in self.g.canonical_etypes}
        neg_pair_graph = heterograph(
            neg_edges, {ntype: self.g.number_of_nodes(ntype) for ntype in self.g.ntypes})

        # Compact both graphs onto a shared minimal node set, then restore
        # the original edge IDs on the positive graph.
        pair_graph, neg_pair_graph = transforms.compact_graphs([pair_graph, neg_pair_graph])
        pair_graph.edata[EID] = induced_edges

        seed_nodes = pair_graph.ndata[NID]

        # Edges hidden from neighbor sampling per the `exclude` policy.
        exclude_eids = _find_exclude_eids(
            self.g_sampling,
            self.exclude,
            items,
            reverse_eid_map=self.reverse_eids,
            reverse_etype_map=self.reverse_etypes)

        input_nodes, _, blocks = self.graph_sampler.sample_blocks(
            self.g_sampling, seed_nodes, exclude_eids=exclude_eids)

        return input_nodes, pair_graph, neg_pair_graph, blocks


class NodeDataLoader(DataLoader):
    """DataLoader over seed nodes; each batch is collated into
    (input_nodes, output_nodes, blocks) by a NodeCollator."""

    def __init__(self, g, indices, sampler, batch_size=1, drop_last=False, shuffle=False):
        super().__init__(indices, batch_size, drop_last, shuffle)
        self.collator = NodeCollator(g, indices, graph_sampler=sampler)

    def _next_data(self):
        batch = next(self.index_iter)  # may raise StopIteration
        return self.collator.collate(batch)


class EdgeDataLoader(NodeDataLoader):
    """DataLoader over seed edges; each batch is collated by an EdgeCollator
    (with negative sampling) via the inherited ``_next_data``.

    Args:
        g: graph the seed edges belong to.
        indices: seed edge IDs (tensor or {etype: tensor} mapping).
        sampler: neighbor sampler exposing ``sample_blocks``.
        negative_sampler: callable producing negative (src, dst) pairs.
        batch_size, drop_last, shuffle: standard batching options.
    """

    def __init__(self, g, indices, sampler, negative_sampler, batch_size=1, drop_last=False, shuffle=False):
        # Bypass NodeDataLoader.__init__: it would construct a NodeCollator
        # that is immediately discarded when we install the EdgeCollator.
        # Initialize only the index-batching pipeline instead.
        DataLoader.__init__(self, indices, batch_size, drop_last, shuffle)
        self.collator = EdgeCollator(g, indices, graph_sampler=sampler, negative_sampler=negative_sampler)
