import os
import sys
import codecs
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from typing import Any, Callable, Dict, List, Optional, Tuple
from torchvision.transforms import (ToTensor)
from . import tools


class YRBASIN(Dataset):
    """Yellow River Basin dataset stored as idx-format feature/target files.

    Each sample is a ``(feature, target)`` pair where ``target`` is cast to
    an ``int`` class index. File loading is delegated to the project-local
    ``tools.load_data`` helper.
    """

    def __init__(
            self,
            root: str,
            data_flag: int = 0,
            transform: Optional[Callable] = None,
            target_transform: Optional[Callable] = None) -> None:
        """
        Initialize the dataset and eagerly load all samples into memory.

        Args:
            root: Directory containing the idx-format data files.
            data_flag: Which split to load: 0 = training set, 1 = test set.
            transform: Optional callable applied to each feature on access.
            target_transform: Optional callable applied to each target on
                access.
        """
        self.root = root
        # Kept under the historical attribute name for backward
        # compatibility; it selects the split (0 = train, 1 = test).
        self.train_flag = data_flag
        self.transform = transform
        self.target_transform = target_transform
        self.features, self.targets = self._load_data()

    @property
    def length(self) -> int:
        """Number of samples in the dataset."""
        return len(self.features)

    def __len__(self) -> int:
        return self.length

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index of the sample.

        Returns:
            tuple (feature, target): where target is the integer index of
            the target class.
        """
        feature, target = self.features[index], int(self.targets[index])
        # Explicit None checks so falsy-but-valid callables are not skipped.
        if self.transform is not None:
            feature = self.transform(feature)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return feature, target  # X, y

    def _load_data(self) -> Tuple[Any, Any]:
        """Load the feature/target file pair for the selected split.

        Training set files:
            train_yrb_features_idx12_float
            train_yrb_targets_idx01_ubyte

        Test set files:
            t10k_yrb_features_idx12_float
            t10k_yrb_targets_idx01_ubyte
        """
        feature_files = ["train_yrb_features_idx12_float",
                         "t10k_yrb_features_idx12_float"]

        target_files = ["train_yrb_targets_idx01_ubyte",
                        "t10k_yrb_targets_idx01_ubyte"]

        feature_file = os.path.join(self.root, feature_files[self.train_flag])
        target_file = os.path.join(self.root, target_files[self.train_flag])
        return tools.load_data(feature_file, target_file)


"""
...
"""

'''

def read_image_file_bak(path: str) -> torch.Tensor:
    x = tools.read_sn3_pascalvincent_tensor(path, strict=False)
    if x.dtype != torch.float32:
        raise TypeError(
            f"x should be of dtype torch.uint8 instead of {x.dtype}")
    if x.ndimension() != 4:
        raise ValueError(
            f"x should have 4 dimension instead of {x.ndimension()}")
    return x


def read_label_file_bak(path: str) -> torch.Tensor:
    x = tools.read_sn3_pascalvincent_tensor(path, strict=False)
    if x.dtype != torch.uint8:
        raise TypeError(
            f"x should be of dtype torch.uint8 instead of {x.dtype}")
    if x.ndimension() != 1:
        raise ValueError(
            f"x should have 1 dimension instead of {x.ndimension()}")
    return x.long()


SN3_PASCALVINCENT_TYPEMAP = {
    8: torch.uint8,
    9: torch.int8,
    11: torch.int16,
    12: torch.int32,
    13: torch.float32,
    14: torch.float64,
}


def read_sn3_pascalvincent_tensor_bak(path: str, strict: bool = True) -> torch.Tensor:
    """
    Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
    Argument may be a filename, compressed filename, or file object.
    """
    # read
    with open(path, "rb") as f:
        data = f.read()

    # parse
    magic = get_int(data[0:4])
    nd = magic % 256
    ty = magic // 256
    assert 1 <= nd <= 4
    assert 8 <= ty <= 14
    torch_type = SN3_PASCALVINCENT_TYPEMAP[ty]

    # nd = 4 (I, D, H, W) = (1200, 12, 9, 9)
    s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
    # s[0] = 60000

    # The MNIST format uses the big endian byte order. If the system uses little endian byte order by default,
    # we need to reverse the bytes before we can read them with torch.frombuffer().
    # num_bytes_per_value = torch.iinfo(torch_type).bits // 8
    # num_bytes_per_value = torch.finfo(torch_type).bits // 8
    # needs_byte_reversal = sys.byteorder == "little" and num_bytes_per_value > 1

    parsed = torch.frombuffer(
        bytearray(data), dtype=torch_type, offset=(4 * (nd + 1)))
    # if needs_byte_reversal:
    #     parsed = parsed.flip(0)
    assert parsed.shape[0] == np.prod(s) or not strict
    return parsed.view(*s)


def get_int(b: bytes) -> int:
    return int(codecs.encode(b, "hex"), 16)

'''
