import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from typing import List
from eegGAN import transform


def str_to_float(wavstr: str) -> np.ndarray:
    """Parse a bracketed, comma-separated EEG sample string into an array.

    :param wavstr: EEG samples serialized as "[v1,v2,...]" (surrounding
        brackets are assumed and stripped).
    :return: float32 array of shape (1, n).
    """
    # Let numpy do the str -> float32 conversion in one C-level pass
    # instead of a per-element Python float() loop.  O(n).
    return np.array(wavstr[1:-1].split(','), dtype=np.float32).reshape(1, -1)


class MyDataset(Dataset):
    def __init__(self, path="./dataset_part/test_BIS_10.csv", transforms=None, logger=None) -> None:
        """
        :param path: 文件路径
        :param divide: 归一化的分母
        """
        super().__init__()
        if logger:
            logger.info(f"loading data, dataset={path}")
        self.transforms = transforms
        self.path = path
        self.data = self._load_data(self.path)
        if logger:
            logger.info('load data success!')


    def _load_data(self, path):
        df_iter = pd.read_csv(
            path, header=0,
            iterator=True,
            chunksize=1000,
            skip_blank_lines=True
        )
        bis_list = []
        wav_list = []
        for df in df_iter:
            for bis, wav in zip(df['BIS'], df['WAV']):
                bis = np.float32(bis)
                wav = str_to_float(wav)
                bis_list.append(bis)
                wav_list.append(wav)

        bises, wavs = self._slice(bis_list, wav_list)
        if self.transforms:
            wavs = self.transforms(wavs)

        return list(zip(wavs, bises))

    def _slice(self, bis_list, wav_list, sec=2):
        step = 128 * sec
        bises = []
        wavs = []
        for bis, wav in zip(bis_list, wav_list):
            for i in range(0, len(wav), step):
                bises.append(bis)
                wavs.append(wav[0][i:i + step].reshape(1, -1))
        return bises, wavs

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):

        return self.data[index]


class MergeDataset(Dataset):
    """Concatenation of the samples of several MyDataset instances."""

    def __init__(self, datasets: List[MyDataset], logger=None):
        """
        :param datasets: datasets whose `data` lists are concatenated in order.
        :param logger: optional logger for progress messages.
        """
        super().__init__()
        if logger:
            logger.info(f"loading data...")
        self.data = self.merge(datasets)
        if logger:
            logger.info('load data success!')

    def merge(self, datasets: List[MyDataset]):
        """Flatten every dataset's `data` list into one sample list."""
        merged = []
        for ds in datasets:
            merged.extend(ds.data)
        return merged

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.data[item]


class DatasetFilter(Dataset):
    """View of `dataset` restricted to the samples at the given indices."""

    def __init__(self, dataset: Dataset, index: list):
        """
        :param dataset: any indexable dataset.
        :param index: indices to keep, in the order they should appear.
        """
        super().__init__()
        self.data = self.filter(dataset, index)

    def filter(self, dataset, index):
        """Materialize the selected samples in the order given by `index`."""
        return [dataset[i] for i in index]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.data[item]


if __name__ == "__main__":
    import logging

    # Smoke test: load one CSV with z-score normalization and report its size.
    logging.basicConfig(
        level=logging.DEBUG,
        format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s",
    )
    logger = logging.getLogger(__name__)
    dataset = MyDataset(
        "D:/Data/pycharm-workspace/EEG_pytorch/dataset_part/test_BIS_10.csv",
        transforms=transform.ZScore(),
        logger=logger,
    )
    print(len(dataset))
