import torch
from torch import device
import numpy as np
from pathlib import Path
import pickle


def standardization(data, mean, std):
    """
    Standardize values to zero mean and unit variance.

    :param data: values to standardize (tensor, array, or scalar)
    :param mean: mean to subtract
    :param std: standard deviation to divide by
    :return: the standardized values
    """
    centered = data - mean
    return centered / std


def restore(data, mean, std):
    """
    Undo standardization, mapping values back to their original scale.

    :param data: standardized values (tensor, array, or scalar)
    :param mean: mean to add back
    :param std: standard deviation to multiply back
    :return: values on the original scale
    """
    rescaled = std * data
    return rescaled + mean


class PreprocessData(object):
    """
    Load a pickled labeled dataset (train and test splits), optionally
    augment it with noisy copies, and standardize features using the
    statistics of the raw training split.
    """

    def __init__(self, data, filename, augment=True):
        self.length = None  # number of raw training samples (before augmentation)
        self.std = None     # per-feature std of the raw training data
        self.mean = None    # per-feature mean of the raw training data
        self.augment = augment
        # Training split: computes and stores mean/std/length, then standardizes.
        self.trainData, self.trainLabel = self.preprocess(Path('dataset', data, 'labeled', 'train', filename),
                                                          train=True)
        # Test split: reuses the training statistics so both splits share one scale.
        self.testData, self.testLabel = self.preprocess(Path('dataset', data, 'labeled', 'test', filename),
                                                        train=False)

    def preprocess(self, path, train=True):
        """
        Load one split, optionally augment it, and standardize it to
        zero mean / unit variance using the training statistics.

        :param path: pickle file containing a 2-D array whose last column
            is the label and whose remaining columns are features
        :param train: if True, (re)compute mean/std/length from this split
        :return: (data, label) FloatTensors
        :raises RuntimeError: if called with train=False before the
            training statistics have been computed
        """
        # NOTE(review): pickle.load is unsafe on untrusted input — only
        # use with trusted local dataset files.
        with open(str(path), 'rb') as f:
            data = torch.FloatTensor(pickle.load(f))
            label = data[:, -1]   # last column holds the label
            data = data[:, :-1]   # remaining columns are the features

        # Mean and std of the raw training split.
        if train:
            self.mean = data.mean(dim=0)
            # Assumes no constant feature column; a zero std would produce
            # inf/nan after standardization — TODO confirm upstream data.
            self.std = data.std(dim=0)
            self.length = len(data)
        elif self.mean is None or self.std is None:
            # Previously this crashed later with an opaque TypeError.
            raise RuntimeError('training statistics are missing; process the '
                               'training split before the test split')

        # Data augmentation (applied before standardization).
        if self.augment:
            data, label = self.augmentation(data, label)

        # Standardize with the *training* mean/std.
        data = standardization(data, self.mean, self.std)

        return data, label

    def augmentation(self, data, label, noise_ratio=0.05, noise_interval=0.0005, max_length=100000):
        """
        Augment the dataset by appending noisy copies of it.

        :param data: (N, F) feature tensor
        :param label: (N,) label tensor
        :param noise_ratio: noise magnitude as a fraction of the feature std
        :param noise_interval: loop step; the number of appended copies is
            ceil(noise_ratio / noise_interval), capped by max_length
        :param max_length: hard cap on the number of augmented samples
        :return: (augmentedData, augmentedLabel)
        """
        augmentedData = data.clone()
        augmentedLabel = label.clone()
        for _ in np.arange(0, noise_ratio, noise_interval):
            # Draw fresh noise on every pass so each appended copy is a
            # distinct sample. (Bug fix: a single noise tensor was drawn
            # once before the loop, so all copies were identical.)
            noiseData = noise_ratio * self.std.expand_as(data) * torch.randn_like(data)
            augmentedData = torch.cat([augmentedData, data + noiseData], dim=0)
            augmentedLabel = torch.cat([augmentedLabel, label])

            # Truncate once the sample cap is reached.
            if len(augmentedData) > max_length:
                augmentedData = augmentedData[:max_length]
                augmentedLabel = augmentedLabel[:max_length]
                break

        return augmentedData, augmentedLabel

    def batch(self, args, data, batch_size):
        """
        Trim the data to a whole number of batches and reshape it.

        :param args: namespace with a ``device`` attribute (e.g. 'cpu')
        :param data: (N, F) tensor
        :param batch_size: samples per batch
        :return: (N // batch_size, batch_size, F) tensor on args.device
        """
        batch_num = data.size(0) // batch_size
        trimmed_data = data.narrow(0, 0, batch_num * batch_size)  # drop the ragged tail
        batched_data = trimmed_data.contiguous().view(batch_size, -1, trimmed_data.size(-1)).transpose(0, 1)
        batched_data = batched_data.to(device(args.device))

        return batched_data
