import pickle   # 用于在文件中存储和加载 Python 对象
import numpy as np 
import tensorflow as tf
import pandas as pd
import random

from sklearn.preprocessing import MinMaxScaler  # sklearn 库中的一个工具，用于归一化数据


'''
Dataset preprocessing:
data loading, missing-value imputation, min-max normalization,
train/validation splitting, sliding-window extraction and batching.
'''

class DataPreprocessor:
    """Dataset preprocessing helper.

    Handles loading pickled datasets, missing-value imputation (NaN -> 0),
    min-max normalization, train/validation splitting, sliding-window
    extraction, and optional conversion to a batched ``tf.data.Dataset``.
    """

    def __init__(self, window_size):
        # Fitted MinMaxScaler; built lazily by transform(build_scaler=True).
        self._scaler = None
        # Number of time steps in each sliding window.
        self._window_size = window_size

    def _load_pickle(self, filepath):
        """Load and return the pickled object stored at *filepath*.

        Raises:
            Exception: wraps any underlying error so the message carries
                the offending file path.

        NOTE(review): ``pickle.load`` executes arbitrary code when fed an
        untrusted file — only use with trusted data sources.
        """
        try:
            with open(filepath, 'rb') as f:
                return pickle.load(f)
        except Exception as e:
            raise Exception('Failed to load data from file {}\n {}'.format(filepath, e))

    def load_data(self, datapath):
        """Load train/test arrays and test labels from ``<datapath>_*.pkl``.

        Expects three files: ``<datapath>_train.pkl``, ``<datapath>_test.pkl``
        and ``<datapath>_test_label.pkl``. Train/test arrays are cast to
        float32 with NaNs replaced by zero; labels are returned unchanged.

        Returns:
            tuple: (train_data, test_data, labels)
        """
        train_data = self._load_pickle('{}_train.pkl'.format(datapath))
        test_data = self._load_pickle('{}_test.pkl'.format(datapath))
        labels = self._load_pickle('{}_test_label.pkl'.format(datapath))

        # np.nan_to_num replaces NaN with 0 after the float32 cast.
        train_data = np.nan_to_num(train_data.astype('float32'))
        test_data = np.nan_to_num(test_data.astype('float32'))
        return train_data, test_data, labels

    def transform(self, data, build_scaler=False):
        """Min-max normalize *data*.

        With ``build_scaler=True`` a fresh MinMaxScaler is fitted on *data*
        and kept for later calls; otherwise the previously fitted scaler is
        reused (e.g. fit on train data, then transform test data).

        Raises:
            ValueError: if no scaler has been fitted and build_scaler is False.
        """
        if build_scaler:
            self._scaler = MinMaxScaler()
            return self._scaler.fit_transform(data)
        if self._scaler is None:
            raise ValueError('Scaler has not been initialized yet,please initialize it')
        return self._scaler.transform(data)

    def train_val_split(self, data, validation_split):
        """Split *data* into a leading train part and a trailing validation part.

        Args:
            data: sliceable sequence/array of samples.
            validation_split: fraction in (0, 1) taken from the END of *data*.

        Returns:
            tuple: (train_data, val_data)

        Raises:
            ValueError: if validation_split is outside (0, 1), or it yields
                zero validation samples.
        """
        # BUG FIX: original raised the undefined name `ErrorValue`, which
        # produced a NameError at runtime instead of a ValueError.
        if validation_split <= 0 or validation_split >= 1:
            raise ValueError('Validation split is invalid {}\
                    it must be between 0 and 1'.format(validation_split))
        num_val_data = int(len(data) * validation_split)
        # BUG FIX: if num_val_data == 0, data[:-0] == data[:0] is empty, so
        # the whole dataset would silently land in the validation set.
        if num_val_data == 0:
            raise ValueError(
                'Validation split {} yields zero validation samples for '
                '{} data points'.format(validation_split, len(data)))
        train_data = data[:-num_val_data]
        val_data = data[-num_val_data:]
        return train_data, val_data

    def _time_window_sliding(self, data, step=1):
        """Extract overlapping windows of length ``self._window_size``.

        Args:
            data: array-like of shape (n_samples, n_features) — DataFrame,
                list, or ndarray.
            step: stride between consecutive window start positions.

        Returns:
            np.ndarray of shape (num_windows, window_size, n_features).

        Raises:
            TypeError: if *data* is not a DataFrame, list or ndarray.
        """
        # isinstance instead of exact type comparison: also accepts
        # subclasses of DataFrame/ndarray, which the original rejected.
        if isinstance(data, (pd.DataFrame, list)):
            data = np.array(data)
        elif not isinstance(data, np.ndarray):
            raise TypeError(
                'time_window_sliding only supports array-like of shape (n_samples, n_features), but data type is %s' % (
                    type(data)))
        windows = [data[i:i + self._window_size, :]
                   for i in range(0, len(data) - self._window_size + 1, step)]
        return np.array(windows)

    def generate_sliding_data(self, data, batch_size=None, shuffle=True):
        """Window *data*, optionally shuffle it, and optionally batch it.

        Returns:
            tuple: (sliding_data, num_data) where sliding_data is the ndarray
            of windows, or a batched ``tf.data.Dataset`` when batch_size is
            given; num_data is the number of windows.
        """
        sliding_data = self._time_window_sliding(data)
        num_data = len(sliding_data)
        print('num data', num_data)
        if shuffle:
            # BUG FIX: random.shuffle on a multi-dimensional ndarray corrupts
            # data — its swap `x[i], x[j] = x[j], x[i]` works on views, so
            # rows get duplicated. np.random.shuffle permutes along the first
            # axis correctly.
            np.random.shuffle(sliding_data)
        if batch_size:
            sliding_data = tf.data.Dataset.from_tensor_slices(sliding_data).batch(batch_size)
        return sliding_data, num_data