import os

import numpy as np
import pandas as pd
import torch
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset
from tqdm import tqdm


class stock_prediction_data_set(Dataset):
    def __init__(self, data_dir, data_file_limit=0.5, window=30, data_column=None, label_column="", cache_label="train"):
        '''
        Sliding-window dataset built from per-file stock CSVs.

        Each CSV in ``data_dir`` is forward-filled, standardized, and cut into
        overlapping windows of ``window`` rows: the first ``window - 1`` rows
        (restricted to ``data_column``) form one sample, and the truthiness of
        ``label_column`` in the last row yields a one-hot label
        (``[1, 0]`` if truthy, ``[0, 1]`` otherwise). Results are cached to
        ``.npy`` files in the current working directory and reloaded on the
        next construction with the same parameters.

        Known CSV columns:
        'openIndex', 'closeIndex', 'highIndex', 'lowIndex', 'turnoverValueIndex', 'turnoverVolIndex', 'tradeDate', 'zm3', 'zm6', 'z1', 'z2', 'z3', 'z5', 'z7', 'z10', 'z30', 'm2', 'm5', 'm10', 'm30', 'openHl', 'closeHl', 'highHl', 'lowHl', 'profit', 'profitPct'

        :param data_dir: directory whose regular files are the input CSVs
        :param data_file_limit: fraction of the directory's files to load
            (``int(len(files) * data_file_limit)`` files are read)
        :param window: rows per sliding window; input is the first
            ``window - 1`` rows, the label comes from the last row
        :param data_column: feature column names for the input; defaults to
            an empty list (``None`` avoids the shared mutable-default pitfall)
        :param label_column: column whose value in the window's last row is
            interpreted as a boolean label
        :param cache_label: suffix distinguishing cache files, e.g.
            "train" vs "test"
        '''
        # FIX: a literal [] default would be one shared list across all
        # instances; use None as the sentinel instead.
        self.data_column = [] if data_column is None else data_column
        self.label_column = label_column
        self.data = []
        self.label = []
        self.file = {}
        # Cache file names encode the parameters so different configurations
        # do not collide.
        data_cache = str(data_file_limit) + "-" + str(window) + "-data" + cache_label + ".npy"
        label_cache = str(data_file_limit) + "-" + str(window) + "-label" + cache_label + ".npy"

        if os.path.exists(data_cache) and os.path.exists(label_cache):
            # Fast path: reuse previously materialized arrays.
            self.data = np.load(data_cache)
            self.label = np.load(label_cache)
        else:
            file_list = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
            for data_path_item in tqdm(file_list[0:int(len(file_list) * data_file_limit)], desc="数据加载"):
                result = pd.read_csv(os.path.join(data_dir, data_path_item))
                # FIX: fillna(method='ffill') is deprecated (removed in
                # pandas 3.0); DataFrame.ffill() is the exact equivalent.
                result = result.ffill()
                # NOTE(review): the scaler is fit per file over the whole
                # series, so statistics leak from future rows into earlier
                # windows — acceptable for feature scaling here, but confirm
                # this is intentional for evaluation data.
                scaler = StandardScaler()
                columns_to_normalize = ['openIndex', 'closeIndex', 'highIndex', 'lowIndex', 'turnoverValueIndex',
                                        'turnoverVolIndex', 'zm3', 'zm6', 'z1', 'z2', 'z3', 'z5', 'z7', 'z10', 'z30',
                                        'm2',
                                        'm5', 'm10', 'm30', 'openHl', 'closeHl', 'highHl', 'lowHl']

                scaled_result = scaler.fit_transform(result[columns_to_normalize])
                result[columns_to_normalize] = scaled_result
                # Every window of `window` consecutive rows (stride 1).
                data_items = [result.iloc[i:i + window] for i in range(len(result) - window + 1)]
                for data_item in data_items:
                    # Skip windows that still contain NaNs (e.g. leading rows
                    # that forward-fill could not cover).
                    if data_item.isna().values.sum() == 0:
                        data_length = len(data_item)
                        data_value = data_item.iloc[0:data_length - 1][self.data_column]
                        label_value = data_item.iloc[data_length - 1][self.label_column]
                        self.data.append(data_value.values.tolist())
                        # One-hot: [1, 0] for a truthy label, [0, 1] otherwise.
                        self.label.append([1, 0] if label_value else [0, 1])
            # Materialize as arrays and persist so subsequent runs hit the
            # cache branch above.
            self.data = np.array(self.data)
            self.label = np.array(self.label)

            np.save(data_cache, self.data)
            np.save(label_cache, self.label)

    def __len__(self):
        # Number of windows collected (or loaded from cache).
        return len(self.data)

    def __getitem__(self, idx):
        # Float tensors: (window-1, n_features) input and (2,) one-hot label.
        return torch.tensor(self.data[idx], dtype=torch.float), torch.tensor(self.label[idx], dtype=torch.float)
