import os
import random

import numpy as np
import torch
import pandas as pd
from pandas.core.frame import DataFrame
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from typing import List
import pickle
from typing import Optional
from MyUtil import check_and_create_path, get_logger
import time
from collections import Counter

seed = 1111
random.seed(seed)
random_state = seed
dataset_dir = '../dataset'


class SelectedFeatures:
    """Named feature-column sets used when loading the icing datasets.

    Each attribute is a sorted list of column names that callers pass as
    ``select_columns`` to the dataset ``get_data_labels`` classmethods.
    """
    # Algorithm-selected features for turbine 15.
    algorithm_select_columns_15 = [
        'environment_tmp',
        'generator_speed',
        'int_tmp',
        'pitch1_angle',
        'pitch1_moto_tmp',
        'pitch2_angle',
        'pitch2_moto_tmp',
        'pitch3_angle',
        'pitch3_moto_tmp',
        'pitch_angle_mean',
        'pitch_moto_tmp_mean',
        'pitch_moto_tmp_sd',
        'power',
        'r_square',
        'r_wind_speed_to_generator_speed',
        'r_wind_speed_to_power',
        'tmp_diff',
        'wind_speed_square'
    ]
    # Turbine-15 set extended with wind-speed features related to power.
    algorithm_select_columns_15_power = algorithm_select_columns_15 + [
        'wind_speed',
        'wind_speed_cube'
    ]
    # Algorithm-selected features for turbine 21.
    algorithm_select_columns_21 = [
        'environment_tmp',
        'generator_speed',
        'int_tmp',
        'pitch1_angle',
        'pitch1_moto_tmp',
        'pitch2_angle',
        'pitch2_moto_tmp',
        'pitch3_angle',
        'pitch3_moto_tmp',
        'pitch_angle_mean',
        'pitch_moto_tmp_mean',
        'power',
        'r_square',
        'r_wind_speed_to_generator_speed',
        'r_wind_speed_to_power',
        'tmp_diff',
        'wind_speed_square',
        'yaw_position'
    ]
    # The 15 set uniquely has pitch_moto_tmp_sd; the 21 set uniquely has yaw_position.
    union_1521_columns = sorted(list(set(algorithm_select_columns_15 + algorithm_select_columns_21)))
    inter_1521_columns = sorted(set(algorithm_select_columns_15).intersection(algorithm_select_columns_21))
    
    algorithm_select_columns = [
        'environment_tmp',
        'generator_speed',
        'int_tmp',
        'pitch1_angle',
        'pitch1_moto_tmp',
        'pitch2_angle',
        'pitch2_moto_tmp',
        'pitch3_angle',
        'pitch3_moto_tmp',
        'pitch_angle_mean',
        'pitch_angle_sd',
        'pitch_moto_tmp_mean',
        'power',
        'r_square',
        'r_wind_speed_to_generator_speed',
        'r_wind_speed_to_power',
        'tmp_diff',
        'wind_speed_square'
    ]   # NOTE(review): original comment said "added yaw_position", but this list
        # contains pitch_angle_sd and no yaw_position — verify intent with author.
    # Same set but with yaw_position instead of pitch_angle_sd.
    algorithm_select_columns2 = [
        'environment_tmp',
        'generator_speed',
        'int_tmp',
        'pitch1_angle',
        'pitch1_moto_tmp',
        'pitch2_angle',
        'pitch2_moto_tmp',
        'pitch3_angle',
        'pitch3_moto_tmp',
        'pitch_angle_mean',
        'pitch_moto_tmp_mean',
        'power',
        'r_square',
        'r_wind_speed_to_generator_speed',
        'r_wind_speed_to_power',
        'tmp_diff',
        'wind_speed_square',
        'yaw_position'
    ]
    
    # Features selected from physical/mechanism knowledge.
    mechanism_select_columns = [
        'environment_tmp',
        'int_tmp',
        'pitch_speed_mean',
        'pitch_speed_sd',
        'power',
        'tmp_diff',
        'wind_direction',
        'wind_direction_mean',
        'wind_speed',
        'wind_speed_cube',
        'wind_speed_square',
        'yaw_position',
        'yaw_speed'
    ]
    
    # Raw sensor columns as recorded, before any feature engineering.
    original_columns = [
        'acc_x',
        'acc_y',
        'environment_tmp',
        'generator_speed',
        'int_tmp',
        'pitch1_angle',
        'pitch1_moto_tmp',
        'pitch1_ng5_DC',
        'pitch1_ng5_tmp',
        'pitch1_speed',
        'pitch2_angle',
        'pitch2_moto_tmp',
        'pitch2_ng5_DC',
        'pitch2_ng5_tmp',
        'pitch2_speed',
        'pitch3_angle',
        'pitch3_moto_tmp',
        'pitch3_ng5_DC',
        'pitch3_ng5_tmp',
        'pitch3_speed',
        'power',
        'wind_direction',
        'wind_direction_mean',
        'wind_speed',
        'yaw_position',
        'yaw_speed'
    ]
    
    # Union of algorithm- and mechanism-selected feature sets.
    union_select_columns = sorted(list(set(algorithm_select_columns + mechanism_select_columns)))
    
    # Every column present after feature engineering.
    all_columns = [
        'acc_x',
        'acc_y',
        'cp',
        'ct',
        'environment_tmp',
        'generator_speed',
        'int_tmp',
        'lambda',
        'pitch1_angle',
        'pitch1_moto_tmp',
        'pitch1_ng5_DC',
        'pitch1_ng5_tmp',
        'pitch1_speed',
        'pitch2_angle',
        'pitch2_moto_tmp',
        'pitch2_ng5_DC',
        'pitch2_ng5_tmp',
        'pitch2_speed',
        'pitch3_angle',
        'pitch3_moto_tmp',
        'pitch3_ng5_DC',
        'pitch3_ng5_tmp',
        'pitch3_speed',
        'pitch_angle_mean',
        'pitch_angle_sd',
        'pitch_moto_tmp_mean',
        'pitch_moto_tmp_sd',
        'pitch_speed_mean',
        'pitch_speed_sd',
        'power',
        'r_square',
        'r_wind_speed_to_generator_speed',
        'r_wind_speed_to_power',
        'tmp_diff',
        'torque',
        'wind_direction',
        'wind_direction_mean',
        'wind_speed',
        'wind_speed_cube',
        'wind_speed_square',
        'yaw_position',
        'yaw_speed'
    ]


class MyDataloader:
    """Static factories that build train/val/test DataLoaders from turbine datasets.

    Every method returns three ``(loader, size)`` pairs in the order
    (train, val, test); unused slots are ``(None, None)``.
    """

    @staticmethod
    def get_dataloader_one(dataset_cls, config, num=15, *args, **kwargs):
        """Build train/val loaders from a single turbine's dataset.

        Args:
            dataset_cls: dataset class exposing ``get_data_labels(config, num, ...)``.
            config: dict containing at least 'batch_size'.
            num: turbine number; only 15 and 21 exist in this project.
            **kwargs: forwarded to ``get_data_labels``; 'test_size' (default 0.2)
                sets the validation fraction, and 0 means "use everything as test".

        Raises:
            ValueError: if ``num`` is not 15 or 21.
        """
        if num not in (15, 21):
            # Was `assert False`; asserts vanish under `python -O`, so raise instead.
            raise ValueError(f'num must be 15 or 21, got {num}')

        logger = get_logger()
        logger.info(f'get data one from {dataset_cls}')

        data_list, label_list = dataset_cls.get_data_labels(config, num, *args, **kwargs)
        test_size = kwargs.get('test_size', 0.2)
        if test_size == 0:
            # No split requested: the whole dataset becomes the test set.
            info = f'len(data_list) = {len(data_list)}, len(test_data) = {len(data_list)}'
            logger.info(info)
            test_loader = DataLoader(SimpleDataset(data_list, label_list), batch_size=config['batch_size'],
                                     shuffle=True)
            return (None, None), (None, None), (test_loader, len(data_list))

        train_data, val_data, train_labels, val_labels = train_test_split(
            data_list, label_list, test_size=test_size, random_state=random_state, stratify=label_list)

        train_loader = DataLoader(SimpleDataset(train_data, train_labels), batch_size=config['batch_size'],
                                  shuffle=True)
        val_loader = DataLoader(SimpleDataset(val_data, val_labels), batch_size=config['batch_size'], shuffle=True)

        info = f'len(data_list) = {len(train_data) + len(val_data)}, len(train_data) = {len(train_data)}, len(val_data) = {len(val_data)}'

        logger.info(info)

        return ((train_loader, len(train_data)),
                (val_loader, len(val_data)),
                (None, None))

    @staticmethod
    def get_dataloader_union(dataset_cls, config, *args, **kwargs):
        """Draw train/val/test proportionally from both turbine 15 and 21.

        Overall split is train:val:test = 8:1:1, stratified by label on each
        turbine separately before the pools are merged.
        """
        logger = get_logger()
        logger.info(f'get data union from {dataset_cls}')
        # Turbine 15 data.
        data_list_15, label_list_15 = dataset_cls.get_data_labels(config, 15, *args, **kwargs)

        train_data_15, val_data_15, train_labels_15, val_labels_15 = train_test_split(
            data_list_15, label_list_15, test_size=0.2, random_state=random_state, stratify=label_list_15)

        # Turbine 21 data.
        data_list_21, label_list_21 = dataset_cls.get_data_labels(config, 21, *args, **kwargs)

        train_data_21, val_data_21, train_labels_21, val_labels_21 = train_test_split(
            data_list_21, label_list_21, test_size=0.2, random_state=random_state, stratify=label_list_21
        )

        # Merge the training portions of both turbines.
        train_data = train_data_15 + train_data_21
        train_labels = train_labels_15 + train_labels_21
        train_loader = DataLoader(SimpleDataset(train_data, train_labels), batch_size=config['batch_size'],
                                  shuffle=True)

        # Merge the held-out portions, then split them 50/50 into val and test.
        val_test_data = val_data_15 + val_data_21
        val_test_labels = val_labels_15 + val_labels_21

        val_data, test_data, val_labels, test_labels = train_test_split(
            val_test_data, val_test_labels, test_size=0.5, random_state=random_state, stratify=val_test_labels
        )
        val_loader = DataLoader(SimpleDataset(val_data, val_labels), batch_size=config['batch_size'], shuffle=True)
        test_loader = DataLoader(SimpleDataset(test_data, test_labels), batch_size=config['batch_size'], shuffle=True)

        info = f'len(data_list) = {len(train_data) + len(val_data) + len(test_data)}, len(train_data) = {len(train_data)}, len(val_data) = {len(val_data)}, len(test_data) = {len(test_data)}'
        logger.info(info)

        return (train_loader, len(train_data)), (val_loader, len(val_data)), (test_loader, len(test_data))

    @staticmethod
    def get_dataloader_ratio(dataset_cls, config, train_size_15=0.8, train_size_21=0.2, test_size_21=0.5, *args,
                             **kwargs):
        """Train on most of turbine 15 plus a little of 21; test on the rest of 21.

        Args:
            train_size_15: fraction of turbine-15 data used for training
                (the remainder goes to validation).
            train_size_21: fraction of turbine-21 data added to training.
            test_size_21: fraction of the remaining turbine-21 data used as
                the test set (the rest joins validation).
        """
        logger = get_logger()
        logger.info(f'get data ratio from {dataset_cls}')
        # Turbine 15 data: train vs. validation.
        data_list_15, label_list_15 = dataset_cls.get_data_labels(config, 15, *args, **kwargs)
        train_data_15, val_data_15, train_labels_15, val_labels_15 = train_test_split(
            data_list_15, label_list_15, train_size=train_size_15, random_state=random_state, stratify=label_list_15)

        # Turbine 21 data: small training share vs. held-out pool.
        data_list_21, label_list_21 = dataset_cls.get_data_labels(config, 21, *args, **kwargs)
        train_data_21, val_test_data_21, train_labels_21, val_test_labels_21 = train_test_split(
            data_list_21, label_list_21, train_size=train_size_21, random_state=random_state, stratify=label_list_21
        )
        # Merge training data from both turbines.
        train_data = train_data_15 + train_data_21
        train_labels = train_labels_15 + train_labels_21
        train_loader = DataLoader(SimpleDataset(train_data, train_labels), batch_size=config['batch_size'],
                                  shuffle=True)

        # Split the remaining turbine-21 data into validation and test parts.
        val_data_21, test_data_21, val_labels_21, test_labels_21 = train_test_split(
            val_test_data_21, val_test_labels_21, test_size=test_size_21, random_state=random_state, stratify=val_test_labels_21
        )

        # Validation set: turbine-15 remainder plus turbine-21 validation share.
        val_data = val_data_15 + val_data_21
        val_labels = val_labels_15 + val_labels_21
        val_loader = DataLoader(SimpleDataset(val_data, val_labels), batch_size=config['batch_size'], shuffle=True)

        # Test set: turbine-21 only.
        test_loader = DataLoader(SimpleDataset(test_data_21, test_labels_21), batch_size=config['batch_size'],
                                 shuffle=True)
        test_data = test_data_21
        info = f'len(data_list) = {len(train_data) + len(val_data) + len(test_data)}, len(train_data) = {len(train_data)}, len(val_data) = {len(val_data)}, len(test_data) = {len(test_data)}'
        logger.info(info)

        return (train_loader, len(train_data)), (val_loader, len(val_data)), (test_loader, len(test_data))


class SimpleDataset(Dataset):
    """Minimal map-style Dataset pairing a sample list with a label list."""

    def __init__(self, data, labels):
        # Both sequences are stored as given; indexing pairs them positionally.
        self.data = data
        self.labels = labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data[idx]
        target = self.labels[idx]
        return sample, target


class WindIcingDatasetV1(Dataset):
    """Sliding-window sequence dataset for wind-turbine icing detection.

    Rows with ``label == -1`` (invalid) are dropped.  Data is split into time
    periods by the 'new_group' column; within each period a sliding window
    (window_size rows, stride step_size) cuts fixed-length segments, and runs
    of seq_len consecutive segments form one sample of shape
    (seq_len, window_size, feature_dim).  Short tails are zero padded and a
    mask_index records where the padding starts.  Each sample carries its
    period's label.
    """

    def __init__(self,
                 seq_len: int,
                 window_size: int,
                 step_size: int,
                 ):
        # Number of windows grouped into one sample sequence.
        self.seq_len = seq_len
        # Rows per sliding window.
        self.window_size = window_size
        # Stride of the sliding window within a period.
        self.step_size = step_size

        # Non-feature columns removed before tensor conversion.
        self.default_drop_columns = ['time', 'group', 'label', 'new_group']

    def process(self,
                df: Optional[DataFrame],
                drop_columns: Optional[List[str]] = None):
        """Convert a labelled dataframe into (sequence, mask_index) samples.

        Args:
            df: dataframe with at least 'label' and 'new_group' columns.
            drop_columns: columns removed before building tensors; defaults to
                ``self.default_drop_columns`` when omitted.
        """
        print("begin processing data")
        if drop_columns is None:
            # Bug fix: the default column list existed but was never applied,
            # so process(df) without drop_columns crashed inside df.drop(None).
            drop_columns = self.default_drop_columns

        # Discard invalid rows.
        df = df[df['label'] != -1]

        self.data = []
        self.labels = []

        # Split the data into time periods by 'new_group'.
        new_grouped = df.groupby('new_group')
        for group_name, group_df in new_grouped:
            # Every row of a period shares one label; head(1).sum() reads it as a scalar.
            label = group_df.head(1)['label'].sum()

            group_df = group_df.drop(drop_columns, axis=1)
            # Cut the period into sliding windows.
            segments = []
            for start in range(0, len(group_df) - self.window_size + 1, self.step_size):
                end = start + self.window_size
                segment = group_df.iloc[start:end]
                segments.append(torch.tensor(segment.values.tolist()))

            # Group windows into seq_len-long sequences (zero padded tails).
            segments = self._split_fill(segments, self.seq_len)
            self.data += [(seq, mask_index) for seq, mask_index in segments]
            self.labels += [label for _ in range(len(segments))]

        print("finish processing data")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.labels[idx]

    def save(self, num):
        """Serialize the processed dataset (plus its parameters) for turbine `num`."""
        save_path = self.get_dataset_filename(num)
        check_and_create_path(save_path)
        torch.save({
            'seq_len': self.seq_len,
            'window_size': self.window_size,
            'data': self.data,
            'labels': self.labels,
        }, save_path)

    def load(self, num):
        """Load a previously saved dataset and verify it matches our parameters."""
        state = torch.load(self.get_dataset_filename(num))
        self.data = state['data']
        self.labels = state['labels']
        assert self.seq_len == state['seq_len']
        assert self.window_size == state['window_size']

    def get_dataset_filename(self, num):
        # NOTE: saved with torch.save despite the .pkl extension.
        return os.path.join(dataset_dir, f'{num}_{self.__class__.__name__}.pkl')

    @classmethod
    def get_data_labels(cls, config, num=15, *args, **kwargs):
        """Load the saved dataset for turbine `num` and return (data, labels)."""
        dataset = cls(config['seq_len'], config['window_size'], config['step_size'])
        dataset.load(num)
        return dataset.data, dataset.labels

    @classmethod
    def main(cls, config):
        """Process a labelled CSV into a dataset, save it, and smoke-check a batch."""
        seq_len = config['seq_len']
        window_size = config['window_size']
        step_size = config['step_size']

        data_num = 21
        data_file = f'../process/data/{data_num}_label_newgroup.csv'
        print('processing data: ' + data_file)
        df = pd.read_csv(data_file)
        data = cls(seq_len, window_size, step_size)
        start_time = time.time()
        data.process(df, drop_columns=['time', 'group', 'label', 'new_group'])
        end_time = time.time() - start_time
        print(len(data), 'data processed in {:.2f}s'.format(end_time))
        data.save(data_num)

        data_loader = DataLoader(data, batch_size=5, shuffle=False)
        for (data, mask_index), label in data_loader:
            print(data.shape, label.shape)
            break

    def _split_fill(self, input_list: List, split_size: int) -> List:
        """Group windows into sequences of split_size, zero padding short tails.

        Returns a list of (stacked_sequence, mask_index) tuples.  mask_index is
        0 for a full sequence; for a padded tail it is the index of the first
        padded position, so index mask_index - 1 is the last real time step.
        Tails shorter than split_size / 2 are discarded entirely.
        """
        output_list = []
        l = len(input_list)
        for i in range(0, l, split_size):
            # Mask starts at 0 so a full sequence needs no special casing.
            mask_index = 0
            group = input_list[i:i + split_size]
            # Ignore tails with fewer than split_size / 2 elements.
            if len(group) < split_size / 2:
                continue

            # Zero-pad shorter tails up to the full sequence length.
            if len(group) < split_size:
                # Record where the padding (mask) begins.
                mask_index = len(group)
                padding = [torch.zeros_like(group[0])] * (split_size - len(group))
                group += padding
            output_list.append((torch.stack(group), torch.tensor(mask_index, dtype=torch.int)))

        return output_list


class WindIcingDatasetV2(WindIcingDatasetV1):
    """V1 plus feature-column bookkeeping.

    ``process`` keeps every feature column and records a column-name -> index
    map, so ``get_data_labels`` can select an arbitrary feature subset at
    load time instead of at preprocessing time.
    """

    def __init__(self,
                 seq_len: int,
                 window_size: int,
                 step_size: int,
                 ):
        super().__init__(seq_len, window_size, step_size)
        # Mapping column name -> feature index; filled on the first group processed.
        self.columns_index = None

    def process(self, df: Optional[DataFrame], drop_columns: Optional[List[str]] = None):
        """Build samples keeping all feature columns; selection happens at load time.

        ``drop_columns`` is accepted for interface compatibility but ignored:
        only ``self.default_drop_columns`` (bookkeeping columns) are removed.
        """
        print("begin processing data")

        # Discard invalid rows.
        df = df[df['label'] != -1]

        self.data = []
        self.labels = []

        # Split the data into time periods by 'new_group'.
        new_grouped = df.groupby('new_group')
        for group_name, group_df in new_grouped:
            # Every row of a period shares one label; head(1).sum() reads it as a scalar.
            label = group_df.head(1)['label'].sum()

            group_df = group_df.drop(self.default_drop_columns, axis=1)

            # Remember the column layout once; it is identical for every group.
            if self.columns_index is None:
                self.columns_index = {col: i for i, col in enumerate(group_df.columns)}

            # Cut the period into sliding windows.
            segments = []
            for start in range(0, len(group_df) - self.window_size + 1, self.step_size):
                end = start + self.window_size
                segment = group_df.iloc[start:end]
                segments.append(torch.tensor(segment.values.tolist()))

            # Group windows into seq_len-long sequences (zero padded tails).
            segments = self._split_fill(segments, self.seq_len)
            self.data += [(seq, mask_index) for seq, mask_index in segments]
            self.labels += [label for _ in range(len(segments))]

        print("finish processing data")

    def save(self, num):
        """Serialize the dataset including the column-index map."""
        save_path = self.get_dataset_filename(num)
        check_and_create_path(save_path)

        assert self.columns_index is not None

        torch.save({
            'seq_len': self.seq_len,
            'window_size': self.window_size,
            'columns_index': self.columns_index,
            'data': self.data,
            'labels': self.labels,
        }, save_path)

    def load(self, num):
        """Load a saved dataset and verify it matches our parameters."""
        state = torch.load(self.get_dataset_filename(num))
        self.data = state['data']
        self.labels = state['labels']
        self.columns_index = state['columns_index']
        assert self.columns_index is not None
        assert self.seq_len == state['seq_len']
        assert self.window_size == state['window_size']

    @classmethod
    def get_data_labels(cls, config, num=15, *args, **kwargs):
        """Load turbine `num` and return (data, labels) restricted to select_columns.

        kwargs:
            select_columns: required non-empty list of feature names to keep.

        Raises:
            ValueError: if select_columns is missing or empty.
        """
        dataset = cls(config['seq_len'], config['window_size'], config['step_size'])
        dataset.load(num)

        select_columns = kwargs.get('select_columns', list())
        # data layout: [((seq_len, window_size, feature_dim), mask_index)]
        data = dataset.data
        labels = dataset.labels

        if not select_columns:
            # Was a bare `assert`; asserts vanish under `python -O`.
            raise ValueError('select_columns must be a non-empty list')
        logger = get_logger()
        logger.info(f'select columns: {select_columns}')
        # Step 1: resolve each requested column name to its feature index.
        selected_indices = [dataset.columns_index[col] for col in select_columns]
        # Step 2: slice those feature columns out of every sequence tensor.
        selected_data = []
        for seq, mask_index in data:
            select_features = seq[:, :, selected_indices]
            selected_data.append((select_features, mask_index))

        data = selected_data

        return data, labels

    @classmethod
    def main(cls, config):
        """Process turbines 15 and 21 from CSV, save both, and smoke-check a batch."""
        data_file_15 = '../process/data/15_05.csv'
        data_file_21 = '../process/data/21_05.csv'

        seq_len = config["seq_len"]
        window_size = config["window_size"]
        step_size = config["step_size"]

        # Turbine 15.
        print('processing data: ' + data_file_15)
        df = pd.read_csv(data_file_15)
        data = cls(seq_len, window_size, step_size)
        start_time = time.time()
        data.process(df)
        end_time = time.time() - start_time
        print(f'data processed in {end_time}s for {data_file_15} with length {len(data)}')
        data.save(15)

        # Turbine 21.
        print('processing data: ' + data_file_21)
        df = pd.read_csv(data_file_21)
        data = cls(seq_len, window_size, step_size)
        start_time = time.time()
        data.process(df)
        end_time = time.time() - start_time
        print(f'data processed in {end_time}s for {data_file_21} with length {len(data)}')
        data.save(21)

        data_loader = DataLoader(data, batch_size=5, shuffle=False)
        for (data, mask_index), label in data_loader:
            print(data.shape, mask_index.shape, label.shape)
            break


class WindIcingDatasetV3(WindIcingDatasetV2):
    """V2 plus the mean power of every sliding window.

    Uses ``window_step_range`` (random start offset) for both the window and
    sequence cuts, so every window and every sequence is complete — no zero
    padding and no mask index.  Each sample is a (sequence, powers) pair:
    sequence has shape (seq_len, window_size, feature_dim) and powers is a
    float32 tensor of per-window mean power, shape (seq_len,).
    """

    def __init__(self, seq_len: int, seq_step_size: int, window_size: int, step_size: int):
        super().__init__(seq_len, window_size, step_size)
        # Stride used when cutting sequences of seq_len windows.
        self.seq_step_size = seq_step_size

    def process(self, df: Optional[DataFrame], drop_columns: Optional[List[str]] = None):
        """Build (sequence, mean-power) samples from a labelled dataframe.

        ``drop_columns`` is accepted for interface compatibility but ignored:
        only ``self.default_drop_columns`` are removed.
        """
        print("begin processing data")

        # Discard invalid rows.
        df = df[df['label'] != -1]
        self.data = []
        self.labels = []
        label_counter = Counter()

        # Split the data into time periods by 'new_group'.
        new_grouped = df.groupby('new_group')
        for group_name, group_df in new_grouped:
            # Every row of a period shares one label; head(1).sum() reads it as a scalar.
            label = group_df.head(1)['label'].sum()

            group_df = group_df.drop(self.default_drop_columns, axis=1)

            # Remember the column layout once; it is identical for every group.
            if self.columns_index is None:
                self.columns_index = {col: i for i, col in enumerate(group_df.columns)}

            # Sliding windows over the period, recording each window's mean power.
            segments = []
            segment_powers = []
            for start in window_step_range(len(group_df), self.window_size, self.step_size):
                end = start + self.window_size

                segment = group_df.iloc[start:end]
                segment_power = segment['power'].mean()

                segments.append(segment.values.tolist())
                segment_powers.append(segment_power)

            assert len(segments) == len(segment_powers)

            # Cut the window stream into sequences, again via a sliding window.
            for start in window_step_range(len(segments), self.seq_len, self.seq_step_size):
                end = start + self.seq_len
                self.data.append(
                    (torch.tensor(segments[start:end]), torch.tensor(segment_powers[start:end], dtype=torch.float32)))
                self.labels.append(torch.tensor(label, dtype=torch.long))
                label_counter.update([label])

        print(label_counter)

    @classmethod
    def get_data_labels(cls, config, num=15, *args, **kwargs):
        """Load turbine `num` and return (data, labels) restricted to select_columns.

        kwargs:
            select_columns: required non-empty list of feature names.
            filter: if True, drop power-derived columns from select_columns.

        Raises:
            ValueError: if select_columns is missing or empty.
        """
        select_columns = kwargs.get('select_columns', list())
        # Local name avoids shadowing the builtin `filter`; the kwarg key is unchanged.
        use_filter = kwargs.get('filter', False)
        if not select_columns:
            # Was a bare `assert`; asserts vanish under `python -O`.
            raise ValueError('select_columns must be a non-empty list')
        logger = get_logger()

        # Features derived directly from power; optionally excluded via `filter`.
        no_power_list = ['power', 'r_square', 'r_wind_speed_to_power', 'torque', 'cp', 'ct']
        filter_columns = []
        for item in select_columns:
            if item in no_power_list:
                # logger.warn is deprecated; warning() is the supported spelling.
                logger.warning(f'{item} in {no_power_list}')
            else:
                filter_columns.append(item)
        if use_filter:
            select_columns = filter_columns

        logger.info(f'filter: {use_filter}, column_num: {len(select_columns)}, select columns: {select_columns}')

        dataset = cls(config['seq_len'], config["seq_step_size"], config['window_size'], config['step_size'])
        dataset.load(num)

        # data layout: [((seq_len, window_size, feature_dim), powers)]
        data = dataset.data
        labels = dataset.labels

        if len(select_columns) > 0:
            # Step 1: resolve each requested column name to its feature index.
            selected_indices = [dataset.columns_index[col] for col in select_columns]
            # Step 2: slice those feature columns out of every sequence tensor.
            selected_data = []
            for seq, x in data:
                select_features = seq[:, :, selected_indices]
                selected_data.append((select_features, x))

            data = selected_data

        return data, labels

    @classmethod
    def main(cls, config):
        """Process turbines 15 and 21 from CSV and save both datasets."""
        seq_len = config["seq_len"]
        seq_step_size = config["seq_step_size"]
        window_size = config["window_size"]
        step_size = config["step_size"]

        data_file_15 = '../process/data/15_05.csv'
        data_file_21 = '../process/data/21_05.csv'

        # Turbine 15.
        print('processing data: ' + data_file_15)
        df = pd.read_csv(data_file_15)
        data = cls(seq_len, seq_step_size, window_size, step_size)
        start_time = time.time()
        data.process(df)
        end_time = time.time() - start_time
        print(f'data processed in {end_time}s for {data_file_15} with length {len(data)}')
        data.save(15)

        # Turbine 21.
        print('processing data: ' + data_file_21)
        df = pd.read_csv(data_file_21)
        data = cls(seq_len, seq_step_size, window_size, step_size)
        start_time = time.time()
        data.process(df)
        end_time = time.time() - start_time
        print(f'data processed in {end_time}s for {data_file_21} with length {len(data)}')
        data.save(21)

    def save(self, num):
        """Serialize the dataset including all four window/sequence parameters."""
        save_path = self.get_dataset_filename(num)
        check_and_create_path(save_path)

        assert self.columns_index is not None

        torch.save({
            'seq_len': self.seq_len,
            'seq_step_size': self.seq_step_size,
            'window_size': self.window_size,
            'step_size': self.step_size,
            'columns_index': self.columns_index,
            'data': self.data,
            'labels': self.labels,
        }, save_path)

    def load(self, num):
        """Load a saved dataset and verify all parameters match ours."""
        state = torch.load(self.get_dataset_filename(num))
        self.data = state['data']
        self.labels = state['labels']
        self.columns_index = state['columns_index']
        assert self.columns_index is not None
        assert self.seq_len == state['seq_len'], f"self.seq_len: {self.seq_len} != {state['seq_len']}"
        assert self.seq_step_size == state['seq_step_size'], f"self.seq_step_size: {self.seq_step_size} != {state['seq_step_size']}"
        assert self.window_size == state['window_size'], f"self.window_size: {self.window_size} != {state['window_size']}"
        assert self.step_size == state['step_size'], f"self.step_size: {self.step_size} != {state['step_size']}"

class WindIcingDatasetV4(WindIcingDatasetV3):
    """V3 plus the raw power-related feature columns of every window.

    Each sample is (sequence, powers, power_features): the power-derived
    columns in ``no_power_list`` are stored separately so they remain
    available even when filtered out of the main feature tensor.
    """

    def __init__(self, seq_len: int, seq_step_size: int, window_size: int, step_size: int):
        super().__init__(seq_len, seq_step_size, window_size, step_size)
        # Power-derived columns recorded separately per window during process().
        self.no_power_list = ['power', 'r_square', 'r_wind_speed_to_power', 'torque', 'cp', 'ct']

    def process(self, df: Optional[DataFrame], drop_columns: Optional[List[str]] = None):
        """Build (sequence, mean-power, power-feature) samples.

        ``drop_columns`` is accepted for interface compatibility but ignored:
        only ``self.default_drop_columns`` are removed.
        """
        print("begin processing data")

        # Discard invalid rows.
        df = df[df['label'] != -1]
        self.data = []
        self.labels = []
        label_counter = Counter()

        # Split the data into time periods by 'new_group'.
        new_grouped = df.groupby('new_group')
        for group_name, group_df in new_grouped:
            # Every row of a period shares one label; head(1).sum() reads it as a scalar.
            label = group_df.head(1)['label'].sum()

            group_df = group_df.drop(self.default_drop_columns, axis=1)

            # Remember the column layout once; it is identical for every group.
            if self.columns_index is None:
                self.columns_index = {col: i for i, col in enumerate(group_df.columns)}

            # Sliding windows over the period: record the window itself, its mean
            # power, and its raw power-related feature columns.
            segments = []
            segment_powers = []
            segment_power_features = []
            for start in window_step_range(len(group_df), self.window_size, self.step_size):
                end = start + self.window_size

                segment = group_df.iloc[start:end]
                segment_power = segment['power'].mean()

                segments.append(segment.values.tolist())
                segment_powers.append(segment_power)
                segment_power_features.append(group_df[self.no_power_list].iloc[start:end].values.tolist())

            assert len(segments) == len(segment_powers)

            # Cut the window stream into sequences, again via a sliding window.
            for start in window_step_range(len(segments), self.seq_len, self.seq_step_size):
                end = start + self.seq_len
                self.data.append(
                    (torch.tensor(segments[start:end]), torch.tensor(segment_powers[start:end], dtype=torch.float32), torch.tensor(segment_power_features[start:end])))
                self.labels.append(torch.tensor(label, dtype=torch.long))
                label_counter.update([label])

        print(label_counter)

    @classmethod
    def get_data_labels(cls, config, num=15, *args, **kwargs):
        """Load turbine `num`; return (data, labels) with features selected.

        Samples come back as (selected_features, (powers, power_features)).

        kwargs:
            select_columns: required non-empty list of feature names.
            filter: if True, drop power-derived columns from select_columns.

        Raises:
            ValueError: if select_columns is missing or empty.
        """
        select_columns = kwargs.get('select_columns', list())
        # Local name avoids shadowing the builtin `filter`; the kwarg key is unchanged.
        use_filter = kwargs.get('filter', False)
        if not select_columns:
            # Was a bare `assert`; asserts vanish under `python -O`.
            raise ValueError('select_columns must be a non-empty list')
        logger = get_logger()

        no_power_list = ['power', 'r_square', 'r_wind_speed_to_power', 'torque', 'cp', 'ct']
        filter_columns = []
        for item in select_columns:
            if item in no_power_list:
                # logger.warn is deprecated; warning() is the supported spelling.
                logger.warning(f'{item} in {no_power_list}')
            else:
                filter_columns.append(item)
        if use_filter:
            select_columns = filter_columns

        logger.info(f'filter: {use_filter}, column_num: {len(select_columns)}, select columns: {select_columns}')

        dataset = cls(config['seq_len'], config["seq_step_size"], config['window_size'], config['step_size'])
        dataset.load(num)

        # data layout: [((seq_len, window_size, feature_dim), powers, power_features)]
        data = dataset.data
        labels = dataset.labels

        if len(select_columns) > 0:
            # Step 1: resolve each requested column name to its feature index.
            selected_indices = [dataset.columns_index[col] for col in select_columns]
            # Step 2: slice those feature columns out of every sequence tensor.
            selected_data = []
            for seq, x1, x2 in data:
                select_features = seq[:, :, selected_indices]
                selected_data.append((select_features, (x1, x2)))

            data = selected_data

        return data, labels
def window_step_range(total_length: int, window_size: int, step_size: int):
    """Return a range of window start offsets, randomly shifted when needed.

    When (total_length - window_size) divides evenly by step_size the plain
    range(0, total_length - window_size + 1, step_size) tiles the data exactly.
    Otherwise the same tail elements would always be discarded, so the grid of
    start positions is shifted by a random offset in [0, leftover]; the number
    of complete windows is identical for every offset in that interval.
    """
    last_valid_start = total_length - window_size
    if last_valid_start % step_size == 0:
        # Windows tile the data exactly; no offset needed.
        return range(0, last_valid_start + 1, step_size)

    # Data points left over after the final complete window at offset 0.
    leftover = last_valid_start % step_size

    # The branch above guarantees a non-zero remainder here.
    assert leftover > 0

    # Random shift of the whole start grid; every shift yields the same count.
    offset = random.randint(0, leftover)

    return range(offset, last_valid_start + 1, step_size)


if __name__ == '__main__':
    # Build and persist the V3 datasets, then smoke-check one batch from
    # the turbine-21 dataset.
    config = {
        'seq_len': 10,
        'seq_step_size': 10,
        'window_size': 64,
        'step_size': 1,
    }

    dataset_class = WindIcingDatasetV3
    dataset_class.main(config)

    dataset = dataset_class(seq_len=config["seq_len"], seq_step_size=config["seq_step_size"],
                            window_size=config["window_size"], step_size=config["step_size"])
    dataset.load(21)

    data_loader = DataLoader(dataset, batch_size=5, shuffle=False)
    for (data, power), labels in data_loader:
        print(data.shape, power.shape, labels.shape)
        break
