from torch.utils.data import DataLoader, Dataset
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import TomekLinks
import numpy as np
import pandas as pd
import torch

class NasaDataset(Dataset):
    """PyTorch Dataset for NASA software-defect CSV data (e.g. KC2.csv).

    The CSV is assumed to hold 21 numeric feature columns followed by a
    binary defect label in column index 21.  Rows are shuffled with a
    FIXED seed, split into train/test, standardized with a scaler fitted
    on the training rows only, and — for the training split only —
    rebalanced with SMOTE oversampling followed by Tomek-link cleaning.
    """

    def __init__(self, datafile: str = r"D:\云归处清风来\Desktop\工程研究与实习\softwareDP\data\KC2.csv",
                 isTrain: bool = True, test_size: float = 0.3):
        """Load, split, scale and (for training) resample the dataset.

        Args:
            datafile: Path to the CSV file (raw string: Windows path).
            isTrain: If True this instance holds the training split,
                otherwise the test split.
            test_size: Fraction of rows reserved for the test split.
        """
        pd_data = pd.read_csv(datafile)
        # Shuffle with a fixed seed so that a train instance and a test
        # instance see the SAME permutation; without the seed the two
        # constructor calls would reshuffle independently and the splits
        # would overlap (train/test contamination).
        pd_data = pd_data.sample(frac=1, random_state=42).reset_index(drop=True)

        count = pd_data.shape[0]
        train_len = int(count * (1 - test_size))

        features = pd_data.iloc[:, 0:21].to_numpy()
        labels = pd_data.iloc[:, 21].to_numpy()

        # Fit standardization statistics on the TRAINING rows only, then
        # apply them to whichever split this instance holds — fitting on
        # the test rows would leak test information into preprocessing.
        scaler = StandardScaler()
        scaler.fit(features[:train_len])

        if isTrain:
            X = scaler.transform(features[:train_len])
            y = labels[:train_len]
            # Rebalance ONLY the training split: SMOTE oversamples the
            # minority class to parity (sampling_strategy=1.0), then
            # TomekLinks removes borderline majority/minority pairs.
            # The test split must keep its natural class distribution.
            smote = SMOTE(sampling_strategy=1.0, random_state=42)
            X, y = smote.fit_resample(X, y)
            tl = TomekLinks(sampling_strategy='auto')
            X, y = tl.fit_resample(X, y)
        else:
            X = scaler.transform(features[train_len:])
            y = labels[train_len:]

        self.x = X
        # Store labels as a column vector so __getitem__ yields shape (1,).
        self.y = np.expand_dims(y, axis=1) if y.ndim == 1 else y

    def __len__(self) -> int:
        """Return the number of samples in this split."""
        return self.y.shape[0]

    def __getitem__(self, item):
        """Return (features, label) for index *item* as float32 tensors."""
        data = torch.from_numpy(self.x[item]).float()
        target = torch.from_numpy(self.y[item]).float()

        return data, target

if __name__ == '__main__':
    # Raw string: a plain string here contains invalid escape sequences
    # (e.g. "\D", "\K") that Python warns about and may reject in future.
    file_path = r"D:\云归处清风来\Desktop\工程研究与实习\softwareDP\data\KC2.csv"
    train_dataset = NasaDataset(datafile=file_path, isTrain=True)
    test_dataset = NasaDataset(datafile=file_path, isTrain=False)

    # Total sample count across both splits (training split size may
    # differ from the raw row count after resampling).
    print(len(train_dataset) + len(test_dataset))
