import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import torch
from torch.utils.data import Dataset, DataLoader


class FaultDataset(Dataset):
    """Torch ``Dataset`` pairing feature rows with integer class labels.

    Args:
        features: 2-D array-like of shape (n_samples, n_features).
        labels: 1-D array-like of integer class labels, same length as
            ``features``.
    """

    def __init__(self, features, labels):
        # torch.as_tensor replaces the legacy FloatTensor/LongTensor
        # constructors: explicit dtype, and it avoids a copy when the
        # input is already a correctly-typed numpy array or tensor.
        self.features = torch.as_tensor(features, dtype=torch.float32)
        self.labels = torch.as_tensor(labels, dtype=torch.long)

    def __len__(self):
        # Number of samples (rows).
        return len(self.features)

    def __getitem__(self, idx):
        # Return one (feature_row, label) pair; conversion already done
        # in __init__, so this is a cheap index.
        return self.features[idx], self.labels[idx]


class DataProcessor:
    """Load, preprocess, and batch tabular fault data for model training.

    Holds a single ``StandardScaler`` that is fitted in
    :meth:`preprocess_data` and reused by :meth:`prepare_inference_data`,
    so training and inference share the same normalization.
    """

    def __init__(self):
        self.scaler = StandardScaler()

    def load_data(self, file_path):
        """Load a data file into a DataFrame.

        Files ending in ``.csv`` are read with ``read_csv``; everything
        else is assumed to be an Excel workbook.

        Args:
            file_path: Path to a ``.csv`` or Excel file.

        Returns:
            pandas.DataFrame with the file contents.

        Raises:
            Exception: If reading fails; the original error is chained.
        """
        try:
            if file_path.endswith('.csv'):
                df = pd.read_csv(file_path)
            else:
                df = pd.read_excel(file_path)
            return df
        except Exception as e:
            # Chain the cause so the real pandas/IO error and its
            # traceback survive for debugging.
            raise Exception(f"数据加载失败: {str(e)}") from e

    def preprocess_data(self, df, target_column, test_size=0.2,
                        batch_size=32, random_state=42):
        """Split, standardize, and batch a labeled DataFrame.

        Fits ``self.scaler`` on the full feature matrix, then produces
        shuffled train / unshuffled test ``DataLoader``s.

        Args:
            df: DataFrame containing features and the label column.
            target_column: Name of the label column in ``df``.
            test_size: Fraction of rows held out for testing.
            batch_size: Mini-batch size for both loaders.
            random_state: Seed for the train/test split (reproducibility).

        Returns:
            Tuple ``(train_loader, test_loader)``.
        """
        # Separate features from the label column.
        X = df.drop(columns=[target_column]).values
        y = df[target_column].values

        # Standardize features; the fitted scaler is kept for inference.
        # NOTE(review): fitting before the split leaks test statistics
        # into the scaler — acceptable for some workflows, but confirm.
        X = self.scaler.fit_transform(X)

        # Reproducible train/test split.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state
        )

        # Wrap in torch datasets and loaders; only training data is
        # shuffled so evaluation order stays deterministic.
        train_dataset = FaultDataset(X_train, y_train)
        test_dataset = FaultDataset(X_test, y_test)

        train_loader = DataLoader(train_dataset, batch_size=batch_size,
                                  shuffle=True)
        test_loader = DataLoader(test_dataset, batch_size=batch_size,
                                 shuffle=False)

        return train_loader, test_loader

    def prepare_inference_data(self, data):
        """Standardize raw samples for inference.

        Requires that :meth:`preprocess_data` has already fitted the
        scaler; otherwise sklearn raises ``NotFittedError``.

        Args:
            data: A single sample (1-D) or batch (2-D) as an array-like.

        Returns:
            torch.FloatTensor of shape (n_samples, n_features).
        """
        # np.asarray accepts lists/tuples and is a no-op for ndarrays.
        data = np.asarray(data)
        # Promote a single sample to a 1-row batch.
        if data.ndim == 1:
            data = data.reshape(1, -1)

        # Apply the training-time normalization.
        data = self.scaler.transform(data)
        return torch.FloatTensor(data)
