import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl


class MyDataset(Dataset):
    """Regression dataset backed by a single Excel sheet.

    Column 3 of the sheet is the target; columns 4..17 are the features
    (column meaning assumed from the hard-coded indices — confirm against
    the actual spreadsheet). Both target and features are z-score
    standardized with statistics computed over the whole file.
    """

    def __init__(self, xlsx_file):
        """Load the sheet and precompute standardization statistics.

        Args:
            xlsx_file: path to an .xlsx file readable by ``pd.read_excel``.
        """
        self.data = pd.read_excel(xlsx_file)
        # Fixed typo: the attribute was previously misspelled 'lables'.
        self.labels = self.data.iloc[:, 3]
        self.lables = self.labels  # deprecated alias, kept for backward compatibility
        self.features = self.data.iloc[:, 4:18]

        # Precompute the normalization statistics as tensors ONCE here;
        # the original rebuilt these tensors on every __getitem__ call.
        # pandas .std() is the sample std (ddof=1), preserved as-is.
        self.labels_mean = torch.tensor(self.labels.mean(), dtype=torch.float32)
        self.labels_std = torch.tensor(self.labels.std(), dtype=torch.float32)
        self.features_mean = torch.tensor(self.features.mean().values, dtype=torch.float32)
        self.features_std = torch.tensor(self.features.std().values, dtype=torch.float32)

    def __len__(self):
        """Return the number of rows in the sheet."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return one standardized ``(features, label)`` pair of float32 tensors.

        Raises:
            IndexError: if ``idx`` is past the end of the data
                (negative indices fall through to pandas ``iloc``,
                matching the original behavior).
        """
        if idx >= len(self.data):
            raise IndexError("Index out of range")
        label = torch.tensor(self.labels.iloc[idx], dtype=torch.float32)
        features = torch.tensor(self.features.iloc[idx].values, dtype=torch.float32)

        # Apply z-score standardization with the precomputed statistics.
        label = (label - self.labels_mean) / self.labels_std
        features = (features - self.features_mean) / self.features_std

        return features, label
    

class MyData(pl.LightningDataModule):
    """Lightning data module wrapping :class:`MyDataset` with a 70/15/15 split."""

    def __init__(self, data_dir, batch_size, num_workers, *random_state):
        """Store configuration; no data is touched here.

        Args:
            data_dir: path to the .xlsx file handed to ``MyDataset``.
            batch_size: batch size for all three dataloaders.
            num_workers: worker process count for all three dataloaders.
            *random_state: optional single int seed. Bug fix: the original
                accepted this argument but silently ignored it, so the
                train/val/test split was never reproducible.
        """
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.random_state = random_state[0] if random_state else None

    def prepare_data(self):
        # Nothing to download; the data ships as a local Excel file.
        pass

    def setup(self, stage=None):
        """Load the dataset and split it into train/val/test subsets."""
        dataset = MyDataset(xlsx_file=self.data_dir)

        # 70% train, 15% val; test takes the remainder so the three
        # integer sizes always sum exactly to len(dataset).
        train_size = int(0.7 * len(dataset))
        val_size = int(0.15 * len(dataset))
        test_size = len(dataset) - train_size - val_size

        # Seed the split only when a random_state was supplied, so the
        # no-seed behavior is byte-identical to the original.
        split_kwargs = {}
        if self.random_state is not None:
            split_kwargs["generator"] = torch.Generator().manual_seed(self.random_state)

        self.train_dataset, self.val_dataset, self.test_dataset = torch.utils.data.random_split(
            dataset, [train_size, val_size, test_size], **split_kwargs
        )

    def _make_loader(self, dataset, shuffle):
        # Shared DataLoader construction for the three public loaders.
        # NOTE(review): drop_last=True on val/test silently discards the
        # final partial batch of evaluation data — confirm this is intended.
        return DataLoader(
            dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=shuffle,
            drop_last=True,
        )

    def train_dataloader(self):
        """Shuffled training loader."""
        return self._make_loader(self.train_dataset, shuffle=True)

    def val_dataloader(self):
        """Deterministic-order validation loader."""
        return self._make_loader(self.val_dataset, shuffle=False)

    def test_dataloader(self):
        """Deterministic-order test loader."""
        return self._make_loader(self.test_dataset, shuffle=False)