import os
import csv
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import numpy as np
class DATASET(Dataset):
    """Sliding-window dataset over bearing-vibration CSV files.

    Reads per-bearing feature CSVs (an 'x' and a 'y' channel file) plus the
    matching label CSVs from ``file_path/feature`` and ``file_path/label``,
    then serves windows of ``window`` consecutive feature rows paired with
    the label row immediately after the window.

    Filename layout this class relies on (e.g. ``bearing1_2x.csv``):
        name[7]  -> operating-condition digit
        name[9]  -> bearing-number digit; passing bearing_label == '0'
                    selects every bearing under the condition
        name[10] -> channel marker, 'x' or 'y' (feature files only)

    NOTE(review): the layout above is inferred from the index checks in
    __init__ — confirm against the actual dataset file names.
    """

    def __init__(self, file_path: str, bearing_condition: str, bearing_label: str, window: int):
        """Load all matching feature/label CSVs into memory.

        Args:
            file_path: dataset root containing ``feature/`` and ``label/``.
            bearing_condition: single-character condition selector.
            bearing_label: single-character bearing selector, '0' = all.
            window: number of consecutive feature rows per sample.
        """
        feature_path = os.path.join(file_path, 'feature')
        label_path = os.path.join(file_path, 'label')
        self.window = window
        self.dataX = []
        self.dataY = []
        self.labels = []

        # sorted() keeps x/y channel rows aligned across multiple bearings.
        for name in sorted(os.listdir(feature_path)):
            if name[7] == bearing_condition and (bearing_label == '0' or bearing_label == name[9]):
                channel = name[10]
                if channel in ('x', 'y'):
                    rows = self._read_csv(os.path.join(feature_path, name))
                    (self.dataX if channel == 'x' else self.dataY).extend(rows)

        for name in sorted(os.listdir(label_path)):
            if name[7] == bearing_condition and (bearing_label == '0' or bearing_label == name[9]):
                self.labels.extend(self._read_csv(os.path.join(label_path, name)))

        if len(self.labels) > 0:
            print('bearing{}_{} has been loaded'.format(bearing_condition, bearing_label))

    @staticmethod
    def _read_csv(path: str) -> list:
        """Read a CSV file into a list of float rows, closing the file.

        The original code left the file handle open (csv.reader(open(...)));
        a ``with`` block guarantees it is released.
        """
        with open(path, newline='') as f:
            return [[float(value) for value in row] for row in csv.reader(f)]

    def __len__(self) -> int:
        # Clamp at 0: without the clamp, fewer labels than `window` rows
        # yields a negative length, which breaks DataLoader/samplers.
        return max(0, len(self.labels) - self.window)

    def __getitem__(self, idx: int):
        """Return (x_window, y_window, label) for the window starting at idx.

        x_window/y_window are (window, n_features) float tensors; label is
        the label row at position idx + window.
        """
        dst = idx + self.window
        return torch.tensor(self.dataX[idx:dst]), torch.tensor(self.dataY[idx:dst]), torch.tensor(self.labels[dst])


def getLoader(path, batch_size, bearing_condition, bearing_label, window, is_shuffle=False):
    """Build a DataLoader over a DATASET for one bearing selection.

    Condition and label may arrive as ints; they are normalized to the
    single-character strings the DATASET filename filter expects.

    Args:
        path: dataset root directory (contains feature/ and label/).
        batch_size: batch size handed to the DataLoader.
        bearing_condition: operating-condition selector.
        bearing_label: bearing selector ('0'/0 = all bearings).
        window: sliding-window length in feature rows.
        is_shuffle: whether the DataLoader shuffles samples.
    """
    dataset = DATASET(path, str(bearing_condition), str(bearing_label), window)
    return DataLoader(dataset, batch_size=batch_size, shuffle=is_shuffle)

