import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import torch.cuda
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.preprocessing import OneHotEncoder
      
class MonitorEntityDataset_SMD(Dataset):
    """Sliding-window dataset over the SMD benchmark (one CSV file per entity).

    For each entity, the train and test series are scaled (scaler fit on the
    train split only), concatenated, and stored as a single float tensor.
    Each item is a flattened window of `sequence_length` consecutive rows
    ending at the indexed position.  The active split is selected with
    `train()` / `test()`.
    """

    def __init__(self, trainfiles, testfiles, sequence_length, z_dim, period=1440, seed=0, is_minmax=False, gpu=None, period_num=None):
        """
        Args:
            trainfiles, testfiles: parallel lists of per-entity CSV paths
                (both are sorted, so entity i is trainfiles[i]/testfiles[i]
                after sorting — file names must sort consistently).
            sequence_length: window length in time steps.
            z_dim, seed: unused here; kept for interface compatibility.
            period: assumed series periodicity in time steps
                (1440 — presumably one day at minute resolution; TODO confirm).
            is_minmax: if True use MinMaxScaler, else StandardScaler.
            gpu: CUDA device index, or None to keep tensors on CPU.
            period_num: if None, drop the first `2 * period` rows of each
                entity; otherwise keep only the last
                `len_test + period * period_num` rows.
        """
        self.gpu = gpu
        self.sequence_length = sequence_length

        self.device = torch.device(f'cuda:{self.gpu}' if torch.cuda.is_available() and self.gpu is not None else 'cpu')

        self.is_train = True
        # Fix: initialize so __len__/__getitem__ work before train() is called
        # (previously this attribute only existed after a train() call).
        self.current_train = None
        self.train_index = []

        self.current_test = None
        self.test_index = {}
        self.representative_curves = {}

        self.fea_dims = []
        self.tensors = []

        trainfiles = sorted(trainfiles)
        testfiles = sorted(testfiles)

        for i, (trainfile, testfile) in enumerate(zip(trainfiles, testfiles)):
            example_train = pd.read_csv(trainfile, header=None)
            example_test = pd.read_csv(testfile, header=None)

            self.fea_dims.append(example_train.shape[1])

            # Fit the scaler on the train split only, then apply to both.
            scaler = MinMaxScaler() if is_minmax else StandardScaler()
            example_train = pd.DataFrame(scaler.fit_transform(example_train))
            example_test = pd.DataFrame(scaler.transform(example_test))

            len_test = len(example_test)

            example_all = pd.concat((example_train, example_test), axis=0)
            example_all.reset_index(drop=True, inplace=True)

            tensor = example_all.values
            if period_num is None:
                # Drop the first two periods of the training portion.
                tensor = tensor[period * 2:]
            else:
                # Keep only the last `period_num` periods of train data plus
                # the whole test portion.
                tensor = tensor[-len_test - period * period_num:]

            self.tensors.append(torch.from_numpy(tensor).float().to(self.device))

            # Valid window end-positions: first `sequence_length - 1` rows
            # cannot end a full window; the last `len_test` rows are the
            # test portion.
            train_locs = [(i, j) for j in range(self.sequence_length - 1, len(tensor) - len_test)]
            self.train_index.extend(train_locs)
            self.test_index[testfile + '_TrainSet'] = train_locs
            self.test_index[testfile] = [(i, j) for j in range(len(tensor) - len_test, len(tensor))]

    def train(self, current_train=None):
        """Switch to train mode.

        With no argument, iterate all entities' train windows; with a test
        file path, iterate only that entity's train windows.
        """
        self.is_train = True
        self.current_train = None if current_train is None else current_train + '_TrainSet'

    def test(self, current_test):
        """Switch to test mode for the entity keyed by test file path."""
        self.is_train = False
        self.current_test = current_test

    def __len__(self):
        return len(self._active_index())

    def _active_index(self):
        # Resolve the (entity, position) index list for the current mode.
        if self.is_train:
            if self.current_train is None:
                return self.train_index
            return self.test_index[self.current_train]
        return self.test_index[self.current_test]

    def __getitem__(self, i):
        """Return a flattened window tensor of shape [seq_len * fea_dim]."""
        loc_set, loc_seq = self._active_index()[i]
        # Window of `sequence_length` rows ending at loc_seq (inclusive).
        return torch.flatten(self.tensors[loc_set][loc_seq - self.sequence_length + 1:loc_seq + 1])


class MonitorEntityDataset_MSL_SMAP(Dataset):
    """Sliding-window dataset over the MSL/SMAP benchmarks (one .npy per entity).

    For each entity, the train and test series are scaled (scaler fit on the
    train split only), concatenated, and stored as a single float tensor.
    Each item is a flattened window of `sequence_length` consecutive rows
    ending at the indexed position.  The active split is selected with
    `train()` / `test()`.
    """

    def __init__(self, trainfiles, testfiles, sequence_length, is_minmax=False, gpu=None):
        """
        Args:
            trainfiles, testfiles: parallel lists of per-entity .npy paths
                (both are sorted, so entity i is trainfiles[i]/testfiles[i]
                after sorting — file names must sort consistently).
            sequence_length: window length in time steps.
            is_minmax: if True use MinMaxScaler, else StandardScaler.
            gpu: CUDA device index, or None to keep tensors on CPU.
        """
        self.gpu = gpu
        self.sequence_length = sequence_length

        self.device = torch.device(f'cuda:{self.gpu}' if torch.cuda.is_available() and self.gpu is not None else 'cpu')

        self.is_train = True
        # Fix: initialize so __len__/__getitem__ work before train() is called
        # (previously this attribute only existed after a train() call).
        self.current_train = None
        self.train_index = []

        self.current_test = None
        self.test_index = {}
        self.representative_curves = {}

        self.fea_dims = []
        self.tensors = []

        trainfiles = sorted(trainfiles)
        testfiles = sorted(testfiles)

        for i, (trainfile, testfile) in enumerate(zip(trainfiles, testfiles)):
            example_train = np.load(trainfile)
            example_test = np.load(testfile)

            self.fea_dims.append(example_train.shape[1])

            # Fit the scaler on the train split only, then apply to both.
            scaler = MinMaxScaler() if is_minmax else StandardScaler()
            example_train = pd.DataFrame(scaler.fit_transform(example_train))
            example_test = pd.DataFrame(scaler.transform(example_test))

            len_test = len(example_test)

            example_all = pd.concat((example_train, example_test), axis=0)
            example_all.reset_index(drop=True, inplace=True)

            # Unlike the SMD variant, no period-based trimming is applied.
            tensor = example_all.values

            self.tensors.append(torch.from_numpy(tensor).float().to(self.device))

            # Valid window end-positions: first `sequence_length - 1` rows
            # cannot end a full window; the last `len_test` rows are the
            # test portion.
            train_locs = [(i, j) for j in range(self.sequence_length - 1, len(tensor) - len_test)]
            self.train_index.extend(train_locs)
            # Fix: key the per-entity train windows under '_TrainSet' as the
            # SMD class does — the original assigned both index lists to the
            # same key, so the train windows were immediately overwritten by
            # the test windows.
            self.test_index[testfile + '_TrainSet'] = train_locs
            self.test_index[testfile] = [(i, j) for j in range(len(tensor) - len_test, len(tensor))]

    def train(self, current_train=None):
        """Switch to train mode.

        With no argument, iterate all entities' train windows; with a test
        file path, iterate only that entity's train windows.
        """
        self.is_train = True
        # Fix: append '_TrainSet' so the per-entity train windows are
        # selected (consistent with the SMD class).
        self.current_train = None if current_train is None else current_train + '_TrainSet'

    def test(self, current_test):
        """Switch to test mode for the entity keyed by test file path."""
        self.is_train = False
        self.current_test = current_test

    def __len__(self):
        return len(self._active_index())

    def _active_index(self):
        # Resolve the (entity, position) index list for the current mode.
        if self.is_train:
            if self.current_train is None:
                return self.train_index
            return self.test_index[self.current_train]
        return self.test_index[self.current_test]

    def __getitem__(self, i):
        """Return a flattened window tensor of shape [seq_len * fea_dim]."""
        loc_set, loc_seq = self._active_index()[i]
        # Window of `sequence_length` rows ending at loc_seq (inclusive).
        return torch.flatten(self.tensors[loc_set][loc_seq - self.sequence_length + 1:loc_seq + 1])