import os
import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import *
from datetime import datetime


# Directory where all preprocessed .npy files are written.
output_folder = 'processed'
# Root directory that holds the raw dataset downloads.
data_folder = 'data'

# Datasets this script knows how to preprocess (see load_data below).
datasets = [ 'SMD', 'SWaT', 'SMAP', 'MSL', 'WADI']

def load_and_save(category, filename, dataset, dataset_folder):
    """Read one comma-delimited SMD text file and persist it as .npy.

    `category` is one of 'train'/'test', `dataset` the machine id used in
    the output filename. Returns the loaded array's shape so callers can
    size a matching label matrix.
    """
    source_path = os.path.join(dataset_folder, category, filename)
    data = np.genfromtxt(source_path, dtype=np.float64, delimiter=',')
    print(dataset, category, filename, data.shape)
    target_path = os.path.join(output_folder, f"SMD/{dataset}_{category}.npy")
    np.save(target_path, data)
    return data.shape

def load_and_save2(category, filename, dataset, dataset_folder, shape):
    """Build a (time, feature) 0/1 interpretation-label matrix and save it.

    Each line of the interpretation file reads "start-end:f1,f2,...",
    with 1-based row and feature indices; the referenced cells are set
    to 1 (the slice end stays exclusive, as in the original file format
    handling here).
    """
    label_matrix = np.zeros(shape)
    label_path = os.path.join(dataset_folder, 'interpretation_label', filename)
    with open(label_path, "r") as f:
        for line in f:
            parts = line.split(':')
            span, feats = parts[0], parts[1]
            start = int(span.split('-')[0])
            end = int(span.split('-')[1])
            cols = [int(v) - 1 for v in feats.split(',')]
            label_matrix[start - 1:end - 1, cols] = 1
    print(dataset, category, filename, label_matrix.shape)
    np.save(os.path.join(output_folder, f"SMD/{dataset}_{category}.npy"), label_matrix)

def normalize(a):
    """Map each column of `a` into [0, 1] by dividing by its peak magnitude.

    Every column is scaled by max(|col max|, |col min|), landing values
    in [-1, 1], then shifted into [0, 1].
    """
    col_peak = np.maximum(np.absolute(a.max(axis=0)), np.absolute(a.min(axis=0)))
    scaled = a / col_peak
    return scaled / 2 + 0.5

def normalize2(a, min_a=None, max_a=None):
    """Min-max scale `a` into [0, 1].

    When `min_a` is None the bounds are taken from `a` itself (builtin
    min/max, so this expects a 1-D sequence); otherwise the supplied
    bounds are reused, e.g. to scale a test split with train statistics.
    Returns (scaled, min_a, max_a).
    """
    if min_a is None:
        min_a = min(a)
        max_a = max(a)
    span = max_a - min_a
    return (a - min_a) / span, min_a, max_a

def normalize3(a, min_a=None, max_a=None):
    """Column-wise min-max scaling with a small epsilon in the denominator.

    The +0.0001 guards against division by zero on constant columns.
    When bounds are given they are reused (train statistics applied to a
    test split). Returns (scaled, min_a, max_a).
    """
    if min_a is None:
        min_a = np.min(a, axis=0)
        max_a = np.max(a, axis=0)
    denom = max_a - min_a + 0.0001
    return (a - min_a) / denom, min_a, max_a



def load_data(dataset):
	folder = os.path.join(output_folder, dataset)
	os.makedirs(folder, exist_ok=True)
	if dataset == 'SMD':
		dataset_folder = 'data/SMD'
		file_list = os.listdir(os.path.join(dataset_folder, "train"))
		for filename in file_list:
			if filename.endswith('.txt'):
				load_and_save('train', filename, filename.strip('.txt'), dataset_folder)
				s = load_and_save('test', filename, filename.strip('.txt'), dataset_folder)
				load_and_save2('labels', filename, filename.strip('.txt'), dataset_folder, s)
	elif dataset == 'SWaT':
		down_sampling_rate = 1
		train = pd.read_csv("data/SWaT/SWaT_Dataset_Normal_v1.csv")
		train = train.drop(["Timestamp", "Normal/Attack"], axis = 1)
		for i in list(train):
			train[i] = train[i].apply(lambda x: str(x).replace("," , "."))
		train = train.astype(float)
		train = train.groupby(np.arange(len(train.index)) // down_sampling_rate).mean()
		min_max_scaler = StandardScaler()
		train = min_max_scaler.fit_transform(train.values)

		test = pd.read_csv("data/SWaT/SWaT_Dataset_Attack_v0.csv",sep=";")
		labels = [ float(label!= 'Normal' ) for label  in test["Normal/Attack"].values]
		test = test.drop(["Timestamp" , "Normal/Attack" ] , axis = 1)
		for i in list(test):
			test[i]=test[i].apply(lambda x: str(x).replace("," , "."))
		test = test.astype(float)
		test = test.groupby(np.arange(len(test.index)) // down_sampling_rate).mean()
		test = min_max_scaler.transform(test.values)

		#标签降采样
		labels_down=[]
		for i in range(len(labels)//down_sampling_rate):
			if labels[down_sampling_rate*i:down_sampling_rate*(i+1)].count(1.0):
				labels_down.append(1.0) #test
			else:
				labels_down.append(0.0) #train
		if down_sampling_rate != 1:		
			if labels[down_sampling_rate*(i+1):].count(1.0):
				labels_down.append(1.0) #test
			else:
				labels_down.append(0.0) #train	
		labels = np.array(labels_down).reshape(-1,1)

		print(train.shape, test.shape, labels.shape)
		for file in ['train', 'test', 'labels']:
			np.save(os.path.join(folder, f'{file}.npy'), eval(file))

	elif dataset in ['SMAP', 'MSL']:
		dataset_folder = 'data/SMAP_MSL'
		file = os.path.join(dataset_folder, 'labeled_anomalies.csv')
		values = pd.read_csv(file)
		values = values[values['spacecraft'] == dataset]
		filenames = values['chan_id'].values.tolist()
		print(len(filenames))
		for fn in filenames:
			print(fn)
			train = np.load(f'{dataset_folder}/train/{fn}.npy')
			test = np.load(f'{dataset_folder}/test/{fn}.npy')

			train, min_a, max_a = normalize3(train)
			test, _, _ = normalize3(test, min_a, max_a)
			np.save(f'{folder}/{fn}_train.npy', train)
			np.save(f'{folder}/{fn}_test.npy', test)
			labels = np.zeros((test.shape[0],1))
			indices = values[values['chan_id'] == fn]['anomaly_sequences'].values[0]
			indices = indices.replace(']', '').replace('[', '').split(', ')
			indices = [int(i) for i in indices]
			for i in range(0, len(indices), 2):
				labels[indices[i]:indices[i+1], :] = 1
			np.save(f'{folder}/{fn}_labels.npy', labels)
	elif dataset == 'WADI':
		# 正常数据
		train = pd.read_csv("data/WADI/WADI_14days.csv",sep=',', skiprows=[0,1,2,3],skip_blank_lines=True)
		train = train.drop(train.columns[[0,1,2,50,51,86,87]],axis=1) # 去掉空白列和时间列
		train = train.astype(float)
		# 降采样
		down_sampling_rate = 1
		train=train.groupby(np.arange(len(train.index)) // down_sampling_rate).mean()
		min_max_scaler = MinMaxScaler()
		x = train.values
		x_scaled = min_max_scaler.fit_transform(x)
		train = pd.DataFrame(x_scaled).fillna(0).values
		
		# 异常数据
		test = pd.read_csv("data/WADI/WADI_attackdata.csv",sep=",")

		# 由pdf上的攻击时间添加label
		labels=[]

		test.reset_index()
		for index, row in test.iterrows():
			date_temp=row['Date']
			date_mask="%m/%d/%Y"
			date_obj=datetime.strptime(date_temp, date_mask)
			time_temp=row['Time']
			time_mask="%I:%M:%S.%f %p"
			time_obj=datetime.strptime(time_temp,time_mask)

			if date_obj==datetime.strptime('10/9/2017', '%m/%d/%Y'):
				if time_obj>=datetime.strptime('7:25:00.000 PM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('7:50:16.000 PM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue

			if date_obj==datetime.strptime('10/10/2017', '%m/%d/%Y'):
				if time_obj>=datetime.strptime('10:24:10.000 AM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('10:34:00.000 AM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('10:55:00.000 AM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('11:24:00.000 AM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('11:30:40.000 AM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('11:44:50.000 AM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('1:39:30.000 PM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('1:50:40.000 PM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('2:48:17.000 PM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('2:59:55.000 PM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('5:40:00.000 PM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('5:49:40.000 PM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('10:55:00.000 AM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('10:56:27.000 AM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
		
			if date_obj==datetime.strptime('10/11/2017', '%m/%d/%Y'):
				if time_obj>=datetime.strptime('11:17:54.000 AM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('11:31:20.000 AM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('11:36:31.000 AM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('11:47:00.000 AM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('11:59:00.000 AM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('12:05:00.000 PM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('12:07:30.000 PM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('12:10:52.000 PM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('12:16:00.000 PM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('12:25:36.000 PM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue
				elif time_obj>=datetime.strptime('3:26:30.000 PM', '%I:%M:%S.%f %p') and time_obj<=datetime.strptime('3:37:00.000 PM', '%I:%M:%S.%f %p'):
					labels.append('Attack')
					continue

			labels.append('Normal')
			
		test=test.drop(test.columns[[0,1,2,50,51,86,87]],axis=1)
		test=test.astype(float)
		test=test.groupby(np.arange(len(test.index)) // down_sampling_rate).mean()
		test = min_max_scaler.transform(test.values)
		
		#标签降采样
		labels_down=[]
		for i in range(len(labels)//down_sampling_rate):
			if labels[down_sampling_rate*i:down_sampling_rate*(i+1)].count('Attack'):
				labels_down.append(1.0) #test
			else:
				labels_down.append(0.0) #train
		if down_sampling_rate != 1:		
			if labels[down_sampling_rate*(i+1):].count(1.0):
				labels_down.append(1.0) #test
			else:
				labels_down.append(0.0) #train	
		labels = np.array(labels_down).reshape(-1,1)
		print(train.shape, test.shape, labels.shape)
		for file in ['train', 'test', 'labels']:
			np.save(os.path.join(folder, f'{file}.npy'), eval(file))
	else:
		raise Exception(f'Not Implemented. Check one of {datasets}')

if __name__ == '__main__':
    # Each command-line argument names one dataset to preprocess
    # (see the module-level `datasets` list). Removed the unused
    # `load = []` local from the original.
    commands = sys.argv[1:]
    if commands:
        for d in commands:
            load_data(d)
    else:
        print("Usage: python data_preprocess.py <datasets>")
        print(f"where <datasets> is space separated list of {datasets}")