import numpy as np
import json
import pandas as pd
from numpy.lib.stride_tricks import sliding_window_view
from datetime import timedelta

# Expected sampling interval for each sensor family, plus "threshold":
# the tolerance allowed when checking whether a series is continuous.
TimeDelta = {sensor: timedelta(hours=1) for sensor in ("pressure", "seepage", "radar", "movement")}
TimeDelta["threshold"] = timedelta(minutes=15)

# Physically plausible [min, max] per sensor type; readings outside this
# window are treated as invalid by removeIllegalValues.
LegalRange = dict(
	pressure=[0, 100.0],
	seepage=[0, 200.0],
	radar=[-8.0, 8.0],
	movement=[-8.0, 8.0],
)
def _sensor_config(io_size=1):
	"""Build the per-sensor data/model/training config.

	io_size: width of the model's input and output (1 for scalar sensors,
	3 for the movement sensor's east/north/up channels).

	All four sensor configs were previously four hand-copied dict literals
	differing only in input_size/output_size; this factory removes that
	duplication while producing identical values.
	"""
	return {
		"data": {
			"window_size": 6
		},
		"model": {
			"input_size": io_size,
			"num_lstm_layers": 5,
			"lstm_size": 64,
			"dropout": 0.2,
			"output_size": io_size
		},
		"training": {
			"device": "cuda",
			"batch_size": 32,
			"num_epoch": 160,
			"learning_rate": 0.01,
			"scheduler_step_size": 40
		}
	}

# Per-sensor configuration; each entry is an independent dict (fresh per call).
configs = {
	'pressure': _sensor_config(),
	'seepage': _sensor_config(),
	'radar': _sensor_config(),
	'movement': _sensor_config(3)
}
class Normalizer():
	"""Column-wise z-score scaler: standardizes with the mean/std of axis 0.

	Statistics can be passed in up front or learned via fit_transform.
	NOTE: division by self.sd is unguarded, so a constant column yields
	a divide-by-zero (inf/nan) — same as the original contract.
	"""

	def __init__(self, mu=None, sd=None):
		self.mu = mu  # per-column mean, shape (1, n_features) once fitted
		self.sd = sd  # per-column std, shape (1, n_features) once fitted

	def fit_transform(self, x):
		"""Learn mean/std from x, then return x standardized."""
		self.mu, self.sd = np.mean(x, axis=0, keepdims=True), np.std(x, axis=0, keepdims=True)
		return (x - self.mu) / self.sd

	def test_transform(self, x):
		"""Standardize x using previously learned (or supplied) statistics."""
		return (x - self.mu) / self.sd

	def inverse_transform(self, x):
		"""Undo the standardization, mapping x back to the original scale."""
		return x * self.sd + self.mu

def prepare_data_x(x, window_size):
	"""Slice a 2-D series x (time, features) into overlapping length-window_size windows.

	Returns (history_windows, newest_window): every window except the last,
	and the last window (the "unseen" input for predicting the next step).
	"""
	windows = sliding_window_view(x, window_shape=(window_size, x.shape[-1]))
	# The window spans the full feature axis, so that view axis has size 1.
	windows = windows.squeeze(axis=1)
	return windows[:-1], windows[-1]

def prepare_data_y(x, window_size):
	"""Build labels: the observation immediately after each length-window_size window.

	Equivalent to shifting x forward by window_size steps, so label i pairs
	with the window x[i : i + window_size].

	NOTE(review): the original also computed a simple moving average with
	np.convolve and then immediately overwrote it with x[window_size:]; that
	dead computation has been removed — behavior is unchanged.
	"""
	return x[window_size:]

def prepare_data(normalized_vals, config):
	"""Turn a normalized series into (training windows, labels, newest unseen window)."""
	window = config["data"]["window_size"]
	data_x, data_x_unseen = prepare_data_x(normalized_vals, window_size=window)
	data_y = prepare_data_y(normalized_vals, window_size=window)

	# No held-out split: the full history is used for training.
	return data_x, data_y, data_x_unseen


def removeIllegalValues(rare_data_df, key):
	match key:
		case 'movement':
			data_df = rare_data_df.mask(rare_data_df['val_e']<=LegalRange[key][0], np.nan)
			data_df = rare_data_df.mask(rare_data_df['val_n']<=LegalRange[key][0], np.nan)
			data_df = rare_data_df.mask(rare_data_df['val_u']<=LegalRange[key][0], np.nan)

			data_df = data_df.mask(data_df['val_e']>=LegalRange[key][-1], np.nan)
			data_df = data_df.mask(data_df['val_n']>=LegalRange[key][-1], np.nan)
			data_df = data_df.mask(data_df['val_u']>=LegalRange[key][-1], np.nan)

			data_df['val_e'] = data_df['val_e'].interpolate(method='linear', axis=0, limit_direction='both')
			data_df['val_n'] = data_df['val_n'].interpolate(method='linear', axis=0, limit_direction='both')
			data_df['val_u'] = data_df['val_u'].interpolate(method='linear', axis=0, limit_direction='both')

		case _:
			data_df = rare_data_df.mask(rare_data_df['value']<LegalRange[key][0], np.nan)
			data_df = data_df.mask(data_df['value']>LegalRange[key][-1], np.nan)
			data_df['value'] = data_df['value'].interpolate(method='linear',axis=0,limit_direction='both')
	return data_df

def loadDict4test(input_dic):
	"""Group raw sensor readings by type and station, clean them, and flag each series.

	input_dic is expected to carry "startTime", "endTime" and a "data" list whose
	items have "StationType", "stationID", "time" and either "value" or
	val_e/val_n/val_u fields -- TODO confirm against the caller.

	Returns {sensor_type: {stationID: {"df": cleaned DataFrame, "is_legal": bool}}},
	where is_legal means the timestamps are evenly spaced (within the
	TimeDelta["threshold"] tolerance) and span exactly [startTime, endTime].
	"""
	stime = pd.to_datetime(input_dic["startTime"])
	etime = pd.to_datetime(input_dic["endTime"])
	data_dic = input_dic["data"]

	# Column-wise accumulators, one bucket per sensor family.
	all_dic = {
		"pressure": {"stationID":[], "time":[], "value":[]},
		"seepage":{"stationID":[], "time":[], "value":[]},
		"radar":{"stationID":[], "time":[], "value":[]},
		"movement":{"stationID":[], "time":[], "val_e":[], "val_n":[], "val_u":[]},
	}
	# Route each record to its bucket by the Chinese StationType label.
	# Records with an unrecognized StationType are silently dropped.
	for x in data_dic:
		match x["StationType"]:
			case "渗压":  # seepage pressure
				all_dic["pressure"]["stationID"].append(x["stationID"])
				all_dic["pressure"]["time"].append(x["time"])
				all_dic["pressure"]["value"].append(x["value"])
			
			case "渗流":  # seepage flow
				all_dic["seepage"]["stationID"].append(x["stationID"])
				all_dic["seepage"]["time"].append(x["time"])
				all_dic["seepage"]["value"].append(x["value"])
			
			case "超声波雷达":  # ultrasonic radar
				all_dic["radar"]["stationID"].append(x["stationID"])
				all_dic["radar"]["time"].append(x["time"])
				all_dic["radar"]["value"].append(x["value"])
			
			case "变形监测":  # deformation monitoring (east/north/up displacement)
				all_dic["movement"]["stationID"].append(x["stationID"])
				all_dic["movement"]["time"].append(x["time"])
				all_dic["movement"]["val_e"].append(x["val_e"])
				all_dic["movement"]["val_n"].append(x["val_n"])
				all_dic["movement"]["val_u"].append(x["val_u"])


	# Per sensor family: cast to typed columns, then split and validate per station.
	for key, dic in all_dic.items():
	
		data_df = pd.DataFrame(dic)
		match key:
			case 'movement':df = data_df.astype({
				"stationID": "string",
				'time': 'datetime64[ns]',
				'val_e': 'float64',
				'val_n': 'float64',
				'val_u': 'float64',

			}) 
			case _ : df = data_df.astype({
				"stationID": "string",
				'time': 'datetime64[ns]',
				'value': 'float64'
			})
		group_info = {}
		for group_id, group_df in df.groupby('stationID'):
			# Sort records chronologically.
			sorted_df = group_df.sort_values('time').reset_index(drop=True)
			
			# Check that consecutive timestamps stay within the expected
			# interval for this sensor, give or take the shared tolerance.
			time_deltas = sorted_df['time'].diff().dropna()
			is_continuous = all(time_deltas >= TimeDelta[key] - TimeDelta["threshold"]) and all(time_deltas <= TimeDelta[key] + TimeDelta["threshold"])
			
			# Check that the series starts/ends exactly at the requested range.
			times = sorted_df['time']
			is_correct_range = (times.iloc[0] == stime) and (times.iloc[-1] == etime)
			
			# Clean out-of-range readings (NaN + interpolation) before saving.
			sorted_df = removeIllegalValues(sorted_df, key)

			is_legal = is_continuous and is_correct_range
			group_info[group_id] = {
				'df': sorted_df,
				'is_legal': is_legal	
			}
		# Replace the raw column lists with the per-station results.
		all_dic[key] = group_info

	return all_dic
