# study from : https://blog.csdn.net/wangyanbeilin/article/details/81350683
# mnist attention
import numpy as np
import os
np.random.seed(1337)
from keras.datasets import mnist
from keras.utils import np_utils
from keras.layers import *
from keras.models import *
from keras.optimizers import Adam
import numpy
from sklearn.preprocessing import MinMaxScaler

# from pandas import read_csv
from pandas import *
from ML_Methods.utils import create_dataset, set_label_dataset, mean_absolute_percentage_error

#https://blog.csdn.net/sinat_34373382/article/details/88719465
# Model hyperparameters.
# NOTE(review): TIME_STEPS=28 and INPUT_DIM=2 appear to be carried over from the
# original 28x28 MNIST attention example; the SPEI data prepared below uses
# time_steps=4 windows with 1 feature -- confirm before training.
TIME_STEPS = 28 # number of time steps the attention Dense layer spans
INPUT_DIM = 2 # feature dimension per time step
lstm_units = 64 # number of hidden units in the LSTM layer

# data pre-processing
# (X_train, y_train), (X_test, y_test) = mnist.load_data('mnist.npz')
# X_train = X_train.reshape(-1, 28, 28) / 255.
# X_test = X_test.reshape(-1, 28, 28) / 255.
# y_train = np_utils.to_categorical(y_train, num_classes=10)
# y_test = np_utils.to_categorical(y_test, num_classes=10)
# print('X_train shape:', X_train.shape)
# print('X_test shape:', X_test.shape)


# split a univariate sequence into samples
def split_sequence(sequence, n_steps):
    """Split a univariate sequence into supervised (X, y) samples.

    Each sample X[i] is the window sequence[i:i+n_steps]; y[i] is the value
    immediately following that window.

    Args:
        sequence: array-like series of observations (1-D, or 2-D column).
        n_steps: window length (number of lag observations per sample).

    Returns:
        Tuple (X, y) of numpy arrays; empty when len(sequence) <= n_steps.
    """
    windows, labels = [], []
    # Last usable start index leaves one element after the window as the label;
    # an empty range handles too-short sequences without an explicit break.
    for start in range(len(sequence) - n_steps):
        windows.append(sequence[start:start + n_steps])
        labels.append(sequence[start + n_steps])
    # BUG FIX: the bare name `array` resolved to pandas.array (shadowed by
    # `from pandas import *`), which rejects the 2-D windows gathered here.
    # Use numpy explicitly, as the original recipe intended.
    return np.asarray(windows), np.asarray(labels)



# first way attention
def attention_3d_block(inputs):
    """Apply a learned soft-attention mask over the time axis of `inputs`.

    Expects a 3-D tensor (batch, time, features): a softmax Dense layer of
    width TIME_STEPS produces per-timestep weights, which are multiplied
    element-wise back onto the inputs.
    """
    # Transpose to (batch, features, time) so the softmax Dense spans time.
    transposed = Permute((2, 1))(inputs)
    weights = Dense(TIME_STEPS, activation='softmax')(transposed)
    # Restore the (batch, time, features) layout before the product.
    attention_probs = Permute((2, 1), name='attention_vec')(weights)
    return multiply([inputs, attention_probs], name='attention_mul')


# --- Data loading and scaling -------------------------------------------------
WORK_PATH = os.getcwd()  # current working directory of the project
# Path to the SPEI-12 series for station 52533 (Windows-style relative path).
DATA_PATH = os.path.join(WORK_PATH, r'indices_caculate\result\ROW_SPEI\ROW_SPEI-12\SPEI-12_52533.txt')
file_path = DATA_PATH

spei_n = 12        # SPEI aggregation window (months)
train_rate = 0.9   # fraction of the series used for training

# Load the series and index it by date.
dataframe = read_csv(file_path, header=None, names=('TIME', 'SPEI-12' ))
dataframe = dataframe.set_index(['TIME'], drop=True)

# Chronological train/test split via the project helper.
train_dataset, test_dataset = create_dataset(numpy.array(dataframe), train_rate)

# Scale to [0, 1] (MinMaxScaler -> [0,1]; MaxAbsScaler would give [-1,1]).
# BUG FIX: the scaler must be fitted on the training set only and then applied
# unchanged to the test set; the original called fit_transform on the test data
# too, leaking test statistics and scaling the two splits inconsistently.
scaler = MinMaxScaler(feature_range=(0, 1))
train_dataset = scaler.fit_transform(train_dataset)
test_dataset = scaler.transform(test_dataset)

# # get label data
# X_train, y_train = set_label_dataset(train_dataset, 1)
# X_test, y_test = set_label_dataset(test_dataset, 1)


# --- Build supervised samples -------------------------------------------------
# Number of lag observations per sample window.
time_steps = 4

# Shared reshape parameters: each 4-step window is viewed as
# [samples, subsequences, steps, features] = [N, 2, 2, 1].
# (Consolidated: the original reassigned these same constants a second time
# for the test split.)
n_features = 1
n_seq = 2
n_steps = 2

# Train split: window the scaled series, then reshape to 4-D.
X_train, y_train = split_sequence(train_dataset, time_steps)
X_train = X_train.reshape((X_train.shape[0], n_seq, n_steps, n_features))

# Test split: identical windowing and reshape.
X_test, y_test = split_sequence(test_dataset, time_steps)
X_test = X_test.reshape((X_test.shape[0], n_seq, n_steps, n_features))

# NOTE(review): this 4-D layout targets a ConvLSTM-style model, but the
# attention model below declares Input(shape=(TIME_STEPS, INPUT_DIM)) = (28, 2),
# which does not match -- confirm the intended input shape before training.



# build RNN model with attention (first way)
# NOTE(review): Input shape (TIME_STEPS, INPUT_DIM) = (28, 2) does not match the
# data prepared above (samples reshaped to (N, 2, 2, 1)) -- confirm.
inputs = Input(shape=(TIME_STEPS, INPUT_DIM))
drop1 = Dropout(0.3)(inputs)
lstm_out = Bidirectional(LSTM(lstm_units, return_sequences=True), name='bilstm')(drop1)  # lstm_units = number of hidden units in this layer
attention_mul = attention_3d_block(lstm_out)
attention_flatten = Flatten()(attention_mul)
drop2 = Dropout(0.3)(attention_flatten)
# NOTE(review): a 10-unit sigmoid head comes from the 10-class MNIST example;
# the regression target y here is a single scalar per sample -- verify.
output = Dense(10, activation='sigmoid')(drop2)   # original author's note: sigmoid claimed to work best here
model = Model(inputs=inputs, outputs=output)

# second way attention

# inputs = Input(shape=(TIME_STEPS, INPUT_DIM))
# units = 32
# activations = LSTM(units, return_sequences=True, name='lstm_layer')(inputs)
#
# attention = Dense(1, activation='tanh')(activations)
# attention = Flatten()(attention)
# attention = Activation('softmax')(attention)
# attention = RepeatVector(units)(attention)
# attention = Permute([2, 1], name='attention_vec')(attention)
# attention_mul = merge([activations, attention], mode='mul', name='attention_mul')
# out_attention_mul = Flatten()(attention_mul)
# output = Dense(10, activation='sigmoid')(out_attention_mul)
# model = Model(inputs=inputs, outputs=output)


# Compile with MSE loss (regression objective).
# NOTE(review): 'accuracy' is a classification metric and is not meaningful for
# a continuous SPEI target -- confirm the intended metric (e.g. MAE).
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
print(model.summary())

print('Training------------')
model.fit(X_train, y_train, epochs=10, batch_size=16)

print('Testing--------------')
# evaluate returns [loss, metric] because exactly one metric was compiled in.
loss, accuracy = model.evaluate(X_test, y_test)

print('test loss:', loss)
print('test accuracy:', accuracy)