import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
import tensorflow.keras as keras
print(keras.__version__)
print(tf.__version__)

import numpy as np
import pandas as pd
import time
import pickle

# @title Load Keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv1D,Flatten,Dense,Dropout,GRU
from tensorflow.keras.utils import plot_model,Sequence

# Set by whichever model-builder function is invoked (seq2point / ba_attention /
# many2one_att) via `global`; used to tag log and checkpoint filenames.
model_name = ''
# REDD appliances with preprocessed pickles available under created_data/REDD.
meters = ['microwave','fridge','dish washer','washer dryer']
# Appliance trained in this run ('washer dryer').
meter_key = meters[3]
# Sliding-window length (number of mains readings per training sample); must
# match the input_shape used by the model builders below.
window_size = 100

def seq2point():
    """Build and compile the sequence-to-point CNN.

    Maps a window of mains readings (window_size, 1) to a single appliance
    power value (the window midpoint target produced by the data pipeline).
    Sets the module-level ``model_name`` tag as a side effect.

    Returns:
        A compiled ``keras.Sequential`` model.
    """
    global model_name
    model_name = 'seq2p'

    # (filters, kernel_size) for each of the five Conv1D stages.
    conv_specs = [(30, 10), (30, 8), (40, 6), (50, 5), (50, 5)]

    model = Sequential()
    for layer_idx, (filters, kernel) in enumerate(conv_specs):
        if layer_idx == 0:
            # First conv layer carries the input shape.
            model.add(Conv1D(filters, kernel, activation='relu',
                             input_shape=(window_size, 1),
                             padding="same", strides=1))
        else:
            model.add(Conv1D(filters, kernel, activation='relu',
                             padding="same", strides=1))
        model.add(Dropout(0.5))

    # Fully connected regression head.
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='linear'))

    # NOTE(review): 'accuracy' is not a meaningful metric for MSE regression;
    # kept as-is to preserve the CSV log columns downstream.
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    model.summary()

    return model

class BahdanauAttention(tf.keras.layers.Layer):
    """Additive (Bahdanau-style) attention over an encoder output sequence.

    Scores every encoder timestep against a query state and returns the
    attention-weighted sum of the encoder outputs plus the weights themselves.
    """

    def __init__(self, units, **kwargs):
        # Forward **kwargs (name, dtype, trainable, ...) to the base Layer so
        # the layer round-trips cleanly through get_config()/from_config().
        super(BahdanauAttention, self).__init__(**kwargs)
        # Keep `units` so get_config() can serialize it (bug fix below).
        self.units = units
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, query, values):
        # query (batch_size, hidden) -> (batch_size, 1, hidden) so it
        # broadcasts against values (batch_size, max_length, hidden) in the
        # additive score below.
        hidden_with_time_axis = tf.expand_dims(query, 1)

        # score shape == (batch_size, max_length, 1): 1 at the last axis
        # because self.V projects the (batch, max_length, units) tanh output
        # down to a single scalar per timestep.
        score = self.V(tf.nn.tanh(
            self.W1(values) + self.W2(hidden_with_time_axis)))

        # Normalize the scores over the time axis.
        # attention_weights shape == (batch_size, max_length, 1)
        attention_weights = tf.nn.softmax(score, axis=1)

        # Weighted sum over time; context_vector shape after the reduce_sum
        # is (batch_size, hidden_size).
        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)

        return context_vector, attention_weights

    def get_config(self):
        config = super(BahdanauAttention, self).get_config()
        # Bug fix: `units` was previously omitted from the config, so a model
        # saved with this layer could not be deserialized (from_config would
        # call __init__ without its required argument).
        config.update({'units': self.units})
        return config

def ba_attention():
    """Build and compile a GRU encoder + Bahdanau attention regression model.

    Encodes the mains window with a GRU, attends over the encoder outputs
    using the final state as the query, and regresses a single appliance
    power value from [context ; final state].  Sets the module-level
    ``model_name`` tag as a side effect.

    Returns:
        A compiled functional ``keras.Model``.
    """
    global model_name
    model_name = 'ba_att'
    hidden_units = 128

    # Encoder: GRU over the raw window; keep both the per-timestep outputs
    # (for attention) and the final state (as the attention query).
    encoder_input = keras.Input(shape=(window_size, 1))
    encoder_seq, encoder_state = keras.layers.GRU(
        hidden_units, return_sequences=True, return_state=True)(encoder_input)

    # Attention: context_vector has shape (batch_size, hidden_size).
    context_vector, attention_weights = BahdanauAttention(hidden_units)(
        encoder_state, encoder_seq)

    # Decoder head: concatenate context with the final encoder state and
    # regress the single output value.
    merged = tf.concat([context_vector, encoder_state], axis=-1)
    hidden = tf.keras.layers.Dense(100, activation='relu')(merged)
    prediction = keras.layers.Dense(1, activation='linear')(hidden)

    att_model = keras.Model(inputs=encoder_input, outputs=prediction)
    # NOTE(review): 'accuracy' is not meaningful for MSE regression; kept to
    # preserve the CSV log columns downstream.
    att_model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])

    att_model.summary()
    return att_model

# Load the preprocessed REDD pickles: *_main.pkl.zip holds the aggregate
# (mains) readings, *_{appliance}.pkl.zip the per-appliance targets.
# NOTE(review): the train/test split here was produced offline; the exact
# pandas object type (Series vs DataFrame) is not visible from this file —
# downstream windowing assumes 1-D per-timestamp values. TODO confirm.
df_train_x = pd.read_pickle('created_data/REDD/train_main.pkl.zip')
df_train_y = pd.read_pickle('created_data/REDD/train_{}.pkl.zip'.format(meter_key))
df_test_x = pd.read_pickle('created_data/REDD/test_main.pkl.zip')
df_test_y = pd.read_pickle('created_data/REDD/test_{}.pkl.zip'.format(meter_key))


from many2one_attention import M2O_Attention
def many2one_att():
    """Build and compile a GRU encoder with many-to-one attention.

    Encodes the mains window with a GRU, applies the project's
    ``M2O_Attention`` layer over the output sequence, and regresses a single
    appliance power value through a dense head.  Sets the module-level
    ``model_name`` tag as a side effect.

    Returns:
        A compiled functional ``keras.Model``.
    """
    global model_name
    model_name = 'm2o_att'
    hidden_units = 128
    INPUT_DIM = 1

    # Encoder: GRU over the raw window. The final state is returned but
    # unused here — the attention layer consumes the full output sequence.
    window_input = keras.Input(shape=(window_size, INPUT_DIM,))
    encoder_seq, _encoder_state = keras.layers.GRU(
        hidden_units, return_sequences=True, return_state=True)(window_input)

    # Attention over the encoder outputs, then flatten for the dense head.
    att = M2O_Attention()(encoder_seq)
    att = Flatten()(att)

    # Regression head.
    hidden = Dense(1024, activation='relu')(att)
    hidden = Dropout(0.5)(hidden)
    prediction = Dense(INPUT_DIM, activation='linear')(hidden)

    model = keras.Model(inputs=[window_input], outputs=prediction)
    # NOTE(review): 'accuracy' is not meaningful for MSE regression; kept to
    # preserve the CSV log columns downstream.
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model


def _get_ready_for_fit(df_train_x,df_train_y,window_size):
    # Normalize
    df_train_x = df_train_x/df_train_x.max()
    df_train_y = df_train_y/df_train_x.max()
    # Replace NaNs with 0s
    df_train_x.fillna(0, inplace=True)
    df_train_y.fillna(0,inplace=True)
    # Find common parts of timeseries
    ix = df_train_x.index.intersection(df_train_y.index)
    # Move from Pandas to Numpy - the array/tensor format appreciated by Keras
    np_train_x = np.array(df_train_x[ix])
    np_train_y = np.array(df_train_y[ix])
    # Create the indexer matrix
    rows = np.arange(len(np_train_x) - window_size + 1).reshape(-1,1)
    cols = np.arange(window_size).reshape(1,-1)
    indexer = rows + cols
    # Reshape the x training data into sliding windows.  Ending up with
    # the number of rows staying as the number of readings and the columns
    # ending up as the window_size. 
    np_train_x = np_train_x[indexer]
    # Set the y training data to the midpoint of the device's column values
    np_train_y = np_train_y[indexer]
    midpoint = window_size // 2 - 1
    np_train_y = np_train_y[:,midpoint]
    # Get the data into the 3D tensor shape expected by model.fit

    return np_train_x,np_train_y
    
# Build sliding-window training tensors for the selected appliance.
# Fix: pass the module-level window_size instead of a hard-coded 100 so the
# windowing always matches the input_shape used by the model builders.
train_x,train_y = _get_ready_for_fit(df_train_x,df_train_y,window_size)
print(train_x.shape)

# Add a trailing channel axis: (samples, window) -> (samples, window, 1),
# the 3-D shape expected by the Conv1D/GRU input layers.
train_x = np.reshape(train_x, (train_x.shape[0], train_x.shape[1], 1))
print(train_x.shape)

#train / val Split
from sklearn.model_selection import train_test_split

# NOTE(review): despite the names, test_x/test_y here are a validation split
# carved from the training windows; the held-out test pickles loaded above
# are not used in this script.
train_x, test_x, train_y, test_y = train_test_split(train_x, train_y, test_size=0.2, random_state=42)
print(f'train_x shape: {train_x.shape}, train_y shape: {train_y.shape}, test_x shape: {test_x.shape}, test_y.shape: {test_y.shape}')


from tensorflow.keras.callbacks import EarlyStopping
# Stop training once val_loss has not improved by >= 1e-3 for 10 consecutive
# epochs, and roll the model back to the best weights seen.
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=10, verbose=1, mode='auto',
        restore_best_weights=True)

# Select the architecture to train; each builder also sets `model_name`,
# which tags the log/checkpoint filenames below.
# model = many2one_att()
model = seq2point()
# model = attention()

# Per-epoch metrics are appended to <meter>_<model>_<timestamp>.log.
str_time = time.strftime('%Y%m%d_%H%M%S')
csv_logger = tf.keras.callbacks.CSVLogger('{}_{}_{}.log'.format(meter_key, model_name, str_time), separator=',', append=False)
# csv_logger = tf.keras.callbacks.CSVLogger(''), separator=',', append=True)

# @title Nicely formatted time string
def hms_string(sec_elapsed):
    """Format an elapsed duration in seconds as ``H:MM:SS.ss``."""
    total_minutes, seconds = divmod(sec_elapsed, 60)
    hours, minutes = divmod(int(total_minutes), 60)
    return f"{hours}:{minutes:>02}:{seconds:>05.2f}"

#load trained model
# model = tf.keras.models.load_model('fridge_seq2p_20201201_103055.h5')
# print(model.summary())

# Train with early stopping and CSV logging; epochs=1000 is an upper bound —
# the EarlyStopping callback normally terminates the run much sooner.
start_time = time.time()
history = model.fit(train_x,train_y, validation_data=(test_x,test_y),epochs=1000,callbacks=[monitor, csv_logger])

# with open('trainHistoryDict_{}_{}'.format(model_name, time.ctime()), 'wb') as file_pi:
#     pickle.dump(history.history, file_pi)

elapsed_time = time.time() - start_time
print(f"Elapsed time: {elapsed_time}")

# Persist the trained model as <meter>_<model>_<timestamp>.h5.
# NOTE(review): a model containing BahdanauAttention needs custom_objects
# (and a config that includes `units`) to be reloaded — verify before relying
# on these checkpoints.
model.save('{}_{}_{}.h5'.format(meter_key, model_name, str_time))

# import matplotlib.pyplot as plt
# acc = history.history['accuracy']
# val_acc = history.history['val_accuracy']
# epochs = range(len(acc))

# plt.title('Training and Validation Accuracy')
# plt.plot(epochs,acc,color='blue',label='Train')
# plt.plot(epochs,val_acc,color='orange',label='Val')
# plt.xlabel('Epoch')
# plt.ylabel('Accuracy')
# plt.legend()
# plt.savefig('microwave_reslut.jpg')

# @title Measure RMSE error...RMSE is common for regression.
from sklearn import metrics
# Evaluate on the validation split (test_x/test_y from train_test_split above).
# mean_squared_error is symmetric in its arguments, so the (pred, truth)
# order here does not affect the result.  Score is in normalized units.
pred = model.predict(test_x)
score = np.sqrt(metrics.mean_squared_error(pred,test_y))
print(f"Final score (RMSE): {score}")


