from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from io import BytesIO
from tensorflow.python.lib.io import file_io
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit

from tensorflow.python.keras.layers import Input, Activation,Dense, Convolution2D,MaxPool2D,Flatten,BatchNormalization,Dropout
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
from io import BytesIO
import numpy as np
from tensorflow.python.lib.io import file_io
from datetime import datetime

import argparse
import os
import numpy as np
import json

import tensorflow as tf
from sklearn import preprocessing
from google.cloud import storage



def network(num_of_people_in_data, learning_rate=0.01):
    """Build and compile a 3-block 2D-CNN classifier for 12-lead ECG windows.

    Args:
      num_of_people_in_data: number of identity classes (softmax output units).
      learning_rate: learning rate for the Adam optimizer.

    Returns:
      A compiled tf.keras Model expecting inputs of shape (12, 300, 1).
    """
    im_shape = (12, 300, 1)  # 12 ECG leads x 300 samples, single channel
    inputs_cnn = Input(shape=im_shape, name='inputs_cnn')

    conv1_1 = Convolution2D(32, (5, 5), activation='relu', padding="same")(inputs_cnn)
    conv1_1 = BatchNormalization()(conv1_1)
    pool1 = MaxPool2D(pool_size=(2, 2))(conv1_1)

    conv2_1 = Convolution2D(64, (5, 5), padding="same", activation='relu')(pool1)
    conv2_1 = BatchNormalization()(conv2_1)
    pool2 = MaxPool2D(pool_size=(2, 2))(conv2_1)

    conv3_1 = Convolution2D(128, (5, 5), padding="same", activation='relu')(pool2)
    conv3_1 = BatchNormalization()(conv3_1)
    pool3 = MaxPool2D(pool_size=(2, 2))(conv3_1)

    flatten = Flatten()(pool3)
    dense_end1 = Dense(128, activation='relu')(flatten)
    dense_end2 = Dense(50, activation='relu')(dense_end1)
    main_output = Dense(num_of_people_in_data, activation='softmax', name='main_output')(dense_end2)

    model = Model(inputs=inputs_cnn, outputs=main_output)
    # Fix: previously compiled with the string 'adam', which silently ignored
    # the learning_rate argument. Use an explicit Adam instance so the
    # parameter actually takes effect.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss='sparse_categorical_crossentropy',
                  metrics=['sparse_categorical_accuracy'])

    return model



def network_deeper(num_of_people_in_data, learning_rate=0.01):
    """Build and compile a deeper 4-conv-block CNN ECG identifier.

    Same contract as `network`, with one extra 64-filter conv block and
    (1, 2) pooling on the middle blocks so the lead axis is downsampled
    less aggressively.

    Args:
      num_of_people_in_data: number of identity classes (softmax output units).
      learning_rate: learning rate for the Adam optimizer.

    Returns:
      A compiled tf.keras Model expecting inputs of shape (12, 300, 1).
    """
    im_shape = (12, 300, 1)  # 12 ECG leads x 300 samples, single channel
    inputs_cnn = Input(shape=im_shape, name='inputs_cnn')

    x = Convolution2D(32, (5, 5), activation='relu', padding="same")(inputs_cnn)
    x = BatchNormalization()(x)
    x = MaxPool2D(pool_size=(2, 2))(x)

    x = Convolution2D(64, (5, 5), padding="same", activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPool2D(pool_size=(1, 2))(x)  # pool along time only, keep all leads

    x = Convolution2D(64, (5, 5), padding="same", activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPool2D(pool_size=(1, 2))(x)

    x = Convolution2D(128, (5, 5), padding="same", activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPool2D(pool_size=(2, 2))(x)

    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(50, activation='relu')(x)
    main_output = Dense(num_of_people_in_data, activation='softmax', name='main_output')(x)

    model = Model(inputs=inputs_cnn, outputs=main_output)
    # Fix: the string optimizer 'adam' ignored learning_rate; an explicit
    # Adam instance honors it.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss='sparse_categorical_crossentropy',
                  metrics=['sparse_categorical_accuracy'])

    return model



def network_spatiotempo(num_of_people_in_data, learning_rate=0.01):
    """Build a spatio-temporal CNN: per-lead temporal convs, then a cross-lead conv.

    Five (1, k) convolutions learn temporal features independently per lead
    (pooling along time only), followed by a (12, 1) convolution that mixes
    all 12 leads.

    Args:
      num_of_people_in_data: number of identity classes (softmax output units).
      learning_rate: learning rate for the Adam optimizer.

    Returns:
      A compiled tf.keras Model expecting inputs of shape (12, 300, 1).
    """
    im_shape = (12, 300, 1)
    inputs_cnn = Input(shape=im_shape, name='inputs_cnn')

    convolution_filters = [16, 16, 32, 32, 64]
    convolution_sizes = [5, 5, 5, 5, 5]
    maxpool_sizes = [1, 2, 1, 2, 2]  # 1 means: no pooling after that block

    x = inputs_cnn
    for filters, size, pool in zip(convolution_filters, convolution_sizes, maxpool_sizes):
        x = Convolution2D(filters=filters,
                          kernel_size=(1, size),
                          padding='same',
                          activation='relu')(x)
        x = BatchNormalization()(x)
        if pool > 1:
            x = MaxPool2D(pool_size=(1, pool))(x)

    # Spatial (cross-lead) convolution spanning all 12 leads at once.
    x = Convolution2D(filters=128,
                      kernel_size=(12, 1),
                      padding='same',
                      activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPool2D(pool_size=(1, 2))(x)

    x = Flatten()(x)

    x = Dense(128, activation='relu')(x)
    x = Dropout(0.2)(x)

    x = Dense(64, activation='relu')(x)
    x = Dropout(0.2)(x)

    main_output = Dense(num_of_people_in_data, activation='softmax', name='main_output')(x)
    model = Model(inputs=inputs_cnn, outputs=main_output)
    # Fix: the string optimizer 'adam' ignored learning_rate; an explicit
    # Adam instance honors it.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss='sparse_categorical_crossentropy',
                  metrics=['sparse_categorical_accuracy'])

    return model



def load_np_array_from_gs_dirs(bucket_name, gs_dir_list):
    """Load and row-stack every CSV file found under the given GCS prefixes.

    Args:
      bucket_name: GCS bucket name (without the 'gs://' scheme).
      gs_dir_list: list of object-name prefixes to scan for '.csv' blobs.

    Returns:
      A single numpy array with the rows of every CSV concatenated on axis 0.

    Raises:
      ValueError: if no '.csv' blob exists under any prefix (previously this
        surfaced as a confusing UnboundLocalError on `combined_data`).
    """
    client = storage.Client()
    bucket = client.bucket(bucket_name)
    loaded_arrays = []
    for gs_dir in gs_dir_list:
        for blob in bucket.list_blobs(prefix=gs_dir):
            if not blob.name.endswith(".csv"):
                continue
            file_full_path = 'gs://' + bucket_name + '/' + blob.name
            print('processing: ', blob.name)
            f = BytesIO(file_io.read_file_to_string(file_full_path, binary_mode=True))
            loaded_arrays.append(np.loadtxt(f, delimiter=','))

    if not loaded_arrays:
        raise ValueError('no .csv files found in bucket %r under prefixes %r'
                         % (bucket_name, gs_dir_list))

    # One concatenate at the end instead of repeated pairwise concatenation
    # (avoids O(n^2) copying as the combined array grows).
    return np.concatenate(loaded_arrays, axis=0)



def get_args():
    """Parse known command-line flags for the training job.

    Returns:
      argparse.Namespace with job_dir, num_epochs, batch_size,
      learning_rate and verbosity attributes.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--job-dir',
        required=True,
        type=str,
        help='local or GCS location for writing checkpoints and exporting '
             'models')
    arg_parser.add_argument(
        '--num-epochs',
        default=20,
        type=int,
        help='number of times to go through the data, default=20')
    arg_parser.add_argument(
        '--batch-size',
        type=int,
        default=128,
        help='number of records to read during each training step, default=128')
    arg_parser.add_argument(
        '--learning-rate',
        type=float,
        default=.01,
        help='learning rate for gradient descent, default=.01')
    arg_parser.add_argument(
        '--verbosity',
        default='INFO',
        choices=['DEBUG', 'ERROR', 'FATAL', 'INFO', 'WARN'])
    # Ignore unrecognized flags (e.g. extras injected by the training service).
    known_args, _ = arg_parser.parse_known_args()
    return known_args





def train_and_evaluate_withrepeateds(args):
    """Train the CNN identifier on non-repeated plus repeated-patient data.

    Loads heartbeat CSVs from GCS, builds combined train/test sets, trains
    `network`, exports the model to args.job_dir, and writes a CSV of
    (prediction, true label, heartbeat attributes) per test heartbeat to GCS.

    Args:
      args: argparse.Namespace from get_args(); uses job_dir and learning_rate.
    """

    # --- Non-repeated patients --------------------------------------------
    all_data_loaded=load_np_array_from_gs_dirs('ecg-data',['100k-data/china_private1/nonrepeateds/output' ])
    print('china private1 non-repeadteds data shape: '+str(all_data_loaded.shape))
    # Drop any row containing NaN, then view each row as a 12x305 record:
    # columns 0-299 hold the per-lead waveform, column 300 the patient label,
    # columns 300-304 per-heartbeat attributes (meaning of columns 301-304 is
    # not visible in this file -- TODO confirm against the export pipeline).
    all_data_nparr=all_data_loaded[~np.any(np.isnan(all_data_loaded) , axis=1)]
    all_data_nparr = all_data_nparr.reshape((all_data_nparr.shape[0], 12, 305))
    X=all_data_nparr[:,:,:300].reshape((-1, 12, 300, 1))
    y=all_data_nparr[:,0,300]
    heartbeat_attributes_all_nonrepeateds=all_data_nparr[:,0,300:305]

    # Stratified 80/20 split keeps every patient represented on both sides.
    train_inds, test_inds = next(StratifiedShuffleSplit(n_splits=2, test_size=0.2, random_state=15).split(X,y))
    trainX_nonrepeateds, testX_nonrepeateds = X[train_inds], X[test_inds]
    trainY_nonrepeateds, testY_nonrepeateds = y[train_inds], y[test_inds]
    heartbeat_attributes_test_nonrepeateds=heartbeat_attributes_all_nonrepeateds[test_inds]


    print('unique patients in trainY_nonrepeateds: '+str(len(np.unique(trainY_nonrepeateds)) ))
    print('unique patients in testY_nonrepeateds: '+str(len(np.unique(testY_nonrepeateds)) ))




    # --- Repeated patients (train/test already split in GCS) ---------------
    repeateds_train_data=load_np_array_from_gs_dirs('ecg-data',['100k-data/china_private1/repeateds/train' ])
    repeateds_test_data=load_np_array_from_gs_dirs('ecg-data',['100k-data/china_private1/repeateds/test' ])
    print('china private1 repeateds_train_data  shape: '+str(repeateds_train_data.shape))
    print('china private1 repeateds_test_data  shape: '+str(repeateds_test_data.shape))

    # Same NaN-filter and (N, 12, 305) reshape as for the non-repeated data.
    repeateds_train_data=repeateds_train_data[~np.any(np.isnan(repeateds_train_data) , axis=1)]
    repeateds_test_data=repeateds_test_data[~np.any(np.isnan(repeateds_test_data) , axis=1)]
    repeateds_train_data = repeateds_train_data.reshape((repeateds_train_data.shape[0], 12, 305))
    repeateds_test_data = repeateds_test_data.reshape((repeateds_test_data.shape[0], 12, 305))

    repeateds_train_data_X=repeateds_train_data[:,:,:300].reshape((-1, 12, 300, 1))
    repeateds_train_data_y=repeateds_train_data[:,0,300]

    repeateds_test_data_X=repeateds_test_data[:,:,:300].reshape((-1, 12, 300, 1))
    repeateds_test_data_y=repeateds_test_data[:,0,300]
    heartbeat_attributes_test_repeateds=repeateds_test_data[:,0,300:305]


    # Offset repeated-patient TEST labels by 1,000,000 so the label encoder
    # maps them to classes distinct from the same patients' training ids.
    # NOTE(review): this makes those test classes unpredictable by the model;
    # presumably intentional (e.g. evaluating later recordings of known
    # patients as unseen classes) -- confirm before trusting the reported
    # accuracy on the repeated subset.
    repeateds_test_data_y_plusmillion=repeateds_test_data_y+1000000



    print('unique values in repeateds_train_data_y: '+str(len(np.unique(repeateds_train_data_y)) ))
    print('unique patients in repeateds_test_data_y: '+str(len(np.unique(repeateds_test_data_y)) ))

    # Combined sets: non-repeated split + the pre-split repeated data.
    trainX=np.concatenate(( trainX_nonrepeateds,repeateds_train_data_X), axis=0)
    trainY=np.concatenate(( trainY_nonrepeateds,repeateds_train_data_y), axis=0)

    testX=np.concatenate(( testX_nonrepeateds,repeateds_test_data_X), axis=0)
    testY=np.concatenate(( testY_nonrepeateds,repeateds_test_data_y_plusmillion), axis=0)
    heartbeat_attributes_test_all=np.concatenate(( heartbeat_attributes_test_nonrepeateds,heartbeat_attributes_test_repeateds), axis=0)



    # Encode raw patient ids to contiguous 0..K-1 classes over train+test.
    le = preprocessing.LabelEncoder()
    le.fit(np.concatenate((trainY,testY)))
    train_y_encoded = le.transform(trainY)
    test_y_encoded = le.transform(testY)

    # NOTE(review): output size counts unique TEST labels only, yet the
    # encoder was fit on train+test; encoded train-only classes (repeated
    # patients' original ids) could exceed the softmax range -- verify this
    # against the actual label layout.
    num_of_people_in_data=np.unique(testY).shape[0]
    print('num_of_people_in_data: '+str(num_of_people_in_data))

    # Create the Keras Model
    keras_model = network( num_of_people_in_data,learning_rate=args.learning_rate)
    # Early stopping on validation loss; best weights checkpointed locally.
    callbacks = [EarlyStopping(monitor='val_loss', patience=8),ModelCheckpoint(filepath='best_model_100k_identification.h5', monitor='val_loss', save_best_only=True)]
    keras_model.fit(trainX, train_y_encoded,epochs=200,callbacks=callbacks, batch_size=1000,validation_data=(testX,test_y_encoded))

    export_path = os.path.join(args.job_dir, 'keras_export')
    tf.keras.experimental.export_saved_model(keras_model, export_path)
    print('Model exported to: {}'.format(export_path))

    # Per-heartbeat predictions: argmax over the softmax class probabilities.
    prediction_proba=keras_model.predict(testX)
    y_pred=np.argmax(prediction_proba,axis=1)
    y_pred= y_pred.reshape(y_pred.shape[0],1)
    test_y_encoded=test_y_encoded.reshape(test_y_encoded.shape[0],1)
    print('y_pred shape:'+str(y_pred.shape))
    print('test_y_encoded shape:'+str(test_y_encoded.shape))
    print('heartbeat_attributes_test_all shape:'+str(heartbeat_attributes_test_all.shape))

    # Write [prediction, true label, attributes...] rows to a timestamped
    # CSV on GCS for offline analysis.
    dateTimeObj = datetime.now()
    timestampStr = dateTimeObj.strftime("%Y-%m-%d-%H-%M-%S")
    test_results_attributes=np.hstack(( y_pred,test_y_encoded,heartbeat_attributes_test_all))
    dest = 'gs://ecg-data/test-results/iden-chinaprivate/identification_results_attributes_china_private'+timestampStr+'.csv'
    np.savetxt(file_io.FileIO(dest, 'w'), test_results_attributes)



    # age and gender
    # y_genders=all_data_nparr[:,0,303]
    # y_ages=all_data_nparr[:,0,302]
    #
    #
    #
    #
    # gender_missidentifications=y_genders[np.where(test_y_encoded!=y_pred)]
    # gender_identifications=y_genders[np.where(test_y_encoded==y_pred)]
    #
    # print('gender_missidentifications: '+str(np.unique(gender_missidentifications, return_counts=True)))
    # print('gender_identifications: '+str(np.unique(gender_identifications, return_counts=True)))
    #
    # ages_missidentifications=y_ages[np.where(test_y_encoded!=y_pred)]
    # ages_identifications=y_ages[np.where(test_y_encoded==y_pred)]
    #
    # print('age_missidentifications: '+str(np.unique(ages_missidentifications, return_counts=True)))
    # print('age_identifications: '+str(np.unique(ages_identifications, return_counts=True)))


def train_and_evaluate(args):
    """Train the CNN identifier on the full 'all/output' dataset.

    Loads heartbeat CSVs from GCS, makes a stratified 80/20 split, trains
    `network`, exports the model to args.job_dir, and writes the test-set
    results plus the Keras training history to GCS.

    Args:
      args: argparse.Namespace from get_args(); uses job_dir and learning_rate.
    """

    all_data_loaded=load_np_array_from_gs_dirs('ecg-data',['100k-data/china_private1/all/output' ])
    print('china private1 non-repeadteds data shape: '+str(all_data_loaded.shape))
    # Drop any row containing NaN, then view each row as a 12x305 record:
    # columns 0-299 hold the per-lead waveform, column 300 the patient label,
    # columns 300-304 per-heartbeat attributes (meaning of columns 301-304 is
    # not visible in this file -- TODO confirm against the export pipeline).
    all_data_nparr=all_data_loaded[~np.any(np.isnan(all_data_loaded) , axis=1)]
    all_data_nparr = all_data_nparr.reshape((all_data_nparr.shape[0], 12, 305))
    X=all_data_nparr[:,:,:300].reshape((-1, 12, 300, 1))
    y=all_data_nparr[:,0,300]
    heartbeat_attributes_all=all_data_nparr[:,0,300:305]

    # Stratified 80/20 split keeps every patient represented on both sides.
    train_inds, test_inds = next(StratifiedShuffleSplit(n_splits=2, test_size=0.2, random_state=100).split(X,y))
    trainX, testX = X[train_inds], X[test_inds]
    trainY, testY = y[train_inds], y[test_inds]
    heartbeat_attributes_test=heartbeat_attributes_all[test_inds]

    print('unique patients in trainY: '+str(len(np.unique(trainY)) ))
    print('unique patients in testY: '+str(len(np.unique(testY)) ))

    # Encode raw patient ids to contiguous 0..K-1 classes over train+test.
    le = preprocessing.LabelEncoder()
    le.fit(np.concatenate((trainY,testY)))
    train_y_encoded = le.transform(trainY)
    test_y_encoded = le.transform(testY)

    # With the stratified split, every patient appears in testY, so this
    # equals the total number of classes.
    num_of_people_in_data=np.unique(testY).shape[0]
    print('num_of_people_in_data: '+str(num_of_people_in_data))

    # Create the Keras Model
    keras_model = network(num_of_people_in_data,learning_rate=args.learning_rate)
    # Early stopping on validation loss; best weights checkpointed locally.
    callbacks = [EarlyStopping(monitor='val_loss', patience=6),ModelCheckpoint(filepath='best_model_100k_identification.h5', monitor='val_loss', save_best_only=True)]
    history=keras_model.fit(trainX, train_y_encoded,epochs=200,callbacks=callbacks, batch_size=1500,validation_data=(testX,test_y_encoded))

    export_path = os.path.join(args.job_dir, 'keras_export')
    tf.keras.experimental.export_saved_model(keras_model, export_path)
    print('Model exported to: {}'.format(export_path))

    # Per-heartbeat predictions: argmax over the softmax class probabilities.
    prediction_proba=keras_model.predict(testX)
    y_pred=np.argmax(prediction_proba,axis=1)
    y_pred= y_pred.reshape(y_pred.shape[0],1)
    test_y_encoded=test_y_encoded.reshape(test_y_encoded.shape[0],1)
    print('y_pred shape:'+str(y_pred.shape))
    print('test_y_encoded shape:'+str(test_y_encoded.shape))
    print('heartbeat_attributes_test_all shape:'+str(heartbeat_attributes_test.shape))

    # Write [prediction, true label, attributes...] rows to a timestamped
    # CSV on GCS for offline analysis.
    test_results_attributes=np.hstack(( y_pred,test_y_encoded,heartbeat_attributes_test))
    dateTimeObj = datetime.now()
    timestampStr = dateTimeObj.strftime("%Y-%m-%d-%H-%M-%S")

    dest = 'gs://ecg-data/test-results/original-iden/testset_results_china_private-'+timestampStr+'.csv'
    np.savetxt(file_io.FileIO(dest, 'w'), test_results_attributes)

    history_dest = 'gs://ecg-data/test-results/original-iden/model_training-history-'+timestampStr+'.json'
    history_dict = history.history
    # NOTE(review): str(history_dict) serializes the dict's repr as one JSON
    # string, not a JSON object -- possibly to sidestep non-serializable
    # float32 values; confirm downstream readers expect this format.
    json.dump(str(history_dict), file_io.FileIO(history_dest, 'w'))




if __name__ == '__main__':
    # Script entry point: parse flags, configure TF logging, run training.
    parsed_args = get_args()
    tf.compat.v1.logging.set_verbosity(parsed_args.verbosity)
    train_and_evaluate(parsed_args)
