import os
import cv2
import imutils
import argparse
import numpy as np 
import matplotlib.pyplot as plt
import tensorflow as tf 
import time
import model
import data_loader_process
from imutils import paths
from keras.utils import np_utils
from keras.preprocessing.image import img_to_array
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

def averagenum(num_list):
    """Return the arithmetic mean of ``num_list``.

    Uses the built-in ``sum``/``len`` instead of a manual index loop.
    Like the original, raises ``ZeroDivisionError`` for an empty sequence.
    """
    return sum(num_list) / len(num_list)

def into_batch(data, label, batch_size, shuffle):
    """Split paired data/label arrays into mini-batches.

    Args:
        data: array whose first axis indexes samples.
        label: array with the same first-axis length/order as ``data``.
        batch_size: number of samples per full batch.
        shuffle: if True, apply one shared random permutation to both
            arrays so (data, label) pairs stay aligned.

    Returns:
        ``(batches_data, batches_labels, batch_count)`` — parallel lists of
        array batches and the number of batches. Every batch holds
        ``batch_size`` samples except a possible smaller final batch
        containing the remainder.

    Fixes over the original:
        * no empty trailing batch (and inflated count) when ``len(data)``
          is an exact multiple of ``batch_size``;
        * no ``np.array_split(..., 0)`` crash when ``len(data) < batch_size``.
    """
    if shuffle:
        perm = np.random.permutation(data.shape[0])
        data = data[perm]
        label = label[perm]

    full_batches = len(data) // batch_size
    cut = full_batches * batch_size

    batches_data = list(np.array_split(data[:cut], full_batches)) if full_batches else []
    batches_labels = list(np.array_split(label[:cut], full_batches)) if full_batches else []

    # Keep any leftover samples as one final, smaller batch.
    if cut < len(data):
        batches_data.append(data[cut:])
        batches_labels.append(label[cut:])

    return batches_data, batches_labels, len(batches_data)


def train(train_data,test_data,train_label,test_label,batch_size_value,total_eopches_value,weight):
    """Build the CNN graph via ``model.inference`` and train it.

    Args:
        train_data / test_data: image arrays shaped [N, 28, 28, 1]
            (shape fixed by the X placeholder below).
        train_label / test_label: one-hot labels shaped [N, 2].
        batch_size_value: mini-batch size used for both train and test.
        total_eopches_value: number of epochs to run.
        weight: ``pos_weight`` for tf.nn.weighted_cross_entropy_with_logits
            (class-imbalance compensation from the data loader).

    Side effects:
        Prints progress every 100 train batches and per-epoch test accuracy;
        saves a checkpoint after every epoch to the module-level
        ``modle_save_path`` (NOTE: relies on the global set in __main__).
    """
    X = tf.placeholder(tf.float32, [None, 28, 28, 1], name='X')
    Y = tf.placeholder(tf.float32, [None, 2])

    global_step = tf.train.get_or_create_global_step()
    total_eopches = total_eopches_value

    prediction, W1, W2, wfc1, wfc2 = model.inference(X)

    # L2 weight decay on the conv and fully-connected kernels.
    # NOTE(review): `regularization` is computed but never added to `loss` —
    # confirm whether it was meant to be included in the objective.
    regularizer = tf.contrib.layers.l2_regularizer(0.0001)
    regularization = regularizer(W1) + regularizer(W2) + regularizer(wfc1) + regularizer(wfc2)

    # Weighted cross-entropy: `weight` scales the positive class.
    loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(
        logits=prediction, targets=Y, pos_weight=weight))
    if_correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(if_correct, tf.float32))
    # Renamed from `train` so the op does not shadow this function's name.
    train_op = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.9,
                                      beta2=0.999, epsilon=1e-08).minimize(loss)

    tf.add_to_collection('prediction', prediction)

    # Build the Saver once, after the full graph exists. The original
    # constructed a new Saver inside the epoch loop, adding duplicate
    # save/restore ops to the graph on every epoch.
    saver = tf.train.Saver()

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())

        train_loss_per_bat = []
        train_acc_per_bat = []

        for epoch in range(1, total_eopches + 1):

            # Fresh accumulator each epoch so the printed figure is this
            # epoch's mean test accuracy, not a running mean over all
            # epochs (bug in the original).
            average_test_acc = []

            # Re-shuffle and re-batch both splits every epoch.
            test_batches_data, test_batches_labels, test_batch_count = into_batch(
                test_data, test_label, batch_size_value, shuffle=True)
            train_batches_data, train_batches_labels, train_batch_count = into_batch(
                train_data, train_label, batch_size_value, shuffle=True)

            for batch_id in range(int(train_batch_count)):
                data_per_bat = train_batches_data[batch_id]
                labels_per_bat = train_batches_labels[batch_id]

                result_per_bat = sess.run([train_op, loss, accuracy],
                                          feed_dict={X: data_per_bat, Y: labels_per_bat})

                train_loss_per_bat.append(result_per_bat[1])
                train_acc_per_bat.append(result_per_bat[2])

                if (batch_id + 1) % 100 == 0:
                    print('epoch: %d    batch:%d    in %d'%(epoch,(batch_id + 1),train_batch_count))
                    print('train_loss:%f  train_accuracy:%f'%(result_per_bat[1],result_per_bat[2]))

            # Evaluate on the held-out split (no train_op in this run).
            for batch_id in range(int(test_batch_count)):
                data_per_bat = test_batches_data[batch_id]
                labels_per_bat = test_batches_labels[batch_id]

                result_per_bat = sess.run(accuracy, feed_dict={X: data_per_bat, Y: labels_per_bat})
                average_test_acc.append(result_per_bat)

            print('epoch: %d    average_test_accuracy: %f'%(epoch,averagenum(average_test_acc)))

            # Checkpoint every epoch to the path supplied on the CLI.
            saver.save(sess, modle_save_path)

if __name__ == '__main__':

    # CLI: the dataset root and model output path are mandatory,
    # batch size and epoch count fall back to defaults.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True, help='path to input dataset of faces')
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--total_epoches', type=int, default=40)
    parser.add_argument('--model_path', required=True, help='path to output model')
    args = parser.parse_args()

    # Class sub-directories sit at fixed locations under the dataset root.
    pos_dir = args.dataset + '/positives/positives7'
    neg_dir = args.dataset + '/negatives/negatives7'

    # train() reads this module-level name directly when saving checkpoints,
    # so it must keep exactly this name.
    modle_save_path = args.model_path

    # Load and split the dataset; classWeight feeds the weighted loss.
    train_data, test_data, train_label, test_label, classWeight = \
        data_loader_process.data_loader_process(pos_dir, neg_dir)

    train(train_data, test_data, train_label, test_label,
          args.batch_size, args.total_epoches, classWeight)

    
    



