# -*- coding: utf-8 -*-
"""
# -*- coding: utf-8 -*-
Created on Sat Feb 20 18:42:13 2021
this code is modified from mnist_cnn for detecting cat and dog
@author: LI
"""

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import matplotlib.pyplot as plt
import glob
import time
import sys
import numpy as np
from skimage import io, transform
#from tensorflow.python.framework import graph_util
import cv2 as cv
import os
import os.path
import copy
import stat
import shutil
import pickle

#from tensorflow.python.ops.gen_linalg_ops import BatchSelfAdjointEig


import imageReady
from imageReady import generateDataSet,_show_time, slipImgChannel, showLoss, showAccuracy, one_hot, smoothVal

import mnist8
import config as cfg

#from tensorflow.examples.tutorials.mnist import input_data # for data
# prepare data and tf.session
#mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# CNN input size, read from the project config object
config = cfg.Config()
row =  config.row   # input image height
col = config.col    # input image width
c = config.c        # input channel count
class_num = 2       # 0: not sim     1:sim
print("row: ", row, "col: ", col, "c: ", c)
time.sleep(1)

# Define the two input placeholders: image batch and one-hot label batch
x = tf.placeholder(tf.float32, shape=[None, row, col, c], name='x-input')
y = tf.placeholder(tf.int64, shape=[None, class_num], name='y-input')

# keep_prob is the dropout keep probability (feed 1.0 to disable dropout)
keep_prob = tf.placeholder(tf.float32, name='keep_prob')


# Batch sizes for training and for validation batches
batch_size = config.batch_size
batch_size_test = config.batch_size_valid
# Training hyper-parameters
epochs = config.epoch
learnRate = config.learnRate
keepPro_train = config.keepPro_train
optimizer = config.optimizer  # optimizer name, e.g. "RMSProp" or "Adam"

# Dataset paths (hard-coded inside the session block below)

# 创建会话
# Create the TF1 session; graph build, training, and model export all run inside it.
with tf.Session() as sess:
    # Build the CNN graph. 8.27 is presumably a model version tag -- TODO confirm against mnist8.CNN_NET
    model = mnist8.CNN_NET(x, y, keep_prob, learnRate, class_num, optimizer, 8.27)
    model.buildCNN_net28x28_5x5()
# >>>   read img data (dataset paths are hard-coded)
    imgs, labels = generateDataSet("/home/user/ljl/cnn/merge0", row, col)
    imgs_test, labels_test = generateDataSet("/home/user/ljl/cnn/test", row, col)

    # imgs, labels = generateDataSet("/home/user/ljl/cnn/merge", row, col)
    # imgs_test, labels_test = generateDataSet("/home/user/ljl/cnn/valid", row, col)
    
    # Sanity check: the network expects 2-channel images.
    # NOTE(review): 2 is hard-coded here instead of comparing to config channel `c` -- confirm they always agree.
    if imgs.shape[3] != 2:
        print("imgs channel error: ", imgs.shape)
        sys.exit(0)
    # Shuffle the training set
    num_example = imgs.shape[0]
    arr = np.arange(num_example)  # arr = [0,1,2,........]
    np.random.shuffle(arr)
    imgs = imgs[arr]
    labels = labels[arr]  # apply the same shuffled order to the labels

    # Shuffle the validation set the same way
    num_example = imgs_test.shape[0]
    arr = np.arange(num_example)  # arr = [0,1,2,........]
    np.random.shuffle(arr)
    imgs_test = imgs_test[arr]
    labels_test = labels_test[arr]  # apply the same shuffled order to the labels
    
    print('\n\n\n start sess')
    print('\n *** model version is:  %f *** ' % model.version.eval())
    # NOTE(review): num_example was overwritten by the validation-set shuffle just above,
    # so this actually prints the *validation* set size, not the training-set size.
    print('\n image data size is: %d \n' % num_example)
    print(arr[:10])
# <<<

    # Record this run's hyper-parameters in a human-readable text file.
    with open("./readme.txt", "w") as Ftxt:
        Ftxt.write("this cnn model param:\n")
        #Ftxt.write(" cnn input size: [1-, 28, 28, 2], kenel size: 3x3xn\n cnn output size: 2 class, 0:not sim,  1: sim")
        Ftxt.write(" cnn input size: [" + str(len(imgs)) + ", " + str(row) + ", " + str(col) + ", " + str(c) + "] \n"+ 
                    " kenel size: 3x3xn\n cnn output size: 2 class, 0:not sim,  1: sim\n batch_size: "+str(batch_size)+
                    "\n learnRate: " + str(learnRate) + "\n epochs: " + str(epochs)+ "\n keepPro: " + str(keepPro_train)
                     )


    # Histories for the loss/accuracy curves; *_smooth hold values smoothed by smoothVal.
    cost = []
    cost_smooth = []
    cost_test = []
    cost_test_smooth = []
    accList_train = []
    accList_valid = []
    lastVal1 = -1   # previous smoothed training loss (-1 = no previous value yet)
    lastVal2 = -1   # previous smoothed validation loss
    #plt.ion()
    plt.figure("loss and acc")
    start_time = time.time()
    sess.run(tf.global_variables_initializer())  # initialize all graph variables
    print("init value cost time: ", time.time() - start_time, " s")

    for epoch in range(epochs):  # one pass over the training data per epoch
        print("迭代： ", epoch+1, " 次")
        valid_idx = 0
        for batch in range(0, len(imgs), batch_size):
            batch_xs = imgs[batch:batch + batch_size]
            #batch_ys = one_hot(labels[batch:batch + batch_size])
            batch_ys = labels[batch:batch + batch_size]
            batch_xs = np.array(batch_xs)
            #sess.run(model.train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: keepPro_train})  # run one training step
            # One training step; also fetch loss, accuracy and predictions for logging.
            _, cost_value, train_acc, pre = sess.run([model.train_step, model.cross_entropy, model.accuracy, model.prediction], 
                                                     feed_dict={x: batch_xs, y: batch_ys, keep_prob: keepPro_train})
            cost.append(cost_value)
            cost_value = smoothVal(lastVal1, cost_value)
            cost_smooth.append(cost_value)
            lastVal1 = cost_value

            accList_train.append(train_acc)
            
            print(pre.shape, pre[0])
            print('loss cost: ', cost_value)
            print('Training Accuracy=           '+str(train_acc))
            

            # Validation: evaluate one batch with dropout disabled (keep_prob=1.0),
            # cycling through the validation set across training steps.
            valid_idx = valid_idx % len(imgs_test)
            batch_xs_test = imgs_test[valid_idx: valid_idx + batch_size_test]
            batch_xs_test = np.array(batch_xs_test)
            batch_ys_test = labels_test[valid_idx:valid_idx + batch_size_test]
            loss_test, valid_acc = sess.run([model.cross_entropy, model.accuracy] , feed_dict={x: batch_xs_test, y: batch_ys_test, keep_prob: 1.0})

            cost_test.append(loss_test)
            loss_test = smoothVal(lastVal2, loss_test)
            cost_test_smooth.append(loss_test)
            lastVal2 = loss_test

            accList_valid.append(valid_acc)
            valid_idx += batch_size_test
            # Live-plot loss and accuracy curves
            #showLoss(cost, cost_test)
            
            # NOTE(review): without plt.clf() the curves are re-plotted on top of the old
            # lines every batch, so the figure accumulates artists and redrawing gets
            # progressively slower over a long run.
            #plt.clf()
            plt.subplot(221)
            plt.plot(cost, 'g-', label='train_loss')
            plt.plot(cost_test, 'b-', label='test_loss')
            
            plt.title('model loss')
            plt.xlabel('epoch')
            plt.ylabel('loss value')
            plt.legend(['train', 'valid'], loc='upper left')
            plt.tight_layout(1.5)  # positional arg sets the padding (pad=1.5)

            # Smoothed loss curves
            plt.subplot(223)
            plt.plot(cost_smooth, 'g-', label='train_loss')
            plt.plot(cost_test_smooth, 'b-', label='test_loss')
            
            plt.title('model smooth loss')
            plt.xlabel('epoch')
            plt.ylabel('loss value')
            plt.legend(['train'], loc='upper left')
            
            # Accuracy curves
            plt.subplot(222)
            plt.plot(accList_train, 'g-', label='train_accuracy')
            plt.plot(accList_valid, 'b-', label='valid_accuracy')
            
            plt.title('model accuracy')
            plt.xlabel('epoch')
            plt.ylabel('accuracy value')
            plt.legend(['train', 'valid'], loc='upper left')
            plt.pause(0.01)  # let the GUI redraw

        # Periodic save every 200 epochs. NOTE(review): also fires at epoch 0 (0 % 200 == 0).
        if (epoch % 200 == 0):
        #save model ckpt
            #print(delete_file(save_path))  # detect the file_dir, delete it if it exits
            ckpt_file_path = "./model"
            # NOTE(review): dirname(abspath("./model")) resolves to the *parent* directory
            # (the cwd), not ./model itself -- the ./model directory may still be missing
            # when saver.save runs. TODO confirm intended behavior.
            path_ = os.path.dirname(os.path.abspath(ckpt_file_path))
            if os.path.isdir(path_) is False:
                os.makedirs(path_)
            saver = tf.compat.v1.train.Saver(max_to_keep=3)
            saver.save(sess, ckpt_file_path + '/model.ckpt', write_meta_graph=True)
            
        #save model pb (frozen graph with variables folded into constants)
            # NOTE(review): "x-input"/"y-input"/"keep_prob" are placeholders -- listing them
            # as output nodes is unusual but keeps them in the frozen graph. Verify the
            # consumer of model.pb expects these node names.
            graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["x-input","y-input","output","keep_prob", "version"])
            tf.io.write_graph(graph, './model_pb', 'model.pb',as_text= False)

            # Snapshot the raw loss curves to disk
            plt.clf()
            plt.plot(cost, 'g-', label='train_loss')
            plt.plot(cost_test, 'b-', label='test_loss')
            
            plt.title('model loss')
            plt.xlabel('epoch')
            plt.ylabel('loss value')
            plt.legend(['train', 'valid'], loc='upper left')
            plt.savefig("./loss.jpg")

    #end_time = time.clock()
    # Total wall-clock training time, split into h/m/s for display
    cost_time = time.time() - start_time
    h, m, s = _show_time(cost_time)
    
    
    # Final checkpoint save after the last epoch
    #print(delete_file(save_path))  # detect the file_dir, delete it if it exits
    save_path = "./model"
    ckpt_file_path = save_path
    path_ = os.path.dirname(os.path.abspath(ckpt_file_path))  # parent dir, see periodic-save note
    if os.path.isdir(path_) is False:
        os.makedirs(path_)
    saver = tf.compat.v1.train.Saver(max_to_keep=3)
    saver.save(sess, ckpt_file_path + '/model.ckpt', write_meta_graph=True)
    
#save final frozen-graph pb
    save_path_pb = "./model_pb"
    path_ = os.path.dirname(os.path.abspath(save_path_pb))
    if os.path.isdir(path_) is False:
        os.makedirs(path_)
    graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["x-input","y-input","output","keep_prob","version"])
    tf.io.write_graph(graph, save_path_pb, 'model.pb',as_text= False)

    # Final loss-curve snapshot
    plt.clf()
    plt.plot(cost, 'g-', label='train_loss')
    plt.plot(cost_test, 'b-', label='test_loss')
    
    plt.title('model loss')
    plt.xlabel('epoch')
    plt.ylabel('loss value')
    plt.legend(['train', 'valid'], loc='upper left')
    plt.savefig("./loss.jpg")


    sys.exit(0)

'''

            cv.imshow("aaa", batch_left[0])
            cv.waitKey(0)
            cv.destroyAllWindows()
            sys.exit()
'''