# _*_ coding:utf-8 _*_
"""
__Author__    :  yuan
__Date__      :  2020/4/5
__File__      :  train.py
__Desc__      :
"""
import os
import time
import tensorflow as tf
from tensorflow.python.framework.errors_impl import OutOfRangeError
import numpy as np
import tensorflow.summary as sum
from prynet import pry_stn_seresnet
from tensorflow.python.saved_model.simple_save import simple_save

from net import se_resnet
import tensorflow.contrib.slim as slim
from data import get_data_iter,mixup,binary_focal_loss

os.environ['CUDA_VISIBLE_DEVICES'] = "3"

def num_combine(n, m):
    """Return the binomial coefficient C(n, m) — the number of ways to
    choose ``m`` items from ``n``.

    The original implementation computed three separate recursive
    factorials (O(n) recursion depth each, RecursionError risk for large
    n); ``math.comb`` is an exact, iterative stdlib replacement. It also
    correctly returns 0 when m > n, where the factorial formula with a
    clamped base case produced a meaningless nonzero value.
    """
    from math import comb  # local import keeps this edit self-contained
    return comb(n, m)

def batch_data_mixup(batch_datas, alpha=0.5):
    """Augment a batch with pairwise mixup samples.

    Parameters
    ----------
    batch_datas : [data, labels] where ``data`` has shape (batch, ...) and
        ``labels`` has shape (batch, 2) — assumed from the zeros-array
        construction below; confirm against the data pipeline.
    alpha : beta-distribution parameter forwarded to ``mixup``.

    Returns
    -------
    [data, labels] of length ``batch + C(batch, 2)``: the original batch
    first, followed by one mixed sample per unordered pair.
    """
    batch_data, batch_label = batch_datas
    batch = batch_data.shape[0]
    combine_num = num_combine(batch, 2)
    return_batch = np.zeros((combine_num + batch, *batch_data[0].shape), dtype=batch_data[0].dtype)
    return_label = np.zeros((combine_num + batch, 2), dtype=batch_label[0].dtype)
    count = 0
    for i in range(batch):
        for j in range(i + 1, batch):
            new_data, new_label = mixup(batch_data[i], batch_label[i],
                                        batch_data[j], batch_label[j], alpha)
            return_batch[batch + count] = new_data
            return_label[batch + count] = new_label
            # BUG FIX: count was never incremented, so every pair
            # overwrote slot `batch` and the remaining rows stayed zero.
            count += 1
    return_batch[:batch] = batch_data
    return_label[:batch] = batch_label
    return [return_batch, return_label]


def get_nb_files(directory):
    """Recursively count all files under ``directory``.

    Returns 0 when the path does not exist.
    """
    if not os.path.exists(directory):
        return 0
    return sum(len(filenames) for _, _, filenames in os.walk(directory))

def train_model():
    """Train the pyramid-STN SE-ResNet binary classifier (TF1 graph mode).

    Builds placeholders, network, loss, accuracy and optimizer, then loops
    over the dataset iterator: applies mixup augmentation to each batch,
    logs summaries, prints a running loss/accuracy, and checkpoints the
    session every 200 steps (and on interrupt / at the end) to ./models.
    """
    #img_root = r"/data/soft/javad/COCO/animals/animals"
    img_root = r"/data/soft/javad/COCO/convd/train"
    logdir = r"./logs"
    svlogdir=r"./svlogs"
    # NOTE(review): `iter` shadows the builtin; presumably the number of
    # epochs passed to the data iterator — confirm against get_data_iter.
    iter = 1
    batch = 4
    H = 600
    W = 600
    numclass=2
    # Print the averaged loss/accuracy every `showInverval` steps.
    showInverval = 2
    # Mixup beta-distribution parameter forwarded to batch_data_mixup.
    alpha=5
    if not os.path.exists(logdir):
        os.mkdir(logdir)
    inputs = tf.placeholder(tf.float32, [None,H,W, 3])
    labels = tf.placeholder(tf.uint8, [None, numclass])
    # logit = se_resnet(inputs, blocks=[3, 4, 6, 3],numclass=numclass)
    logit = pry_stn_seresnet(inputs, blocks=[3, 4, 6, 3],numclass=numclass)

    lossop = slim.losses.sigmoid_cross_entropy(logit, labels)

    # Accuracy via argmax comparison; note the model is trained with a
    # sigmoid loss but evaluated through a softmax here.
    prop = slim.softmax(logit)
    correct_prediction = tf.equal(tf.argmax(prop, 1), tf.argmax(labels, 1))  # predict_op = tf.argmax(y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

    tf.summary.scalar("loss", lossop)
    global_step = tf.Variable(0,trainable=False)
    # Decay lr by 0.1 every 200 steps (smooth decay — no staircase flag).
    lr = tf.train.exponential_decay(0.0001,global_step,200,0.1)
    #train_op = slim.train.GradientDescentOptimizer(learning_rate=0.001).minimize(lossop,global_step)
    opt = slim.train.GradientDescentOptimizer(learning_rate=lr)
    train_op = slim.learning.create_train_op(lossop, opt, global_step)  # REW: slim needs its train op created via this dedicated helper

    train_iter = get_data_iter(img_root, iter=iter, batch=batch)
    # Assumes ~1700 samples per epoch — TODO confirm against the dataset.
    total = iter * 1700
    n_rounds = total // batch

    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    save_dir = "./models"
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    model_name = "senet_{}"

    i = 0
    lossv = 0.
    accv = 0.
    # Pin CPU thread pools; allow_growth avoids grabbing all GPU memory.
    cpu_c=3
    cpu_config = tf.ConfigProto(intra_op_parallelism_threads=cpu_c,
                                inter_op_parallelism_threads=cpu_c,
                                device_count={'CPU': cpu_c})
    cpu_config.gpu_options.allow_growth = True
    # sv = tf.train.Supervisor(logdir=svlogdir, save_summaries_secs=0, saver=None)

    with tf.Session(config=cpu_config) as sess:
        sess.run(init)
        # NOTE(review): `sum` is the tensorflow.summary alias imported at the
        # top of the file (it shadows the builtin); the `session` kwarg
        # requires a recent TF 1.x — confirm against the pinned version.
        loger = sum.FileWriter(logdir, session=sess)
        merger = tf.summary.merge_all()

        while True:
            try:
                batch_data = sess.run(train_iter)
                batch_data = batch_data_mixup(batch_data,alpha=alpha)
                loss, _, summary, acc = sess.run([lossop, train_op, merger, accuracy],
                                                 feed_dict={inputs: batch_data[0], labels: batch_data[1]})
                loger.add_summary(summary, i)
                lossv += loss
                accv += acc
                # TODO: comment said "print once every 200 steps" but
                # showInverval is 2 — confirm which is intended.
                if (i + 1) % showInverval == 0:
                    print(
                        f"[{i + 1}/{n_rounds}] Loss: {lossv / showInverval:.5f}, Acc: {accv / showInverval}")
                    lossv = 0.
                    accv = 0.
                # Checkpoint every 200 steps, timestamped.
                if (i + 1) % 200 == 0:
                    suffix=time.strftime('%Y_%m_%d_%H_%M',time.localtime(time.time()))
                    saver.save(sess, f"{save_dir}//{model_name.format(suffix)}")
                i += 1
            except OutOfRangeError:
                # Dataset iterator exhausted — training finished.
                break
            except KeyboardInterrupt:
                # NOTE(review): saves a checkpoint but does not break, so the
                # loop resumes after Ctrl-C — confirm this is intentional.
                suffix = time.strftime('%Y_%m_%d_%H_%M', time.localtime(time.time()))
                saver.save(sess, f"{save_dir}//{model_name.format(suffix)}")
        # Final checkpoint after the training loop ends.
        suffix = time.strftime('%Y_%m_%d_%H_%M', time.localtime(time.time()))
        saver.save(sess, f"{save_dir}//{model_name.format(suffix)}")
    # Save/load via SavedModel — variables, graph and graph metadata; the simplest API:
    # simple_save(sess,
    #             f"{save_dir}",
    #                  inputs={"x": x, "y": y},
    #                  outputs={"z": z})


# Script entry point: run training when executed directly.
if __name__ == "__main__":
    train_model()
