# -*- encoding: utf8 -*- 
"""
License: Apache-2.0
Author:  Wentongxin
E-mail: flybywind@foxmail.com
"""

import os
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import logging
from config import cfg
from utils import *
from FadeNet import FadeNet
import app_logger 

# Module-level logger for this script; app_logger.init configures the
# handlers/formatting and applies the log level taken from the config flags.
logger = logging.getLogger("main") 
app_logger.init(cfg.log_lvl)

def init_env():
    """Ensure the image output directory (``cfg.imgdir``) exists.

    Fixes the original's bare ``path.exists`` reference (it relied on the
    ``from utils import *`` wildcard exporting ``path``; the rest of the
    file consistently uses ``os.path``). ``makedirs`` with ``exist_ok=True``
    also creates missing parent directories and avoids the check-then-create
    race of the former ``exists``/``mkdir`` pair.
    """
    os.makedirs(cfg.imgdir, exist_ok=True)
        
def main(_):
    """Train FadeNet on the CelebA attribute data.

    Builds the graph, optionally repartitions the raw images on disk,
    then runs the training loop with periodic summary writing and
    validation, saving a final checkpoint when done.

    Args:
        _: unused argv list passed in by ``tf.app.run()``.
    """
    init_env()
    # cfg is a tf.app.flags-style FLAGS object; "__flags" holds the
    # parsed flag values (legacy TF 1.x internals).
    logger.info("argument : %r" % vars(cfg)["__flags"])
    fade_net = FadeNet()
    logger.info('Graph loaded')
    sv = tf.train.Supervisor(graph=fade_net.graph,
                             logdir=cfg.logdir,
                             # checkpoint automatically every cfg.save_freq minutes
                             save_model_secs=cfg.save_freq * 60,
                             checkpoint_basename="checkpoint",
                             # summaries are written manually in the loop below,
                             # so the Supervisor's automatic saving is disabled
                             save_summaries_secs=0)
    if cfg.init_data:
        # One-time preprocessing: split the raw image set into fixed-size
        # blocks on disk, keeping only the listed CelebA attributes.
        repartition_images_disk(img_num_one_block=cfg.img_part_num,
            attr_list=(
            "Arched_Eyebrows","Bags_Under_Eyes","Bald","Bangs",
            "Big_Lips","Black_Hair","Blond_Hair","Brown_Hair","Bushy_Eyebrows","Chubby","Eyeglasses",
            "Goatee","Gray_Hair","High_Cheekbones","Male","Mouth_Slightly_Open","Mustache","Narrow_Eyes",
            "No_Beard","Pale_Skin","Pointy_Nose","Receding_Hairline","Smiling","Straight_Hair","Wavy_Hair","Young"))
        logger.info("init data done ...")

    # cfg.sample_splits is two comma-separated fractions, e.g. "0.8,0.9":
    # [0, s0) -> train, [s0, s1) -> validation, [s1, 1) -> test.
    splits = [float(x) for x in cfg.sample_splits.split(",")]
    train_sample = CelebSample(start=0., end=splits[0],
                               batch_size=cfg.batch_size)
    valid_sample = CelebSample(start=splits[0], end=splits[1],
                               batch_size=cfg.batch_size,
                               shuffle=True, is_train=False)
    test_sample = CelebSample(start=splits[1], end=1.,
                              batch_size=cfg.batch_size,
                              shuffle=False, is_train=False)

    logger.info("size of valid dataset: %d" % len(valid_sample))

    # makedirs(exist_ok=True): also creates missing parents and avoids the
    # check-then-create race of the former exists()/mkdir() pair.
    os.makedirs(cfg.model_dir, exist_ok=True)

    config = tf.ConfigProto()
    # Grow GPU memory on demand instead of grabbing it all up front.
    config.gpu_options.allow_growth = True
    with sv.managed_session(config=config) as sess:
        num_batch = len(train_sample)
        num_steps = num_batch * cfg.epoch
        for step in tqdm(range(num_steps), total=num_steps, ncols=70, leave=False, unit='b'):
            if sv.should_stop():
                break
            # x: image batch; y: per-image attribute labels; yy: labels
            # expanded to matrix form for the mat_labels placeholder.
            x, y = train_sample[step]
            yy = expand_attr2mat(y)
            sess.run(fade_net.train_op, {fade_net.X: x, fade_net.labels: y,
                                         fade_net.mat_labels: yy,
                                         fade_net.is_training: True})

            if step % cfg.train_sum_freq == 0:
                logger.info("\nget summary at step: %d" % (step))
                # Re-run on the same batch with is_training=False so the
                # summary reflects inference-mode behaviour.
                feed_dict = {fade_net.X: x, fade_net.labels: y,
                             fade_net.mat_labels: yy,
                             fade_net.is_training: False}
                summary_str = sess.run(fade_net.train_summary, feed_dict)
                sv.summary_writer.add_summary(summary_str, step)

            if step % cfg.valid_sum_freq == 0:
                logger.info("\nvalidation at step: %d" % (step))
                fade_net.valid_model(sess, valid_sample, step)

            # Drop batch references promptly to keep peak memory down
            # before the next batch is loaded.
            del x
            del y
            del yy

        # Final explicit save, independent of the periodic auto-save.
        sv.saver.save(sess, os.path.join(cfg.model_dir, 'fade_model_final'))

    logger.info('Training done')


if __name__ == "__main__":
    # tf.app.run() parses the command-line flags and then invokes main(argv).
    tf.app.run()
