import os
import time
import logging
import argparse
import tensorflow as tf
from models.network import BaseModel
from data.data_process import dataiter

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Create the log directory up front: basicConfig opens the file in 'w' mode
# and raises FileNotFoundError if './logs/log' does not exist yet.
os.makedirs('./logs/log', exist_ok=True)

# Root logger writes bare messages (no timestamps/levels) to the train log,
# truncating any previous run's file.
logging.basicConfig(level=logging.INFO, filename='./logs/log/train.txt', filemode='w', format='%(message)s')
logger = logging.getLogger(__name__)


# Command-line options; --traindata points at the training data directory.
parse = argparse.ArgumentParser()
parse.add_argument('--traindata', type=str, default='../data')

args = parse.parse_args()


if __name__ == '__main__':
    # Allocate GPU memory on demand rather than grabbing it all at startup.
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False,
                            gpu_options=gpu_options)
    # BUG FIX: tf.Session's first positional parameter is `target` (an
    # execution-engine URL string), not the config proto — the original
    # tf.Session(config) never applied the session config. Pass by keyword.
    with tf.Session(config=config) as sess:
        # Build the model graph before initializing its variables.
        model = BaseModel(args)

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=10)

        # Ensure the checkpoint directory exists before the first save;
        # Saver.save fails if the parent directory is missing.
        os.makedirs('./logs/checkpoint', exist_ok=True)

        stime = time.time()
        for epoch in range(10):
            for data_batch in dataiter():
                # train_step is expected to return at least 'loss' and the
                # current 'global_step' — TODO confirm against BaseModel.
                results = model.train_step(sess, data_batch)

                loss = results['loss']
                train_step = results['global_step']

                if train_step % 50 == 0:
                    # Lazy %-args: formatting is skipped if INFO is disabled.
                    logger.info('train_step: %d, loss: %.4f, spend_time: %.4f',
                                train_step, loss, time.time() - stime)
                    stime = time.time()

            # One checkpoint per epoch; Saver keeps at most the last 10.
            saver.save(sess, './logs/checkpoint/model-'+str(epoch)+'.ckpt')