
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8

import os
import numpy as np
import shutil
import datetime
import tensorflow as tf
from model import BigGAN_128
from cfg import make_config
from utils import *
from npu_bridge.npu_init import *

flags = tf.flags
FLAGS = flags.FLAGS

## Required parameters
desc = "Tensorflow implementation of BigGAN"

flags.DEFINE_string("result", "result", "The result directory where the model checkpoints will be written.")
flags.DEFINE_string("phase", "train", "train or test ?")
flags.DEFINE_string("dataset", "BigGAN", "[mnist / cifar10 / custom_dataset]")
flags.DEFINE_string("obs_dir", "obs://big-gan-tensorflow/log/", "obs result path, not need on gpu and apulis platform")

# NOTE(review): epoch=1 / iteration=2 look like smoke-test values (full runs
# previously used epoch=3 / iteration=3) — confirm before a real training run.
flags.DEFINE_integer("epoch", 1, "The number of epochs to run")
flags.DEFINE_integer("iteration", 2, "The number of training iterations")
flags.DEFINE_integer("batch_size", 32, "The size of batch per gpu")
flags.DEFINE_integer("ch", 96, "base channel number per layer")

flags.DEFINE_integer("print_freq", 1000, "The number of image_print_freqy")
flags.DEFINE_integer("save_freq", 1000, "The number of ckpt_save_freq")

# TTUR-style learning rates: discriminator learns faster than the generator.
flags.DEFINE_float("g_lr", 0.00005, "learning rate for generator")
flags.DEFINE_float("d_lr", 0.0002, "learning rate for discriminator")

flags.DEFINE_float("beta1", 0.0, "beta1 for Adam optimizer")
flags.DEFINE_float("beta2", 0.9, "beta2 for Adam optimizer")
flags.DEFINE_float("moving_decay", 0.9999, "moving average decay for generator")

flags.DEFINE_integer("z_dim", 128, "Dimension of noise vector")
flags.DEFINE_boolean("sn", True, "using spectral norm")

flags.DEFINE_string("gan_type", "hinge", "[gan / lsgan / wgan-gp / wgan-lp / dragan / hinge]")
flags.DEFINE_float("ld", 10.0, "The gradient penalty lambda")

flags.DEFINE_integer("n_critic", 2, "The number of critic")

flags.DEFINE_integer("img_size", 128, "The size of image")
flags.DEFINE_integer("sample_num", 64, "The number of sample images")

flags.DEFINE_integer("test_num", 10, "The number of images generated by the test")

flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints")
flags.DEFINE_string("result_dir", "results", "Directory name to save the generated images")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the samples on training")

## Other parameters
flags.DEFINE_float("learning_rate", 1e-3, "The initial learning rate for Adam.")
flags.DEFINE_integer("train_step", 150, "total epochs for training")
flags.DEFINE_integer("save_step", 5, "epochs for saving checkpoint")
flags.DEFINE_integer("decay_step", 500, "update the learning_rate value every decay_steps times")
flags.DEFINE_float("decay_rate", 0.9, "momentum used in optimizer")
flags.DEFINE_string("resume_path", None, "checkpoint path")
flags.DEFINE_string("chip", "npu", "Run on which chip, (npu or gpu or cpu)")
flags.DEFINE_string("platform", "apulis", "Run on linux/apulis/modelarts platform. Modelarts Platform has some extra data copy operations")

## The following params only useful on NPU chip mode
flags.DEFINE_boolean("npu_profiling", True, "profiling for performance or not")

# Fix: the original print carried a stray ":{}" placeholder with no .format().
print("===>>>flags.DEFINE end")

def main(_):
    """Build the BigGAN-128 model and run the requested phase (train/test).

    Args:
        _: unused positional argument supplied by ``tf.app.run()``.

    Side effects:
        Creates a TF session, trains or tests the GAN, and — on the
        ModelArts platform only — copies results back to OBS.
    """
    print("--->main(_)")
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info("**********")
    print("===>>>dataset:{}".format(FLAGS.dataset))
    print("===>>>result:{}".format(FLAGS.result))
    print("===>>>train_step:{}".format(FLAGS.train_step))

    # Dump every flag value so the run configuration is reproducible from logs.
    for attr, flag_obj in sorted(FLAGS.__flags.items()):
        print("{} = {}".format(attr.lower(), flag_obj.value))

    # make_config builds the chip-specific (npu/gpu/cpu) session config.
    config = make_config(FLAGS)
    print("------>>check dataset", FLAGS.dataset)
    # Fix: stray ":{}" placeholder removed (there was no .format() call).
    print("------>>start train .......")

    # start training
    with tf.Session(config=config) as sess:
        init_op = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())
        sess.run(init_op)
        print("------>>Session(config=config)")
        gan = BigGAN_128(sess, FLAGS)
        print("------>>build_model")
        gan.build_model()
        # show network architecture
        print("------>>show_all_variables")
        show_all_variables()

        if FLAGS.phase == 'train':
            gan.train()
            print(" [*] Training finished!")
        elif FLAGS.phase == 'test':
            gan.test()
            print(" [*] Test finished!")

    # Fix: debug leftover `if True:` forced the OBS copy on every platform;
    # restore the intended ModelArts-only condition (only ModelArts needs the
    # extra result-to-OBS copy step).
    if FLAGS.platform.lower() == 'modelarts':
        print("------>>modelarts_result2obs(FLAGS)")
        from help_modelarts import modelarts_result2obs
        modelarts_result2obs(FLAGS)


if __name__ == "__main__":
    # Fix: stray ":{}" placeholder removed (there was no .format() call).
    print("===>>>main .....")
    # These flags have defaults above but must still be explicitly non-empty.
    flags.mark_flag_as_required("dataset")
    flags.mark_flag_as_required("result")
    flags.mark_flag_as_required("obs_dir")
    # tf.app.run() parses flags and dispatches to main(_).
    tf.app.run()