# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
# for Enflame DTU
os.environ["ENFLAME_AUTO4C"] = "true"
import argparse
import timeit
import sys
import numpy as np
import tensorflow as tf
from tensorflow.keras import applications
import time as sys_time
# Training settings
def _str2bool(value):
    """Parse a command-line boolean flag value.

    argparse's ``type=bool`` is a trap: ``bool("False")`` is True, so any
    non-empty string (including the literal "False") used to enable the
    flag.  This parser accepts the usual spellings of true/false instead.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean value, got %r' % value)


parser = argparse.ArgumentParser(description='TensorFlow Synthetic Benchmark')
parser.add_argument('--model', type=str, default='ResNet50',
                    help='model to benchmark, e.g., DenseNet121, DenseNet169, DenseNet201, InceptionResNetV2, InceptionV3, MobileNet, MobileNetV2, NASNetLarge, NASNetMobile, ResNet50, VGG16, VGG19, Xception')
parser.add_argument('--num-warmup-batches', type=int, default=10,
                    help='number of warm-up batches')
parser.add_argument('--num-batches-per-iter', type=int, default=10,
                    help='number of batches per benchmark iteration')
parser.add_argument('--num-iters', type=int, default=10,
                    help='number of benchmark iterations')
parser.add_argument('--batch-size', type=int, default=32,
                    help='input batch size')
parser.add_argument('--nrank', type=int, default=1,
                    help='number of devices')
# type=_str2bool (not type=bool): see _str2bool above for why.
parser.add_argument('--print_info', type=_str2bool, default=False,
                    help='Enable printing information (default: False)')
parser.add_argument('--print_fetches_targets', type=_str2bool, default=False,
                    help='Enable printing fetches_targets (default: False)')
parser.add_argument('--device', choices=['dtu', 'gpu'], default='gpu')
parser.add_argument('--precision', choices=['fp16', 'fp32'], default='fp32')
args = parser.parse_args()

# Translate the CLI device choice into a TensorFlow device name, plus the
# number of compute clusters that device exposes (the Enflame DTU exposes
# 4; a GPU counts as 1 — used later to scale the info.csv summary).
_DEVICE_TABLE = {
    'gpu': ('GPU', 1),
    'dtu': ('XLA_DTU', 4),
}
if args.device in _DEVICE_TABLE:
    args.device, num_cluster = _DEVICE_TABLE[args.device]

# Set the global Keras float width to match the requested precision.
_FLOATX_TABLE = {'fp16': 'float16', 'fp32': 'float32'}
_floatx = _FLOATX_TABLE.get(args.precision)
if _floatx is not None:
    tf.keras.backend.set_floatx(_floatx)


# Set up standard model.
with tf.device('/device:{}:0'.format(args.device)):
    print("applications = " + str(applications))
    # Look the requested architecture up in tf.keras.applications and
    # instantiate it with random (untrained) weights.
    model = getattr(applications, args.model)(weights=None)
    print("model = " + str(model))
    # Build model: plain SGD with the learning rate scaled by device count.
    lr_scaler = args.nrank
    opt = tf.train.GradientDescentOptimizer(0.01 * lr_scaler)
    init = tf.global_variables_initializer()
    # Build fake dataset: random images of ImageNet input shape
    # (batch, 224, 224, 3) in the dtype selected by --precision.
    _DTYPE_TABLE = {'fp16': tf.float16, 'fp32': tf.float32}
    if args.precision in _DTYPE_TABLE:
        data = tf.random_uniform([args.batch_size, 224, 224, 3],
                                 dtype=_DTYPE_TABLE[args.precision])
    print("dtype=", data.dtype)
    # Labels are drawn with NumPy rather than tf.random_uniform because,
    # per the original author, the Enflame DTU does not support it here
    # (presumably for integer outputs — the float image case above works).
    target = tf.constant(
        np.random.randint(low=0, high=1000, size=(args.batch_size, 1), dtype=int),
        dtype=tf.int64,
    )
    # Build training target: forward pass, sparse softmax cross-entropy
    # against the random integer labels, and a single SGD minimize op.
    probs = model(data, training=True)
    loss = tf.losses.sparse_softmax_cross_entropy(target, probs)
    train_opt = opt.minimize(loss)

################# Tensorflow run
def log(s, nl=True):
    """Write *s* to stderr, ending with a newline unless *nl* is False."""
    terminator = '\n' if nl else ''
    print(s, end=terminator, file=sys.stderr)
# Session configuration: fall back to a supported device when an op has no
# kernel on the requested one, and grow GPU memory on demand instead of
# grabbing it all up front.
config = tf.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.allow_growth = True
# NOTE(review): the original script set `args.nrank = 1` here, silently
# clobbering the --nrank CLI argument and making the "Total img/sec on
# %d GPU(s)" report always claim one device; that override is removed so
# --nrank behaves as documented (its default is already 1).
log('Model: %s' % args.model)
# The 'Running warmup...' / 'Running benchmark...' messages are emitted by
# run() itself; the duplicate copies that used to be logged here are gone.
def run(benchmark_step):
    """Benchmark *benchmark_step* and report/record images-per-second.

    Args:
        benchmark_step: zero-argument callable that executes one training
            batch (here: one ``sess.run`` of the train op and loss).

    Warms up for ``args.num_warmup_batches`` calls, then times
    ``args.num_iters`` iterations of ``args.num_batches_per_iter`` calls
    each, logging per-iteration throughput and finally appending a one-line
    CSV summary to ./info.csv.
    """
    # Warm-up: let one-time graph/compilation costs fall outside the timing.
    log('Running warmup...')
    timeit.timeit(benchmark_step, number=args.num_warmup_batches)

    # Benchmark
    log('Running benchmark...')
    img_secs = []
    for it in range(args.num_iters):
        # `elapsed` is the wall-clock seconds for the whole group of batches.
        elapsed = timeit.timeit(benchmark_step,
                                number=args.num_batches_per_iter)
        img_sec = args.batch_size * args.num_batches_per_iter / elapsed
        log('Iter #%d: %.1f img/sec per GPU' % (it, img_sec))
        img_secs.append(img_sec)

    # Results: mean throughput with a 95% half-width (1.96 * sigma).
    img_sec_mean = np.mean(img_secs)
    img_sec_conf = 1.96 * np.std(img_secs)
    log('Img/sec per GPU: %.1f +-%.1f' % (img_sec_mean, img_sec_conf))
    log('Total img/sec on %d GPU(s): %.1f +-%.1f' %
        (args.nrank, args.nrank * img_sec_mean, args.nrank * img_sec_conf))

    # Append a CSV summary line.  A DTU device exposes num_cluster cores,
    # so the effective batch size and throughput are scaled by it
    # (num_cluster is 1 on GPU).
    info = 'tf.keras,{},{},{},{},{},{}'.format(
        args.device,
        args.precision,
        args.model,
        'sgd',
        args.batch_size * num_cluster,
        img_sec_mean * num_cluster,
    )
    with open('./info.csv', 'a+') as f:
        f.write(info + '\n')
# The session is initialized from the model graph built above; the variable
# initializer and the train/loss fetches are executed inside it.
with tf.compat.v1.Session(config=config) as sess:
    sess.run(init)

    def benchmark_step():
        # One training batch: run the SGD update and fetch the loss.
        return sess.run([train_opt, loss])

    run(benchmark_step)

