############################################################################
# 2019 - present Contributed by Apulis Technology (Shenzhen) Co. LTD
#
# This program and the accompanying materials are made available under the
# terms of the MIT License, which is available at
# https://www.opensource.org/licenses/MIT
#
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: MIT
############################################################################

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import ssl
import sys

import numpy as np
import tensorflow as tf
from npu_bridge.estimator import npu_ops
from npu_bridge.estimator.npu.npu_optimizer import NPUDistributedOptimizer
from tensorflow.contrib import slim
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig

# Silence TensorFlow's C++ backend and Python logger before any graph work.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.get_logger().setLevel('ERROR')
# Disable HTTPS certificate verification process-wide.
# NOTE(review): this weakens TLS for every https request the process makes --
# confirm it is really needed (e.g. for dataset downloads behind a proxy).
ssl._create_default_https_context = ssl._create_unverified_context

# Populated by argparse in the __main__ guard at the bottom of the file.
FLAGS = None
# First CLI argument: working directory containing the MNIST archive.
# NOTE(review): the script raises IndexError at import time if no argument
# is passed -- intended for launcher-driven invocation only.
execpath = sys.argv[1]


def get_rank_id():
    """Return this process's global rank, read from the RANK_INDEX env var."""
    raw_rank = os.environ["RANK_INDEX"]
    return int(raw_rank)


def get_local_rank_id():
    """Return the local (per-node) rank of this process.

    NOTE(review): this reads the same RANK_INDEX variable as
    ``get_rank_id()``; a local rank is usually a per-device index
    (e.g. DEVICE_ID) -- confirm this is intended for setups with one
    device per process.
    """
    index_str = os.environ["RANK_INDEX"]
    return int(index_str)


def get_rank_size():
    """Return the total number of participating ranks.

    Reads the RANK_SIZE environment variable, defaulting to "1" when it
    is unset -- consistent with how the session-config code in main()
    queries the same variable via ``os.getenv("RANK_SIZE", "1")``.
    Previously an unset variable raised KeyError.
    """
    return int(os.environ.get("RANK_SIZE", "1"))


def train_input_generator(x_train, y_train, batch_size=64):
    """Yield endless shuffled (images, labels) mini-batches.

    Each epoch re-shuffles both arrays with one shared permutation so
    image/label pairs stay aligned; a trailing partial batch is dropped.

    Args:
        x_train: array of training inputs, indexable along axis 0.
        y_train: array of labels, same length as ``x_train``.
        batch_size: number of samples per yielded batch.

    Yields:
        ``(x_batch, y_batch)`` tuples of length ``batch_size``.

    Raises:
        ValueError: if the two arrays have different lengths.
    """
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, which would silently accept misaligned data.
    if len(x_train) != len(y_train):
        raise ValueError(
            "x_train and y_train must have the same length: %d != %d"
            % (len(x_train), len(y_train)))
    while True:
        # One shared permutation keeps inputs and labels paired.
        p = np.random.permutation(len(x_train))
        x_train, y_train = x_train[p], y_train[p]
        index = 0
        while index <= len(x_train) - batch_size:
            yield (x_train[index:index + batch_size],
                   y_train[index:index + batch_size])
            index += batch_size


def main(_):
    """Train a small LeNet-style MNIST classifier on an Ascend NPU.

    Flow: initialize the NPU/HCCL runtime in a dedicated session, build
    the graph, train for a few steps, evaluate on rank 0, save a
    checkpoint, then shut the NPU runtime down.  The positional argument
    is the argv list supplied by ``tf.app.run`` and is ignored.
    """
    # init npu hccl
    npu_int = npu_ops.initialize_system()

    # Session config for the runtime-init session: route the graph
    # through the NpuOptimizer custom rewriter in offline (device) mode
    # with lazy-recompile dynamic-shape support.
    config = tf.ConfigProto()
    custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.name = "NpuOptimizer"
    custom_op.parameter_map["use_off_line"].b = True
    # tensorflow.python.framework.errors_impl.InternalError: The input shape of GeOp5_0 is dynamic, please ensure that npu option[dynamic_input] is set correctly, for more details please refer to the migration guide.
    custom_op.parameter_map["dynamic_input"].b = True
    custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile")
    # Remapping disabled alongside NpuOptimizer (per the NPU migration guide).
    config.graph_options.rewrite_options.remapping = RewriterConfig.OFF

    # Dedicated session only for NPU init/shutdown; kept open until the end.
    init_sess = tf.Session(config=config)
    init_sess.run(npu_int)

    # Load the four MNIST arrays.  NOTE(review): presumably an .npz
    # archive despite the extensionless name -- confirm against the
    # data-prep step that produces "MNIST/MNIST-data-0".
    with np.load(os.path.join(execpath, "MNIST/MNIST-data-0")) as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']

    # Flatten 28x28 images to 784-vectors and scale pixels to [0, 1].
    x_train = np.reshape(x_train, (-1, 784)) / 255.0
    x_test = np.reshape(x_test, (-1, 784)) / 255.0

    # Placeholders: flat images and integer class labels (fed as floats).
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None])
    target = tf.one_hot(tf.cast(y, tf.int32), 10, 1, 0)
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # model: conv(32) -> pool -> conv(64) -> pool -> fc(1024) -> dropout -> fc(10)
    net = slim.conv2d(x_image, 32, [5, 5], scope='conv1')
    net = slim.max_pool2d(net, [2, 2], scope='pool1')
    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
    net = slim.max_pool2d(net, [2, 2], scope='pool2')
    # Two 2x2 poolings reduce 28x28 to 7x7 with 64 channels.
    net = tf.reshape(net, [-1, 7 * 7 * 64])
    net = slim.fully_connected(net, 1024, scope='fc1')
    # Dropout keep-probability is fed per-run: 0.5 for training, 1.0 for eval.
    keep_prob = tf.placeholder('float')
    net = tf.nn.dropout(net, keep_prob)
    net = slim.fully_connected(net, 10, scope='fc2')
    # NOTE(review): softmax is applied here, but
    # tf.losses.softmax_cross_entropy below expects *unscaled* logits and
    # applies its own softmax -- so probabilities are softmax-ed twice.
    # Training still converges but gradients are dampened; confirm intended.
    logits = tf.nn.softmax(net)

    # loss
    loss = tf.losses.softmax_cross_entropy(target, logits)

    # optimizer: learning rate scaled linearly with world size, wrapped
    # for NPU gradient allreduce.
    opt = tf.train.AdamOptimizer(0.0001 * get_rank_size())
    opt = NPUDistributedOptimizer(opt)

    # NOTE(review): `input` shadows the builtin and is only needed by the
    # commented-out broadcast below; consider removing both together.
    input = tf.trainable_variables()
    # bcast_global_variables_op = hccl_ops.broadcast(input, 0)

    global_step = tf.train.get_or_create_global_step()
    train_op = opt.minimize(loss, global_step=global_step)

    # Accuracy: fraction of argmax predictions matching one-hot targets.
    co_preds = tf.equal(tf.argmax(logits, axis=1), tf.argmax(target, axis=1))
    accuracy = tf.reduce_mean(tf.cast(co_preds, 'float'))

    # session config (training session; mirrors the init-session NPU options)
    config = tf.ConfigProto()
    # NOTE(review): gpu_options look like GPU-era leftovers; presumably
    # ignored on NPU -- verify before removing.
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = str(get_local_rank_id())
    custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.name = "NpuOptimizer"
    custom_op.parameter_map["use_off_line"].b = True
    custom_op.parameter_map["dynamic_input"].b = True
    custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile")
    config.graph_options.rewrite_options.remapping = RewriterConfig.OFF

    # Multi-device runs need the gradient-fusion pass for allreduce.
    if int(os.getenv("RANK_SIZE", "1")) > 1:
        config.graph_options.rewrite_options.optimizers.extend(["GradFusionOptimizer"])
    # custom_op.parameter_map["mix_compile_mode"].b = mix_compile_mode
    # custom_op.parameter_map["iterations_per_loop"].i = iterations_per_loop

    training_batch_generator = train_input_generator(
        x_train, y_train, batch_size=64)

    init = tf.global_variables_initializer()
    sess = tf.Session(config=config)
    saver = tf.train.Saver()
    sess.run(init)

    # sess.run(bcast_global_variables_op)

    # Fixed total work (50 steps) split evenly across ranks.  Metrics are
    # evaluated on the batch *before* the weight update is applied.
    for i in range(50 // get_rank_size()):
        image_, label_ = next(training_batch_generator)
        acc_, loss_ = sess.run(
            [accuracy, loss],
            feed_dict={x: image_, y: label_, keep_prob: 1.0})
        print('Step %d, train accuracy: %g,\t loss: %g' % (i, acc_, loss_))
        sess.run(train_op, feed_dict={x: image_, y: label_, keep_prob: 0.5})

    # Only rank 0 evaluates on the full test set (dropout disabled).
    if get_rank_id() == 0:
        acc_, loss_ = sess.run(
            [accuracy, loss],
            feed_dict={x: x_test, y: y_test, keep_prob: 1.0})
        print('Test accuracy: %g, \t loss: %g' % (acc_, loss_))
    # NOTE(review): every rank saves to the same "./lenet" path -- on a
    # shared filesystem they overwrite each other; confirm acceptable.
    saver.save(sess, os.path.join("./lenet"))
    # Tear down the NPU runtime via the session that initialized it.
    npu_shutdown = npu_ops.shutdown_system()
    init_sess.run(npu_shutdown)
    init_sess.close()


if __name__ == '__main__':
    # Parse the flags we know about; everything unrecognized is handed
    # through to tf.app.run so launcher-injected arguments survive.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--data_dir', type=str, default='./mnist/')
    FLAGS, leftover = arg_parser.parse_known_args()

    tf.app.run(main=main, argv=[sys.argv[0]] + leftover)
