#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Huawei Technologies Co., Ltd. 2024. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import argparse
import os
import time
import logging

import numpy as np
import tensorflow as tf
from mpi4py import MPI
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
# must load before mxrec_pybind
ops_so = tf.load_op_library("/usr/local/python3.7.5/lib/python3.7/site-packages/mx_rec/libasc/librecsdk_tf_npu_ops.so")

import mxrec_pybind


# Build all graphs in TF1-style graph mode; the custom NPU op below is driven
# through an explicit tf.compat.v1.Session rather than eager execution.
tf.compat.v1.disable_eager_execution()
# DEBUG level so the per-step timing/diagnostic messages in __main__ are emitted.
logging.basicConfig(level=logging.DEBUG)


def set_ascend_env(rank, rank_size, local_rank_size, file=None, dev_id=-1, dev_index=1):
    """Populate the Ascend/NPU environment variables for this process.

    Args:
        rank: global rank id of this process.
        rank_size: total number of ranks in the job.
        local_rank_size: number of ranks per node; used to derive the local rank id.
        file: optional rank-table path, exported as RANK_TABLE_FILE when given.
        dev_id: explicit device id; -1 means "derive from the local rank id".
        dev_index: explicit device index; -1 means "derive from the local rank id".
    """
    local_rank_id = int(rank) % int(local_rank_size)

    # Fixed settings that do not depend on the rank layout.
    os.environ.update({
        "MOX_USE_NPU": "1",
        "FUSION_TENSOR_SIZE": "2000000000",
        "MOX_USE_TF_ESTIMATOR": "0",
        "MOX_USE_TDT": "1",
        "HEARTBEAT": "1",
        "CONTINUE_TRAIN": "true",
        "HCCL_CONNECT_TIMEOUT": "600",
        "JOB_ID": "10086",
        "SOC_VERSION": "Ascend910",
        "GE_AICPU_FLAG": "1",
        "NEW_GE_FE_ID": "1",
        "EXPERIMENTAL_DYNAMIC_PARTITION": "1",
        "ENABLE_FORCE_V2_CONTROL": "1",
    })

    os.environ["RANK_ID"] = str(rank)
    os.environ["RANK_SIZE"] = str(rank_size)

    # An explicit device id (-1 sentinel absent) overrides the local-rank-derived one.
    device = dev_id if dev_id != -1 else local_rank_id
    os.environ["DEVICE_ID"] = str(device)
    os.environ["ASCEND_DEVICE_ID"] = str(device)

    # Same override scheme for the device index.
    index = dev_index if dev_index != -1 else local_rank_id
    os.environ["DEVICE_INDEX"] = str(index)

    if file:
        os.environ["RANK_TABLE_FILE"] = file


class WideDeep:
    """Thin graph wrapper that wires a single lccl GatherAll op into the TF graph.

    NOTE(review): `forward` reads the module-level globals `ops_so`, `peer_mem`,
    `rank_id`, `rank_size` and `emb_dim` that the __main__ section sets up, so
    instances can only be created after that setup has run.
    """

    def __init__(self, table, lookup_table, matrix, shape):
        self.table = table
        self.lookup = lookup_table
        self.matrix = matrix
        self.shape_vec = shape
        self.gather_all_result = None
        self.forward()

    def forward(self):
        """Build the gather-all subgraph and cache its rank-2 output tensor."""
        # Ensure the table/lookup tensors are produced before the custom op runs.
        with tf.control_dependencies([self.table, self.lookup]):
            raw = ops_so.lccl_gather_all(
                emb_table=self.table,
                lookup=self.lookup,
                send_count_matrix=self.matrix,
                shape_vec=self.shape_vec,
                peer_mem=peer_mem,
                rank=rank_id,
                rank_size=rank_size,
                dim=emb_dim,
            )
            # Keep only the leading two axes so callers see a [rows, dim] tensor.
            self.gather_all_result = tf.reshape(raw, raw.shape[:2])
        return self.gather_all_result


def verify_result(real_result: np.ndarray, golden: np.ndarray):
    """Compare the op output against the golden reference.

    Elements are checked against both an absolute and a relative tolerance.
    A ValueError is raised only when the failure count of BOTH checks exceeds
    `loss` as a fraction of the array size; otherwise the test is logged as
    passing.

    Raises:
        ValueError: when both tolerance checks fail on too many elements.
    """
    loss = 1e-4
    minimum = 10e-10  # epsilon keeping the relative-error denominator nonzero

    abs_diff = np.abs(real_result - golden)
    scale = np.maximum(np.abs(real_result), np.abs(golden))
    atol_ok = abs_diff <= loss
    rtol_ok = abs_diff / (scale + minimum) <= loss

    # De Morgan of the original: enter only if neither check passes everywhere.
    if not (rtol_ok.all() or atol_ok.all()):
        budget = real_result.size * loss
        if np.sum(rtol_ok == 0) > budget and np.sum(atol_ok == 0) > budget:
            raise ValueError("precision error")
    logging.info("GatherAll precision test pass")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='base')
    parser.add_argument("--local_rank_size")
    parser.add_argument("--hccl_json")
    args = parser.parse_args()
    local_rank_size = int(args.local_rank_size)

    comm = MPI.COMM_WORLD
    rank_id = comm.Get_rank()
    comm_server_rank_id = 0  # select rank 0 as server for lccl meta info exchange node
    rank_size = comm.Get_size()
    logging.info(f"rank {rank_id}/{rank_size}")
    # NOTE: the previous `local_rank_id = rank_id % rank_size` was unused and
    # used the wrong modulus (rank_size instead of local_rank_size); dropped —
    # set_ascend_env derives the local rank id itself.
    set_ascend_env(rank_id, rank_size, local_rank_size, file=args.hccl_json)

    peer_mem_ = mxrec_pybind.get_peer_mem(rank_id, comm_server_rank_id, rank_size)
    logging.info(f"python peer_mem_ = {peer_mem_}")

    # create session: NPU offline-compile config with mixed compile mode and
    # original dtypes preserved; TF's remapper is disabled for the custom op.
    sess_config = tf.compat.v1.ConfigProto()
    custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.parameter_map["use_off_line"].b = True
    custom_op.parameter_map["mix_compile_mode"].b = True
    custom_op.name = "NpuOptimizer"
    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes('must_keep_origin_dtype')
    sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
    custom_op.parameter_map["enable_data_pre_proc"].b = True
    sess_config.gpu_options.allow_growth = True
    custom_op.parameter_map["hcom_parallel"].b = False
    custom_op.parameter_map["op_execute_timeout"].i = 500

    emb_dim = 128
    emb_len = 3000
    lookup_num = 2048

    # Row i of the table holds the constant rank_id * 100000 + i in every
    # column, so gathered rows identify both source rank and source row.
    # (Vectorized replacement for the former element-wise fill loops.)
    row_values = rank_id * 100000 + np.arange(emb_len, dtype=np.float64)
    emb_table_np = np.repeat(row_values[:, None], emb_dim, axis=1)

    lookup_idx_np = np.arange(0, lookup_num)
    emb_table = tf.convert_to_tensor(emb_table_np, dtype=tf.float32)
    lookup_idx = tf.convert_to_tensor(lookup_idx_np, dtype=tf.int32)

    # Uniform all-to-all layout: every rank sends the same element count.
    send_count_matrix = np.full((rank_size, rank_size), lookup_num // rank_size * emb_dim)
    send_count_matrix = tf.convert_to_tensor(send_count_matrix, dtype=tf.int64)

    shape_vec = tf.constant([1] * lookup_num, dtype=tf.int32)
    shape_vec = tf.reshape(shape_vec, [-1, 1])

    peer_mem = tf.convert_to_tensor(peer_mem_, dtype=tf.int64)

    # model run parameter
    stop_steps = 1
    model = WideDeep(emb_table, lookup_idx, send_count_matrix, shape_vec)

    results = None  # guards the precision check if no step ever completes
    with tf.compat.v1.Session(config=sess_config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())

        logging.info("============start GatherAll test=============")
        # start run loop
        current_steps = 0
        train_finished = False
        while not train_finished:
            try:
                current_steps += 1
                logging.info(f"current step = {current_steps}")
                run_dict = {
                    "gather_all_result": model.gather_all_result,
                }
                start_time = time.time()
                results = sess.run(fetches=run_dict)
                end_time = time.time()

                logging.info(f"current steps: {current_steps}, time cost(ms):{(end_time - start_time) * 1000}")
                if current_steps >= stop_steps:
                    comm.Barrier()
                    logging.info("gather finished")
                    train_finished = True
            except tf.errors.OutOfRangeError as e:
                comm.Barrier()
                logging.info(f"gather test failed with error:{e}")
                train_finished = True
        MPI.Finalize()

    if results is None:
        raise RuntimeError("GatherAll produced no result; cannot verify precision")

    # check precision: row i is expected to come from rank i // send_row_per_rank,
    # at that rank's row (i % send_row_per_rank) + send_row_per_rank * rank_id.
    send_row_per_rank = lookup_num // rank_size
    idx = np.arange(lookup_num)
    expect_rows = ((idx // send_row_per_rank) * 100000
                   + (idx % send_row_per_rank)
                   + send_row_per_rank * rank_id).astype(np.float64)
    expect_gather_all = np.repeat(expect_rows[:, None], emb_dim, axis=1)

    # Fixed fetch key: the former `results.get("gather_result")` used a key
    # absent from run_dict and silently yielded np.array(None).
    actual = np.array(results.get("gather_all_result"))
    verify_result(actual, expect_gather_all)
    logging.info("============end GatherAll test=============")