import collections
import functools
import inspect
import os
import re
import struct
import subprocess

import numpy as np

import mindspore.nn as nn
from mindspore import Tensor
from mindspore import log as logger
from mindspore.common.api import ms_function
from mindspore.train.serialization import export


def _count_unequal_element(data_expected, data_me, rtol, atol):
    assert data_expected.shape == data_me.shape
    total_count = len(data_expected.flatten())
    error = np.abs(data_expected - data_me)
    greater = np.greater(error, atol + np.abs(data_me) * rtol)
    loss_count = np.count_nonzero(greater)
    assert (loss_count / total_count) < rtol, \
        "\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}". \
            format(data_expected[greater], data_me[greater], error[greater])


def allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=True):
    """Compare two numeric arrays with np.allclose semantics.

    If either side contains NaN the comparison is delegated entirely to
    ``np.allclose`` (with ``equal_nan``). Otherwise, a failed allclose falls
    back to the ratio-based check in ``_count_unequal_element``; a passing
    one only re-verifies the shapes match.
    """
    contains_nan = np.any(np.isnan(data_expected)) or np.any(np.isnan(data_me))
    if contains_nan:
        assert np.allclose(data_expected, data_me, rtol, atol, equal_nan=equal_nan)
        return
    if np.allclose(data_expected, data_me, rtol, atol, equal_nan=equal_nan):
        assert np.array(data_expected).shape == np.array(data_me).shape
    else:
        _count_unequal_element(data_expected, data_me, rtol, atol)


def clean_all_ckpt_files(folder_path):
    """Delete every ``.ckpt`` and ``.meta`` file directly inside *folder_path*.

    A missing folder is silently ignored, and a file vanishing between the
    listing and the removal is only logged (best-effort cleanup).
    """
    if not os.path.exists(folder_path):
        return
    for file_name in os.listdir(folder_path):
        if file_name.endswith(('.ckpt', '.meta')):
            try:
                os.remove(os.path.join(folder_path, file_name))
            except FileNotFoundError as err:
                logger.warning("[{}] remove ckpt file error.".format(err))


def find_newest_ckpt_file(folder_path):
    """Return the full path of the most recently created ``.ckpt`` file.

    Raises ``ValueError`` (from ``max``) when the folder holds no ckpt file.
    """
    candidates = [os.path.join(folder_path, name)
                  for name in os.listdir(folder_path)
                  if name.endswith('.ckpt')]
    return max(candidates, key=os.path.getctime)


def clean_all_ir_files(folder_path):
    """Delete every ``.ir``, ``.dot`` and ``.dat`` dump file in *folder_path*.

    A missing folder is ignored; removal errors are not caught here.
    """
    if not os.path.exists(folder_path):
        return
    for file_name in os.listdir(folder_path):
        if file_name.endswith(('.ir', '.dot', '.dat')):
            os.remove(os.path.join(folder_path, file_name))


def find_newest_validateir_file(folder_path):
    """Return the newest ``<n>_validate_<m>.ir`` file in *folder_path*.

    Raises ``ValueError`` (from ``max``) when no such file exists.
    """
    # Bug fix: the '.' before 'ir' is now escaped. The previous pattern
    # r'\d+_validate_\d+.ir' let '.' match any character, so names such as
    # '1_validate_2xir' were wrongly accepted.
    pattern = re.compile(r'\d+_validate_\d+\.ir')
    ir_files = [os.path.join(folder_path, name)
                for name in os.listdir(folder_path)
                if pattern.match(name)]
    return max(ir_files, key=os.path.getctime)


def find_newest_begin_ir_file(folder_path):
    """Return the newest ``step_parallel_begin_<n>.ir`` file in *folder_path*.

    Raises ``ValueError`` (from ``max``) when no such file exists.
    """
    # Bug fix: the '.' before 'ir' is escaped (it previously matched any
    # character), and the stray '+' after 'parallel' is removed — the old
    # pattern r'step_parallel+_begin_...' merely repeated the final 'l',
    # which was almost certainly unintended.
    pattern = re.compile(r'step_parallel_begin_\d+\.ir')
    ir_files = [os.path.join(folder_path, name)
                for name in os.listdir(folder_path)
                if pattern.match(name)]
    return max(ir_files, key=os.path.getctime)


def dump_tensor_t(tensor, filepath, is_output):
    """Serialize *tensor* into a binary ``.t`` file.

    File layout: a header of unsigned 32-bit ints (rank followed by each
    dimension) and then the raw element bytes. Outputs are cast to float32
    before dumping; rank-4 inputs are transposed NCHW -> NHWC first.
    Parent directories are created on demand.
    """
    if isinstance(tensor, Tensor):
        data = tensor.asnumpy().astype(np.float32) if is_output else tensor.asnumpy()
    else:
        data = np.array(tensor)
    rank = len(data.shape)
    # input NCHW --> NHWC
    if rank == 4 and not is_output:
        data = np.transpose(data, [0, 2, 3, 1])
    header = struct.pack('I%dI' % rank, rank, *data.shape)
    parent_dir = os.path.dirname(filepath)
    if not os.path.isdir(parent_dir):
        os.makedirs(parent_dir)
    with open(filepath, 'wb') as dump_file:
        dump_file.write(header)
        dump_file.write(data.tobytes())


def dump_tensor(tensor, filepath, is_output, env_lite, *index):
    """Dump *tensor* to ``.npy`` and ``.bin`` files (plus ``.t`` for lite).

    The base *filepath* is suffixed with ``_outputME``/``_inputME`` and the
    concatenated *index* components so multiple inputs/outputs of one op get
    distinct file names. Parent directories are created on demand.
    """
    target_dir = os.path.dirname(filepath)
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    suffix = "_outputME" if is_output else "_inputME"
    filepath = filepath + suffix + "".join(str(i) for i in index)
    if env_lite:
        dump_tensor_t(tensor, filepath + ".t", is_output)
    data = tensor.asnumpy() if isinstance(tensor, Tensor) else np.array(tensor)
    np.save(filepath + ".npy", data)
    with open(filepath + ".bin", 'wb') as bin_file:
        bin_file.write(data)


def wrap_op(op):
    """Build a subclass of ``op`` that, driven by environment variables,
    exports the network (AIR and/or MINDIR) and dumps its call inputs and
    outputs under ``../offline_infer/data/ops/`` for offline comparison.

    Environment variables:
      - ME_EXPORT_DATA:   enable AIR export and data dumping
      - ME_EXPORT_MINDIR: enable MINDIR export and data dumping
      - ME_EXPORT_LITE:   additionally dump ``.t`` files via dump_tensor

    With none of these set, the returned class only counts its instances
    and otherwise behaves like ``op``.
    """
    cur_path = os.path.abspath(os.path.dirname(__file__))
    data_path = cur_path + "/../offline_infer/data/ops/"
    env_air = os.environ.get('ME_EXPORT_DATA')
    env_mindir = os.environ.get('ME_EXPORT_MINDIR')
    env_lite = os.environ.get('ME_EXPORT_LITE')
    export_flag = env_air or env_mindir
    # One entry per distinct constructor-argument set seen so far.
    arguments_list = []
    # export_list[i] is True when instance i+1 used a new argument set and
    # therefore should be exported/dumped when called.
    export_list = []

    class WrapOp(op):
        # Class-wide count of instances created from this wrapper.
        instances = 0

        def __init__(self, *args, **kwargs):
            """Create the op and record whether this instance's (non-tensor)
            constructor arguments differ from every earlier instance."""
            super().__init__(*args, **kwargs)
            WrapOp.instances += 1
            if export_flag:
                # Rebuild the constructor-argument dict from the signature's
                # defaults, then overlay the values actually passed.
                sig = inspect.signature(super().__init__)
                param = sig.parameters
                is_different_model = True
                arguments = collections.OrderedDict()
                keys = []
                for key, value in param.items():
                    keys.append(key)
                    if value.default == inspect._empty:
                        arguments[key] = None
                    else:
                        arguments[key] = value.default
                # Tensor / ndarray values are deliberately skipped: only
                # scalar-like arguments take part in the "same model?" check.
                for key, value in kwargs.items():
                    if isinstance(value, Tensor):
                        pass
                    elif isinstance(value, np.ndarray):
                        pass
                    else:
                        arguments[key] = value
                for i, arg in enumerate(args):
                    if isinstance(arg, Tensor):
                        pass
                    elif isinstance(arg, np.ndarray):
                        pass
                    else:
                        arguments[keys[i]] = arg
                if arguments in arguments_list:
                    is_different_model = False
                # The very first instance always counts as a new model.
                if WrapOp.instances == 1:
                    is_different_model = True
                export_list.append(is_different_model)
                if is_different_model:
                    arguments_list.append(arguments)

        def __call__(self, *args, **kwargs):
            """Run the op; when export is enabled and this instance is a new
            model, export it before the call and dump inputs/outputs after.

            NOTE(review): ``export_list[WrapOp.instances - 1]`` consults the
            flag of the most recently *created* instance, not necessarily of
            ``self`` — calling an older instance after constructing newer
            ones uses the newest instance's flag. Confirm this is intended.
            """
            op_name = op.__name__
            # Suffix the name with the instance count so repeated
            # instantiations get distinct export/dump file names.
            if WrapOp.instances > 1:
                op_name = op.__name__ + str(WrapOp.instances)
            if export_flag and export_list[WrapOp.instances - 1]:
                if env_mindir:
                    try:
                        export(self, *args, file_name=data_path + op_name,
                               file_format='MINDIR')
                    except RuntimeError as err:
                        # Tolerated failure: graphs consisting of a bare
                        # return node cannot be serialized to MINDIR.
                        assert "Only has return node, can't convert to binary model!" \
                               in str(err)
                if env_air and "Ctrl" not in op_name:
                    try:
                        export(self, *args, file_name=data_path + op_name,
                               file_format='AIR')
                    except RuntimeError as err:
                        # Tolerated failure: ops without an adapter cannot
                        # be exported to AIR.
                        assert "Can't find OpAdapter for" in str(err)
            out = super().__call__(*args, **kwargs)
            if export_flag and export_list[WrapOp.instances - 1]:
                path = data_path + op_name
                # Dump positional inputs (tuples flattened one level).
                for i, arg in enumerate(args):
                    if isinstance(arg, tuple):
                        for k, ar in enumerate(arg):
                            dump_tensor(ar, path, False, env_lite, i, k)
                    else:
                        dump_tensor(arg, path, False, env_lite, i)

                # Dump outputs (tuples flattened up to two levels).
                if isinstance(out, tuple):
                    for j, outj in enumerate(out):
                        if isinstance(outj, tuple):
                            for k, outjk in enumerate(outj):
                                dump_tensor(outjk, path, True, env_lite, j, k)
                        else:
                            dump_tensor(outj, path, True, env_lite, j)
                else:
                    dump_tensor(out, path, True, env_lite)
            return out

        @property
        def cls_name(self):
            # Expose the name of op's base class so the wrapper stays
            # transparent to code that inspects cls_name.
            return op.__base__.__name__

    return WrapOp


def tensor_to_numpy(data):
    """Recursively convert a Tensor, or a (nested) tuple of Tensors, to numpy.

    Tuples keep their structure; anything that is neither a Tensor nor a
    tuple triggers an assertion failure.
    """
    if isinstance(data, Tensor):
        return data.asnumpy()
    if isinstance(data, tuple):
        head = tensor_to_numpy(data[0])
        if len(data) == 1:
            return (head,)
        return (head, *tensor_to_numpy(data[1:]))
    assert False, 'unsupported data type'


def allclose_nparray_recursive(data_expected, data_me, rtol, atol, equal_nan=True):
    """Recursively compare nested tuples of ndarrays via ``allclose_nparray``.

    Mirrors the structure of *data_me*: ndarrays are compared directly,
    tuples are walked head-then-tail; any other type asserts out.
    """
    if isinstance(data_me, np.ndarray):
        allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=equal_nan)
        return
    assert isinstance(data_me, tuple), 'unsupported data type'
    allclose_nparray_recursive(data_expected[0], data_me[0], rtol, atol,
                               equal_nan=equal_nan)
    if len(data_me) > 1:
        allclose_nparray_recursive(data_expected[1:], data_me[1:],
                                   rtol, atol, equal_nan=equal_nan)


def get_padding_of_same_mode_1d(length, kernel_size, stride):
    """Return (pad_left, pad_right) for TF-style SAME padding in 1D.

    The total padding makes the output length equal ceil(length / stride);
    an odd total puts the extra element on the right.
    """
    output_length = int(np.ceil(length / stride))
    total_pad = max((output_length - 1) * stride + kernel_size - length, 0)
    left = total_pad // 2
    return left, total_pad - left


def get_padding_of_same_mode(input_shape, kernel_shape, stride_shape):
    """Return (pad_left, pad_right, pad_top, pad_bottom) for 2D SAME padding.

    Each axis pads so its output size equals ceil(input / stride); odd
    totals put the extra element on the bottom/right.
    """
    in_h, in_w = input_shape
    k_h, k_w = kernel_shape
    s_h, s_w = stride_shape
    out_h = int(np.ceil(in_h / s_h))
    out_w = int(np.ceil(in_w / s_w))
    total_h = max((out_h - 1) * s_h + k_h - in_h, 0)
    total_w = max((out_w - 1) * s_w + k_w - in_w, 0)
    top = total_h // 2
    left = total_w // 2
    return left, total_w - left, top, total_h - top


def get_padding_of_same_mode_3d(input_shape, kernel_shape, stride_shape):
    """Return SAME padding for 3D as
    (pad_left, pad_right, pad_top, pad_bottom, pad_head, pad_tail).

    Each axis pads so its output size equals ceil(input / stride); odd
    totals put the extra element on the tail/bottom/right side.
    """
    pads = {}
    for axis, (dim, kernel, stride) in enumerate(zip(input_shape, kernel_shape,
                                                     stride_shape)):
        out_dim = int(np.ceil(dim / stride))
        total = max((out_dim - 1) * stride + kernel - dim, 0)
        near = total // 2
        pads[axis] = (near, total - near)
    head, tail = pads[0]
    top, bottom = pads[1]
    left, right = pads[2]
    return left, right, top, bottom, head, tail


def allclose_scalar_recursive(expr_value, me_value, loss=0.0):
    """Recursively assert that two scalars, or same-shape nested
    lists/tuples of scalars, differ by at most *loss* element-wise.

    Types must match exactly at every level (a tuple never equals a list);
    mismatched types or lengths are logged and fail the assertion.
    """
    logger.info("expr_value:{}, \nme_value:{}.".format(expr_value, me_value))
    if type(expr_value) is not type(me_value):
        logger.error("expr_value type not as same as me_value.{} VS {}".format(
            type(expr_value), type(me_value)))
        assert False
    if not isinstance(expr_value, (tuple, list)):
        assert abs(me_value - expr_value) <= loss
        return
    if len(expr_value) != len(me_value):
        logger.error("expr_value len not equal with me_value.{} VS {}".format(
            len(expr_value), len(me_value)))
        assert False
    for expected_item, actual_item in zip(expr_value, me_value):
        if isinstance(expected_item, (tuple, list)):
            allclose_scalar_recursive(expected_item, actual_item, loss)
        else:
            assert abs(actual_item - expected_item) <= loss


def cosine_similarity(num_x, num_y):
    """Return the cosine similarity of two equal-length number lists.

    Two all-zero vectors are defined to have similarity 1.0; one all-zero
    vector against a non-zero one yields 0.0.
    """
    assert len(num_x) == len(num_y), "len(num_x) != len(num_y)"
    zero_list = [0] * len(num_x)
    if num_x == zero_list or num_y == zero_list:
        # Undefined mathematically; pick 1 for identical zeros, else 0.
        return float(1) if num_x == num_y else float(0)
    products = np.array([[x * y, x * x, y * y] for x, y in zip(num_x, num_y)])
    return sum(products[:, 0]) / (np.sqrt(sum(products[:, 1])) * np.sqrt(sum(products[:, 2])))


def execute_cmd(cmd, timeout=10):
    """
    Run a shell command on the local host.
    :param cmd: String, shell command to execute
    :param timeout: int, timeout (seconds) so a hung command cannot block a test
    :return: command return code (int), stdout (string), stderr (string);
             (-255, "", "") when the command times out
    """
    try:
        completed = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, timeout=timeout)
    except subprocess.TimeoutExpired:
        logger.info(f"\nexec {cmd} not complete in {timeout} seconds, abort.")
        return -255, "", ""
    returncode = completed.returncode
    stdout = completed.stdout
    stderr = completed.stderr
    logger.info(f"\nexec {cmd} return {returncode};\nstdout:\n{stdout}\nstderr:\n{stderr}\n")
    return returncode, stdout.decode(), stderr.decode()


def net_access_fault_clear():
    """Remove all iptables state created by the fault-injection helpers.

    For each built-in chain (OUTPUT/INPUT/FORWARD) every rule referencing
    the custom chain is counted and then deleted one by one; finally the
    custom chain itself is flushed and destroyed.
    """
    custom_chain_name = "MINDTESTER_TEST"
    for chain in ("OUTPUT", "INPUT", "FORWARD"):
        returncode, line_numbers, _ = execute_cmd(
            "iptables -L %s --line-number | grep %s | awk '{print $1}' | wc -l"
            % (chain, custom_chain_name))
        if returncode != 0:
            continue
        for _ in range(int(line_numbers)):
            # Always delete the first matching rule; the remaining rules'
            # line numbers shift after each deletion.
            execute_cmd(
                "iptables -L %s -n -t filter --line-numbers | grep %s | head -1 | "
                "awk '{print $1}' | xargs -i iptables -D %s {}"
                % (chain, custom_chain_name, chain))
    execute_cmd(f"iptables -F {custom_chain_name}")
    execute_cmd(f"iptables -X {custom_chain_name}")


def url_access_fault(url):
    """
    Simulate the host being unable to reach a given url.

    A custom chain is used so that cleanup does not disturb the docker
    networking present on some hosts; outbound traffic matching the url on
    ports 80 and 443 is dropped, simulating a network disconnect.
    :param url: String, domain name without the http prefix
    :return: int 0 on success
    """
    custom_chain_name = "MINDTESTER_TEST"
    execute_cmd(f"iptables -t filter -N {custom_chain_name}")
    # Drop packets whose payload contains the url, for both HTTP and HTTPS.
    for port in (80, 443):
        execute_cmd(
            f"iptables -A {custom_chain_name}  -p tcp -m string "
            f"--string \"{url}\" --algo kmp --dport {port} -j DROP")
    # Route outbound web traffic through the custom chain.
    for port in (80, 443):
        execute_cmd(f"iptables -I OUTPUT -p tcp --dport {port} -j {custom_chain_name}")


def ip_access_fault(ip_address):
    """
    Simulate the host being unable to reach a given ip.

    A custom chain is used so that cleanup does not disturb the docker
    networking present on some hosts.
    :param ip_address: String, IPv4 address in dotted-quad notation
    :return: False when the address is invalid or a RuntimeError occurs;
             None otherwise (note: despite the original contract of "int 0",
             no explicit success value is returned)
    """
    custom_chain_name = "MINDTESTER_TEST"
    ipv4_pattern = re.compile(
        r'^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$')
    try:
        if not ipv4_pattern.match(ip_address):
            return False
        execute_cmd(f"iptables -t filter -N {custom_chain_name}")
        execute_cmd(f"iptables -A {custom_chain_name} -s {ip_address} -j DROP")
        for chain in ("OUTPUT", "INPUT", "FORWARD"):
            execute_cmd(f"iptables -I {chain} -j {custom_chain_name}")
    except RuntimeError:
        return False


def clear_ramdisk():
    """Unmount and remove every ram disk (/dev/ram*) on the host.

    For each mounted ram device, any process holding a file handle under
    the mount point is killed first so the umount cannot fail with
    "target is busy"; finally the brd kernel module is unloaded, which
    releases the ram disks themselves.
    """
    _, ram_disk_list_orgin, _ = execute_cmd("ls /dev/ | grep ram", timeout=30)
    for ram_disk in ram_disk_list_orgin.split("\n"):
        if ram_disk == "":
            continue
        _, mount_path, _ = execute_cmd("mount | grep -w \"%s\" | awk \'{print $3}\'" % ram_disk,
                                       timeout=30)
        if mount_path != "":
            mount_path = mount_path.split('\n')[0]
            # Kill processes holding file handles so the umount cannot fail.
            execute_cmd(
                "lsof | grep \"%s\" | awk \'{print $2}\' | xargs -i kill -9 {}" % mount_path,
                timeout=60)
            # Bug fix: umount only when this device is actually mounted.
            # It previously ran unconditionally, issuing "umount  -f" with
            # an empty target — or re-using the mount path left over from a
            # previous loop iteration — for unmounted devices.
            execute_cmd(f"umount {mount_path} -f", timeout=30)
    execute_cmd("modprobe -r brd", timeout=60)


def create_and_mount_ramdisk(mount_target, size=1):
    """
    Create a ramdisk of the given size, format it as ext4, and mount it at
    the target directory.

    A ramdisk is a convenient test disk because it does not depend on the
    host's disk layout. Only one ramdisk created through this helper exists
    at a time: any previous one is cleaned up first.
    :param mount_target: String, mount directory, created automatically if missing
    :param size: int, ramdisk size in MB
    :return: int, 0 on success, anything else on failure
    """
    clear_ramdisk()
    kbytes = size * 1024
    execute_cmd(f"modprobe brd rd_size={kbytes}  max_part=1 rd_nr=1", timeout=30)
    execute_cmd("mkfs.ext4 /dev/ram0", timeout=120)
    execute_cmd(f"mkdir -p {mount_target}")
    returncode, _, _ = execute_cmd(f"mount /dev/ram0 {mount_target}")
    return returncode


def disk_space_eat_by_count(path, count=1):
    """
    Consume disk space by writing a file of the given size into a directory.
    :param path: String, directory path
    :param count: int, amount of disk space to consume, in MB
    :return: none
    """
    eat_cmd = "dd if=/dev/zero of=%s/disk_space_eat bs=1M count=%s" % (path, count)
    execute_cmd(eat_cmd)


def get_disk_space_free_count(path):
    """
    Get the free disk space of the filesystem containing the given directory.
    :param path: String, directory
    :return: int, free space in MB; -1 when it cannot be determined
    """
    returncode, stdout, _ = execute_cmd(
        "df %s -l -m -P | grep -w \"%s\" | awk \'{print $4}\'" % (path, path),
        timeout=30)
    if returncode != 0:
        return -1
    try:
        return int(stdout)
    except ValueError:
        # Bug fix: grep may match nothing while the pipeline's return code
        # stays 0 (it is awk's, the last command) — int("") used to raise an
        # uncaught ValueError. Treat that as "could not determine" too.
        return -1


def mindspore_optmizer_choose(net, opt_type, default_lr=0.1, momentum=0.9, default_wd=0.0):
    """
    Build and return the requested MindSpore optimizer over the trainable
    parameters of ``net``.

    :param net: network whose trainable parameters will be optimized
    :param opt_type: one of "Adam", "SGD", "Momentum", "RMSProp", "FTRL",
        "Lamb", "AdamWeightDecay", "LazyAdam", "ProximalAdagrad", "LARS"
    :param default_lr: learning rate passed to every optimizer
    :param momentum: momentum for Momentum (and the inner Momentum of LARS)
    :param default_wd: weight decay passed to every optimizer
    :return: the constructed optimizer instance
    :raises KeyError: when ``opt_type`` is unknown (same as before)
    """
    group_params = list(filter(lambda x: x.requires_grad, net.get_parameters()))
    # Improvement: factories instead of eager instances. The original built
    # every optimizer up-front just to return one, constructing ten optimizer
    # objects over the same parameter list on each call. Only the requested
    # optimizer is instantiated now; an unknown opt_type still raises KeyError.
    opt_factories = {
        "Adam": lambda: nn.Adam(params=group_params, learning_rate=default_lr,
                                weight_decay=default_wd),
        "SGD": lambda: nn.SGD(params=group_params, learning_rate=default_lr,
                              weight_decay=default_wd),
        "Momentum": lambda: nn.Momentum(params=group_params, learning_rate=default_lr,
                                        momentum=momentum,
                                        weight_decay=default_wd),
        "RMSProp": lambda: nn.RMSProp(params=group_params, learning_rate=default_lr,
                                      decay=0.99, epsilon=1e-8,
                                      weight_decay=default_wd),
        "FTRL": lambda: nn.FTRL(params=group_params, learning_rate=default_lr,
                                weight_decay=default_wd),
        "Lamb": lambda: nn.Lamb(params=group_params, learning_rate=default_lr,
                                weight_decay=default_wd),
        "AdamWeightDecay": lambda: nn.AdamWeightDecay(params=group_params,
                                                      learning_rate=default_lr,
                                                      weight_decay=default_wd),
        "LazyAdam": lambda: nn.LazyAdam(params=group_params, learning_rate=default_lr,
                                        weight_decay=default_wd),
        "ProximalAdagrad": lambda: nn.ProximalAdagrad(params=group_params,
                                                      learning_rate=default_lr,
                                                      weight_decay=default_wd),
        # LARS wraps a freshly built Momentum optimizer, as before.
        "LARS": lambda: nn.LARS(nn.Momentum(params=group_params, learning_rate=default_lr,
                                            momentum=momentum,
                                            weight_decay=default_wd)),
    }
    return opt_factories[opt_type]()


def numpy_native_wrapper(creat=False):
    """
    Decorator factory that optionally re-runs the wrapped function as an
    ``ms_function`` graph.

    When the CONTEXT_MODE environment variable requests graph mode (and
    ``creat`` is False), the decorated function is executed under PyNative
    mode wrapped in ``ms_function``; otherwise it is called natively.

    :param creat: bool, when True always call the function natively,
        regardless of CONTEXT_MODE (name kept misspelled for backward
        compatibility with existing callers)
    :return: a decorator
    """
    graph_requested = False
    if 'CONTEXT_MODE' in os.environ:
        mode = os.environ['CONTEXT_MODE']
        if mode in ('GRAPH_MODE', 'GRAPH', 'CONTEXT.GRAPH_MODE'):
            graph_requested = True
    if creat:
        graph_requested = False

    def decorator(func):
        # Improvement: functools.wraps preserves the wrapped function's
        # metadata (__name__, __doc__, ...), which the original wrapper lost.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print("%s is running" % func.__name__)
            if graph_requested:
                from mindspore import context
                context.set_context(mode=context.PYNATIVE_MODE)

                @ms_function()
                def func_graph(*argsa):
                    return func(*argsa, **kwargs)

                return func_graph(*args)
            return func(*args, **kwargs)

        return wrapper

    return decorator


def search_string_in_file(file_name, string_to_search):
    """
    Search for the given string in file and return lines containing that string,
    along with line numbers
    """
    matches = []
    with open(file_name, 'r') as read_obj:
        for line_number, line in enumerate(read_obj, start=1):
            if string_to_search in line:
                matches.append((line_number, line.rstrip()))
    logger.info("== String search results: {} ==".format(matches))
    return matches
