import time

from src.classification.modify import modify
from src.impl.output import get_output_float16, get_output_float32_with_variable, get_output_float32
import numpy as np
from src.repair.repair import repair2, fit2, fit1

# Acceptance threshold: a repair succeeds when the mean element-wise relative
# error between the repaired output and the float32 benchmark is <= b.
b = 0.001


def fit(tensor, operator):
    """Propagate an element-wise input-error tensor through ``operator``.

    Given a tensor of per-element input perturbations, return a conservative
    (maximum-based) bound on how those perturbations appear at the operator's
    output.  Element-wise operators pass the error through unchanged; pooling
    and convolution take the window maximum; reductions take the axis maximum.

    Args:
        tensor: numpy array of non-negative per-element errors.  Pooling
            expects NCHW layout; conv2d reads channel 0 of an NCHW tensor.
        operator: operator name (e.g. 'max_pool', 'conv2d', 'dense', ...).

    Returns:
        numpy array with the operator's output shape, or None for an
        unknown operator (mirrors the original implicit fall-through).
    """
    if operator in ('bias_add', 'softmax', 'sigmoid', 'tanh', 'relu',
                    'batch_normalization'):
        # (Near-)element-wise ops: the error bound passes through unchanged.
        return tensor
    if operator in ('avg_pool', 'max_pool'):
        # 2x2 / stride-2 pooling over NCHW.  The window MAX is used even for
        # avg_pool, giving a conservative upper bound on the output error.
        # Shapes are derived from the input (generalizes the fixed 1x32x14x14).
        t = np.asarray(tensor)
        n, c, h, w = t.shape
        h_out, w_out = h // 2, w // 2
        windows = t[:, :, :2 * h_out, :2 * w_out].reshape(n, c, h_out, 2, w_out, 2)
        return windows.max(axis=(3, 5))
    if operator == 'conv2d':
        # 3x3 / stride-1 window max over channel 0, replicated across the 32
        # output feature maps used by this benchmark.
        t = np.asarray(tensor)
        channel = t[0][0]
        h_out, w_out = channel.shape[0] - 2, channel.shape[1] - 2
        window_max = np.max(
            np.stack([channel[di:di + h_out, dj:dj + w_out]
                      for di in range(3) for dj in range(3)]),
            axis=0)
        out_channels = 32  # fixed filter count of the benchmarked conv layer
        return np.broadcast_to(window_max,
                               (1, out_channels) + window_max.shape).copy()
    if operator in ('reduce_mean', 'reduce_max'):
        # Conservative bound: the reduced error is at most the axis maximum.
        return np.max(tensor, axis=-1)
    if operator == 'dense':
        # Every output unit may see the largest input error.
        return np.full(10, np.max(tensor))
    # Unknown operator: fall through (keeps the original implicit None).
    return None





def get_benchmark(tensor, operator):
    """Build a float32 reference output ("benchmark") for ``operator``.

    Runs the operator under TensorFlow, PyTorch and MNN in float32 and
    averages the two outputs that agree most closely (smallest summed
    absolute difference).

    Returns:
        (benchmark, variable) where ``variable`` is the extra state produced
        by get_output_float32 (e.g. operator weights), passed through as-is.
    """
    tf_out, torch_out, mnn_out, variable = get_output_float32(tensor, operator)

    # Pairwise disagreement (summed |a - b|) and the corresponding average.
    candidates = [
        (np.sum(np.abs(tf_out - torch_out)), tf_out / 2 + torch_out / 2),
        (np.sum(np.abs(tf_out - mnn_out)), tf_out / 2 + mnn_out / 2),
        (np.sum(np.abs(torch_out - mnn_out)), torch_out / 2 + mnn_out / 2),
    ]
    # min() keeps the first smallest entry, matching the original tie-breaking
    # order (tf/torch, then tf/mnn, then torch/mnn).
    benchmark = min(candidates, key=lambda pair: pair[0])[1]
    return benchmark, variable


def classify(tensor, operator, framework, i):
    """Classify sample ``i`` into a float16 failure category and try to repair it.

    Categories (returned as a one-hot list):
      [1, 0, 0] - the raw float16 output contains inf/nan (overflow class);
      [0, 1, 0] - the operator amplifies float16 input perturbations (factor > 1);
      [0, 0, 1] - otherwise.

    Args:
        tensor: input numpy array for the operator.
        operator: operator name understood by the output/repair helpers.
        framework: 'tf', 'torch', or anything else for MNN.
        i: sample index, used only to name failure dumps.

    Returns:
        (category, repaired_ok) where ``repaired_ok`` is True when the repair
        brought the mean element-wise relative error within the global
        threshold ``b``.  Failing samples are saved under ../data/failure/.
    """
    output_expect, variable = get_benchmark(tensor, operator)

    tf_output_16, torch_output_16, mnn_output_16, _ = get_output_float16(tensor, operator, variable)
    output = _select_output(framework, tf_output_16, torch_output_16, mnn_output_16)

    if np.any(np.isnan(output)) or np.any(np.isinf(output)):
        # Overflow/underflow class: rescale the input (fit1), rerun in
        # float16, then undo the scaling on the output (fit2).
        tensor_after, m = fit1(tensor)
        tf_output_16, torch_output_16, mnn_output_16, _ = get_output_float16(tensor_after, operator, variable)
        if framework == 'tf':
            repaired = fit2(tf_output_16, operator, m)
        elif framework == 'torch':
            repaired = fit2(torch_output_16, operator, m)
        else:
            repaired = fit2(mnn_output_16, operator, m)
        output = repaired.astype('float16')
        has_problem = np.any(np.isnan(output)) or np.any(np.isinf(output))
        return [1, 0, 0], not has_problem

    # Perturb every input element to its float16 neighbours and measure how
    # strongly the operator amplifies that perturbation at the output.
    tensor_low, tensor_high = _perturbed_tensors(tensor)
    output_low, _ = get_benchmark(tensor_low, operator)
    output_high, _ = get_benchmark(tensor_high, operator)

    err_low = np.abs(output_low - output_expect)
    err_high = np.abs(output_high - output_expect)

    # fit() gives a conservative bound on the output-side error implied by
    # the input-side perturbation; c > 1 means the operator amplified it.
    c_low = err_low / fit(np.abs(tensor_low - tensor), operator)
    c_high = err_high / fit(np.abs(tensor_high - tensor), operator)
    amplified = np.max(np.maximum(c_low, c_high)) > 1

    # Repair and evaluate the mean element-wise relative error.
    tensor_after = repair2(tensor, operator, variable)
    rel_err = _mean_relative_error(tensor_after, output_expect)

    category = [0, 1, 0] if amplified else [0, 0, 1]
    if rel_err <= b:
        return category, True
    # Dump the failing sample; the suffix (1 = amplified, 2 = not) and the
    # resulting filenames match the original per-branch save calls exactly.
    suffix = 1 if amplified else 2
    np.save('../data/failure/tensor{0}:{1}_{2}_{3}.npy'.format(framework, operator, i, suffix), tensor)
    np.save('../data/failure/output{0}:{1}_{2}_{3}.npy'.format(framework, operator, i, rel_err), output)
    return category, False


def _select_output(framework, tf_out, torch_out, mnn_out):
    """Pick the output of the framework under test, cast to float16."""
    if framework == 'tf':
        return tf_out.astype('float16')
    if framework == 'torch':
        return torch_out.astype('float16')
    return mnn_out.astype('float16')


def _perturbed_tensors(tensor):
    """Return (tensor_low, tensor_high): element-wise float16 neighbours from modify()."""
    lows = []
    highs = []
    for x in tensor.flatten():
        x_low, x_high = modify(x)
        lows.append(x_low)
        highs.append(x_high)
    return (np.asarray(lows).reshape(tensor.shape),
            np.asarray(highs).reshape(tensor.shape))


def _mean_relative_error(actual, expected):
    """Mean of |actual - expected| / (|actual| + 1e-7) over all elements."""
    rel = np.abs(actual - expected) / (np.abs(actual) + 1e-7)
    return np.sum(rel) / np.size(rel)


# def get_err_lst(tensor, operator, framework):
#
#     output_expect, variable = get_benchmark(tensor, operator)
#
#     tf_output_16, torch_output_16, mnn_output_16, _ = get_output_float16(tensor, operator, variable)
#
#     if framework == 'tf':
#         output = tf_output_16.astype('float16')
#     elif framework == 'torch':
#         output = torch_output_16.astype('float16')
#     else:
#         output = mnn_output_16.astype('float16')
#
#     lst_l = []
#     lst_h = []
#     for x in tensor.flatten():
#         x_low, x_high = modify(x)
#         lst_l.append(x_low)
#         lst_h.append(x_high)
#
#     # tensor_low 和 tensor_high
#     tensor_low = np.asarray(lst_l).reshape(tensor.shape)
#     tensor_high = np.asarray(lst_h).reshape(tensor.shape)
#
#     # output_low 和 output_high
#     output_low = get_benchmark(tensor_low, operator, variable)
#     output_high = get_benchmark(tensor_high, operator, variable)
#
#     # err(s)
#     err = np.maximum(output - output_expect, output_expect - output)
#     err_low = np.maximum(output_low - output_expect, output_expect - output_low)
#     err_high = np.maximum(output_high - output_expect, output_expect - output_high)
#
#     err_max = np.maximum(err_low, err_high)
#
#     err_diff = err - err_max
#
#     return np.argwhere(err_diff > 0).tolist()
import tensorflow as tf

if __name__ == '__main__':

    # tf.keras.backend.set_learning_phase(1)
    framework = 'tf'
    operator = 'sigmoid'
    root_path = '../data/{0}/{1}1/'.format(framework, operator)

    # res counts samples per category ([inf/nan, amplified, other]);
    # success_cases counts samples whose repair met the threshold.
    res = [0, 0, 0]
    success_cases = 0

    for sample_idx in range(10000):
        print('sample {0}'.format(sample_idx))
        sample = np.load(root_path + str(sample_idx) + '.npy')
        category, repaired_ok = classify(sample, operator, framework, sample_idx)
        res[category.index(1)] += 1
        success_cases += 1 if repaired_ok else 0
        print(res)
        print(success_cases)