import math

import numpy as np
import tensorflow as tf


def fit1(tensor):
    """Choose a scale factor m for *tensor* and return (m * tensor, m).

    The factor is derived from the extreme values of the tensor so the
    scaled data sits near the unit range; callers undo it later via
    fit2(..., m).
    """
    hi = np.max(tensor)
    lo = np.min(tensor)
    if abs(lo) > 1:
        scale = 1 / abs(hi)
    elif abs(hi) < 1:
        scale = 1 / abs(lo)
    else:
        # Mixed-magnitude case: pick whichever bound dominates.
        scale = 1 / abs(hi) if abs(hi) > abs(1 / lo) else 1 / abs(lo)
    return scale * tensor, scale


def fit2(tensor, operator, m):
    """Undo the fit1 scaling after *operator* has been applied.

    Normalizing operators (softmax/sigmoid/tanh) are scale-invariant in
    their output range, so the tensor passes through untouched; every
    linear-ish operator is rescaled by 1/m.  Returns None for an unknown
    operator (matching the original implicit fall-through).
    """
    if operator in ('softmax', 'sigmoid', 'tanh'):
        return tensor
    if operator in ('bias_add', 'avg_pool', 'max_pool', 'relu', 'conv2d',
                    'batch_normalization', 'reduce_mean', 'reduce_max',
                    'dense'):
        return tensor * (1 / m)
    return None


def e_pow_by_iter(x):
    """Return e**x.

    Deliberately uses ``pow(math.e, x)`` rather than ``math.exp(x)``:
    with numpy scalar inputs, pow overflows to ``inf`` instead of
    raising OverflowError, and repair2's tanh branch relies on that
    ``inf`` sentinel to saturate to +/-1.
    """
    return pow(math.e, x)


def split(a, m=11):
    """Veltkamp split of *a* into a high and low part with x + y == a.

    Fix: the original computed ``r = m - m / 2`` which under Python 3
    true division gives 5.5, making ``c = 2**5.5 + 1`` — not of the
    required ``2**s + 1`` form, so the split was no longer error-free.
    Integer division restores the intended shift (s = 6 for m = 11).
    NOTE(review): m = 11 suggests the target significand width is 11
    bits — confirm against the precision eft() is meant to emulate.
    """
    s = m - m // 2
    c = 2 ** s + 1
    p = a * c
    # (a - p) + p is the negated form of the classic p - (p - a);
    # negation is exact, so this is the same Veltkamp high part.
    x = a - p + p
    y = a - x
    return x, y


# def eft_tensor(a, b, op):
#     if op == '+':
#         x = a + b
#         z = x + a
#         y = a - (x - z) + (b - z)
#         x = x + y
#     elif op == '*':
#         a1, a2 = split(a)
#         b1, b2 = split(b)
#         x = a.dot(b)
#         y = a2.dot(b2) - ((x - a1.dot(b1)) - a2.dot(b1) - a1.dot(b2))
#         x = x + y
#     return x
#
#
def eft(a, b, op):
    """Compensated scalar op: return a+b or a*b with its rounding error
    folded back in (error-free transformation).

    '+' uses Knuth's TwoSum, '*' uses Dekker's TwoProduct via split().
    """
    if op == '+':
        # Knuth TwoSum: x = fl(a + b), y = the exact rounding error.
        x = a + b
        # Fix: original read ``z = x + a``; TwoSum requires z = x - a,
        # otherwise y is computed with avoidable cancellation error.
        z = x - a
        y = a - (x - z) + (b - z)
        x = x + y
    elif op == '*':
        a1, a2 = split(a)
        b1, b2 = split(b)
        x = a * b
        y = a2 * b2 - ((x - a1 * b1) - a2 * b1 - a1 * b2)
        x = x + y
    return x


def repair2(tensor, operator, variable):
    """Re-evaluate one network operator with compensated (EFT) arithmetic.

    Dispatches on *operator* and recomputes the result element-wise via
    eft()/e_pow_by_iter() so each scalar step is an error-free
    transformation.  All loop bounds are hard-coded for one specific
    model (32-channel pooling/conv shapes, 20-wide tanh/sigmoid, 100-way
    softmax, (16, 30) reductions, (1, 256) x (256, 10) dense).

    Fix: the 'reduce_max' branch previously seeded its running maximum
    with 0, which clipped all-negative rows up to 0; it now seeds with
    the row's first element.
    """
    if operator == 'bias_add':
        return eft(tensor, variable, '+').astype('float32')
    elif operator == 'avg_pool':
        # 2x2, stride-2 average pooling over NCHW input (indexing
        # assumes shape (1, 32, 14, 14) — TODO confirm).  Result is
        # built NHWC (1, 7, 7, 32), then transposed back to NCHW.
        lst = [[[[0 for k in range(32)] for i in range(7)] for j in range(7)]]
        for i in range(7):
            for j in range(7):
                for k in range(32):
                    # Four compensated multiplies by 1/4, then a
                    # compensated summation tree over the 2x2 window.
                    op1 = eft(tensor[0][k][2*i][2*j], 1/4, '*')
                    op2 = eft(tensor[0][k][2*i][2*j+1], 1/4, '*')
                    op3 = eft(tensor[0][k][2*i+1][2*j], 1/4, '*')
                    op4 = eft(tensor[0][k][2*i+1][2*j+1], 1/4, '*')
                    op5 = eft(op1, op2, '+')
                    op6 = eft(op3, op4, '+')

                    lst[0][i][j][k] = eft(op5, op6, '+')
        tensor_new = np.asarray(lst)
        return np.transpose(tensor_new, (0, 3, 1, 2)).astype('float32')
    elif operator == 'max_pool':
        # 2x2, stride-2 max pooling; max() is exact, so no EFT needed.
        # NOTE(review): unlike its siblings this branch does not cast to
        # float32 — kept as-is to preserve behavior.
        lst = [[[[0 for k in range(32)] for i in range(7)] for j in range(7)]]
        for i in range(7):
            for j in range(7):
                for k in range(32):
                    lst[0][i][j][k] = max(tensor[0][k][2*i][2*j], tensor[0][k][2*i][2*j+1],
                                       tensor[0][k][2*i+1][2*j], tensor[0][k][2*i+1][2*j+1])
        tensor_new = np.asarray(lst)
        return np.transpose(tensor_new, (0, 3, 1, 2))
    elif operator == 'relu':
        return np.maximum(tensor, 0).astype('float32')
    elif operator == 'tanh':
        # tanh(x) = (e^x - e^-x) / (e^x + e^-x); inf/0 overflow of the
        # exponential saturates the result to +/-1.
        lst = [[0 for x in range(20)]]
        for i in range(20):
            if e_pow_by_iter(tensor[0][i]) == float('inf'):
                lst[0][i] = 1.0
            elif e_pow_by_iter(tensor[0][i]) == 0.0:
                lst[0][i] = -1.0
            else:
                op1 = eft(e_pow_by_iter(tensor[0][i]), -e_pow_by_iter(-tensor[0][i]), '+')
                op2 = eft(e_pow_by_iter(tensor[0][i]), e_pow_by_iter(-tensor[0][i]), '+')
                lst[0][i] = eft(op1, 1 / op2, '*')
        np_lst = np.asarray(lst)
        np_lst = np_lst.astype('float32')
        return np_lst
    elif operator == 'sigmoid':
        # sigmoid(x) = 1 / (1 + e^-x)
        lst = [[0 for x in range(20)]]
        for i in range(20):
            op1 = eft(1, e_pow_by_iter(-tensor[0][i]), '+')
            lst[0][i] = eft(1, 1 / op1, '*')
        np_lst = np.asarray(lst)
        np_lst = np_lst.astype('float32')
        return np_lst
    elif operator == 'softmax':
        # NOTE(review): no max-subtraction, so e^x may overflow to inf
        # for large logits — kept as-is to preserve behavior.
        res = 0
        for i in range(100):
            res = eft(res, e_pow_by_iter(tensor[0][i]), '+')
        lst = [[0 for x in range(100)]]
        for i in range(100):
            lst[0][i] = eft(e_pow_by_iter(tensor[0][i]), 1 / res, '*')
        return np.asarray(lst).astype('float32')
    elif operator == 'reduce_mean':
        # Row means of a (16, 30) tensor via compensated multiply-add.
        lst = []
        for i in range(16):
            avg = 0
            for j in range(30):
                single = eft(tensor[i][j], 1/30, '*')
                avg = eft(avg, single, '+')
            lst.append(avg)
        return np.asarray(lst).astype('float32')
    elif operator == 'reduce_max':
        # Row maxima of a (16, 30) tensor.
        lst = []
        for i in range(16):
            # Fix: seed with the first element, not 0 — seeding with 0
            # returned 0 for rows whose values are all negative.
            mv = tensor[i][0]
            for j in range(1, 30):
                mv = max(mv, tensor[i][j])
            lst.append(mv)
        return np.asarray(lst).astype('float32')
    elif operator == 'batch_normalization':
        # Per-channel normalization of an NCHW input with 3 channels of
        # 784 (presumably 28x28 — TODO confirm) values each.  E[x] and
        # E[x^2] are accumulated with EFTs; var = E[x^2] - E[x]^2.
        avg_r = 0
        var_r = 0
        avg_g = 0
        var_g = 0
        avg_b = 0
        var_b = 0
        tensor_trans = np.transpose(tensor, (1, 0, 2, 3))

        for x in tensor_trans[0].flatten():
            single = eft(x, 1 / 784, '*')
            single_2 = eft(single, x, '*')
            avg_r = eft(avg_r, single, '+')
            var_r = eft(var_r, single_2, '+')
        avg_r_2 = eft(avg_r, avg_r, '*')
        var_r = eft(var_r, -avg_r_2, '+')

        for x in tensor_trans[1].flatten():
            single = eft(x, 1 / 784, '*')
            single_2 = eft(single, x, '*')
            avg_g = eft(avg_g, single, '+')
            var_g = eft(var_g, single_2, '+')
        avg_g_2 = eft(avg_g, avg_g, '*')
        var_g = eft(var_g, -avg_g_2, '+')

        for x in tensor_trans[2].flatten():
            single = eft(x, 1 / 784, '*')
            single_2 = eft(single, x, '*')
            avg_b = eft(avg_b, single, '+')
            var_b = eft(var_b, single_2, '+')
        avg_b_2 = eft(avg_b, avg_b, '*')
        var_b = eft(var_b, -avg_b_2, '+')

        # NOTE: np.transpose returns a view, so the in-place updates
        # below also mutate the caller's *tensor* argument.
        tensor_trans[0] = tensor_trans[0] - avg_r
        tensor_trans[1] = tensor_trans[1] - avg_g
        tensor_trans[2] = tensor_trans[2] - avg_b

        tensor_trans[0] = tensor_trans[0] / pow(var_r + 1e-5, 0.5)
        tensor_trans[1] = tensor_trans[1] / pow(var_g + 1e-5, 0.5)
        tensor_trans[2] = tensor_trans[2] / pow(var_b + 1e-5, 0.5)

        return np.transpose(tensor_trans, (1, 0, 2, 3)).astype('float32')
    elif operator == 'dense':
        # (1, 256) x (256, 10) matmul via compensated dot products.
        # NOTE(review): no float32 cast here, unlike most branches —
        # kept as-is to preserve behavior.
        lst = []
        for i in range(10):
            res = 0
            for j in range(256):
                single = eft(tensor[0][j], variable[j][i], '*')
                res = eft(res, single, '+')
            lst.append(res)
        return np.asarray(lst).reshape((1, 10))
    elif operator == 'conv2d':
        # 3x3 valid convolution: single input channel, 32 output
        # channels, 26x26 output (assumes a (1, 1, 28, 28) input and a
        # (3, 3, 1, 32) kernel — TODO confirm).
        lst = [[[[0 for i in range(26)] for j in range(26)] for k in range(32)]]
        for i in range(26):
            for j in range(26):
                for k in range(32):
                    conv = 0
                    for x in range(3):
                        for y in range(3):
                            single = eft(variable[x][y][0][k], tensor[0][0][i+x][j+y], '*')
                            conv = eft(conv, single, '+')
                    lst[0][k][i][j] = conv
        np_lst = np.asarray(lst)
        return np_lst.astype('float32')


# from src.generator_utils import get_random_seed_tensor
# import time
# import threading
#
# lst = [[0 for i in range(676)] for j in range(32)]


# class myThread (threading.Thread):
#     def __init__(self, s, e, v, res):
#         threading.Thread.__init__(self)
#         self.s = s
#         self.e = e
#         self.variable = v
#         self.res = res
#
#     def run(self):
#         global lst
#         for k in range(self.s, self.e):
#             for i in range(32):
#                 for j in range(676):
#                     single = eft(variable[i][k], res[k][j], '*')
#                     lst[i][j] = eft(lst[i][j], single, '+')


# if __name__ == '__main__':
#     total = 0
#     for t in range(10):
#         variable = variable = tf.random.normal([3, 3, 1, 32], stddev=0.01).numpy()
#         tensor = get_random_seed_tensor((1, 1, 28, 28))

        # variable = variable.reshape(-1, 32)
        # variable = np.transpose(variable, (1, 0))
        # tensor = tensor.reshape(-1, 28*28)
        # tensor_ = np.concatenate((tensor, tensor))
        # tensor_= np.concatenate((tensor_, tensor_))
        # tensor_= np.concatenate((tensor_, tensor_))
        # tensor_= np.concatenate((tensor_, tensor))
        # res = variable.dot(tensor_)
        # print(res.shape)
#         res = tensor[0, 0, :3, :3].reshape(9, -1)
#         for j in range(1, 26):
#             res = np.concatenate((res, tensor[0, 0, :3, j:j + 3].reshape(9, -1)), axis=1)
#         for i in range(1, 26):
#             for j in range(26):
#                 res = np.concatenate((res, tensor[0, 0, i:i + 3, j:j + 3].reshape(9, -1)), axis=1)
#         variable = variable.reshape(-1, 32)
#         variable = np.transpose(variable, (1, 0))
#         tt = time.time()
#         for k in range(9):
#             for i in range(32):
#                 for j in range(676):
#                     single = eft(variable[i][k], res[k][j], '*')
#                     lst[i][j] = eft(lst[i][j], single, '+')
#
#         # for k in range(32):
#         #     for i in range(26):
#         #         for j in range(26):
#         #             conv = 0
#         #             for x in range(3):
#         #                 for y in range(3):
#         #                     single = eft(variable[x][y][0][k], tensor[0][0][i + x][j + y], '*')
#         #                     conv = eft(conv, single, '+')
#         #             lst[0][k][i][j] = conv
#         total += time.time() - tt
# print(total / 10)