import MNN
import MNN.expr
import numpy as np
import tensorflow as tf
from src.generator_utils import get_random_seed_tensor


def _to_mnn_placeholder(tensor):
    """Copy a numpy array into a new MNN placeholder of the same shape."""
    var = MNN.expr.placeholder(tensor.shape)
    var.write(tensor.flatten().tolist())
    return var


# Operators that take a single MNN variable and no extra parameters.
_UNARY_OPS = {
    'avg_pool': lambda v: MNN.expr.avg_pool(v, [2, 2], [2, 2]),
    'max_pool': lambda v: MNN.expr.max_pool(v, [2, 2], [2, 2]),
    'softmax': lambda v: MNN.expr.softmax(v),
    'relu': lambda v: MNN.expr.relu(v),
    'sigmoid': lambda v: MNN.expr.sigmoid(v),
    'tanh': lambda v: MNN.expr.tanh(v),
    'reduce_mean': lambda v: MNN.expr.reduce_mean(v, [1], keepdims=False),
    'reduce_max': lambda v: MNN.expr.reduce_max(v, [1], keepdims=False),
}


def get_mnn_output(tensor, operator, variable=None):
    """Run a single named operator on *tensor* through MNN and return the result.

    Args:
        tensor: numpy array input. The conv/batch-norm branches read the
            channel count from ``tensor.shape[-3]``, i.e. they assume an
            NCHW-style layout — TODO confirm against callers.
        operator: name of the operator to apply (see branches below).
        variable: extra numpy parameter, required by 'bias_add' (bias vector),
            'conv2d' (HWIO filter, TensorFlow layout) and 'dense' (weight
            matrix); ignored otherwise.

    Returns:
        The MNN expression variable holding the operator's output.

    Raises:
        ValueError: if *operator* is not a supported name (the original
            silently returned None here, which hid typos at the call site).
    """
    if operator in _UNARY_OPS:
        return _UNARY_OPS[operator](_to_mnn_placeholder(tensor))

    if operator == 'bias_add':
        # NOTE(review): unlike every other branch, the raw input (not an MNN
        # placeholder) is forwarded here — confirm callers already pass an
        # MNN variable for this operator.
        return MNN.expr.bias_add(tensor, variable)

    if operator == 'conv2d':
        variable.setflags(write=True)
        # Reorder the TensorFlow HWIO filter into MNN's OIHW layout.
        variable_trans = np.transpose(variable, (3, 2, 0, 1))
        # Kernel size and output channels come from the filter itself
        # (generalizes the previously hard-coded 3x3 / 32-channel values).
        kernel_h, kernel_w, _, out_channels = variable.shape
        my_filter = MNN.expr.const(
            variable_trans,
            [out_channels, tensor.shape[-3], kernel_h, kernel_w])
        real_tensor = _to_mnn_placeholder(tensor)
        conv2d = MNN.nn.conv(tensor.shape[-3], out_channels,
                             [kernel_h, kernel_w], [1, 1], bias=False)
        conv2d.load_parameters([my_filter])
        res = conv2d(real_tensor)
        return MNN.expr.convert(res, MNN.expr.NCHW)

    if operator == 'batch_normalization':
        real_tensor = _to_mnn_placeholder(tensor)
        # Channel count read from the input, consistent with the conv2d
        # branch (generalizes the previously hard-coded channels=3).
        bn = MNN.nn.batch_norm(channels=tensor.shape[-3])
        res = bn(real_tensor)
        return MNN.expr.convert(res, MNN.expr.NCHW)

    if operator == 'dense':
        real_tensor = _to_mnn_placeholder(tensor)
        # Weight shape taken from the supplied matrix (generalizes the
        # previously hard-coded [16, 10]).
        w = MNN.expr.placeholder(list(variable.shape))
        w.write(variable.flatten().tolist())
        return MNN.expr.matmul(real_tensor, w)

    raise ValueError(f"unsupported operator: {operator!r}")


# if __name__ == '__main__':
    # tensor = get_random_seed_tensor([1, 28, 28, 1])
    # variable = tf.random.normal([np.asarray(tensor).shape[-1]], stddev=0.01).numpy()
    # print(type(get_mnn_output(tensor, 'bias_add', variable).read()))
    # random_input = np.random.random([1, 32, 14, 14])
    # input_var = MNN.expr.placeholder([1, 32, 14, 14])
    # input_var.write(random_input.flatten().tolist())
    # input_var_4 = MNN.expr.convert(input_var, MNN.expr.NC4HW4)
    # res = MNN.expr.avg_pool(input_var, [2, 2], [2, 2])
    # print(res.data_format)
    # res = MNN.expr.convert(res, MNN.expr.NCHW)
    # print(np.max(res.read() - tf.nn.avg_pool(random_input, 2, 2, 'VALID', data_format='NCHW').numpy()))