import numpy as np
import tensorflow as tf
import torch


def get_tf_output(tensor, operator, variable=None):
    """Apply a single named TensorFlow operator to *tensor* and return the result.

    Parameters
    ----------
    tensor : input tensor. The pooling / conv branches pass
        ``data_format='NCHW'``, so those callers presumably supply
        channels-first input — TODO confirm with call sites.
    operator : str
        Name of the operation to run (e.g. ``'avg_pool'``, ``'conv2d'``,
        ``'relu'``; see the dispatch table below for the full set).
    variable : optional extra operand, used only by some branches:
        bias vector for ``'bias_add'``, filter weights for ``'conv2d'``,
        weight matrix for ``'dense'``; ignored otherwise.

    Returns
    -------
    The output tensor of the selected op, or the empty string ``''``
    when *operator* is not recognised (callers rely on this sentinel,
    so it is kept rather than raising).
    """

    def _batch_norm(t):
        # A fresh, untrained BatchNormalization layer is built on every
        # call; axis=1 matches the channels-first layout used elsewhere.
        layer = tf.keras.layers.BatchNormalization(
            axis=1, momentum=0.99, epsilon=1e-5, autocast=True, trainable=True)
        return layer(tf.cast(t, dtype=tf.float32))

    def _dense(t):
        # Promote half precision before matmul (kept from the original
        # implementation — presumably works around a missing/poor
        # float16 matmul kernel; verify if still needed).
        if t.dtype == 'float16':
            t = tf.cast(t, tf.float32)
        return tf.matmul(t, variable)

    def _conv2d(t):
        kernel = tf.Variable(variable)
        return tf.nn.conv2d(t, kernel, strides=1, padding='VALID',
                            data_format='NCHW')

    # Operator name -> callable taking the input tensor.
    dispatch = {
        'avg_pool': lambda t: tf.nn.avg_pool(t, 2, 2, 'VALID',
                                             data_format='NCHW'),
        'max_pool': lambda t: tf.nn.max_pool(t, 2, 2, 'VALID',
                                             data_format='NCHW'),
        'bias_add': lambda t: tf.nn.bias_add(t, variable),
        'conv2d': _conv2d,
        'softmax': tf.nn.softmax,
        'batch_normalization': _batch_norm,
        'relu': tf.nn.relu,
        'dense': _dense,
        'reduce_mean': lambda t: tf.reduce_mean(t, 1, keepdims=False),
        'reduce_max': lambda t: tf.reduce_max(t, 1, keepdims=False),
        'reduce_min': lambda t: tf.reduce_min(t, 1, keepdims=False),
        'abs': tf.abs,
        'reduce_sum': lambda t: tf.reduce_sum(t, 1, keepdims=False),
        'sigmoid': tf.nn.sigmoid,
        'tanh': tf.nn.tanh,
        'square': tf.square,
    }

    op = dispatch.get(operator)
    # Unknown operators yield '' — unchanged caller-visible contract.
    return op(tensor) if op is not None else ''
