import numpy as np
import torch
import torch.nn.functional as F


def get_torch_output(tensor, operator, variable=None):
    """Apply a single named PyTorch operator to *tensor* and return the result.

    Parameters
    ----------
    tensor : array-like
        Input data; converted to a float32 ``torch.Tensor``. Expected to be
        NCHW-shaped for the pooling / conv2d / batch_normalization branches.
    operator : str
        One of: 'avg_pool', 'max_pool', 'bias_add', 'conv2d', 'softmax',
        'batch_normalization', 'relu', 'dense', 'reduce_mean', 'reduce_max',
        'sigmoid', 'tanh'.
    variable : numpy.ndarray, optional
        Extra weights where the operator needs them: a bias broadcastable to
        the input shape for 'bias_add', an HWIO-layout conv kernel for
        'conv2d', a weight matrix for 'dense'.

    Returns
    -------
    torch.Tensor, or None when *operator* is not recognized.
    """
    real_tensor = torch.Tensor(tensor)
    if operator == 'avg_pool':
        return F.avg_pool2d(real_tensor, kernel_size=2, stride=2)
    elif operator == 'max_pool':
        torch.cuda.empty_cache()  # no-op on CPU-only / uninitialized CUDA
        return F.max_pool2d(real_tensor, kernel_size=2, stride=2)
    elif operator == 'bias_add':
        # np.broadcast_to() returns a read-only view; calling
        # setflags(write=True) on it raises ValueError on modern NumPy.
        # Copy into an owning, writable array instead.
        bias_nd = np.broadcast_to(variable, real_tensor.shape).copy()
        my_bias = torch.Tensor(bias_nd)
        return real_tensor.add(my_bias)
    elif operator == 'conv2d':
        conv2d = torch.nn.Conv2d(in_channels=real_tensor.shape[-3], out_channels=32,
                                 kernel_size=3, stride=1, bias=False)
        # Reorder the kernel from HWIO (TF layout) to OIHW (PyTorch layout).
        # Copy so we neither mutate the caller's array (the old
        # setflags(write=True) did) nor hand torch a non-contiguous view.
        variable_trans = np.transpose(variable, (3, 2, 0, 1)).copy()
        conv2d.weight = torch.nn.Parameter(torch.Tensor(variable_trans))
        return conv2d(real_tensor)
    elif operator == 'softmax':
        # Explicit dim: the implicit-dim form is deprecated, and dim=-1
        # matches tf.nn.softmax's last-axis default.
        return F.softmax(real_tensor, dim=-1)
    elif operator == 'batch_normalization':
        torch.cuda.empty_cache()
        # NOTE(review): torch's momentum is (1 - Keras momentum); 0.99 here
        # likely mirrors a Keras config and would be 0.01 in torch terms.
        # Running stats only — the training-mode output is unaffected.
        bn = torch.nn.BatchNorm2d(num_features=3, eps=1e-5, momentum=0.99, affine=False)
        return bn(real_tensor)
    elif operator == 'relu':
        return F.relu(real_tensor, inplace=False)
    elif operator == 'dense':
        w = torch.Tensor(variable)
        return torch.matmul(real_tensor, w)
    elif operator == 'reduce_mean':
        return torch.mean(real_tensor, dim=1, keepdim=False)
    elif operator == 'reduce_max':
        return torch.max(real_tensor, dim=1, keepdim=False).values
    elif operator == 'sigmoid':
        return torch.sigmoid(real_tensor)
    elif operator == 'tanh':
        return torch.tanh(real_tensor)
#     elif operator == 'conv3d':
#         my_filter = tf.Variable(tf.random.normal([3, 3, 3, real_tensor.shape[-1], 32], stddev=0.01))
#         return tf.nn.conv3d(real_tensor, my_filter, strides=[1, 1, 1, 1, 1], padding='SAME')
#     elif operator == 'dilation2d':
#         my_filter = tf.Variable(tf.random.normal([3, 3, real_tensor.shape[-1]], stddev=0.01))
#         return tf.nn.dilation2d(real_tensor, my_filter, strides=[1, 1, 1, 1], padding='SAME',
#                                 data_format='NHWC', dilations=[1, 1, 1, 1])
#     elif operator == 'depthwise_conv2d':
#         my_filter = tf.Variable(tf.random.normal([3, 3, real_tensor.shape[-1], 32], stddev=0.01))
#         return tf.nn.depthwise_conv2d(real_tensor, my_filter, strides=[1, 1, 1, 1], padding='SAME')
#     elif operator == 'softmax':
#         return tf.nn.softmax(real_tensor)
#     elif operator == 'erosion2d':
#         my_filter = tf.Variable(tf.random.normal([3, 3, real_tensor.shape[-1]], stddev=0.01))
#         return tf.nn.erosion2d(real_tensor, my_filter, strides=[1, 1, 1, 1], padding='SAME',
#                                data_format='NHWC', dilations=[1, 1, 1, 1])
#     elif operator == 'log_softmax':
#         return tf.nn.log_softmax(real_tensor)
#     else:
#         return ''
# w=torch.nn.Conv2d(2,2,3,padding=1)
# print(w.weight)
# print(type(w.weight))
# a = np.ones(w.weight.shape)
# aw = torch.nn.Parameter(torch.Tensor(a))
# w.weight = aw
# print(w.weight)