'''
Function:
    Implementation of Encoding Layer: a learnable residual encoder
Author:
    Zhenchao Jin
'''
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
import numpy as np

import luojianet
import luojianet.nn as nn
import luojianet.ops as ops
from luojianet import Parameter, Tensor
import luojianet.common.initializer as init


# '''Encoding'''
# class Encoding(nn.Module):
#     def __init__(self, channels, num_codes):
#         super(Encoding, self).__init__()
#         # init codewords and smoothing factor
#         self.channels, self.num_codes = channels, num_codes
#         std = 1. / ((num_codes * channels) ** 0.5)
#         # [num_codes, channels]
#         # self.codewords = nn.Parameter(torch.empty(num_codes, channels, dtype=torch.float).uniform_(-std, std), requires_grad=True)
#         # empty1 = luojianet.numpy.empty((num_codes, channels), dtype=luojianet.int32)
#         # empty2 = luojianet.numpy.empty(num_codes, dtype=luojianet.int32)
#         # test1 = ops.uniform(empty1, Tensor(-std), Tensor(std)) 
        
#         empty3 = np.empty((num_codes, channels), dtype=np.float)
#         # test2 = np.random.uniform(-std, std)
#         test2 = ops.uniform(Tensor(empty3, dtype=luojianet.int32), Tensor(-std), Tensor(std))
        

#         # self.codewords = Parameter(Tensor(test1, dtype=luojianet.float32), requires_grad=True)
#         self.codewords = Parameter(test2, requires_grad=True)
#         # [num_codes]
#         # self.scale = Parameter(torch.empty(num_codes, dtype=torch.float).uniform_(-1, 0), requires_grad=True)

#         empty4 = np.empty(num_codes, dtype=np.float)
#         test3 = np.random.uniform(-1, 0)

#         # self.scale = Parameter(ops.uniform(empty2, Tensor(-1), Tensor(0)), requires_grad=True)
#         self.scale = Parameter(Tensor(test3, dtype=luojianet.float32), requires_grad=True)
#     '''scaled l2'''
#     @staticmethod
#     def scaledl2(x, codewords, scale):
#         # batch_size = x.size(0)
#         # num_codes, channels = codewords.size()
#         batch_size = x.shape[0]
#         num_codes, channels = codewords.shape
#         reshaped_scale = scale.view((1, 1, num_codes))
#         # expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels))
#         expanded_x = x.unsqueeze(2).expand((batch_size, x.shape[1], num_codes, channels))
#         reshaped_codewords = codewords.view((1, 1, num_codes, channels))
#         scaledl2_norm = reshaped_scale * (expanded_x - reshaped_codewords).pow(2).sum(dim=3)
#         return scaledl2_norm
#     '''aggregate'''
#     @staticmethod
#     def aggregate(assigment_weights, x, codewords):
#         # batch_size = x.size(0)
#         batch_size = x.shape[0]
#         num_codes, channels = codewords.size()
#         reshaped_codewords = codewords.view((1, 1, num_codes, channels))
#         # expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels))
#         expanded_x = x.unsqueeze(2).expand((batch_size, x.shape[1], num_codes, channels))
#         encoded_feat = (assigment_weights.unsqueeze(3) * (expanded_x - reshaped_codewords)).sum(dim=1)
#         return encoded_feat
#     '''forward'''
#     def forward(self, x):
#         # assert x.dim() == 4 and x.size(1) == self.channels
#         assert x.dim() == 4 and x.shape[1] == self.channels
#         # [batch_size, channels, height, width]
#         # batch_size = x.size(0)
#         batch_size = x.shape[0]
#         # [batch_size, height x width, channels]
#         # x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
#         x = x.view(batch_size, self.channels, -1)
#         x = x.swapaxes(1, 2)
#         # assignment_weights: [batch_size, channels, num_codes]
#         # assigment_weights = F.softmax(self.scaledl2(x, self.codewords, self.scale), dim=2)
#         assigment_weights = ops.softmax(self.scaledl2(x, self.codewords, self.scale), axis=2)
#         # aggregate
#         encoded_feat = self.aggregate(assigment_weights, x, self.codewords)
#         return encoded_feat

# https://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/d37d2a17221d2681ad454958cf06a1065e9b1f7f/core/models/encnet.py#L13
class Encoding(nn.Module):
    '''Learnable residual Encoding Layer (EncNet).

    Encodes a BxDxN or BxDxHxW feature map against K learnable D-dim
    codewords and returns a BxKxD tensor of scale-weighted aggregated
    residuals (one D-dim vector per codeword).

    Args:
        D: feature (channel) dimension of the input.
        K: number of codewords.
    '''
    def __init__(self, D, K):
        super(Encoding, self).__init__()
        self.D, self.K = D, K
        # codewords: [K, D]; per-codeword smoothing factors: [K].
        # Placeholder values only — reset_params() overwrites both below.
        self.codewords = Parameter(Tensor(np.random.randn(K, D), luojianet.float32), requires_grad=True)
        self.scale = Parameter(Tensor(np.random.randn(K), luojianet.float32), requires_grad=True)
        self.reset_params()

    def reset_params(self):
        '''Initialize codewords ~ U(-std1, std1) and scale ~ U(-1, 0).'''
        std1 = 1. / ((self.K * self.D) ** (1 / 2))
        # Equivalent of torch's `codewords.data.uniform_(-std1, std1)`:
        # init.Uniform(std1) samples from U(-std1, std1).
        self.codewords.set_data(
            init.initializer(init.Uniform(std1), self.codewords.shape, self.codewords.dtype)
        )
        # Equivalent of torch's `scale.data.uniform_(-1, 0)`.
        # BUGFIX: the previous ops.UniformInt() draw over the integer
        # half-open range [-1, 0) produced the constant -1 for every
        # element; draw real-valued floats in [-1, 0) instead.
        self.scale.set_data(
            Tensor(np.random.uniform(-1., 0., self.scale.shape).astype(np.float32))
        )

    def forward(self, X):
        '''Encode X (BxDxN or BxDxHxW) into residual encodings of shape BxKxD.'''
        # channel dimension must match the codeword dimension
        assert (X.shape[1] == self.D)
        B, D = X.shape[0], self.D
        if X.dim() == 3:
            # BxDxN -> BxNxD
            X = X.swapaxes(1, 2)
        elif X.dim() == 4:
            # BxDxHxW -> Bx(HW)xD
            X = X.view(B, D, -1).swapaxes(1, 2)
        else:
            raise RuntimeError('Encoding Layer unknown input dims!')
        # soft-assignment weights over codewords: BxNxK
        A = ops.softmax(self.scale_l2(X, self.codewords, self.scale), axis=2)
        # aggregate assignment-weighted residuals: BxKxD
        E = self.aggregate(A, X, self.codewords)
        return E

    def __repr__(self):
        return self.__class__.__name__ + '(' \
               + 'N x' + str(self.D) + '=>' + str(self.K) + 'x' \
               + str(self.D) + ')'

    @staticmethod
    def scale_l2(X, C, S):
        '''Scaled squared distances between features X (BxNxD) and codewords C (KxD).

        Returns a BxNxK tensor used as softmax logits for soft assignment.
        NOTE(review): S multiplies the residual BEFORE squaring, so it enters
        the logits as S^2 (always non-negative). This follows the referenced
        repo, but the Encoding paper (and the commented-out legacy
        implementation above) applies s_k to the squared distance — confirm
        which is intended.
        '''
        # S: [K] -> [1, 1, K, 1] for broadcasting over (B, N, K, D)
        S = S.view(1, 1, C.shape[0], 1)
        # X: [B, N, D] -> [B, N, K, D]
        X = X.unsqueeze(2).broadcast_to((X.shape[0], X.shape[1], C.shape[0], C.shape[1]))
        # C: [K, D] -> [1, 1, K, D]
        C = C.unsqueeze(0).unsqueeze(0)
        SL = S * (X - C)
        # sum over the feature dimension D -> [B, N, K]
        SL = SL.pow(2).sum(3)
        return SL

    @staticmethod
    def aggregate(A, X, C):
        '''Aggregate residuals (X - C) weighted by assignments A (BxNxK) -> BxKxD.'''
        # A: [B, N, K] -> [B, N, K, 1]
        A = A.unsqueeze(3)
        # X: [B, N, D] -> [B, N, K, D]
        X = X.unsqueeze(2).broadcast_to((X.shape[0], X.shape[1], C.shape[0], C.shape[1]))
        # C: [K, D] -> [1, 1, K, D]
        C = C.unsqueeze(0).unsqueeze(0)
        E = A * (X - C)
        # sum over the N (spatial) dimension -> [B, K, D]
        E = E.sum(1)
        return E