import mindspore
import mindspore.nn as nn
from mindspore.nn.loss import loss
import mindspore.ops as ops
from mindspore.common.initializer import initializer, Normal
from mindspore import Parameter

import numpy as np

def sample(weights, size):
    """Draw a reparameterized sample from distribution parameters.

    `weights` is split in half along axis 2 into (mean, spread); the
    return value is ``mean + spread * z`` with z ~ N(0, 1).

    NOTE(review): the second half is used directly as a multiplicative
    spread (std-dev-like), not exponentiated — confirm callers pass a
    std-dev rather than a log-variance.
    `size` is unused: ops.Split(2, 2) already halves axis 2 evenly.
    """
    mean, spread = ops.Split(2, 2)(weights)
    # Standard-normal noise (sigma=1, mean=0) shaped like `mean`.
    z = initializer(Normal(1, 0), shape=ops.Shape()(mean), dtype=mindspore.float32)
    return mean + spread * z

class Encoder(nn.Cell):
    """Encodes grouped embeddings into latent-distribution parameters.

    Applies dropout + a linear embedding, builds every pairwise
    combination of the N*K encoded samples, runs them through a relation
    network, averages over pairs, and splits the result into (mean, var)
    halves used for reparameterized sampling via `sample`.
    """

    def __init__(self, input_size, latent_size, droupout_r):
        # `droupout_r` keeps its (misspelled) name for caller compatibility.
        super(Encoder, self).__init__()
        self.input_size = input_size
        self.latent_size = latent_size
        self.dropout_r = droupout_r

        # NOTE(review): nn.Dropout's positional argument changed meaning
        # across MindSpore versions (keep_prob vs. drop probability p) —
        # confirm dropout_r matches the installed API.
        self.dropout = nn.Dropout(self.dropout_r)
        # Fixed: the original assigned self.encoder twice on one line.
        self.encoder = nn.Dense(self.input_size, self.latent_size, weight_init='normal', has_bias=False)
        # Outputs 2*latent_size so the result splits into mean/var halves.
        self.relation_network = nn.SequentialCell(
            nn.Dense(2*self.latent_size, 2*self.latent_size, weight_init='normal', has_bias=False),
            nn.ReLU(),
            nn.Dense(2*self.latent_size, 2*self.latent_size, weight_init='normal', has_bias=False),
            nn.ReLU(),
            nn.Dense(2*self.latent_size, 2*self.latent_size, weight_init='normal', has_bias=False),
            nn.ReLU()
        )

    def construct(self, inputs):
        # inputs: [batch, N, K, embed_size]
        after_dropout = self.dropout(inputs)
        encoder_outputs = self.encoder(after_dropout)
        b_size, N, K, latentsize = ops.Shape()(encoder_outputs)

        # Pair every encoded sample with every other: t1 repeats each
        # element K/N times, t2 tiles the whole tensor, then concat on
        # the feature axis.
        t1 = ops.repeat_elements(encoder_outputs, K, axis=2)
        t1 = ops.repeat_elements(t1, N, axis=1)
        t2 = ops.Tile()(encoder_outputs, (1, N, K, 1))
        x = ops.Concat(axis=len(ops.Shape()(t1)) - 1)((t1, t2))

        # x: [batch, N*N, K*K, 2*latent_size] -> relation net -> mean over pairs
        x = self.relation_network(x)
        x = ops.Reshape()(x, (b_size, N, N*K*K, 2*latentsize))
        x = ops.ReduceMean()(x, axis=2)

        latens = sample(x, self.latent_size)
        # Split relation output into its mean / variance halves.
        # (Removed a dead ops.Split block from the original whose results
        # were immediately overwritten by this slicing.)
        mean, var = x[:, :, :self.latent_size], x[:, :, self.latent_size:]

        return latens, mean, var

class Decoder(nn.Cell):
    """Maps latent codes to classifier-weight distribution parameters via
    a single bias-free linear transform, then samples concrete weights."""

    def __init__(self, input_size, latent_size):
        super(Decoder, self).__init__()
        self.input_size = input_size
        self.latent_size = latent_size
        self.matmul = ops.MatMul(transpose_b=True)
        self.shape_op = ops.Shape()
        self.reshape = ops.Reshape()
        # Weight shaped [2*input_size, latent_size]; with transpose_b=True
        # this acts as a bias-free Dense(latent_size -> 2*input_size).
        # NOTE(review): float64 here while the rest of the model uses
        # float32 — confirm the dtype mismatch is intentional.
        self.decoder = Parameter(initializer('normal', [2*self.input_size, self.latent_size], dtype=mindspore.float64), name="weight")

    def construct(self, x):
        """Decode latents `x` (..., latent_size) into sampled classifier
        weights of shape (..., input_size)."""
        # MatMul only accepts 2-D operands: flatten leading dims first,
        # restore them afterwards.
        x_shape = self.shape_op(x)
        if len(x_shape) != 2:
            x = self.reshape(x, (-1, x_shape[-1]))
        x = self.matmul(x, self.decoder)
        if len(x_shape) != 2:
            # Generalized from the original 3-D-only reshape: restore all
            # leading dimensions, not just the first two.
            x = self.reshape(x, x_shape[:-1] + (-1,))
        weights = x

        # Split the 2*input_size features into (mean, spread) and sample.
        classfer_weights = sample(weights, self.input_size)
        return classfer_weights

class WithLossDecoder(nn.Cell):
    """Bundles a decoder with a classifier so that decoding latents and
    computing the classification loss run as a single cell."""

    def __init__(self, decoder, classifier):
        super(WithLossDecoder, self).__init__()
        self.decoder = decoder
        self.classifier = classifier

    def construct(self, latents, inputs, target):
        # Decode latents into classifier weights, then score them.
        decoded_weights = self.decoder(latents)
        _, cls_loss = self.classifier(inputs, decoded_weights, target)
        return decoded_weights, cls_loss

class classifier(nn.Cell):
    """Scores inputs against generated weights with a batched matmul and
    evaluates sparse softmax cross-entropy against the targets."""

    def __init__(self, way, input_size):
        super(classifier, self).__init__()
        self.way = way
        self.input_size = input_size
        # Operators/Cells are created once here: instantiating Cells
        # inside construct() on every call is unsupported in MindSpore
        # graph mode (the original rebuilt them per call).
        self.criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
        self.softmax = nn.Softmax(axis=-1)
        self.log = ops.Log()
        self.shape = ops.Shape()
        self.reshape = ops.Reshape()
        self.batch_matmul = ops.BatchMatMul(transpose_b=True)
        self.squeeze = ops.Squeeze(axis=1)

    def construct(self, inputs, weights, target):
        b_size, N, K, input_size = self.shape(inputs)

        # Flatten the N/K group dims: [batch, N*K, input_size].
        inputs = self.reshape(inputs, (b_size, -1, input_size))

        # Predict: score each input against every weight row.
        outputs = self.batch_matmul(inputs, weights)
        outputs = self.reshape(outputs, (-1, self.shape(outputs)[-1]))
        outputs = outputs.astype('float32')
        # NOTE(review): log-softmax outputs are then fed to a criterion
        # that applies softmax again — preserved as-is; confirm intended.
        outputs = self.log(self.softmax(outputs))

        # Flatten targets to per-sample class indices.
        target = self.reshape(target, (self.shape(target)[0], -1, self.shape(target)[-1]))
        target = self.reshape(target, (-1, self.shape(target)[-1]))
        target = self.squeeze(target)

        loss = self.criterion(outputs, target)
        return outputs, loss

class LEO(nn.Cell):
    """Full LEO pipeline: encode inputs to latents, decode latents into
    classifier weights, then classify the flattened inputs."""

    def __init__(self, input_size, latent_size, way, shot, dropout_r, inner_lr_init, finetuning_lr_init):
        super(LEO, self).__init__()

        self.input_size = input_size
        self.latent_size = latent_size
        self.way = way
        self.shot = shot
        self.dropout_r = dropout_r
        self.inner_lr_init = inner_lr_init
        self.finetuning_lr_init = finetuning_lr_init

        # Learnable inner-loop / finetuning step sizes.
        self.inner_lr = mindspore.Parameter(mindspore.Tensor(self.inner_lr_init), name="inner_lr")
        self.finetuning_lr = mindspore.Parameter(mindspore.Tensor(self.finetuning_lr_init), name="finetuning_lr")
        # Sub-networks (attribute names preserved, including `classfier`).
        self.encoder = Encoder(self.input_size, self.latent_size, self.dropout_r)
        self.decoder = Decoder(self.input_size, self.latent_size)
        self.classfier = classifier(self.way, self.input_size)
        self.withlossdecoder = WithLossDecoder(self.decoder, self.classfier)

    def construct(self, inputs, target):
        # Encode and sample latents; the mean/var outputs are unused here.
        sampled_latents = self.encoder(inputs)[0]
        generated_weights = self.decoder(sampled_latents)

        batch, n_way, k_shot, feat = ops.Shape()(inputs)
        flat_inputs = ops.Reshape()(inputs, (batch, -1, feat))

        # Score the flattened inputs against the generated weights;
        # only the predictions are returned, the loss is discarded.
        predictions = self.classfier(flat_inputs, generated_weights, target)[0]
        return predictions


class CalLatentsGrad(nn.Cell):
    """Evaluates gradients of `model` for (latents, inputs, target);
    the GradOperation flags pick inputs vs. parameter gradients."""

    def __init__(self, model, getall=False, getbylist=False):
        super(CalLatentsGrad, self).__init__()
        self.model = model
        self.grad = ops.GradOperation(get_all=getall, get_by_list=getbylist)

    def construct(self, latents, inputs, target):
        # Build the gradient graph of the wrapped model and evaluate it.
        return self.grad(self.model)(latents, inputs, target)

class CalWeightsGrad(nn.Cell):
    """Evaluates gradients of `model` for (inputs, weights, target);
    the GradOperation flags pick inputs vs. parameter gradients."""

    def __init__(self, model, getall=False, getbylist=False):
        super(CalWeightsGrad, self).__init__()
        self.model = model
        self.grad = ops.GradOperation(get_all=getall, get_by_list=getbylist)

    def construct(self, inputs, weights, target):
        # Build the gradient graph of the wrapped model and evaluate it.
        return self.grad(self.model)(inputs, weights, target)