import mindspore
import mindspore.nn as nn
import mindspore.numpy as mnp
from mindspore.nn.loss.loss import MSELoss
import mindspore.ops as ops
#from mindspore.explainer.explanation import Gradient
import mindspore.common.initializer as initializer
import math
import numpy as np

import model
   
class OuterLoop(nn.Cell):
    """Meta-training outer loop for LEO (Latent Embedding Optimization).

    Wraps a ``model.LEO`` instance. For each meta-batch it:
      1. encodes the support set into latents,
      2. adapts the latents with a few inner gradient steps (``leo_inner_loop``),
      3. finetunes the decoded classifier weights (``finetune_inner_loop``),
      4. returns the validation loss regularized by a KL term, an encoder
         penalty, and a decoder-orthogonality penalty, plus validation accuracy.
    """

    def __init__(self, input_size, latent_size, way, shot,
                        dropout, kl_weight, encoder_penalty_weight,
                        inner_lr_init, finetuning_lr_init,
                        inner_step, finetune_inner_step, is_meta_training=True):
        super(OuterLoop, self).__init__()
        self.input_size = input_size
        self.latent_size = latent_size
        self.way = way    # N-way classification
        self.shot = shot  # K-shot support examples per class
        self.dropout = dropout
        # Loss-term weights are stored as float32 tensors so they combine
        # directly with tensor losses inside construct().
        self.kl_weight = mindspore.Tensor(float(kl_weight), mindspore.float32)
        self.encoder_penalty_weight = mindspore.Tensor(float(encoder_penalty_weight), mindspore.float32)
        self.inner_lr_init = inner_lr_init
        self.finetuning_lr_init = finetuning_lr_init
        self.inner_step = inner_step                    # latent-space adaptation steps
        self.finetune_inner_step = finetune_inner_step  # classifier-weight finetuning steps
        self.is_meta_training = is_meta_training

        self.leo = model.LEO(self.input_size, self.latent_size, self.way, self.shot,
                             self.dropout, self.inner_lr_init, self.finetuning_lr_init)

    def construct(self, train_inputs, train_labels, val_inputs, val_labels):
        """Run one meta-batch and return (regularized val loss, val accuracy)."""
        latents, mean, var = self.leo.encoder(train_inputs)
        kl = self.cal_kl_divergence(latents, mean, var)
        train_loss, adapted_classfier_weights, encoder_penalty = self.leo_inner_loop(
            train_inputs, train_labels, latents)

        val_loss, val_acc = self.finetune_inner_loop(
            train_inputs, train_labels, val_inputs, val_labels,
            train_loss, adapted_classfier_weights)

        val_loss = val_loss + self.kl_weight * kl
        val_loss = val_loss + self.encoder_penalty_weight * encoder_penalty

        # Only the decoder-orthogonality penalty is active; l2_regularization
        # over encoder/relation/decoder weights is available but disabled.
        regularization_penalty = self.orthogonality(list(self.leo.decoder.trainable_params())[0])

        batch_val_loss = ops.ReduceMean()(val_loss, axis=0)
        batch_val_acc = val_acc

        return batch_val_loss + regularization_penalty, batch_val_acc

    def leo_inner_loop(self, train_inputs, train_labels, latents):
        """Adapt ``latents`` by gradient descent on the support loss.

        Returns (final support loss, decoded classifier weights, encoder
        penalty = MSE between adapted and initial latents).
        """
        latents = latents.astype("float64")  # decoder dense layers require float64
        start_latents = latents
        classfier_weights = self.leo.decoder(latents)
        classfier_weights = classfier_weights.astype("float32")  # classifier needs float32
        _, loss = self.leo.classfier(train_inputs, classfier_weights, train_labels)

        for i in range(self.inner_step):
            train_inputs = train_inputs.astype("float64")
            # CalLatentsGrad builds a gradient function; index [0] selects the
            # gradient of the loss w.r.t. the latents.
            latents_grad = model.CalLatentsGrad(self.leo.withlossdecoder, getall=True)(
                latents, train_inputs, train_labels)[0]
            latents = latents - self.leo.inner_lr * latents_grad
            latents = mindspore.Tensor(latents, mindspore.float64)  # re-assert float64 for the decoder
            classfier_weights = self.leo.decoder(latents)
            _, loss = self.leo.classfier(train_inputs, classfier_weights, train_labels)

        getmse = MSELoss()
        latents = latents.astype("float32")
        start_latents = start_latents.astype("float32")
        if self.is_meta_training:
            encoder_penalty = getmse(latents, start_latents)
        else:
            # BUGFIX: the original assigned ``initializer.Constant(0.)`` here,
            # which is an initializer object, not a tensor; multiplying it by
            # ``encoder_penalty_weight`` in construct() would fail. Use a zero
            # tensor so the penalty term is a no-op at evaluation time.
            encoder_penalty = mindspore.Tensor(0.0, mindspore.float32)

        return loss.astype("float32"), classfier_weights.astype("float32"), encoder_penalty

    def finetune_inner_loop(self, train_inputs, train_labels, val_inputs, val_labels, leo_loss, classfier_weights):
        """Finetune decoded classifier weights directly, then score the query set."""
        train_loss = leo_loss

        for _ in range(self.finetune_inner_step):
            # CalWeightsGrad builds a gradient function that returns gradients
            # w.r.t. both the inputs and the classifier weights; index [1]
            # selects the weight gradient.
            loss_grad = model.CalWeightsGrad(self.leo.classfier, getall=True)(
                train_inputs, classfier_weights, train_labels)[1]
            classfier_weights = classfier_weights - self.leo.finetuning_lr * loss_grad
            # Force float32: dtype propagation through the update above is
            # unreliable in this MindSpore version.
            classfier_weights = classfier_weights.astype("float32")

            train_loss, _ = self.cal_target_loss(train_inputs, classfier_weights, train_labels)

        val_loss, val_acc = self.cal_target_loss(val_inputs, classfier_weights, val_labels)
        print(f'val_acc:{val_acc}--val_loss:{val_loss.mean()}')
        return val_loss, val_acc

    def cal_kl_divergence(self, latens, mean, var):
        """Monte-Carlo KL divergence between q(z|x)=N(mean, var) and N(0, I),
        estimated at the sampled ``latens``."""
        return ops.ReduceMean()(self.cal_log_prob(latens, mean, var) -
            self.cal_log_prob(latens, ops.Zeros()(ops.Shape()(mean), mindspore.float32),
                              ops.Ones()(ops.Shape()(var), mindspore.float32)))

    def cal_log_prob(self, x, mean, var):
        """Elementwise Gaussian log-density of ``x`` under N(mean, var).

        NOTE(review): ``var`` is used as a standard deviation here (division by
        ``var`` and ``log(var)``), so presumably the encoder emits std-devs —
        TODO confirm against model.LEO.encoder.
        """
        eps = 1e-32  # guards against division by / log of zero
        log_unnormalized = - 0.5 * ((x - mean) / (var + eps)) ** 2
        log_normalization = ops.Log()(var + eps) + 0.5 * math.log(2 * math.pi)

        return log_unnormalized - log_normalization

    def cal_target_loss(self, inputs, classfier_weights, target):
        """Return (classification loss, accuracy) of the classifier on ``inputs``."""
        outputs, target_loss = self.leo.classfier(inputs, classfier_weights, target)

        # Flatten targets to a 1-D label vector matching the flattened logits.
        target = ops.Reshape()(target, (ops.Shape()(target)[0], -1, ops.Shape()(target)[-1]))
        target = ops.Reshape()(target, (-1, ops.Shape()(target)[-1]))
        target = ops.Squeeze(axis=1)(target)

        pred = ops.Argmax(axis=-1)(outputs)
        # Round-trip through numpy to compare predictions with labels as float32.
        corr = ops.Equal()(pred, target).asnumpy().astype(np.float32)
        corr = ops.ReduceSum()(mindspore.Tensor(corr, mindspore.float32))
        total = ops.ReduceSum()(ops.Fill()(mindspore.float32, ops.Shape()(pred), 1))

        return target_loss, corr / total

    def orthogonality(self, weight):
        """Penalty pushing rows of ``weight`` toward mutual orthogonality:
        mean squared deviation of the row-correlation matrix from identity."""
        weight = weight.astype("float32")
        w2 = ops.MatMul(transpose_b=True)(weight, weight)

        wn = nn.Norm(axis=1, keep_dims=True)(weight) + 1e-32  # row norms; eps avoids div-by-zero
        correlation_matrix = w2 / ops.MatMul(transpose_b=True)(wn, wn)

        assert ops.Shape()(correlation_matrix)[0] == ops.Shape()(correlation_matrix)[1]
        I = ops.Eye()(ops.Shape()(correlation_matrix)[0],
                      ops.Shape()(correlation_matrix)[0], mindspore.float32)
        return ops.ReduceMean()((correlation_matrix - I) ** 2)

    def l2_regularization(self, weighte, weightr, weightd):
        """Sum of L2 losses over encoder weight ``weighte``, the first three
        relation-network weights in ``weightr``, and decoder weight ``weightd``.
        Currently unused by construct() (kept for experimentation)."""
        weighte = weighte.astype("float32")
        weightr_zero = weightr[0].astype("float32")
        weightr_one = weightr[1].astype("float32")
        weightr_two = weightr[2].astype("float32")
        weightd = weightd.astype("float32")
        l2_loss = ops.L2Loss()
        output = (l2_loss(weighte) + l2_loss(weightr_zero) + l2_loss(weightr_one)
                  + l2_loss(weightr_two) + l2_loss(weightd))

        return output