from re import T
import os
os.environ['GLOG_v'] = "2"
os.environ['GLOG_log_dir'] = '/var/log'

import data
import config
import utils
import model
import outerloop

import argparse
import mindspore
import mindspore as nn
import mindspore.ops as ops
import mindspore.nn as nn
from mindspore import context, ParameterTuple
from mindspore import Tensor
mindspore.ops.operations.array_ops


# Run in PyNATIVE (eager) mode on CPU and dump graph IR files for debugging.
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU", save_graphs=True, save_graphs_path="./ir_path")


    
# Load hyperparameter dictionaries for the dataset, the inner model, and
# the outer (meta-training) loop from the project's config module.
data_config = config.get_data_config()
inner_model_config = config.get_inner_model_config()
outer_model_config = config.get_outer_model_config()



    #定义裁剪梯度
def clip_gradients(gradients, gradient_threshold, gradient_norm_threshold):
    if gradient_threshold > 0:
        gradients = [
            ops.clip_by_value(g, -gradient_threshold, gradient_threshold)
            for g in gradients
        ]
    if gradient_norm_threshold > 0:
        gradients = [
            ops.clip_by_global_norm(g, gradient_norm_threshold)
            for g in gradients
        ]
    return gradients

class TrainOneStepCell(nn.Cell):
    """Wraps the meta-training network with gradient computation and the
    two learning-rate optimizers, performing one optimization step per call.

    Args:
        TrainNet: the outer-loop network; its trainable params include the
            learnable `inner_lr` / `finetuning_lr` parameters selected below.
        sens: scale factor for the backpropagated sensitivity (kept for
            interface compatibility; currently unused since sens_param=False).
    """

    def __init__(self, TrainNet, sens=1.0):
        super(TrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = TrainNet
        self.trainable_params = self.network.trainable_params()
        self.weights = ParameterTuple(self.network.trainable_params())

        # Pick out the learnable learning-rate parameters by name.
        self.inner_lr = ParameterTuple(list(filter(lambda x: "inner_lr" in x.name, self.trainable_params)))
        self.finetuning_lr = ParameterTuple(list(filter(lambda x: "finetuning_lr" in x.name, self.trainable_params)))

        # Separate Adam optimizers for the two learning-rate groups.
        self.inner_lr_opt = nn.Adam(self.inner_lr, learning_rate=1e-4, weight_decay=0)
        self.finetuning_lr_opt = nn.Adam(self.finetuning_lr, learning_rate=1e-4)
        # Rename this optimizer's state parameters so they do not collide
        # with the second optimizer's identically-named state.
        self.inner_lr_opt.learning_rate.name = "inner_lr"
        self.inner_lr_opt.beta1_power.name = "inner_lr_beta1_power"
        self.inner_lr_opt.beta2_power.name = "inner_lr_beta2_power"

        # NOTE(review): get_all=True also yields gradients w.r.t. the inputs
        # in addition to the get_by_list weight gradients — confirm the
        # structure handed to the optimizers matches their parameter lists.
        self.grad = ops.GradOperation(get_all=True, get_by_list=True, sens_param=False)

        self.sens = sens

    def construct(self, train_inputs, train_labels, val_inputs, val_labels):
        """Run one forward/backward pass and apply both optimizers once.

        Returns:
            (loss, inner_update, finetuning_update) — the meta-training loss
            and the return values of the two optimizer applications.
        """
        loss, _ = self.network(train_inputs, train_labels, val_inputs, val_labels)

        # Gradients of the loss w.r.t. all trainable weights.
        gradient_function = self.grad(self.network, self.weights)
        grads = gradient_function(train_inputs, train_labels, val_inputs, val_labels)

        # Gradient clipping.
        grads = clip_gradients(grads, outer_model_config['gradient_threshold'], outer_model_config['gradient_norm_threshold'])

        # Apply each optimizer exactly once. The previous code invoked each
        # optimizer twice (once as a statement and again inside the return
        # expression), double-stepping Adam every training step, and it
        # overwrote the real loss with the constant 1 before returning.
        inner_update = self.inner_lr_opt(grads)
        finetuning_update = self.finetuning_lr_opt(grads)
        return loss, inner_update, finetuning_update
     

if __name__ == '__main__':

    # Initial values for the learnable learning rates; updated from the
    # training cell's return values on every iteration below.
    inner_lr_init = inner_model_config['inner_lr_init']
    finetuning_lr_init = inner_model_config['finetuning_lr_init']

    # Task sampler that yields meta-training batches containing both the
    # per-task train split and the val split.
    data_utils = data.Data_Utils(
        train=True, seed=100, way=outer_model_config['num_classes'], shot=outer_model_config['num_tr_examples_per_class'],
        data_path=data_config['data_path'], dataset_name=data_config['dataset_name'], embedding_crop=data_config['embedding_crop'],
        batch_size=outer_model_config['metatrain_batch_size'], val_batch_size=outer_model_config['metavalid_batch_size'], test_batch_size=outer_model_config['metatest_batch_size'],
        meta_val_steps=outer_model_config['num_val_examples_per_class'], embedding_size=640, verbose=True
    )

    for step in range(outer_model_config['total_steps']):
        # NOTE(review): the outer-loop network is rebuilt on every iteration,
        # re-initializing its weights each step — confirm this is intended.
        outer_net = outerloop.OuterLoop(
            input_size=640, latent_size=inner_model_config['num_latents'], way=outer_model_config['num_classes'], shot=outer_model_config['num_tr_examples_per_class'],
            dropout=inner_model_config['dropout_rate'], kl_weight=inner_model_config['kl_weight'], encoder_penalty_weight=inner_model_config['encoder_penalty_weight'],
            inner_lr_init=inner_lr_init, finetuning_lr_init=finetuning_lr_init, inner_step=inner_model_config['inner_unroll_length'],
            finetune_inner_step=inner_model_config['finetuning_unroll_length'], is_meta_training=True
        )

        # One meta-training task batch (contains 'train' and 'val' splits).
        batch = data_utils.get_batch('train')

        one_step = TrainOneStepCell(outer_net)

        total_loss, inner_lr_init, finetuning_lr_init = one_step(
            batch['train']['input'], batch['train']['target'],
            batch['val']['input'], batch['val']['target']
        )

        print(str(step) + " total_loss:" + str(total_loss) + ", inner_lr_init:" + str(inner_lr_init) + ", finetuning_lr_init:" + str(finetuning_lr_init))




