# -*- coding: utf-8 -*-
"""
Created on Mon Apr 18 16:14:24 2022

@author: S1mple

Train the encoder (optionally with transfer learning) and evaluate its
performance on the test set using a ProtoNet classifier.

"""
# Plotting helper
from plotmap import plt_2_

# Model / classifier / checkpoint-path lookup dictionaries
from classifier.__init__ import classifier_dict
from model import model_dict
from pth.__init__ import pth_dict

# Project helper utilities
import utils

# Third-party packages
from GetData import GetDataTasks
import argparse
import learn2learn as l2l
import torch
from torch.utils.data import DataLoader
from torchvision import transforms as transforms
import numpy as np
import torch.nn as nn
import os

def model_2_features(model, model_name):
    """Strip the classification head from *model* so it outputs raw features.

    The adaptation depends on the backbone family named in *model_name*;
    names matching none of the known families are returned unchanged
    (resnet models are built head-less already — see SetModel).
    Returns the model in eval mode on the global ``device``.
    """
    if 'efficientnetb' in model_name:
        # Keep only the conv trunk and pool the feature map down to 1x1.
        model = nn.Sequential(
            model.features,
            torch.nn.AdaptiveAvgPool2d(1),
            )
    elif "SWIN_VIT" in model_name:
        # Project-specific flag — presumably disables the classifier branch
        # in the model's forward(); verify against the model definition.
        model.classifier = False
    elif 'vit' in model_name:
        model.is_classifier = False
    # NOTE: the original had a no-op `elif 'resnet': model = model` branch;
    # removed because falling through is identical.
    # Important: eval() turns off the dropout layers inside the model.
    model.eval()
    return model.to(device)

def SetModel(args):
    """Build the encoder named by ``args.encoder`` and optionally load
    pretrained weights via :func:`LoadModel`.

    The factory is looked up in ``model_dict``.  Families that support it
    are built with ``num_classes=0`` so the factory returns a head-less
    feature extractor.  Returns the model on the global ``device``.
    """
    # These factories share the same head-less constructor signature;
    # the original code had three byte-identical branches for them.
    headless_families = ('resnet', 'ResNet', 'convnext', 'efficientnetv2')
    if 'vit' in args.encoder:
        model = model_dict[args.encoder](global_pool=True)
    elif any(family in args.encoder for family in headless_families):
        # num_classes=0 asks for the final-layer features, no classifier.
        model = model_dict[args.encoder](model_name=args.encoder,
                                         num_classes=0)
    else:
        model = model_dict[args.encoder]()
    if args.pretrained:  # truthiness check instead of '== True'
        model = LoadModel(args, model)
    return model.to(device)
    
def SetClassifier(args, features, image_size):
    """Instantiate the few-shot classifier named by ``args.classifier``.

    For 'LR' and 'crosstransformer', a dummy forward pass through
    *features* is used to discover the output feature shape.

    Raises:
        ValueError: for an unknown classifier name.  (The original code
        fell through and crashed with a confusing NameError instead.)
    """
    if args.classifier == 'protonet':
        classifier = classifier_dict["protonet"](distance=args.distance)
    elif args.classifier == 'SVC':
        # Requires: pip install cvxpy and pip install qpth
        classifier = classifier_dict["SVC"]()
    elif args.classifier == 'FC':
        classifier = classifier_dict["FC"]()
    elif args.classifier == 'LR':
        # Probe the encoder with a dummy image to get the flattened size.
        probe = torch.ones([1, 3, image_size, image_size]).to(device)
        features_size = torch.flatten(features(probe), 1).shape[1]
        classifier = classifier_dict["LR"](features_size, args, device=device)
    elif args.classifier == 'crosstransformer':
        probe = torch.ones([1, 3, image_size, image_size]).to(device)
        img_shape = features(probe).shape
        classifier = classifier_dict["crosstransformer"](img_shape=img_shape,
                                                         args=args)
    else:
        raise ValueError(f"Unknown classifier: {args.classifier!r}")
    return classifier.to(device)

def fast_adapt(task, features, args, classifier):
    """Fit *classifier* on one few-shot task and return (accuracy, loss).

    The task tensor pair is split into support/query sets, encoded by
    *features* (see getFeatures), then the classifier is fitted on the
    support set and evaluated on the query set.
    """
    episode_data, episode_labels = task
    episode_data = episode_data.squeeze(0)
    episode_labels = episode_labels.squeeze(0)

    # Split the episode into support and query sets.
    (support_x, support_y), (query_x, query_y) = l2l.data.partition_task(
        data=episode_data,
        labels=episode_labels,
        shots=args.test_shot,
    )

    # Encode both splits into flat feature vectors.
    (support_x, support_y), (query_x, query_y) = getFeatures(
        features, args, support_x, support_y, query_x, query_y)

    # Fit the classifier on the support set (e.g. compute prototypes).
    classifier.train()
    classifier.fit_(support_x, support_y)
    classifier.to(device)

    # Evaluate on the query set without tracking gradients.
    classifier.eval()
    with torch.no_grad():
        predictions = classifier(query_x.to(device))
        accuracy = l2l.utils.accuracy(predictions, query_y)
        ce_loss = torch.nn.functional.cross_entropy(predictions, query_y)
    return accuracy, ce_loss
    
def SetDataset(args):
    """Load the evaluation tasks and wrap them in a DataLoader.

    Returns a ``(valid_loader, image_size)`` pair.
    """
    tasks, image_size = GetDataTasks().get2(args)
    loader = DataLoader(
        tasks,
        pin_memory=False,
        shuffle=False,
        num_workers=args.num_workers,
    )
    return loader, image_size

def SetDevice(args):
    """Return the cuda device when requested and available, else cpu."""
    if args.gpu and torch.cuda.device_count():
        print("Using gpu")
        torch.cuda.manual_seed(43)  # fixed seed for reproducibility
        return torch.device('cuda')
    return torch.device('cpu')

def LoadModel(args,model):
    """Load pretrained weights for *model* from the path in ``pth_dict``.

    The loading strategy depends on the encoder family:
    - 'vit': load a checkpoint dict, drop the classification head keys if
      their shapes mismatch, interpolate position embeddings, then do a
      non-strict state-dict load.
    - 'ResNet'/'resnet', 'convnext', 'efficientnetv2': delegate to
      ``utils.load_checkpoint`` with ``strict=False``.
    - anything else: non-strict load of only those tensors whose element
      counts match the model's own state dict.

    Returns the model (weights updated in place).
    """
    if 'vit' in args.encoder:
        # The following (commented-out) approach is equivalent:
        # del model.head
        # utils.load_checkpoint(model, pth_dict[args.encoder],strict=False)
        # print("权重读取成功！")
        # print(model)
        checkpoint = torch.load(pth_dict[args.encoder], map_location='cpu')

        print("Load pre-trained checkpoint from: %s" % pth_dict[args.encoder])
        checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # Drop head weights whose shape disagrees with this model's head
        # (e.g. different number of classes in the pretraining task).
        for k in ['head.weight', 'head.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        # interpolate position embedding
        utils.interpolate_pos_embed(model, checkpoint_model)
        
        # load pre-trained model (strict=False: unmatched keys are skipped)
        msg = model.load_state_dict(checkpoint_model, strict=False)
        print(msg)
    elif 'ResNet' in args.encoder or 'resnet' in args.encoder:
        utils.load_checkpoint(model,pth_dict[args.encoder],strict=False)
        print("ResNet权重读取成功！")
        
    elif ('convnext' in args.encoder)or ('efficientnetv2' in args.encoder):
        
        utils.load_checkpoint(model,pth_dict[args.encoder],strict=False)
        
    else:
        if os.path.exists(pth_dict[args.encoder]):
            # Keep only tensors whose element count matches this model.
            # NOTE(review): this will KeyError if the checkpoint contains
            # keys absent from model.state_dict() — confirm checkpoints
            # are always subsets of the model's keys.
            weights_dict = torch.load(pth_dict[args.encoder], map_location=device)
            load_weights_dict = {k: v for k, v in weights_dict.items()
                              if model.state_dict()[k].numel() == v.numel()} 
            print(model.load_state_dict(load_weights_dict, strict=False))# non-strict load of the matching weights
            print("权重读取成功！")
        else: 
            print("权重读取失败！")
    
    return model

def getFeatures(model, args, support_data, support_labels, query_data, query_labels):
    """Encode support/query images into flat feature vectors.

    When ``args.dataAugment`` is set, each augmentation enabled through
    ``args.augmentLayer`` is applied to the ORIGINAL support images and the
    augmented copies are appended after them.  The originals must stay
    first: LR weight initialization relies on the leading support rows.

    Returns ``((support_feats, support_labels), (query_feats, query_labels))``,
    all on the global ``device``, with labels cast to long.
    """
    if args.dataAugment:
        print("执行数据增强！")
        transforms_list = []
        if 'ColorJitter' in args.augmentLayer:
            transforms_list.append(transforms.ColorJitter(
                brightness=args.augmentBrightness,
                contrast=args.augmentContrast,
                hue=args.augmentHue,
                saturation=args.augmentSaturation))
        if 'RandomHorizontalFlip' in args.augmentLayer:
            transforms_list.append(transforms.RandomHorizontalFlip(p=args.augmentFlip))
        if 'RandomErasing' in args.augmentLayer:
            transforms_list.append(transforms.RandomErasing(p=args.augmentErasing))
        if 'AutoAugment' in args.augmentLayer:
            transforms_list.append(transforms.AutoAugment())

        original_data = support_data
        original_labels = support_labels
        for transform in transforms_list:
            augmented = transform(original_data)
            # Append augmented copies AFTER the originals.  Labels are
            # appended in the SAME order so rows stay aligned (the original
            # code prepended labels, which only worked because every
            # augmented copy carries the same labels as the originals).
            support_data = torch.cat([support_data, augmented], dim=0)
            support_labels = torch.cat([support_labels, original_labels], dim=0)
        # BUGFIX: the original unconditionally did `del data`, raising
        # NameError whenever dataAugment was set but no augmentation name
        # matched args.augmentLayer (empty transforms_list).
        del original_data

    # no_grad is essential: otherwise activations are retained for
    # backprop and GPU memory is exhausted.
    with torch.no_grad():
        support_data = model(support_data.to(device))
    support_data = torch.flatten(support_data, 1)

    with torch.no_grad():
        query_data = model(query_data.to(device))
    query_data = torch.flatten(query_data, 1)

    print("支持集：", support_data.shape)
    print("查询集：", query_data.shape)
    plt_2_(support_data)

    return ((support_data.to(device), support_labels.long().to(device)),
            (query_data.to(device), query_labels.long().to(device)))
    
    
def _str2bool(value):
    """Parse a CLI boolean.  (`type=bool` is broken: bool('False') is True.)"""
    if isinstance(value, bool):
        return value
    return value.lower() in ('1', 'true', 'yes', 'y')


def _float_pair(value):
    """Parse 'a,b' into a (float, float) tuple.

    (`type=tuple` was broken: it split the CLI string into single chars.)
    """
    if isinstance(value, tuple):
        return value
    return tuple(float(part) for part in value.split(','))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # -----------------------------------
    # Few-shot episode settings
    parser.add_argument('--max-epoch', type=int, default=1)
    parser.add_argument('--test-way', type=int, default=5)
    parser.add_argument('--test-shot', type=int, default=5)
    parser.add_argument('--test-query', type=int, default=1)

    # GPU / data loading
    parser.add_argument('--num_workers', type=int, default=0)
    # BUGFIX: without type=int, '--gpu 0' produced the truthy string '0'.
    parser.add_argument('--gpu', type=int, default=0)

    # Backbone and classifier
    parser.add_argument('--encoder', default='efficientnetb0')
    # BUGFIX: type=bool treated every non-empty string (even 'False') as True.
    parser.add_argument('--pretrained', type=_str2bool, default=True)
    parser.add_argument('--classifier', type=str, default='protonet')  # 'LR' 'SVC' 'protonet'
    # protonet settings
    parser.add_argument('--distance', type=str, default='cosine')  # for protonet: 'euclidean', 'cosine'
    # LR settings
    parser.add_argument('--LR_init', type=str, default='L2')  # for LR init weights: 'L2', 'gassion'
    parser.add_argument('--LR_epoch', type=int, default=1)
    parser.add_argument('--LR_drop', type=float, default=0.0)
    parser.add_argument('--LR_layers', type=str, default='')
    parser.add_argument('--LR_lr', type=float, default=1e-3)
    # Dataset
    parser.add_argument('--data_root', type=str, default='../../dataset/mini_imagenet/test')
    parser.add_argument('--val_res', type=int, default=224)
    parser.add_argument('--res_times', type=float, default=1)
    # Data augmentation
    parser.add_argument('--dataAugment', action='store_true')
    parser.add_argument('--augmentLayer', type=str, default='ColorJitter&&RandomHorizontalFlip')
    parser.add_argument('--augmentFlip', type=float, default=1)  # horizontal-flip probability
    # BUGFIX: these four used type=tuple, which split a CLI string like
    # "0.75,1.25" into a tuple of characters; defaults are unchanged.
    parser.add_argument('--augmentBrightness', type=_float_pair, default=(0.75, 1.25))  # brightness range
    parser.add_argument('--augmentContrast', type=_float_pair, default=(0.75, 1.25))  # contrast range
    parser.add_argument('--augmentSaturation', type=_float_pair, default=(0.75, 1.25))  # saturation range
    parser.add_argument('--augmentHue', type=_float_pair, default=(-0.25, 0.25))  # hue range
    parser.add_argument('--augmentErasing', type=float, default=1)  # random-erasing probability (original comment wrongly said 'hue')
    args = parser.parse_args()
    print(args)

    # Set the device from the parsed args.
    global device
    device = SetDevice(args)
    # Build the model, optionally loading pretrained weights.
    # (SetModel / model_2_features / SetClassifier already return their
    # results on `device`, so the original's extra .to(device) calls were
    # redundant and have been dropped.)
    model = SetModel(args)
    # Strip the model's own classifier so it yields features.
    features = model_2_features(model, args.encoder)
    # Load the evaluation images; returns the loader and the image size.
    valid_loader, image_size = SetDataset(args)
    # Build the classifier (probes `features` with a dummy input as needed).
    classifier = SetClassifier(args, features, image_size)

    # Evaluation loop: one few-shot task per step.
    valid_acc = 0.0
    valid_loss = 0.0
    acc_history = []
    for step, data in enumerate(valid_loader):
        acc, loss = fast_adapt(
            task=data,
            args=args,
            features=features,
            classifier=classifier,
        )
        valid_acc += acc.item()
        valid_loss += loss.item()
        acc_history.append(acc.item())
        print('\r step:{} , acc:{:.3f} , loss:{:.4f} '.format(
            step + 1, valid_acc / (step + 1), valid_loss / (step + 1)), end='')
    print('\nstd:', np.std(acc_history))