############
## Import ##
############
import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
from lion_pytorch import Lion
from torch.utils.data import DataLoader
from model.model import encoder
from dataset.datasets import load_dataset
import matplotlib.pyplot as plt
import numpy as np
import torch.nn.functional as F
import torchvision.transforms.functional as FF
from tqdm import tqdm
from torchvision.datasets import CIFAR10
from loss import TotalCodingRate
from func import chunk_avg
from lars import LARS, LARSWrapper
from func import WeightedKNNClassifier
import torch.optim.lr_scheduler as lr_scheduler
from torch.cuda.amp import GradScaler, autocast

######################
## Parsing Argument ##
######################
# NOTE: removed a duplicate `import argparse` here — it is already imported
# at the top of the file.
parser = argparse.ArgumentParser(description='Unsupervised Learning')

parser.add_argument('--patch_sim', type=int, default=200,
                    help='coefficient of cosine similarity (default: 200)')
parser.add_argument('--tcr', type=int, default=1,
                    help='coefficient of tcr (default: 1)')
parser.add_argument('--num_patches', type=int, default=100,
                    help='number of patches used in EMP-SSL (default: 100)')
parser.add_argument('--arch', type=str, default="resnet18-cifar",
                    help='network architecture (default: resnet18-cifar)')
# Fixed help text below: it previously claimed defaults (100, 0.3) that did
# not match the actual defaults (8, 0.03).
parser.add_argument('--bs', type=int, default=8,
                    help='batch size (default: 8)')
parser.add_argument('--lr', type=float, default=0.03,
                    help='learning rate (default: 0.03)')
parser.add_argument('--eps', type=float, default=0.2,
                    help='eps for TCR (default: 0.2)')
parser.add_argument('--msg', type=str, default="NONE",
                    help='additional message for description (default: NONE)')
parser.add_argument('--dir', type=str, default="EMP-SSL-Training-2",
                    help='directory name (default: EMP-SSL-Training-2)')
parser.add_argument('--data', type=str, default="cifar10",
                    help='data (default: cifar10)')
parser.add_argument('--epoch', type=int, default=30,
                    help='max number of epochs to finish (default: 30)')

args = parser.parse_args()

print(args)

num_patches = args.num_patches
# Log/checkpoint directory encodes the main hyperparameters.
dir_name = f"./logs/{args.dir}/{args.data}/{args.arch}/patchsim{args.patch_sim}_numpatch{args.num_patches}_bs{args.bs}_lr{args.lr}_{args.msg}"

# Architectures whose encoder returns (representation, probabilities).
capsule_network_family_list = ['efficient-capsule-orgin' ,'efficient-capsule' ,'efficient-res-capsule',
                               'res-capsule', 'efficient-resnet18-capsule','simple_efficient_res_capsule',
                               'resizer_efficient-resnet18-capsule']
crate_network_family_list = ['crate_small','crate_base','crate_large', 'crate_tiny', 'crate_tiny_small']
#####################
## Helper Function ##
#####################

def chunk_avg(x, n_chunks=2, normalize=False):
    """Average a tensor that is the concatenation of `n_chunks` equal chunks.

    Splits `x` into `n_chunks` pieces along dim 0 and returns their
    element-wise mean (shape: one chunk). With `normalize=True` the result
    rows are additionally L2-normalized along dim 1.

    NOTE(review): this local definition shadows the `chunk_avg` imported
    from `func` at the top of the file.
    """
    stacked = torch.stack(x.chunk(n_chunks, dim=0), dim=0)
    mean = stacked.mean(dim=0)
    return F.normalize(mean, dim=1) if normalize else mean


class Similarity_Loss(nn.Module):
    """Negative mean cosine similarity between each patch embedding and the
    mean embedding across patches (the EMP-SSL invariance term).

    NOTE(review): the `z_avg` argument is ignored — the average is
    recomputed from `z_list`. The caller happens to pass the same value, so
    this is redundant but harmless; preserved for behavioral parity.
    """

    def __init__(self):
        super().__init__()

    def forward(self, z_list, z_avg):
        stacked = torch.stack(list(z_list), dim=0)  # (num_patch, B, D)
        center = stacked.mean(dim=0)                # recomputed average
        sims = [F.cosine_similarity(patch, center, dim=1).mean()
                for patch in stacked]
        avg_sim = sum(sims) / len(sims)
        # Return the loss (negated, to minimize) plus a detached copy for logging.
        return -avg_sim, avg_sim.clone().detach()
    
def cal_TCR(z, criterion, num_patches):
    """Mean of `criterion` applied to each of the `num_patches` chunks of
    `z` along dim 0 (per-patch Total Coding Rate, averaged)."""
    chunks = z.chunk(num_patches, dim=0)
    total = sum(criterion(chunk) for chunk in chunks)
    return total / num_patches

######################
## Prepare Training ##
######################
torch.multiprocessing.set_sharing_strategy('file_system')

# Both ImageNet variants load the "imagenet" dataset; everything else loads
# the dataset named on the command line. The two original branches built an
# identical DataLoader, so they are merged here.
_dataset_name = "imagenet" if args.data in ("imagenet100", "imagenet") else args.data
train_dataset = load_dataset(_dataset_name, train=True, num_patch=num_patches)
dataloader = DataLoader(train_dataset, batch_size=args.bs, shuffle=True,
                        drop_last=True, num_workers=4)

# Device selection: CUDA when available, otherwise CPU. (Equivalent to the
# original nested `use_cuda` logic, which always preferred CUDA.)
use_cuda = torch.cuda.is_available()
device = "cuda" if use_cuda else "cpu"

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# NOTE(review): setting CUDA_VISIBLE_DEVICES after torch.cuda has been
# queried above may have no effect — confirm intended GPU selection.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"


models_list = ['vgg16', 'vgg16_bn', 'resnet18', 'resnet-34', 'resnet-50',
               'resnet101', 'resnet152','mobilenet_v3',
               'efficient-resnet18-capsule', 'resizer_efficient-resnet18-capsule',
               'crate_small' ,'crate_base' ,'crate_large', 'crate_tiny', 'crate_tiny_small',
               'vit-b-50', 'vit-b-125', 'vit-l-50', 'vit-l-125','glom',
               'faster_vit_4_21k_224']

# Per-architecture hidden dimension handed to the encoder. Architectures not
# listed here fall back to 2048 (this includes the default 'resnet18-cifar').
# Replaces the original long if/elif chain with a lookup table.
_HIDDEN_DIMS = {
    'efficient-res-capsule': 256,
    'efficient-capsule-orgin': 16,
    'conv-encoder': 256,
    'efficient-capsule': 512,
    'bilateralfsunet': 128,
    'vgg16': 4096,
    'vgg16_bn': 4096,
    'resnet18': 512,
    'resnet18-imagenet': 512,
    'resnet34': 512,
    'resnet50': 2048,
    'resnet101': 2048,
    'resnet152': 2048,
    'mobilenet_v3': 1024,
    'efficient-resnet18-capsule': 128,
    'simple_efficient_res_capsule': 288,
    'resizer_efficient-resnet18-capsule': 256,
    'glom': 1024,
}
# CRATE family defaults to 128, except crate_base which uses 768 — this
# mirrors the original elif ordering (crate_base was matched first).
_HIDDEN_DIMS.update({name: 128 for name in crate_network_family_list})
_HIDDEN_DIMS['crate_base'] = 768

net = encoder(user_device=device, arch=args.arch,
              hidden_dim=_HIDDEN_DIMS.get(args.arch, 2048))
# For multi-GPU use, wrap with torch.nn.DataParallel(net, device_ids=...).
net.to(device)

# Optimizer: Lion wrapped in LARS. (The original EMP-SSL SGD variant:
# optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True))
opt = Lion(net.parameters(), lr=args.lr, weight_decay=1e-4)
opt = LARSWrapper(opt, eta=0.005, clip=True, exclude_bias_n_norm=True)

# NOTE(review): `scaler` is created but never used in the training loop
# (no autocast / scaler.step calls); kept so any external code relying on
# the name still works.
scaler = GradScaler()

# Total number of optimizer steps; used as the cosine-annealing horizon
# (the scheduler is stepped once per batch, not per epoch).
# BUG FIX: the original compared against "imagenet-100", but the rest of the
# script spells the dataset "imagenet100" (see the dataloader setup), so the
# ImageNet-100 step count was never used. Accept both spellings.
if args.data in ("imagenet100", "imagenet-100"):
    num_converge = (150000 // args.bs) * args.epoch
else:
    num_converge = (50000 // args.bs) * args.epoch

scheduler = lr_scheduler.CosineAnnealingLR(opt, T_max=num_converge, eta_min=0, last_epoch=-1)

# Loss terms: patch similarity (invariance) + Total Coding Rate (diversity).
contractive_loss = Similarity_Loss()
criterion = TotalCodingRate(eps=args.eps)


##############
## Training ##
##############
def main():
    """Run EMP-SSL training.

    For each epoch: iterate the dataloader, optimize
    `patch_sim * similarity_loss + tcr * TCR_loss`, then checkpoint the
    encoder (state dict and full module) under `dir_name/save_models/`.
    Uses the module-level `net`, `dataloader`, `opt`, `scheduler`, losses
    and `args`.
    """
    # Create the checkpoint directory once, instead of re-checking per epoch.
    model_dir = dir_name + "/save_models/"
    os.makedirs(model_dir, exist_ok=True)

    for epoch in range(args.epoch):
        # Guard: if the dataloader yields no batches, the epoch-end print
        # below would otherwise raise NameError on undefined losses.
        loss_contract = loss_TCR = None

        for step, (data, label) in tqdm(enumerate(dataloader)):
            net.zero_grad()
            opt.zero_grad()

            # `data` is a list of augmented patch batches; concatenate along
            # the batch dim so the encoder sees all patches in one pass.
            data = torch.cat(data, dim=0)
            data = data.to(device)

            if args.arch in capsule_network_family_list:
                # Capsule encoders return (representation, probabilities);
                # only the representation feeds the SSL objective.
                z_proj, probs = net(data)
            else:
                z_proj = net(data)

            z_list = z_proj.chunk(num_patches, dim=0)
            z_avg = chunk_avg(z_proj, num_patches)

            # Contractive (similarity) loss + Total Coding Rate loss.
            loss_contract, _ = contractive_loss(z_list, z_avg)
            loss_TCR = cal_TCR(z_proj, criterion, num_patches)

            loss = args.patch_sim * loss_contract + args.tcr * loss_TCR

            loss.backward()
            opt.step()
            scheduler.step()  # per-step cosine schedule (T_max counts steps)

        # Checkpoint both the state dict and the full serialized module.
        torch.save(net.state_dict(), model_dir + str(epoch) + ".pt")
        torch.save(net, model_dir + str(epoch) + "encoding_.pth")

        if loss_contract is not None:
            print("At epoch:", epoch, "loss similarity is", loss_contract.item(), ",loss TCR is:", (loss_TCR).item(), "and learning rate is:", opt.param_groups[0]['lr'])
       
                


# Press the green button in the gutter to run the script.
# Standard entry point: train only when executed directly, not on import.
if __name__ == '__main__':
    main()

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
