import numpy as np
import os
import torch
import torch.nn as nn
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from tqdm import tqdm
from tensorboardX import SummaryWriter

from datasets import get_loader
from model import get_model, get_backbone
from utils import mk_path, _get_logger, loadyaml,_topk_retrieval
from train import finetune

def main():
    """Entry point: MoCo v2 self-supervised pretraining on CIFAR-10, then linear evaluation.

    Pipeline:
      1. Load the YAML config and set up output dirs, tensorboard writer, logger.
      2. Build the pretrain / supervised / memory data loaders.
      3. Run self-supervised MoCo v2 pretraining (``mocov2_pretrain``).
      4. Load the pretrained query encoder into a fresh backbone, freeze
         everything except the final fc layer, and re-initialize the fc.
      5. Fine-tune only the fc layer on labeled data (linear evaluation).
    """
    path=r"config/mocov2_cifar10.yaml"
    root = os.path.dirname(os.path.realpath(__file__))  # absolute dir of this script
    args = loadyaml(os.path.join(root, path))  # load YAML config into an args namespace

    args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    args.save_path = os.path.join(root, args.save_path)
    mk_path(args.save_path, False) # create the output directory
    mk_path(os.path.join(args.save_path, "tensorboardX")) # tensorboard log dir
    mk_path(os.path.join(args.save_path, "model")) # model checkpoint dir
    args.pretrain_save_path = os.path.join(args.save_path,"model", "pretrain_model.pth")# pretrained-weights checkpoint path
    args.finetune_save_path = os.path.join(args.save_path,"model", "finetune_model.pth")# fine-tuned-weights checkpoint path
    args.writer = SummaryWriter(os.path.join(args.save_path, "tensorboardX"))
    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")

    
    # Seed the RNGs for repeatability.
    # NOTE(review): cudnn.deterministic stays False and benchmark True, so
    # runs are NOT fully reproducible despite the seeding — presumably a
    # deliberate speed trade-off; confirm if exact reproducibility is needed.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True  # let cudnn autotune the fastest kernels
    
    # step 1: build the datasets / loaders
    pretrain_dataloader = get_loader(args,"mocov2")
    train_loader, test_dataloader = get_loader(args=args,method="supervised")
    memory_loader = get_loader(args,"memory")

    # NOTE(review): len(loader) * batch_size over-counts when the last batch
    # is partial (drop_last=False) — these logged sizes are approximate.
    args.logger.info("==========> train_loader length:{}".format(len(train_loader) * args.batch_size))
    args.logger.info("==========> test_dataloader length:{}".format(len(test_dataloader) * args.batch_size))
    args.logger.info("==========> pretrain_dataloader length:{}".format(len(pretrain_dataloader) * args.batch_size))
    args.logger.info("==========> memory_loader length:{}".format(len(memory_loader) * args.batch_size))

    #  step 2: build the MoCo v2 model
    model = get_model(args)
    model.to(device=args.device)

    # step 3: self-supervised pretraining
    mocov2_pretrain(model=model, pretrain_dataloader=pretrain_dataloader, memory_loader=memory_loader,
             test_loader=test_dataloader, 
             args=args, pretrain_cfg=args.pretrain_cfg)

    # step 4: load the pretrained query-encoder weights into a plain backbone.
    # The checkpoint stores the whole MoCo model object under "model", so the
    # encoder's state_dict is pulled from checkpoint["model"].encoder_q.
    finetune_model = get_backbone(args.backbone,num_classes=args.num_classes)
    checkpoint = torch.load(args.pretrain_save_path, map_location="cpu")
    state_dict = checkpoint["model"].encoder_q.state_dict()
    msg = finetune_model.load_state_dict(state_dict, strict=False)
    # Only the classifier head should be missing from the pretrained weights.
    assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
    args.logger.info(msg)
    args.logger.info("pretrain_model lodding success ")
    # Freeze the backbone; only the final fully-connected layer stays trainable.
    for name, param in finetune_model.named_parameters():
        if name not in ['fc.weight', 'fc.bias']:
            param.requires_grad = False
    # Re-initialize the final fc layer before linear probing.
    finetune_model.fc.weight.data.normal_(mean=0.0, std=0.01)
    finetune_model.fc.bias.data.zero_()
    finetune_model.to(args.device)

    
    # step 5: supervised fine-tuning of the fc layer only (linear evaluation)
    finetune(model=finetune_model, finetune_dataloader=train_loader,
             test_dataloader=test_dataloader,
             finetune_cfg=args.finetune_cfg,
             args=args)


def mocov2_pretrain(model, pretrain_dataloader, memory_loader, test_loader, args, pretrain_cfg):
    """Self-supervised MoCo v2 pretraining loop.

    Args:
        model: MoCo wrapper exposing ``encoder_q`` and returning
            ``(logits, targets)`` for a pair of augmented views.
        pretrain_dataloader: yields ``((x_i, x_j), label)`` two-view batches;
            the dataset label is unused here.
        memory_loader: feature-bank loader for the kNN monitor.
        test_loader: validation loader for the kNN monitor.
        args: runtime namespace (device, logger, writer, knn_* settings,
            pretrain_save_path, ...).
        pretrain_cfg: timm config consumed by ``create_optimizer_v2`` and
            ``create_scheduler``.
    """
    args.logger.info("=================================> pretrain start ")
    # Optimizer and epoch-based LR schedule come from the timm config.
    optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=pretrain_cfg))
    lr_scheduler, num_epochs = create_scheduler(pretrain_cfg, optimizer)
    criterion = nn.CrossEntropyLoss().to(args.device)
    for epoch in range(num_epochs):
        run_loss = 0.0
        model.train()
        for image, _ in tqdm(pretrain_dataloader):  # dataset label is unused
            x_i, x_j = image
            x_i = x_i.to(args.device)
            x_j = x_j.to(args.device)
            # Contrastive logits and their target indices from the MoCo head.
            logit, target = model(x_i, x_j)
            loss = criterion(logit, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            run_loss += loss.item()

        # BUGFIX: the scheduler used to be stepped once per *batch* with the
        # epoch index; timm epoch-based schedulers expect a single
        # ``step(epoch)`` per epoch.  Step once here to set the LR for the
        # next epoch.
        lr_scheduler.step(epoch + 1)

        # NOTE(review): this pickles the whole model/optimizer objects;
        # main() depends on this exact layout (checkpoint["model"].encoder_q),
        # so the format is deliberately kept as-is.
        torch.save({
            "model": model,
            "epoch": epoch,
            "optimizer": optimizer,
        }, args.pretrain_save_path)

        lr = optimizer.param_groups[0]['lr']
        epoch_pretrain_loss = run_loss / len(pretrain_dataloader)

        # Per-epoch logging to file and tensorboard.
        args.logger.info("epoch: {} pretrain/lr:{:.5f}".format(epoch, lr))
        args.logger.info("epoch: {} pretrain/loss:{:.5f}".format(epoch, epoch_pretrain_loss))
        args.writer.add_scalar("pretrain/lr", lr, epoch)  # learning rate
        args.writer.add_scalar("pretrain/loss", epoch_pretrain_loss, epoch)  # mean epoch loss

        if args.knn_monitor and epoch % args.knn_interval == 0:
            # kNN probe of the query encoder's features as a cheap quality monitor.
            accuracy = _topk_retrieval(model=model.encoder_q, train_dataloader=memory_loader,
                                       val_dataloader=test_loader,
                                       K=args.knn_k, feat_dim=args.out_dim,
                                       device=args.device)
            # _topk_retrieval presumably switches the encoder to eval mode —
            # restore training mode afterwards.
            model.encoder_q.train()
            args.logger.info("epoch: {} pretrain/accuracy:{:.5f}".format(epoch, accuracy))
            args.writer.add_scalar("pretrain/accuracy", accuracy, epoch)  # kNN accuracy

# Run the full pretrain + linear-evaluation pipeline when executed as a script.
if __name__ == "__main__":
    main()