import itertools
import torch
import torch.nn as nn
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from tqdm import tqdm
from einops import repeat, rearrange
from einops.layers.torch import Rearrange
from torch.utils.data import DataLoader
from easydict import EasyDict 

from utils import _topk_retrieval,SimSiamLoss,mae_topk_retrieval,SimCLRLoss






def simclr_pretrain(model, pretrain_dataloader, memory_loader, test_loader, args, pretrain_cfg):
    """SimCLR pretraining loop with per-epoch checkpointing and an optional kNN monitor.

    Args:
        model: SimCLR model; ``model(x_i, x_j)`` returns projections ``(z_i, z_j)``.
        pretrain_dataloader: yields ``((view_i, view_j), label)`` two-view batches.
        memory_loader: feature-bank loader for the kNN retrieval probe.
        test_loader: query loader for the kNN retrieval probe.
        args: run config (device, batch_size, temperature, logger, writer,
            pretrain_save_path, knn_monitor, knn_interval, knn_k, out_dim).
        pretrain_cfg: timm config consumed by create_optimizer_v2 / create_scheduler.
    """
    args.logger.info("=================================> pretrain start ")
    # Build optimizer and epoch-based LR schedule from the timm config.
    optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=pretrain_cfg))
    lr_scheduler, num_epochs = create_scheduler(pretrain_cfg, optimizer)

    # NOTE(review): SimCLRLoss is constructed with a fixed batch_size; a smaller
    # final batch would mismatch it — confirm the loader uses drop_last=True.
    criterion = SimCLRLoss(batch_size=args.batch_size, device=args.device, temperature=args.temperature).to(args.device)

    for epoch in range(num_epochs):
        run_loss = 0
        model.train()
        # Fix: timm schedulers are epoch-based. Stepping once per epoch yields
        # the same LR as the original redundant per-batch step(epoch) calls.
        lr_scheduler.step(epoch)
        for image, _ in tqdm(pretrain_dataloader):
            x_i, x_j = image
            x_i = x_i.to(args.device)
            x_j = x_j.to(args.device)
            z_i, z_j = model(x_i, x_j)
            loss = criterion(z_i, z_j)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            run_loss += loss.item()

        # NOTE(review): pickles whole module/optimizer objects (state_dicts would
        # be more robust) — kept for loader compatibility. The scheduler is now
        # persisted too, matching the other pretrain loops in this file.
        torch.save({
            "model": model,
            "epoch": epoch,
            "optimizer": optimizer,
            "lr_scheduler": lr_scheduler,
        }, args.pretrain_save_path)

        lr = optimizer.param_groups[0]['lr']
        epoch_pretrain_loss = run_loss / len(pretrain_dataloader)

        # Log per-epoch training statistics.
        args.logger.info("epoch: {} pretrain/lr:{:.5f}".format(epoch, lr))
        args.logger.info("epoch: {} pretrain/loss:{:.5f}".format(epoch, epoch_pretrain_loss))
        args.writer.add_scalar("pretrain/lr", lr, epoch)
        args.writer.add_scalar("pretrain/loss", epoch_pretrain_loss, epoch)

        if args.knn_monitor and epoch % args.knn_interval == 0:
            # kNN retrieval probe on the encoder's features; presumably the probe
            # switches the encoder to eval mode, so train mode is restored after.
            accuracy = _topk_retrieval(model=model.encoder, train_dataloader=memory_loader,
                                       val_dataloader=test_loader,
                                       K=args.knn_k, feat_dim=args.out_dim,
                                       device=args.device)
            model.encoder.train()
            args.logger.info("epoch: {} pretrain/accuracy:{:.5f}".format(epoch, accuracy))
            args.writer.add_scalar("pretrain/accuracy", accuracy, epoch)


def simsiam_pretrain(model, pretrain_dataloader, memory_loader, test_loader, args, pretrain_cfg):
    """SimSiam pretraining loop with per-epoch checkpointing and an optional kNN monitor.

    Args:
        model: SimSiam model; ``model(x_i, x_j)`` returns ``(z1, z2, p1, p2)``
            (projections and predictions for the two views).
        pretrain_dataloader: yields ``((view_i, view_j), label)`` two-view batches.
        memory_loader: feature-bank loader for the kNN retrieval probe.
        test_loader: query loader for the kNN retrieval probe.
        args: run config (device, logger, writer, pretrain_save_path,
            knn_monitor, knn_interval, knn_k, feat_dim).
        pretrain_cfg: timm config consumed by create_optimizer_v2 / create_scheduler.
    """
    args.logger.info("=================================> pretrain start ")
    # Build optimizer and epoch-based LR schedule from the timm config.
    optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=pretrain_cfg))
    lr_scheduler, num_epochs = create_scheduler(pretrain_cfg, optimizer)
    criterion = SimSiamLoss().to(args.device)

    for epoch in range(num_epochs):
        run_loss = 0
        model.train()
        # Fix: timm schedulers are epoch-based. Stepping once per epoch yields
        # the same LR as the original redundant per-batch step(epoch) calls.
        lr_scheduler.step(epoch)
        for image, _ in tqdm(pretrain_dataloader):
            x_i, x_j = image
            optimizer.zero_grad()
            x_i = x_i.to(args.device)
            x_j = x_j.to(args.device)
            z1, z2, p1, p2 = model(x_i, x_j)
            loss = criterion(z1, z2, p1, p2)
            loss.backward()
            optimizer.step()
            run_loss += loss.item()

        # NOTE(review): pickles whole module/optimizer objects (state_dicts
        # would be more robust) — kept for loader compatibility.
        torch.save({
            "model": model,
            "optimizer": optimizer,
            "epoch": epoch,
            "lr_scheduler": lr_scheduler
        }, args.pretrain_save_path)

        lr = optimizer.param_groups[0]['lr']
        epoch_pretrain_loss = run_loss / len(pretrain_dataloader)

        # Log per-epoch training statistics.
        args.logger.info("epoch: {} pretrain/lr:{:.5f}".format(epoch, lr))
        args.logger.info("epoch: {} pretrain/loss:{:.5f}".format(epoch, epoch_pretrain_loss))
        args.writer.add_scalar("pretrain/lr", lr, epoch)
        args.writer.add_scalar("pretrain/loss", epoch_pretrain_loss, epoch)

        if args.knn_monitor and epoch % args.knn_interval == 0:
            # kNN retrieval probe on the encoder's features; presumably the probe
            # switches the encoder to eval mode, so train mode is restored after.
            accuracy = _topk_retrieval(model=model.encoder, train_dataloader=memory_loader, val_dataloader=test_loader,
                                       K=args.knn_k, feat_dim=args.feat_dim,
                                       device=args.device)
            model.encoder.train()
            args.logger.info("epoch: {} pretrain/accuracy:{:.5f}".format(epoch, accuracy))
            args.writer.add_scalar("pretrain/accuracy", accuracy, epoch)


def mae_pretrain(model: nn.Module, pretrain_dataloader: DataLoader, memory_loader: DataLoader, test_loader: DataLoader, args, pretrain_cfg):
    """MAE pretraining loop: masked-pixel reconstruction with per-epoch
    checkpointing, a reconstruction-visualization image, and an optional kNN monitor.

    Args:
        model: MAE model; ``model(img)`` returns ``(predicted_img, mask)`` where
            mask marks the masked (reconstructed) pixels.
        pretrain_dataloader: yields ``(image, label)`` batches; labels are unused.
        memory_loader: feature-bank loader for the kNN retrieval probe.
        test_loader: query loader for the kNN probe and visualization samples.
        args: run config (device, mask_ratio, logger, writer, pretrain_save_path,
            knn_monitor, knn_interval, knn_k, feat_dim).
        pretrain_cfg: timm config consumed by create_optimizer_v2 / create_scheduler.
    """
    args.logger.info("=================================> pretrain start ")
    # Build optimizer and epoch-based LR schedule from the timm config.
    optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=pretrain_cfg))
    lr_scheduler, num_epochs = create_scheduler(pretrain_cfg, optimizer)

    for epoch in range(num_epochs):
        run_loss = 0
        model.train()
        # Fix: timm schedulers are epoch-based. Stepping once per epoch yields
        # the same LR as the original redundant per-batch step(epoch) calls.
        lr_scheduler.step(epoch)
        for img, _ in tqdm(pretrain_dataloader):
            img = img.to(args.device)
            predicted_img, mask = model(img)
            # MSE on the masked pixels only; dividing by mask_ratio normalizes
            # the mean over the masked fraction of pixels.
            loss = torch.mean((predicted_img - img) ** 2 * mask) / args.mask_ratio
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            run_loss += loss.item()

        # NOTE(review): pickles whole module/optimizer objects (state_dicts
        # would be more robust) — kept for loader compatibility.
        torch.save({
            "model": model,
            "optimizer": optimizer,
            "epoch": epoch,
            "lr_scheduler": lr_scheduler
        }, args.pretrain_save_path)

        lr = optimizer.param_groups[0]['lr']
        epoch_pretrain_loss = run_loss / len(pretrain_dataloader)

        # Log per-epoch training statistics.
        args.logger.info("epoch: {} pretrain/lr:{:.5f}".format(epoch, lr))
        args.logger.info("epoch: {} pretrain/loss:{:.5f}".format(epoch, epoch_pretrain_loss))
        args.writer.add_scalar("pretrain/lr", lr, epoch)
        args.writer.add_scalar("pretrain/loss", epoch_pretrain_loss, epoch)

        # Visualize reconstructions: tile masked input / composite reconstruction /
        # ground truth for 16 validation images into a single grid.
        model.eval()
        with torch.no_grad():
            val_img = torch.stack([test_loader.dataset[i][0] for i in range(16)])
            val_img = val_img.to(args.device)
            predicted_val_img, mask = model(val_img)
            # Keep visible pixels from the input; fill masked pixels with the prediction.
            predicted_val_img = predicted_val_img * mask + val_img * (1 - mask)
            img = torch.cat([val_img * (1 - mask), predicted_val_img, val_img], dim=0)
            img = rearrange(img, '(v h1 w1) c h w -> c (h1 h) (w1 v w)', w1=2, v=3)
            # (img + 1) / 2 assumes inputs normalized to [-1, 1] — TODO confirm.
            args.writer.add_image('mae_image', (img + 1) / 2, global_step=epoch)

        if args.knn_monitor and epoch % args.knn_interval == 0:
            # kNN retrieval probe on the encoder's features; presumably the probe
            # switches the encoder to eval mode, so train mode is restored after.
            accuracy = mae_topk_retrieval(model=model.encoder, train_dataloader=memory_loader, val_dataloader=test_loader,
                                       K=args.knn_k, feat_dim=args.feat_dim,
                                       device=args.device)
            model.encoder.train()
            args.logger.info("epoch: {} pretrain/accuracy:{:.5f}".format(epoch, accuracy))
            args.writer.add_scalar("pretrain/accuracy", accuracy, epoch)


def densecl_pretrain(model, pretrain_dataloader, memory_loader, test_loader, args, pretrain_cfg):
    """DenseCL pretraining loop with per-epoch checkpointing and an optional kNN monitor.

    Args:
        model: DenseCL model; ``model(x_i, x_j)`` returns a dict containing 'loss'.
        pretrain_dataloader: yields ``((view_i, view_j), label)`` two-view batches.
        memory_loader: feature-bank loader for the kNN retrieval probe.
        test_loader: query loader for the kNN retrieval probe.
        args: run config (device, logger, writer, pretrain_save_path,
            knn_monitor, knn_interval, knn_k, out_dim).
        pretrain_cfg: timm config consumed by create_optimizer_v2 / create_scheduler.
    """
    args.logger.info("=================================> pretrain start ")
    # Build optimizer and epoch-based LR schedule from the timm config.
    optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=pretrain_cfg))
    lr_scheduler, num_epochs = create_scheduler(pretrain_cfg, optimizer)

    # NOTE(review): criterion is constructed but never used — the model
    # computes its own loss. Kept to preserve the original behavior.
    criterion = nn.CrossEntropyLoss().to(args.device)
    for epoch in range(num_epochs):
        run_loss = 0
        model.train()
        # Fix: timm schedulers are epoch-based. Stepping once per epoch yields
        # the same LR as the original redundant per-batch step(epoch) calls.
        lr_scheduler.step(epoch)
        for image, _ in tqdm(pretrain_dataloader):
            x_i, x_j = image
            x_i = x_i.to(args.device)
            x_j = x_j.to(args.device)
            # Fix: call the module, not .forward(), so hooks are honored.
            data_dict = model(x_i, x_j)
            loss = data_dict['loss'].mean()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            run_loss += loss.item()

        # NOTE(review): pickles whole module/optimizer objects (state_dicts
        # would be more robust) — kept for loader compatibility.
        torch.save({
            "model": model,
            "epoch": epoch,
            "optimizer": optimizer,
            "lr_scheduler": lr_scheduler
        }, args.pretrain_save_path)

        lr = optimizer.param_groups[0]['lr']
        epoch_pretrain_loss = run_loss / len(pretrain_dataloader)

        # Log per-epoch training statistics.
        args.logger.info("epoch: {} pretrain/lr:{:.5f}".format(epoch, lr))
        args.logger.info("epoch: {} pretrain/loss:{:.5f}".format(epoch, epoch_pretrain_loss))
        args.writer.add_scalar("pretrain/lr", lr, epoch)
        args.writer.add_scalar("pretrain/loss", epoch_pretrain_loss, epoch)

        if args.knn_monitor and epoch % args.knn_interval == 0:
            # kNN retrieval probe on the query backbone; presumably the probe
            # switches it to eval mode, so train mode is restored after.
            accuracy = _topk_retrieval(model=model.backbone_q, train_dataloader=memory_loader,
                                       val_dataloader=test_loader,
                                       K=args.knn_k, feat_dim=args.out_dim,
                                       device=args.device)
            model.backbone_q.train()
            args.logger.info("epoch: {} pretrain/accuracy:{:.5f}".format(epoch, accuracy))
            args.writer.add_scalar("pretrain/accuracy", accuracy, epoch)

def supervised(model, supervised_dataloader, test_dataloader, args, supervised_cfg):
    """Fully-supervised training with cross-entropy loss.

    Evaluates on ``test_dataloader`` each epoch and saves a checkpoint whenever
    top-1 accuracy improves.

    Args:
        model: classifier whose forward returns class logits.
        supervised_dataloader: yields ``(image, label)`` training batches.
        test_dataloader: evaluation loader passed to ``evaluate``.
        args: run config (device, logger, writer, supervised_save_path).
        supervised_cfg: timm config consumed by create_optimizer_v2 / create_scheduler.
    """
    # Build optimizer and epoch-based LR schedule from the timm config.
    optimizer = create_optimizer_v2(model,
                                    **optimizer_kwargs(cfg=supervised_cfg))
    lr_scheduler, num_epochs = create_scheduler(supervised_cfg, optimizer)
    criterion = nn.CrossEntropyLoss().to(args.device)
    best_acc = 0

    for epoch in range(num_epochs):
        run_loss = 0
        model.train()
        # Fix: timm schedulers are epoch-based. Stepping once per epoch yields
        # the same LR as the original redundant per-batch step(epoch) calls.
        lr_scheduler.step(epoch)
        for image, label in supervised_dataloader:
            optimizer.zero_grad()
            image = image.to(args.device)
            label = label.to(args.device)
            label_pred = model(image)
            loss = criterion(label_pred, label)
            loss.backward()
            optimizer.step()
            run_loss += loss.item()

        lr = optimizer.param_groups[0]['lr']
        epoch_pretrain_loss = run_loss / len(supervised_dataloader)

        # Evaluate top-1 / top-5 accuracy on the held-out set.
        acc, acc_top5 = evaluate(model=model, dataloader=test_dataloader, args=args)

        # Save a checkpoint when top-1 accuracy improves.
        if acc > best_acc:
            best_acc = acc
            torch.save({
                "model": model,
                "epoch": epoch,
                "optimizer": optimizer,
                "acc": acc,
            }, args.supervised_save_path)

        # Log per-epoch training statistics.
        args.logger.info("epoch: {} supervised/lr:{:.5f}".format(epoch, lr))
        args.logger.info("epoch: {} supervised/loss:{:.5f}".format(epoch, epoch_pretrain_loss))
        args.logger.info("epoch: {} supervised/accuracy:{:.5f}".format(epoch, acc))
        args.logger.info("epoch: {} supervised/accuracy_top5:{:.5f}".format(epoch, acc_top5))
        args.writer.add_scalar("supervised/lr", lr, epoch)
        args.writer.add_scalar("supervised/loss", epoch_pretrain_loss, epoch)
        args.writer.add_scalar("supervised/accuracy", acc, epoch)
        args.writer.add_scalar("supervised/accuracy_top5", acc_top5, epoch)


def linear_eval(model, classifier, finetune_dataloader, test_dataloader, args, finetune_cfg):
    """Linear evaluation: freeze ``model`` as a feature extractor and train only
    ``classifier`` on pooled features; save the best checkpoint by top-1 accuracy.

    Args:
        model: frozen backbone; its forward returns a feature map that is
            global-average-pooled and flattened before classification.
        classifier: trainable linear head over the pooled features.
        finetune_dataloader: yields ``(image, label)`` training batches.
        test_dataloader: yields ``(image, label)`` evaluation batches.
        args: run config (device, logger, writer, finetune_save_path).
        finetune_cfg: timm config consumed by create_optimizer_v2 / create_scheduler.
    """
    args.logger.info("=================================> finetune start ")
    # Only the classifier's parameters are optimized; the backbone stays frozen.
    optimizer = create_optimizer_v2(
        classifier, **optimizer_kwargs(cfg=finetune_cfg))
    lr_scheduler, num_epochs = create_scheduler(finetune_cfg, optimizer)
    criterion = nn.CrossEntropyLoss().to(args.device)
    best_acc = 0
    avgpool = nn.AdaptiveAvgPool2d((1, 1))

    for epoch in range(num_epochs):
        run_loss = 0
        model.eval()
        classifier.train()
        # Fix: timm schedulers are epoch-based. Stepping once per epoch yields
        # the same LR as the original redundant per-batch step(epoch) calls.
        lr_scheduler.step(epoch)
        for image, label in tqdm(finetune_dataloader):
            optimizer.zero_grad()
            image = image.to(args.device)
            label = label.to(args.device)
            # Extract features without tracking gradients through the backbone.
            with torch.no_grad():
                feature = model(image)

            feature = avgpool(feature)
            feature = feature.reshape(feature.size(0), -1)
            label_pred = classifier(feature)
            loss = criterion(label_pred, label)
            loss.backward()
            optimizer.step()
            run_loss += loss.item()

        lr = optimizer.param_groups[0]['lr']
        epoch_pretrain_loss = run_loss / len(finetune_dataloader)

        # Evaluate top-1 / top-5 accuracy of the linear head on the test set.
        classifier.eval()
        acc = 0.0
        acc_top5 = 0.0
        for image, label in test_dataloader:
            label = label.to(args.device)
            with torch.no_grad():
                feature = model(image.to(args.device))
                # Pool and flatten the feature map, mirroring the training path.
                feature = avgpool(feature)
                feature = feature.reshape(feature.size(0), -1)

                preds = classifier(feature)
                acc += (preds.argmax(dim=1) == label).sum().item()
                _, output_topk = preds.topk(5, 1, True, True)
                acc_top5 += (output_topk == label.view(-1,
                                                   1).expand_as(output_topk)).sum().float().item()
        acc = acc / len(test_dataloader.dataset)
        acc_top5 = acc_top5 / len(test_dataloader.dataset)

        # Save a checkpoint when top-1 accuracy improves.
        if acc > best_acc:
            best_acc = acc
            torch.save({
                "model": model,
                # Fix: the trained linear head was never saved — only the frozen
                # backbone — so the best classifier could not be restored.
                "classifier": classifier,
                "optimizer": optimizer,
                "epoch": epoch,
                "lr_scheduler": lr_scheduler,
                "acc": acc
            }, args.finetune_save_path)

        # Log per-epoch training statistics.
        args.logger.info("epoch: {} finetune/lr:{:.5f}".format(epoch, lr))
        args.logger.info("epoch: {} finetune/loss:{:.5f}".format(epoch, epoch_pretrain_loss))
        args.logger.info("epoch: {} finetune/accuracy:{:.5f}".format(epoch, acc))
        args.logger.info("epoch: {} finetune/accuracy_top5:{:.5f}".format(epoch, acc_top5))

        args.writer.add_scalar("finetune/lr", lr, epoch)
        args.writer.add_scalar("finetune/loss", epoch_pretrain_loss, epoch)
        args.writer.add_scalar("finetune/accuracy", acc, epoch)
        args.writer.add_scalar("finetune/accuracy_top5", acc_top5, epoch)


def finetune(model, finetune_dataloader, test_dataloader, args, finetune_cfg):
    """Fine-tune the whole model end-to-end with cross-entropy loss.

    Note: the original docstring claimed only the classification head is
    trained, but the optimizer is built over the full ``model`` and
    ``model.train()`` is set — all parameters are updated here.

    Args:
        model: classifier whose forward returns class logits.
        finetune_dataloader: yields ``(image, label)`` training batches.
        test_dataloader: evaluation loader passed to ``evaluate``.
        args: run config (device, logger, writer, finetune_save_path).
        finetune_cfg: timm config consumed by create_optimizer_v2 / create_scheduler.
    """
    args.logger.info("=================================> finetune start ")
    optimizer = create_optimizer_v2(
        model, **optimizer_kwargs(cfg=finetune_cfg))
    lr_scheduler, num_epochs = create_scheduler(finetune_cfg, optimizer)
    criterion = nn.CrossEntropyLoss().to(args.device)
    best_acc = 0

    for epoch in range(num_epochs):
        run_loss = 0
        model.train()
        # Fix: timm schedulers are epoch-based. Stepping once per epoch yields
        # the same LR as the original redundant per-batch step(epoch) calls.
        lr_scheduler.step(epoch)
        for image, label in tqdm(finetune_dataloader):
            optimizer.zero_grad()
            image = image.to(args.device)
            label = label.to(args.device)
            label_pred = model(image)
            loss = criterion(label_pred, label)
            loss.backward()
            optimizer.step()
            run_loss += loss.item()

        lr = optimizer.param_groups[0]['lr']
        epoch_pretrain_loss = run_loss / len(finetune_dataloader)

        # Evaluate top-1 / top-5 accuracy on the held-out set.
        acc, acc_top5 = evaluate(model=model, dataloader=test_dataloader, args=args)

        # Save a checkpoint when top-1 accuracy improves.
        if acc > best_acc:
            best_acc = acc

            torch.save({
                "model": model,
                "optimizer": optimizer,
                "epoch": epoch,
                "lr_scheduler": lr_scheduler,
                "acc": acc
            }, args.finetune_save_path)

        # Log per-epoch training statistics.
        args.logger.info("epoch: {} finetune/lr:{:.5f}".format(epoch, lr))
        args.logger.info("epoch: {} finetune/loss:{:.5f}".format(epoch, epoch_pretrain_loss))
        args.logger.info("epoch: {} finetune/accuracy:{:.5f}".format(epoch, acc))
        args.logger.info("epoch: {} finetune/accuracy_top5:{:.5f}".format(epoch, acc_top5))

        args.writer.add_scalar("finetune/lr", lr, epoch)
        args.writer.add_scalar("finetune/loss", epoch_pretrain_loss, epoch)
        args.writer.add_scalar("finetune/accuracy", acc, epoch)
        args.writer.add_scalar("finetune/accuracy_top5", acc_top5, epoch)


def evaluate(model, dataloader, args):
    """Compute top-1 and top-k accuracy of ``model`` on ``dataloader``.

    Args:
        model: classifier whose forward returns class logits of shape
            ``(batch, num_classes)``.
        dataloader: yields ``(image, label)`` batches; ``len(dataloader.dataset)``
            is used to normalize the correct counts.
        args: run config; only ``args.device`` is used here.

    Returns:
        tuple[float, float]: ``(top1_accuracy, topk_accuracy)`` in ``[0, 1]``,
        where ``k = min(5, num_classes)``.
    """
    model.eval()
    correct_top1 = 0.0
    correct_topk = 0.0
    with torch.no_grad():
        for image, label in dataloader:
            image = image.to(args.device)
            label = label.to(args.device)
            logits = model(image)

            # Top-1: argmax prediction matches the label.
            correct_top1 += (logits.argmax(dim=1) == label).sum().item()
            # Top-k: label appears among the k highest logits. Guard k so
            # models with fewer than 5 classes do not crash topk().
            k = min(5, logits.size(1))
            _, topk_idx = logits.topk(k, 1, True, True)
            correct_topk += (topk_idx == label.view(-1, 1).expand_as(topk_idx)).sum().float().item()

    total = len(dataloader.dataset)
    return correct_top1 / total, correct_topk / total
