import os.path
import numpy as np
import torch
from copy import deepcopy
import wandb
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler

from model import build_model
from datasets import build_loader
from val import test
from utils import loadyaml, _get_logger, mk_path, update_ema_variables

def main():
    """Entry point: load the YAML config, set up logging/W&B, build the data
    loaders and the student/teacher models, then launch Mean Teacher training."""

    path = r"config/mean_teacher_wideresnet_200_cifar10.yaml"
    root = os.path.dirname(os.path.realpath(__file__))  # absolute directory of this file
    args = loadyaml(os.path.join(root, path))  # load YAML config into an args namespace

    # Device selection: fall back to CPU when CUDA is requested but unavailable.
    if args.cuda:
        args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    else:
        args.device = torch.device("cpu")

    # FIX: seed all RNGs *before* building loaders and the model so that weight
    # initialization and data shuffling are reproducible (the original seeded
    # only after the model and loaders were already constructed).
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    torch.backends.cudnn.deterministic = False  # single-GPU run; strict determinism not required
    torch.backends.cudnn.benchmark = True       # let cuDNN auto-tune the fastest kernels

    args.save_path = os.path.join(root, args.save_path)
    mk_path(args.save_path)  # output root directory
    mk_path(os.path.join(args.save_path, "tensorboardX"))  # W&B / tensorboard log directory
    mk_path(os.path.join(args.save_path, "model"))  # checkpoint directory

    args.model_save_path = os.path.join(args.save_path, "model", "model.pth")  # student checkpoint
    args.ema_model_save_path = os.path.join(args.save_path, "model", "ema_model_model.pth")  # EMA teacher checkpoint

    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
    args.tqdm = os.path.join(args.save_path, "tqdm.log")

    wandb.init(
        entity="jokerak777",  # W&B team / user name
        project="semi_cls",  # W&B project name
        name="6-26",  # run name (optional; W&B auto-generates one if omitted)
        config=args,
        dir=os.path.join(args.save_path, "tensorboardX")
    )

    label_loader, unlabel_loader, test_loader = build_loader(args)  # build datasets/loaders
    args.epochs = args.total_itrs // args.step_size  # derive epoch count from iteration budget
    args.logger.info("==========> label_loader length:{}".format(len(label_loader.dataset)))
    args.logger.info("==========> unlabel_loader length:{}".format(len(unlabel_loader.dataset)))
    args.logger.info("==========> test_loader length:{}".format(len(test_loader.dataset)))
    args.logger.info("==========> epochs length:{}".format(args.epochs))

    # step 1: build the student model and its EMA teacher copy
    model = build_model(args=args).to(device=args.device)
    ema_model = deepcopy(model)  # teacher starts as an exact copy of the student
    for param in ema_model.parameters():
        # The teacher is updated only via EMA of student weights, never by backprop.
        param.requires_grad = False

    # step 2: train
    Mean_Teacher(model, ema_model, label_loader, unlabel_loader, test_loader, args)

def consistency_loss(logits_w1, logits_w2):
    """Mean-squared consistency loss between the softmax predictions of two
    views. ``logits_w2`` (the teacher output) is detached so no gradient flows
    into it."""
    target_logits = logits_w2.detach()
    assert logits_w1.size() == target_logits.size()
    student_probs = torch.softmax(logits_w1, dim=-1)
    teacher_probs = torch.softmax(target_logits, dim=-1)
    return F.mse_loss(student_probs, teacher_probs, reduction='mean')

def Mean_Teacher(model: nn.Module, ema_model: nn.Module, label_loader, unlabel_loader, test_loader, args):
    """Mean Teacher training loop.

    The student ``model`` is trained with a supervised cross-entropy loss on
    labeled batches plus a warmed-up consistency (MSE-on-softmax) loss against
    the EMA teacher ``ema_model`` on both labeled and unlabeled batches. The
    teacher is updated by exponential moving average of the student weights.
    Runs for ``args.total_itrs`` iterations; evaluates and checkpoints every
    ``args.step_size`` iterations.
    """
    optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
    lr_scheduler, _ = create_scheduler(args, optimizer)  # epoch count derived from args elsewhere

    ce_loss = nn.CrossEntropyLoss(ignore_index=-1)  # ignore_index=-1 skips unlabeled placeholders
    model.train()
    ema_model.train()
    cur_itrs = 0
    acc = 0.0       # best student test accuracy so far
    ema_acc = 0.0   # best teacher (EMA) test accuracy so far
    epoch = 0

    args.logger.info("start training")
    pbar = tqdm(total=args.total_itrs)

    while True:
        train_loss = 0.0
        # Each loader yields two augmented views of every image.
        for (((img1, img2), target_label), ((img1_ul, img2_ul), _)) in zip(label_loader, unlabel_loader):

            cur_itrs += 1
            pbar.update(1)

            batch_size_labeled = img1.shape[0]
            # NOTE: torch.autograd.Variable is a deprecated no-op since
            # PyTorch 0.4 — plain tensors are used directly.
            input1 = torch.cat([img1, img1_ul]).to(args.device)
            input2 = torch.cat([img2, img2_ul]).to(args.device)
            target = target_label.to(args.device)

            output = model(input1)

            # Teacher forward pass needs no gradients.
            with torch.no_grad():
                ema_output = ema_model(input2)

            out_x = output[:batch_size_labeled]
            sup_loss = ce_loss(out_x, target)  # supervised loss on the labeled portion

            # Consistency loss (smoothness assumption: a model should be
            # consistent across perturbations of the same input), ramped up
            # linearly over the first unsup_warm_up fraction of training.
            unsup_loss = consistency_loss(output, ema_output)
            warm_up = float(np.clip(cur_itrs / (args.unsup_warm_up * args.total_itrs), 0., 1.))
            loss = sup_loss + warm_up * args.lambda_u * unsup_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            lr_scheduler.step(epoch=epoch)
            train_loss += loss.item()
            lr = optimizer.param_groups[0]["lr"]
            update_ema_variables(model, ema_model, args.ema_decay, cur_itrs)

            # Log per-iteration metrics.
            wandb.log({
                "mean_teacher/loss": loss.item(),
                "mean_teacher/lr": lr,
                "mean_teacher/warm_up": warm_up,
            })

            # Reference results on CIFAR-10 (50000 samples):
            # 4000 labels acc=90%, 2000 acc=87%, 1000 acc=83%,
            # 500 acc=58%, 250 acc=52%

            if cur_itrs % args.step_size == 0:
                epoch += 1
                # Evaluate the student and checkpoint on improvement.
                # NOTE(review): `test` presumably toggles eval mode internally
                # and restores train mode — confirm in val.py.
                tmp_acc = test(model, test_loader=test_loader, args=args)
                wandb.log({
                    'mean_teacher/acc': tmp_acc,
                })
                if tmp_acc > acc:
                    acc = tmp_acc
                    torch.save({
                        "cur_itrs": cur_itrs,
                        "model": model.state_dict(),
                        "ema_model": ema_model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "best_acc": acc
                    }, args.model_save_path)

                # Evaluate the EMA teacher and checkpoint on improvement.
                tmp_acc = test(ema_model, test_loader=test_loader, args=args)
                wandb.log({
                    'mean_teacher/ema_acc': tmp_acc,
                })
                if tmp_acc > ema_acc:
                    ema_acc = tmp_acc
                    # FIX: the best EMA checkpoint was never written even though
                    # args.ema_model_save_path is prepared in main().
                    torch.save({
                        "cur_itrs": cur_itrs,
                        "model": model.state_dict(),
                        "ema_model": ema_model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "best_acc": ema_acc
                    }, args.ema_model_save_path)

                args.logger.info("model best_acc: {:.4f}  ema best_acc: {:.4f}".format(acc, ema_acc))

            if cur_itrs > args.total_itrs:
                pbar.close()  # FIX: release the progress bar before returning
                wandb.finish()
                return
        
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
