import os.path
import numpy as np
import torch
from copy import deepcopy
import wandb
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler

from model import build_model
from datasets import build_loader
from val import test
from utils import loadyaml, _get_logger, mk_path, update_ema_variables

def main():
    """Entry point: load the YAML config, seed RNGs, set up logging and W&B,
    build the data loaders and model, then run FixMatch training.
    """
    path = r"config/fixmatch_wideresnet_200_cifar10.yaml"
    root = os.path.dirname(os.path.realpath(__file__))  # absolute directory of this file
    args = loadyaml(os.path.join(root, path))  # load YAML config into a namespace

    if args.cuda:
        args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    else:
        args.device = torch.device("cpu")

    # BUGFIX: seed all RNGs *before* building loaders and the model, so that
    # weight initialization and data shuffling are reproducible (previously
    # the seeds were set after construction, making them ineffective there).
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    torch.backends.cudnn.deterministic = False  # single-GPU run, no distributed determinism needed
    torch.backends.cudnn.benchmark = True  # let cudnn search for the fastest kernels

    args.save_path = os.path.join(root, args.save_path)
    mk_path(args.save_path)  # root output directory
    mk_path(os.path.join(args.save_path, "tensorboardX"))  # log directory (also used as wandb dir)
    mk_path(os.path.join(args.save_path, "model"))  # checkpoint directory

    args.model_save_path = os.path.join(args.save_path, "model", "model.pth")  # best-model checkpoint
    args.ema_model_save_path = os.path.join(args.save_path, "model", "ema_model_model.pth")

    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
    args.tqdm = os.path.join(args.save_path, "tqdm.log")

    wandb.init(
        entity="jokerak777",  # W&B team/user name
        project="semi_fixmatch",  # W&B project name
        name="6-26",  # run name for this experiment
        config=args,
        dir=os.path.join(args.save_path, "tensorboardX")
    )

    label_loader, unlabel_loader, test_loader = build_loader(args)  # build the datasets
    args.epochs = args.total_itrs // args.step_size  # derived epoch count
    args.logger.info("==========> label_loader length:{}".format(len(label_loader.dataset)))
    args.logger.info("==========> unlabel_loader length:{}".format(len(unlabel_loader.dataset)))
    args.logger.info("==========> test_loader length:{}".format(len(test_loader.dataset)))
    args.logger.info("==========> epochs length:{}".format(args.epochs))

    # step 1: build the model and a frozen EMA copy.
    # NOTE(review): ema_model is created but never passed to FixMatch below —
    # the EMA branch appears unused; confirm whether it should be wired in.
    model = build_model(args=args).to(device=args.device)
    ema_model = deepcopy(model)
    for param in ema_model.parameters():
        param.requires_grad = False  # EMA weights are updated manually, never by the optimizer

    # step 2: train.
    FixMatch(model, label_loader, unlabel_loader, test_loader, args)

def consistency_loss(logits_w1, logits_w2):
    """Mean-squared error between the softmax distributions of two logit tensors.

    The second tensor is detached, so gradients flow only through
    ``logits_w1``. Both tensors must share the same shape.
    """
    target = logits_w2.detach()
    assert logits_w1.size() == target.size()
    probs_1 = torch.softmax(logits_w1, dim=-1)
    probs_2 = torch.softmax(target, dim=-1)
    return F.mse_loss(probs_1, probs_2, reduction='mean')

def FixMatch(model: nn.Module, label_loader, unlabel_loader, test_loader, args):
    """Train ``model`` with the FixMatch semi-supervised algorithm.

    Each step forwards labeled, weakly augmented, and strongly augmented
    images as one batch. The supervised loss is cross-entropy on the labeled
    outputs; the unsupervised loss uses pseudo-labels from the weak view as
    targets for the strong view, masked to predictions whose confidence
    exceeds ``args.threshold``.

    Args:
        model: classifier to train (modified in place; best weights saved
            to ``args.model_save_path``).
        label_loader: yields ``(image, target)`` batches of labeled data.
        unlabel_loader: yields ``((weak_img, strong_img), _)`` batches.
        test_loader: evaluation data forwarded to ``test``.
        args: namespace providing ``T``, ``threshold``, ``lambda_u``,
            ``step_size``, ``total_itrs``, ``device``, ``logger``,
            ``model_save_path`` and timm optimizer/scheduler settings.
    """
    optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
    lr_scheduler, _ = create_scheduler(args, optimizer)

    model.train()
    cur_itrs = 0
    best_acc = 0.0
    epoch = 0

    args.logger.info("start training")
    pbar = tqdm(total=args.total_itrs)

    while True:
        for ((img_labeled, target_label),
             ((img_unlabeled_weak, img_unlabeled_strong), _)) in zip(label_loader, unlabel_loader):

            cur_itrs += 1
            pbar.update(1)

            # Forward the three views in a single batch: labeled, weak, strong.
            inputs = torch.cat([img_labeled, img_unlabeled_weak, img_unlabeled_strong], dim=0).to(args.device)
            target_label = target_label.to(args.device)

            logits = model(inputs)

            # Split the outputs back into labeled / weak / strong groups.
            batch_size = img_labeled.shape[0]
            logits_x = logits[:batch_size]  # labeled outputs
            loss_sup = F.cross_entropy(logits_x, target_label)

            logits_u_w, logits_u_s = logits[batch_size:].chunk(2)
            # Pseudo-labels come from the detached weak view (more stable),
            # sharpened by temperature args.T.
            pseudo_label = torch.softmax(logits_u_w.detach() / args.T, dim=-1)
            max_probs, targets_u = torch.max(pseudo_label, dim=-1)
            # Only pseudo-labels above the confidence threshold contribute.
            mask = max_probs.ge(args.threshold).float()
            loss_unsup = (F.cross_entropy(logits_u_s, targets_u, reduction='none') * mask).mean()

            # total = supervised CE on weak-augmented labeled data
            #       + weighted CE of strong-augmented data vs weak pseudo-labels
            loss = loss_sup + args.lambda_u * loss_unsup

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            lr_scheduler.step(epoch=epoch)
            lr = optimizer.param_groups[0]["lr"]

            wandb.log({
                "fixmatch/loss": loss.item(),
                "fixmatch/lr": lr,
                "fixmatch/loss_sup": loss_sup.item(),
                "fixmatch/loss_unlabel": loss_unsup.item(),
            })

            # Reference CIFAR-10 results (50k images):
            #   4000 labels -> ~95% acc, 250 labels -> ~94%, 40 labels -> ~89%

            if cur_itrs % args.step_size == 0:
                epoch += 1
                tmp_acc = test(model, test_loader=test_loader, args=args)
                wandb.log({
                    'fixmatch/acc': tmp_acc,
                })
                if tmp_acc > best_acc:
                    best_acc = tmp_acc
                    # Checkpoint only when accuracy improves.
                    torch.save({
                        "cur_itrs": cur_itrs,
                        "model": model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "best_acc": best_acc
                    }, args.model_save_path)

                args.logger.info("model best_acc: {:.4f}".format(best_acc))

            # BUGFIX: use >= so training stops after exactly total_itrs steps
            # (the original `>` ran one iteration past the progress-bar total).
            if cur_itrs >= args.total_itrs:
                pbar.close()
                wandb.finish()
                return

        
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
