import os.path
import numpy as np
import torch
from copy import deepcopy
from tqdm import tqdm
import torch.nn as nn
import random
import wandb
from medpy import metric
from scipy.ndimage import zoom
from utils import loadyaml, _get_logger, mk_path, get_current_consistency_weight, update_ema_variables
from utils import build_lr_scheduler, build_optimizer,SSIM,batch_PSNR
from model import build_model
from datasets import build_loader
from torchvision.utils import make_grid

# SECURITY NOTE(review): hardcoded API key committed to source — rotate this key
# and load it from the environment / a secrets manager instead.
os.environ["WANDB_API_KEY"] = "f8faa7a67ba54735a3e66f118d466640711b323d"
# Run wandb in offline mode; logs are synced manually later.
os.environ["WANDB_MODE"] = "offline"



def main():
    """Entry point: load config, set up logging/wandb/seeds, build data and
    model, then run PReNet training.

    Fixes vs. original: the final call passed the undefined name ``argss``
    (NameError); the absolute-root path was computed twice.
    """
    path = r"config/prenet_raintrainh_10k_224x224.yaml"
    root = os.path.dirname(os.path.realpath(__file__))  # absolute path of this file's directory
    args = loadyaml(os.path.join(root, path))  # load the yaml config

    # Resolve the compute device; fall back to CPU when CUDA is unavailable.
    if args.cuda:
        args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    else:
        args.device = torch.device("cpu")

    args.save_path = os.path.join(root, args.save_path)
    mk_path(args.save_path)  # create output directory
    # Create log / checkpoint sub-directories.
    mk_path(os.path.join(args.save_path, "tensorboardX"))
    mk_path(os.path.join(args.save_path, "model"))
    args.model_save_path = os.path.join(args.save_path, "model", "model.pth")
    args.ema_model_save_path = os.path.join(args.save_path, "model", "ema_model_model.pth")

    wandb.init(
        entity="jokerak777",      # wandb entity (team/user name)
        project="image-deraining",
        name="9-14",              # experiment name (auto-generated if omitted)
        config=args,
        dir=os.path.join(args.save_path, "tensorboardX"),
    )

    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
    args.tqdm = os.path.join(args.save_path, "tqdm.log")

    # Seed all RNGs for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    torch.backends.cudnn.deterministic = False  # single-GPU run, no distributed setup
    torch.backends.cudnn.benchmark = True       # let cuDNN pick the fastest kernels

    train_loader, test_loader = build_loader(args)  # build datasets
    args.logger.info("==========> train_loader length:{}".format(len(train_loader.dataset)))
    args.logger.info("==========> test_dataloader length:{}".format(len(test_loader)))
    args.logger.info("==========> epochs length:{}".format(args.epochs))

    # step 1: build the model
    model = build_model(args=args).to(device=args.device)

    wandb.watch(model, log='all', log_freq=1000)

    # step 2: train the model (original passed undefined name `argss`)
    PReNet(model, train_loader, test_loader, args)


def PReNet(model, train_loader, test_loader, args):
    """Train ``model`` with a negative-SSIM loss until ``args.total_itrs``
    iterations, periodically evaluating PSNR and checkpointing the best model.

    Fixes vs. original: removed references to undefined names left over from
    another project (``unlabel_loader``, ``Med_Sup_Loss``, ``ema_model``,
    ``lr``, ``mean_dice``/``best_dice1``, ``scheduler``/``epoch``,
    ``test_acdc``); added the missing ``optimizer.zero_grad()``; ``cur_itrs``
    is now actually incremented and the ``while True`` loop terminates.

    Args:
        model: network to train; forward returns ``(output, extra)``.
        train_loader: training DataLoader yielding (rainy, clean) pairs.
        test_loader: evaluation DataLoader.
        args: namespace with total_itrs, step_size, device, logger,
            model_save_path and optimizer/scheduler settings.
    """
    optimizer = build_optimizer(args=args, model=model)
    lr_scheduler = build_lr_scheduler(args=args, optimizer=optimizer)

    criterion = SSIM()

    model.train()
    cur_itrs = 0
    best_psnr = 0.0
    args.logger.info("start training")

    while cur_itrs < args.total_itrs:
        for input_train, target_train in tqdm(train_loader):
            cur_itrs += 1
            input_train = input_train.to(args.device)
            target_train = target_train.to(args.device)

            optimizer.zero_grad()  # was missing: gradients otherwise accumulate
            out_train, _ = model(input_train)
            # SSIM is a similarity (higher is better), so minimize its negation.
            pixel_metric = criterion(target_train, out_train)
            loss = -pixel_metric

            loss.backward()
            optimizer.step()
            wandb.log({
                "prenet/loss": loss.item(),
                # Read the current lr from the optimizer (original used undefined `lr`).
                "prenet/lr": optimizer.param_groups[0]["lr"],
            })

            if cur_itrs % args.step_size == 0:
                psnr = test(model=model, test_loader=test_loader, args=args, name="model1")
                model.train()  # `test` switches to eval mode; restore training mode
                args.logger.info("model psnr: {:.4f}".format(psnr))

                wandb.log({
                    'prenet/psnr': psnr,
                })

                # Checkpoint on PSNR improvement (original compared undefined dice vars).
                if psnr > best_psnr:
                    best_psnr = psnr
                    torch.save(
                        {
                            "model": model.state_dict(),
                            "optimizer": optimizer.state_dict(),
                            "lr_scheduler": lr_scheduler.state_dict(),
                            "cur_itrs": cur_itrs,
                            "best_psnr": best_psnr,
                        }, args.model_save_path)

            if cur_itrs >= args.total_itrs:
                return

        # Step the scheduler once per epoch (original called undefined `scheduler`).
        lr_scheduler.step()

                

def test(model, test_loader, args, name):
    """Evaluate ``model`` on ``test_loader`` and return the mean PSNR.

    Also logs an image grid of the first batch (derained output, target,
    input) to wandb under ``name``.

    Fixes vs. original: ``enumerate`` was unpacked as ``(input, target)``
    which made ``input_train`` the batch index (TypeError on ``.to``);
    ``i_batch`` was undefined; ``print(im_derain.shape)`` ran before
    ``im_derain`` existed; inference is now wrapped in ``torch.no_grad()``.

    NOTE(review): the model is left in eval mode on return — the caller is
    expected to restore train mode.
    """
    model.eval()
    count = 0
    psnr_train = 0.0
    with torch.no_grad():  # inference only: no autograd graph needed
        for i_batch, (input_train, target_train) in enumerate(test_loader):
            target_train = target_train.to(args.device)
            input_train = input_train.to(args.device)
            out_train, _ = model(input_train)
            out_train = torch.clamp(out_train, 0., 1.)
            # Accumulate batch-size-weighted PSNR so the final mean is per-image.
            psnr_train += batch_PSNR(out_train, target_train, 1.) * input_train.shape[0]
            count += input_train.shape[0]

            if i_batch == 0:
                # Log a visual sample of the first batch only.
                im_target = make_grid(target_train.data, nrow=8, normalize=True, scale_each=True)
                im_input = make_grid(input_train.data, nrow=8, normalize=True, scale_each=True)
                im_derain = make_grid(out_train.data, nrow=8, normalize=True, scale_each=True)

                Img = wandb.Image(im_derain)
                label_pred = wandb.Image(im_target)
                label_true = wandb.Image(im_input)

                wandb.log({"{}/Image".format(name): Img, "{}/label_pred".format(name): label_pred, "{}/label_true".format(name): label_true})

    return psnr_train / count

    

# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
