import argparse
import os
import copy

import numpy as np
import torch
from torch import nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

# from models import SRCNN  # lcj-改为Unet
# from models import ResAttU_Net
from torch import Tensor
from models import ResAttU_Net_noBN
from datasets import TrainDataset, EvalDataset
from utils import AverageMeter, calc_psnr
import torch.nn as nn
import matplotlib.pyplot as plt


if __name__ == '__main__':
    # Training script: trains ResAttU_Net_noBN for super-resolution on paired
    # patches read from HDF5 datasets, logs per-epoch training loss and eval
    # PSNR, saves a checkpoint every epoch, and keeps the best-PSNR weights.
    psnrLog = []  # per-epoch eval PSNR, used for the final plot
    lossLog = []  # per-epoch mean training loss, used for the final plot

    parser = argparse.ArgumentParser()
    parser.add_argument('--train-file', type=str, default=r'D:\codes\01-rebuild\SR\20220211j7in0LBK\SRCNN-pytorch-master\double_channel\HR_beads.h5')
    parser.add_argument('--eval-file', type=str, default=r'D:\codes\01-rebuild\SR\20220211j7in0LBK\SRCNN-pytorch-master\double_channel\eval_HR_beads.h5')
    parser.add_argument('--outputs-dir', type=str, default=r'D:\codes\01-rebuild\SR\20220211j7in0LBK\SRCNN-pytorch-master\double_channel\output_models')
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--batch-size', type=int, default=16)
    parser.add_argument('--num-epochs', type=int, default=100)
    parser.add_argument('--num-workers', type=int, default=8)
    parser.add_argument('--seed', type=int, default=123)
    args = parser.parse_args()

    # All artifacts (checkpoints and log files) go under one run directory.
    args.outputs_dir = os.path.join(args.outputs_dir, '1009_gitter_200epoch')
    if not os.path.exists(args.outputs_dir):
        os.makedirs(args.outputs_dir)

    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print("device:", device)
    torch.manual_seed(args.seed)

    # BUG FIX: the original only created the model inside the multi-GPU
    # branch (`model_DP = nn.DataParallel(...)`), so any machine with 0 or 1
    # GPU crashed with NameError on `model_DP`.  Build the network
    # unconditionally and wrap it in DataParallel only when it helps.
    model = ResAttU_Net_noBN()
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs")
        model = nn.DataParallel(model)
    model = model.to(device)

    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    train_dataset = TrainDataset(args.train_file)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True,
                                  drop_last=True)
    eval_dataset = EvalDataset(args.eval_file)
    eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)

    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0

    for epoch in range(args.num_epochs):
        model.train()
        epoch_losses = AverageMeter()

        # drop_last=True above, so the progress bar counts only full batches.
        with tqdm(total=(len(train_dataset) - len(train_dataset) % args.batch_size)) as t:
            t.set_description('epoch: {}/{}'.format(epoch, args.num_epochs - 1))

            for data in train_dataloader:
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)

                preds = model(inputs)
                loss = criterion(preds, labels)
                epoch_losses.update(loss.item(), len(inputs))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                t.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
                t.update(len(inputs))

        # CONSISTENCY FIX: the original wrote the loss log to an unrelated
        # hard-coded C:\ path (crashing when it did not exist); keep the log
        # next to the checkpoints in the run directory instead.
        lossLog.append(np.array(epoch_losses.avg))
        np.savetxt(os.path.join(args.outputs_dir, 'lossLog.txt'), lossLog)
        torch.save(model.state_dict(), os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))

        # Evaluate on the held-out set and track the best checkpoint by PSNR.
        model.eval()
        epoch_psnr = AverageMeter()

        for data in eval_dataloader:
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)

            with torch.no_grad():
                preds = model(inputs).clamp(0.0, 1.0)

            epoch_psnr.update(calc_psnr(preds, labels), len(inputs))

        print('eval psnr: {:.2f}'.format(epoch_psnr.avg))
        # ROBUSTNESS FIX: the original `Tensor.cpu(epoch_psnr.avg)` fails with
        # TypeError when the average is a plain float (e.g. a CPU run);
        # float() handles both tensors and floats and is what savetxt needs.
        psnrLog.append(float(epoch_psnr.avg))
        # Same path fix as the loss log above.
        np.savetxt(os.path.join(args.outputs_dir, 'psnrLog.txt'), psnrLog)

        if epoch_psnr.avg > best_psnr:
            best_epoch = epoch
            best_psnr = epoch_psnr.avg
            best_weights = copy.deepcopy(model.state_dict())

    print('best epoch: {}, psnr: {:.2f}'.format(best_epoch, best_psnr))
    torch.save(best_weights, os.path.join(args.outputs_dir, 'best.pth'))

    # Plot the loss and PSNR curves side by side.
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK axis labels correctly
    plt.subplot(1, 2, 1)
    plt.plot(lossLog)
    plt.xlabel('epoch轮数')
    plt.ylabel('Loss')
    plt.subplot(1, 2, 2)
    plt.plot(psnrLog)

    plt.xlabel('epoch轮数')
    plt.ylabel('PNSR（单位dB）')
    plt.title('评估结果')
    plt.show()