import argparse
import os
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from dudenet import *
from dataset import prepare_data, Dataset
from utils import *

# Pin CUDA device enumeration to PCI bus order and expose only GPU 0;
# must be set before any CUDA context is created.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Command-line configuration. ``opt`` is parsed at import time and read as a
# module-level global by main().
parser = argparse.ArgumentParser(description="combinet")
parser.add_argument("--batchSize", type=int, default=32, help="Training batch size")
parser.add_argument("--epochs", type=int, default=3, help="Number of training epochs")
parser.add_argument("--lr", type=float, default=1e-3, help="Initial learning rate")
parser.add_argument("--outf", type=str, default="logs", help='path of log files')
# NOTE(review): only mode 'S' appears to be implemented in main(); 'B' (blind)
# is accepted here but the training loop always uses --val_noiseL.
parser.add_argument("--mode", type=str, default="S", help='with known noise level (S) or blind training (B)')
parser.add_argument("--val_noiseL", type=float, default=50, help='noise level used on validation set')
opt = parser.parse_args()


def main():
    """Train the denoising network with periodic validation and checkpointing.

    Reads all configuration from the module-level ``opt`` namespace.
    Checkpoints and the running validation-PSNR log are written to
    ``sigma_<val_noiseL>/``. Requires a CUDA device (everything is ``.cuda()``).
    """
    # Directory for checkpoints and the PSNR log.
    save_dir = 'sigma_' + str(opt.val_noiseL) + '/'
    # FIX: makedirs with exist_ok replaces the exists()+mkdir pair (race-free,
    # and creates parents if the path ever gains components).
    os.makedirs(save_dir, exist_ok=True)

    # Load dataset
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    dataset_val = Dataset(train=False)
    loader_train = DataLoader(dataset=dataset_train, num_workers=4, batch_size=opt.batchSize, shuffle=True)
    loader_val = DataLoader(dataset=dataset_val, num_workers=4, batch_size=opt.batchSize, shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))

    # Build model, loss, optimizer and LR schedule.
    net = dudenet()
    criterion = nn.MSELoss().cuda()
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)

    # NOTE(review): blind training (opt.mode == 'B') is not implemented below;
    # noise is always drawn at opt.val_noiseL. The dead per-sample noise-level
    # table (np.random.uniform over [0, 55]) from the original has been removed.
    psnr_list = []

    total_iters = 0
    # These frequencies are expressed in *samples* and the counters advance by
    # batchSize per step, so they fire every 500 / 200 batches respectively.
    opt.save_iter_freq = opt.batchSize * 500
    opt.update_lr_freq = opt.batchSize * 200
    stop_update = 100000  # stop decaying the LR after this many samples within an epoch

    for epoch in range(opt.epochs):
        epoch_iter = 0
        for i, data_train in enumerate(loader_train, 0):
            total_iters += opt.batchSize
            epoch_iter += opt.batchSize

            # FIX: the original called model.eval() mid-loop and never restored
            # train(), so BatchNorm/Dropout ran in eval mode for the whole run.
            # Make the mode explicit at the top of every training step.
            model.train()

            data = data_train['data']
            # Synthesize AWGN at the known noise level, one draw per image.
            noise = torch.zeros(data.shape)
            sizeN = noise.shape[1:]
            for n in range(noise.shape[0]):
                noise[n, :, :, :] = torch.FloatTensor(sizeN).normal_(mean=0, std=opt.val_noiseL / 255.)
            imgn_train = torch.clamp(data + noise, 0., 1.).cuda()

            out_train = model(imgn_train)
            # Per-sample half-MSE (DnCNN-style loss normalization).
            loss = criterion(out_train, data.cuda()) / (imgn_train.size()[0] * 2)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Report training PSNR on the detached prediction (no grad needed).
            psnr_train = batch_PSNR(out_train.detach(), data, 1.)
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                  (epoch + 1, i + 1, len(loader_train), loss.item(), psnr_train))

            if epoch_iter % opt.save_iter_freq == 0:
                model_name = 'epoch' + str(epoch) + '_' + str(int(epoch_iter / opt.save_iter_freq)) + '.pth'
                torch.save(model.state_dict(), os.path.join(save_dir, model_name))

                # Validate in eval mode; train mode is restored afterwards.
                model.eval()
                psnr_val = 0
                k = 0
                with torch.no_grad():
                    # Use names distinct from the training loop's data/noise so
                    # validation does not clobber them.
                    for _, data_val in enumerate(loader_val):
                        val_clean = data_val['data'].cuda()
                        val_noise = torch.zeros(val_clean.shape).cuda()
                        val_sizeN = val_noise.shape[1:]
                        for n in range(val_noise.shape[0]):
                            val_noise[n, :, :, :] = torch.FloatTensor(val_sizeN).normal_(mean=0, std=opt.val_noiseL / 255.).cuda()
                        data_n = torch.clamp(val_clean + val_noise, 0., 1.)
                        out_val = torch.clamp(model(data_n), 0., 1.)
                        psnr_val += batch_PSNR(out_val, val_clean, 1.)
                        k += 1
                psnr_val /= k
                psnr_list.append(str(psnr_val))
                print("[epoch %d] PSNR_val: %.4f" % (epoch + 1, psnr_val))
                model.train()

                # FIX: context manager instead of bare open()/close() — the file
                # is closed even if a write raises.
                with open(save_dir + 'psnr.txt', 'w') as f:
                    for line in psnr_list:
                        f.write(line + '\n')

            # Step the LR schedule periodically, but only early in the epoch.
            if epoch_iter <= stop_update and epoch_iter % opt.update_lr_freq == 0:
                scheduler.step()
                lr = optimizer.param_groups[0]['lr']
                print('update <lr> at iters %d learning rate = %.5f' % (total_iters, lr))


if __name__ == "__main__":
    # data_dir = './data/train'
    # filename = 'train.h5'
    # prepare_data(filename, data_dir, 64, stride=64)
    # data_dir = './data/val'
    # filename = 'val.h5'
    # prepare_data(filename, data_dir, 64, stride=64)
    main()
