#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
# from datasets import load_dataset
import dataloader
# from noise2noise import Noise2Noise
from n2n_swin import Noise2Noise
from argparse import ArgumentParser

from train_data_aug_local import TrainData #############
from val_data import ValData
from val_data_train import ValData_train
#import os
#os.environ["CUDA_VISIBLE_DEVICES"] = "6"


def parse_args():
    """Build and parse command-line arguments for Noise2Noise training.

    Returns:
        argparse.Namespace: parsed training configuration.
    """

    parser = ArgumentParser(description='PyTorch implementation of Noise2Noise from Lehtinen et al. (2018)')

    # Data parameters
    parser.add_argument('-t', '--train-dir', help='training set path', default='./../data/train')
    parser.add_argument('-v', '--valid-dir', help='test set path', default='./../data/valid')
    parser.add_argument('--ckpt-save-path', help='checkpoint save path', default='./../ckpts')
    parser.add_argument('--ckpt-overwrite', help='overwrite model checkpoint on save', action='store_true')
    parser.add_argument('--report-interval', help='batch report interval', default=1, type=int)
    parser.add_argument('-ts', '--train-size', help='size of train dataset', type=int)
    parser.add_argument('-vs', '--valid-size', help='size of valid dataset', type=int)

    # Training hyperparameters
    parser.add_argument('-lr', '--learning-rate', help='learning rate', default=0.0001, type=float)
    # BUG FIX: was `type=list`, which converts each CLI token into a list of
    # characters (e.g. "0.9" -> ['0', '.', '9']). With nargs='+', `type` is
    # applied per token, so each value must be parsed as a float.
    parser.add_argument('-a', '--adam', help='adam parameters (beta1 beta2 eps)', nargs='+',
        default=[0.9, 0.99, 1e-8], type=float)
    parser.add_argument('-b', '--batch-size', help='minibatch size', default=8, type=int)
    parser.add_argument('-e', '--nb-epochs', help='number of epochs', default=100, type=int)
    parser.add_argument('-l', '--loss', help='loss function', choices=['l1', 'l2', 'hdr'], default='l1', type=str)
    parser.add_argument('--cuda', help='use cuda', action='store_true')
    parser.add_argument('--plot-stats', help='plot stats after every epoch', action='store_true')

    # Corruption parameters
    parser.add_argument('-n', '--noise-type', help='noise type',
        choices=['gaussian', 'poisson', 'text', 'mc'], default='gaussian', type=str)
    parser.add_argument('-p', '--noise-param', help='noise parameter (e.g. std for gaussian)', default=50, type=float)
    parser.add_argument('-s', '--seed', help='fix random seed', type=int)
    # NOTE: underscore flag (crop_size) kept for backward compatibility with
    # existing launch scripts; attribute name is `crop_size` either way.
    parser.add_argument('--crop_size', help='random crop size', default=128, type=int)
    parser.add_argument('--clean-targets', help='use clean targets for training', action='store_true')
    parser.add_argument('--ckpt_load_path', help='start training with a pretrained model', default=None)
    return parser.parse_args()
 
  
if __name__ == '__main__':
    # Entry point: train Noise2Noise on paired image data.

    # Parse training configuration from the command line.
    params = parse_args()

    # Training loader: random square crops of size `crop_size`, shuffled.
    crop_shape = [params.crop_size, params.crop_size]
    train_loader = DataLoader(
        TrainData(crop_shape, params.train_dir),
        batch_size=params.batch_size,
        shuffle=True,
        num_workers=0,
    )

    # Validation loader: full images, one at a time, fixed order.
    valid_loader = DataLoader(
        ValData_train(params.valid_dir),
        batch_size=1,
        shuffle=False,
        num_workers=0,
    )

    # Build the model in training mode.
    n2n = Noise2Noise(params, trainable=True)

    # Optionally resume from a pretrained checkpoint.
    if params.ckpt_load_path is not None:
        n2n.load_model(params.ckpt_load_path)

    n2n.train(train_loader, valid_loader)
