from __future__ import print_function

import argparse
import math
import os
from math import log10

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader

from my_data import get_trainning_set, get_test_set
from my_model import My_model
from mymodel_ESPCN import ESPCN
#python my_main.py --upscale_factor 3 --batch_size 4 --test_batch_size 4 --epochs 120 --lr 0.0001


# ---------------------------------------------------------------------------
# Command-line interface.
# Example:
#   python my_main.py --upscale_factor 3 --batch_size 4 --test_batch_size 4 \
#       --epochs 120 --lr 0.0001
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Pytorch super resolution ')
parser.add_argument('--upscale_factor', type=int, required=True,
                    help='super-resolution upscale factor')
# BUGFIX: the options below originally passed required=True together with a
# default=...; argparse ignores the default when an option is required, so
# every default was dead and users were forced to supply all flags. Making
# them optional lets the defaults take effect and remains backward
# compatible with command lines that pass every flag explicitly.
parser.add_argument('--batch_size', type=int, default=16,
                    help='training batch size')
parser.add_argument('--test_batch_size', type=int, default=10,
                    help='testing batch size')
parser.add_argument('--epochs', type=int, default=1200,
                    help='number of training epochs')
parser.add_argument('--lr', type=float, default=0.0001,
                    help='initial learning rate')
parser.add_argument('--threads', type=int, default=6,
                    help='number of DataLoader worker threads')
parser.add_argument('--seed', type=int, default=123,
                    help='random seed to use, Default=123')
parser.add_argument('--step', type=int, default=10,
                    help='number of epochs between learning-rate decays')
parser.add_argument('--cuda', action='store_true', help='use cuda?')

opt = parser.parse_args()

def main():
    """Train the skip-connection super-resolution model.

    Builds train/test datasets for ``opt.upscale_factor``, trains for
    ``opt.epochs`` epochs with a step-decayed Adam optimizer, reports the
    average PSNR on the test set after every epoch, and checkpoints the
    model to ``./model/`` every 100 epochs.
    """
    torch.manual_seed(opt.seed)

    # ------------------- load training / test data -------------------
    trainset = get_trainning_set(opt.upscale_factor)
    testset = get_test_set(opt.upscale_factor)

    # DataLoaders over the custom My_dataset implementations.
    trainning_set_Loader = DataLoader(dataset=trainset, num_workers=opt.threads,
                                      batch_size=opt.batch_size, shuffle=True)
    testing_set_Loader = DataLoader(dataset=testset, num_workers=opt.threads,
                                    batch_size=opt.test_batch_size, shuffle=False)

    # ------------------- model, loss and optimizer -------------------
    Skip_connection_SR_Model = My_model()
    # Wrap for multi-GPU data-parallel training (harmless on one device).
    Skip_connection_SR_Model = nn.DataParallel(Skip_connection_SR_Model)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(Skip_connection_SR_Model.parameters(), lr=opt.lr)

    def adjust_learning_rate(epoch):
        # Step decay: shrink the base lr by 10x every opt.step epochs.
        return opt.lr * (0.1 ** (epoch // opt.step))

    # --------------------------- training ----------------------------
    def train_process(epoch):
        """Run one training epoch and print per-batch / average loss."""
        Skip_connection_SR_Model.train()
        lr = adjust_learning_rate(epoch - 1)
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr
        print("Epoch = {}, lr = {}".format(epoch, optimizer.param_groups[0]["lr"]))

        epoch_loss = 0
        for iteration, batch in enumerate(trainning_set_Loader, 1):
            # torch.autograd.Variable is deprecated and a no-op on modern
            # PyTorch; use the tensors directly.
            input, target = batch[0], batch[1]

            optimizer.zero_grad()
            model_out = Skip_connection_SR_Model(input)
            loss = criterion(model_out, target)
            epoch_loss += loss.item()
            loss.backward()  # back-propagate the error
            optimizer.step()

            print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(epoch, iteration, len(trainning_set_Loader),loss.item()))
        print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(trainning_set_Loader)))

    # -------------------------- evaluation ---------------------------
    def test_process():
        """Report the average PSNR over the test set."""
        # BUGFIX: evaluation previously ran in train mode with autograd
        # enabled, needlessly building gradient graphs and giving
        # train-mode behavior for layers such as dropout/batch-norm.
        Skip_connection_SR_Model.eval()
        average_psnr = 0
        with torch.no_grad():
            for batch in testing_set_Loader:
                input, target = batch[0], batch[1]
                predication = Skip_connection_SR_Model(input)
                mse = criterion(predication, target)
                # PSNR formula assumes pixel values normalized to [0, 1].
                psnr = 10 * log10(1 / mse.item())
                average_psnr += psnr
        print("===> Avg. PSNR: {:.4f} dB".format(average_psnr / len(testing_set_Loader)))

    # ------------------------- checkpointing --------------------------
    def checkpoint(epoch):
        """Save the full model to ./model/ (created if missing)."""
        # BUGFIX: torch.save fails if the target directory does not exist.
        os.makedirs("./model", exist_ok=True)
        model_out_path = "./model/model_out_1116_{}.pth".format(epoch)
        # Saved as the whole (DataParallel-wrapped) module, not just the
        # state_dict, to stay compatible with existing loading code.
        torch.save(Skip_connection_SR_Model, model_out_path)
        print("CheckPoint saved to {}".format(model_out_path))

    for epoch in range(1, opt.epochs + 1):
        train_process(epoch)
        test_process()
        if epoch % 100 == 0:
            checkpoint(epoch)


#----------- script entry point ------------------
if __name__ =="__main__":
    main()

















