import os.path
import logging
import torch
from torch.utils import data
from utils import utils_logger, utils_imgreadwrite as bk
from utils import utils_image as util
# from utils import utils_model
from gdal_dataset import gdal_dataset
from models.network_rrdbnet import RRDBNet as net
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
import numpy as np
from osgeo import gdal
import argparse


class Tester(object):
    """Tile-wise super-resolution inference over a GDAL raster.

    The input image is split by ``gdal_dataset`` into overlapping
    GPU-sized patches; each patch is upscaled by a factor of ``sf`` with
    an RRDBNet model, and the central (overlap-trimmed) region of each
    upscaled patch is written into an output GeoTIFF of size
    ``(img_width * sf, img_height * sf)``.
    """

    def __init__(self, args, model_name, img):
        """Prepare the patch dataset, the output GeoTIFF and the model.

        Args:
            args: parsed CLI namespace; uses sf, patch_size, oversize,
                batch_size, workers, cuda, gpu_ids, modelroot, resultroot.
            model_name: checkpoint basename (without ``.pth``) inside
                ``args.modelroot``; also embedded in the output filename.
            img: path to the input raster, readable by GDAL.

        Raises:
            RuntimeError: if the checkpoint file does not exist.
        """
        self.args = args
        self.sf = args.sf                      # magnification factor
        self.GPU_patch_size = args.patch_size  # LR patch size fed to the GPU
        self.oversize = args.oversize          # LR overlap between neighbouring patches
        # Half of the upscaled overlap: the margin trimmed from a patch edge.
        self.write_oversize = int(self.oversize * self.sf / 2)
        # Stride (in HR pixels) between consecutive written patches.
        self.write_patch_size = int(self.sf * (self.GPU_patch_size - self.oversize))

        self.dataset = gdal_dataset(img, args.patch_size, args.oversize, args.sf)
        self.img_width = self.dataset.img_width
        self.img_height = self.dataset.img_height
        self.bandsnum = self.dataset.bandsnum

        ###write img###
        # Create the output GeoTIFF with the same band count, sf-times larger.
        # (The original code had two byte-identical branches for
        # bandsnum == 1 vs != 1; collapsed into a single Create call.)
        self.img_name, _ext = os.path.splitext(os.path.basename(img))
        driver = gdal.GetDriverByName("GTiff")
        out_path = os.path.join(args.resultroot,
                                self.img_name + '_' + model_name + '.tif')
        self.outdata = driver.Create(out_path,
                                     self.img_width * self.sf,
                                     self.img_height * self.sf,
                                     self.bandsnum, gdal.GDT_UInt16)

        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        # shuffle=False is required: test() maps each batch element back to
        # its patch coordinates in dataset.waiting_list purely by position.
        self.test_loader = data.DataLoader(self.dataset, batch_size=args.batch_size,
                                           shuffle=False, **kwargs)

        model_path = os.path.join(args.modelroot, model_name + '.pth')  # set model path
        self.model = net(in_nc=3, out_nc=3, nf=64, nb=23, gc=32, sf=args.sf)

        if args.cuda:
            torch.cuda.empty_cache()
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
            self.model.to(f'cuda:{self.model.device_ids[0]}')
        if not os.path.isfile(model_path):
            raise RuntimeError("=> no checkpoint found at '{}'".format(model_path))

        print("=> loaded checkpoint '{}'".format(model_path))
        # Load weights into the underlying module. The original code always
        # accessed ``self.model.module``, which raises AttributeError on
        # CPU-only runs (no DataParallel wrapper); pick the target explicitly.
        target = (self.model.module
                  if isinstance(self.model, torch.nn.DataParallel)
                  else self.model)
        target.load_state_dict(torch.load(model_path))

    def test(self):
        """Run inference over every patch and write results to the GeoTIFF."""
        self.model.eval()
        tbar = tqdm(self.test_loader, desc='\r')
        for idx, image in enumerate(tbar):
            if self.args.cuda:
                image = image.to(f'cuda:{self.model.device_ids[0]}')
            with torch.no_grad():
                output = self.model(image)
            pred = output.data.cpu().numpy()
            for batch_idx in range(pred.shape[0]):
                # CHW -> HWC for raster writing.
                img_tmp = np.transpose(pred[batch_idx], axes=[1, 2, 0])
                if self.dataset.bandsnum == 1:
                    # Single band: undo the dataset's min/max normalization.
                    img_tmp = img_tmp[:, :, 0]
                    img_tmp = img_tmp * (self.dataset.maxvalue - self.dataset.minvalue) + self.dataset.minvalue
                else:
                    # Multi-band: model output assumed in [0, 1] -> 8-bit range.
                    img_tmp *= 255.0

                ###write to file
                # NOTE(review): astype(int) gives int64; GDAL converts on
                # write into the GDT_UInt16 band. Out-of-range values are not
                # clipped here -- confirm the model output range is safe.
                img_tmp = img_tmp.astype(int)
                # Recover the patch's (row, col) tile indices; relies on
                # shuffle=False preserving dataset order.
                i, j = self.dataset.waiting_list[idx * self.args.batch_size + batch_idx]

                # For interior patches, skip the leading overlap margin and
                # offset the write position by it; edge patches start at 0.
                if j > 0:
                    startj = self.write_oversize
                    writej = j * self.write_patch_size + self.write_oversize
                else:
                    startj = 0
                    writej = 0

                if i > 0:
                    starti = self.write_oversize
                    writei = i * self.write_patch_size + self.write_oversize
                else:
                    starti = 0
                    writei = 0

                # Last row/column of tiles: shift the write window back so it
                # ends exactly at the image border and keep the full patch
                # extent; otherwise trim the trailing overlap margin.
                if self.write_patch_size + self.write_oversize + writei > self.img_height * self.sf:
                    writei = int(self.img_height * self.sf - self.write_patch_size - self.write_oversize)
                    endi = self.GPU_patch_size * self.sf
                else:
                    writei = int(writei)
                    endi = -self.write_oversize

                if self.write_patch_size + self.write_oversize + writej > self.img_width * self.sf:
                    writej = int(self.img_width * self.sf - self.write_patch_size - self.write_oversize)
                    endj = self.GPU_patch_size * self.sf
                else:
                    writej = int(writej)
                    endj = -self.write_oversize

                if self.bandsnum != 1:
                    for band in range(self.bandsnum):
                        outband = self.outdata.GetRasterBand(band + 1)
                        final_img = img_tmp[starti:endi, startj:endj, band]
                        outband.WriteArray(final_img, writej, writei)
                else:
                    outband = self.outdata.GetRasterBand(1)
                    final_img = img_tmp[starti:endi, startj:endj]
                    outband.WriteArray(final_img, writej, writei)
        print("inference Done")
        # Flush pending raster writes to disk. The original ``del outband``
        # raised NameError when the loader yielded no batches and did not
        # actually flush anything.
        self.outdata.FlushCache()







if __name__ == '__main__':
    # Script entry point: parse CLI arguments, pick the model matching the
    # requested scale factor, then run tile-wise SR inference on every image
    # found in the input directory.
    utils_logger.logger_info('blind_sr_log', log_path='blind_sr_log.log')
    logger = logging.getLogger('blind_sr_log')

    parser = argparse.ArgumentParser('Process some str')
    parser.add_argument('--dataroot', dest='dataroot', help='input directory for image-files', type=str,
                        default='testsets/2/')
    parser.add_argument('--sf', dest='sf', help='input Magnification 2 or 4', type=int, default=2)
    parser.add_argument('--modelroot', dest='modelroot', help='output directory for model', type=str,
                        default='model_zoo')
    parser.add_argument('--resultroot', dest='resultroot', help='output directory for result', type=str,
                        default='result')
    parser.add_argument('--patch_size', type=int, default=1024, help='patch small image')
    parser.add_argument('--oversize', type=int, default=8, help='overlap size')
    parser.add_argument('--batch_size', type=int, default=4, help='input batch size')
    parser.add_argument('--workers', type=int, default=0, help='dataloader threads')
    # Help text corrected: the actual default is '1,2', not 0.
    parser.add_argument('--gpu-ids', type=str, default='1,2', help='use which gpu to train, must be a \
                                comma-separated list of integers only (default=1,2)')
    args = parser.parse_args()
    args.cuda = torch.cuda.is_available()
    if args.cuda:
        try:
            # Convert '1,2' -> [1, 2] for torch.nn.DataParallel.
            args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
            print("GPU ID: ", args.gpu_ids)
        except ValueError:
            raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')

    # Model checkpoint name depends on the requested scale factor.
    if args.sf == 2:
        model_names = ['PMRSGANx2']  # 'PMRSGANx2' for scale factor 2
    else:
        model_names = ['PMRSGAN']

    # Fail early if the input directory has no files to process.
    # (Error message fixed: the original said "the file is invoid".)
    if not os.listdir(args.dataroot):
        raise RuntimeError("no input images found in '{}'".format(args.dataroot))
    else:
        L_path = args.dataroot

    for model_name in model_names:
        logger.info('{:>16s} : {:s}'.format('Model Name', model_name))
        logger.info('{:>16s} : {:<d}'.format('GPU ID', torch.cuda.current_device()))
        for img in util.get_image_paths(L_path):
            tester = Tester(args, model_name, img)
            tester.test()
            logger.info('{:>16s} : {:s}'.format('Input Path', L_path))
            logger.info('{:>16s} : {:s}'.format('Output Path', args.resultroot))
    logger.info('{:>16s}'.format('end time'))

