import os
import argparse
import glob
import cv2
import numpy as np

import mindspore.common.dtype as mstype
from mindspore.common import set_seed
from mindspore import Tensor, ops, context
from mindspore.train.serialization import load_param_into_net, load_checkpoint

from model import DnCNN
from src.metric import get_PSNR_SSIM
from src.show_image import show_image

# Command-line interface for the DnCNN evaluation script.
parser = argparse.ArgumentParser(description="DnCNN_Test")
parser.add_argument("--num_of_layers", type=int, default=17, help="Number of total layers")
parser.add_argument("--ckpt_path", type=str, default='', help="restore ckpt file path")
parser.add_argument("--test_data_path", type=str, default='data/Test/Set12',
                    help='test on Set12 or Set68')
parser.add_argument("--test_noiseL", type=float, default=25, help='noise level used on test set')
# Bug fix: the original used type=bool, which converts ANY non-empty string
# (including "False") to True. A store_true flag is the correct boolean switch;
# the default (False) is unchanged.
parser.add_argument("--verbose", action='store_true', default=False, help='show image result')
parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
parser.add_argument('--device_id', type=int, default=0, help='Device id')
args = parser.parse_args()

# Fix the RNG seed so the generated test noise is reproducible across runs.
set_seed(1)

def normalize(data):
    """Scale 8-bit pixel values into the [0, 1] range."""
    scale = 255.
    return data / scale

if __name__ == '__main__':
    # Bug fix: the original hard-coded device_target='Ascend', silently
    # ignoring the --device_target CLI argument. Honor it here.
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args.device_target,
                        device_id=args.device_id,
                        save_graphs=False)
    print("======> model test <======\n")
    # Build the network and restore the trained weights from the checkpoint.
    # NOTE(review): args.num_of_layers is never forwarded to DnCNN(); confirm
    # whether the constructor accepts a depth argument before trusting that flag.
    dncnn = DnCNN()
    param_dict = load_checkpoint(args.ckpt_path)
    load_param_into_net(dncnn, param_dict)
    dncnn.set_train(False)

    print("======> loading data <======\n")
    file_source = glob.glob(os.path.join(args.test_data_path, '*png'))
    file_source.sort()
    # Robustness: fail loudly instead of dividing by zero after the loop
    # when the directory contains no matching images.
    if not file_source:
        raise FileNotFoundError("no '*png' images found under %s" % args.test_data_path)

    print("======> processing data <======\n")
    psnr_test = 0
    ssim_test = 0
    for f in file_source:
        print(">>>generate noisy image......\n")
        # Keep only the first channel and scale pixel values to [0, 1].
        img = cv2.imread(f)
        img = normalize(np.float32(img[:, :, 0]))
        # Add batch and channel axes: (H, W) -> (1, 1, H, W).
        img = np.expand_dims(img, 0)
        img = np.expand_dims(img, 1)
        source = Tensor(img, dtype=mstype.float32)
        # Additive white Gaussian noise at the requested sigma (8-bit units).
        noise = np.random.standard_normal(size=source.shape) * (args.test_noiseL / 255.0)
        noise = Tensor(noise, dtype=mstype.float32)
        noisy_img = source + noise
        print(">>>generate success!\n")
        print("for each image, start predicting:\n")
        # DnCNN predicts the residual (noise); subtract it from the noisy
        # input and clamp the restored image back into [0, 1].
        out = ops.clip_by_value(noisy_img - dncnn(noisy_img), Tensor(0.), Tensor(1.))
        if args.verbose:
            save_path = os.path.join('./', args.test_data_path.split('/')[-1] + '_L%s_result' % args.test_noiseL)
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(save_path, exist_ok=True)
            # os.path.basename is portable, unlike splitting on '/'.
            show_image(source, noisy_img, out, os.path.join(save_path, os.path.basename(f)))
        psnr, ssim = get_PSNR_SSIM(out, source, 1.)
        psnr_test += psnr
        ssim_test += ssim
        print('Image id: %s, PSNR: %.2f, SSIM: %.2f \n' % (os.path.basename(f), psnr, ssim))
    print("======> the result shows below: <======\n")
    # Average the per-image metrics over the whole dataset.
    psnr_test = psnr_test / len(file_source)
    ssim_test = ssim_test / len(file_source)
    print("Dataset: %s, noise level: %d, PSNR = %3f, SSIM = %3f" %
          (args.test_data_path.split('/')[-1], args.test_noiseL, psnr_test, ssim_test))
    print("======> Test finish <======\n")