"""
(Testing FPS)
Pixel Difference Networks for Efficient Edge Detection (accepted as an ICCV 2021 oral)
See paper in https://arxiv.org/abs/2108.07009

Author: Zhuo Su, Wenzhe Liu
Date: Aug 22, 2020
"""

from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division

import argparse
import os
import time

import numpy as np
import torch
import torch_npu
from PIL import Image
from torch.utils.data import DataLoader

import models
from edge_dataloader import Custom_Loader
from utils import *
def check_npu():
    """Report whether an Ascend NPU device is usable.

    Returns:
        bool: True when ``torch_npu`` sees an available NPU; otherwise
        prints a notice and returns False.
    """
    if torch_npu.npu.is_available():
        return True
    print("NPU is not available")
    return False
def get_args(argv=None):
    """Parse command-line options for the FPS test run.

    Args:
        argv (list[str] | None): argument strings to parse; defaults to
            ``sys.argv[1:]`` so existing callers are unaffected. Passing an
            explicit list makes the function usable/testable without
            touching the process arguments.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = argparse.ArgumentParser(description='PyTorch Diff Convolutional Networks (Train)')

    parser.add_argument('--datadir', type=str, default='./data',
            help='dir to the dataset')
    parser.add_argument('--tardir', type=str, default='./experiments/results',
            help='data directory for saving the model results')

    parser.add_argument('--model', type=str, default='baseline',
            help='model to train the dataset')
    parser.add_argument('--sa', action='store_true',
            help='use attention in diffnet')
    parser.add_argument('--dil', action='store_true',
            help='use dilation in diffnet')
    parser.add_argument('--config', type=str, default='nas-all',
            help='model configurations, please refer to models/config.py for possible configurations')
    parser.add_argument('--seed', type=int, default=None,
            help='random seed (default: None)')
    parser.add_argument('--device', type=str, default='npu')

    args = parser.parse_args(argv)
    return args
 


def visual_sketion(result, save_path, img_name):
    """Save a model prediction map as an 8-bit grayscale PNG.

    Args:
        result (torch.Tensor): model prediction, expected shape (1, 1, H, W).
        save_path (str): directory to write the image to (created if missing).
        img_name (str): base name (without extension) used for the output file.

    Returns:
        None
    """
    # Make sure the output directory exists.
    os.makedirs(save_path, exist_ok=True)

    # Move to CPU and drop singleton dims -> (H, W) NumPy array.
    arr = result.squeeze().cpu().numpy()

    # Min-max normalize into [0, 255] for visualization. Guard against a
    # constant prediction map, which would otherwise divide by zero and
    # yield NaNs (and a garbage image).
    lo, hi = arr.min(), arr.max()
    if hi > lo:
        normalized = (arr - lo) / (hi - lo) * 255
    else:
        normalized = np.zeros_like(arr)
    normalized = normalized.astype(np.uint8)

    # Persist the prediction with PIL.
    save_file = os.path.join(save_path, f"{img_name}.png")
    Image.fromarray(normalized).save(save_file)

    # Report where the result was written (message kept verbatim).
    print(f"结果已保存到: {save_file}")
def test(test_loader, model, args):
    """Run inference over the loader, save each prediction, and report FPS.

    Args:
        test_loader: iterable yielding (image, img_name) pairs, batch size 1.
        model: edge-detection network; ``results[0]`` of its output is saved.
        args: namespace providing ``.device`` (inference device) and
            ``.tardir`` (output directory for visualizations).

    Returns:
        None. Prints the measured throughput (frames per second); the timing
        deliberately includes data loading and image saving, matching the
        script's end-to-end FPS definition.
    """
    model.eval()

    start = time.perf_counter()
    # Inference only: disable autograd once for the whole loop.
    with torch.no_grad():
        for image, img_name in test_loader:
            image = image.to(args.device)
            results = model(image)

            # Visualize and save the primary prediction map.
            visual_sketion(results[0], args.tardir, img_name[0])
    elapsed = time.perf_counter() - start
    print('fps: %f' % (len(test_loader) / elapsed))


if __name__ == '__main__':
    args=get_args()
    
    ### Refine args
    # If no seed was supplied, derive one from wall-clock time so the run
    # is still seeded (and reproducible once the seed value is known).
    if args.seed is None:
        args.seed = int(time.time())
    torch.manual_seed(args.seed)
    # Ensure the results directory exists before any output is written.
    os.makedirs(args.tardir, exist_ok=True)

    ### Create model
    # Look up the model constructor by name in the project's `models` module.
    model = getattr(models, args.model)(args)

    ### Transfer model to the target device (CPU or Ascend NPU)
    if args.device=='cpu':
        device = torch.device('cpu')
    else:
        # Any non-CPU request is treated as NPU; fail fast if none is present.
        if check_npu():
            device = torch.device('npu')
        else:
            raise ValueError("NPU is not available.")   
        
        # HED keeps its deconvolution weights as plain tensors (not module
        # parameters), so they must be moved to the device explicitly.
        if args.model == 'hed':
            model.weight_deconv2 = model.weight_deconv2.to(device)
            model.weight_deconv3 = model.weight_deconv3.to(device)
            model.weight_deconv4 = model.weight_deconv4.to(device)
            model.weight_deconv5 = model.weight_deconv5.to(device)
        # model = torch.nn.parallel.DistributedDataParallel(model).to(device)
        model = model.to(device)
        print(f"Using {args.device} device")

    ### Load Data
    test_dataset = Custom_Loader(args.datadir)
    
    # batch_size=1: `test()` assumes single-image batches when indexing
    # img_name[0] for visualization.
    test_loader = DataLoader(
        test_dataset, batch_size=1, num_workers=1, shuffle=False)

    test(test_loader, model, args)