"""
Testing scripts (modified for PointMamba-DFNet).

Authors: Hongjie Fang, modified by <your name>.
"""

import os
import yaml
import torch
import logging
import warnings
import argparse
import numpy as np
import torch.nn as nn
from tqdm import tqdm
from utils.logger import ColoredLogger
from utils.builder import ConfigBuilder
from utils.functions import to_device
from time import perf_counter

# ---------------- Logging ----------------
# Install the project's colored logger and mute noisy user warnings.
logging.setLoggerClass(ColoredLogger)
logger = logging.getLogger(__name__)
warnings.simplefilter("ignore", UserWarning)

# ---------------- Args ----------------
# CLI: a config path plus an optional checkpoint override.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
    '--cfg', '-c',
    type=str,
    default=os.path.join('configs', 'default.yaml'),
    help='path to the configuration file'
)
arg_parser.add_argument(
    '--checkpoint', '-ckpt',
    type=str,
    default=None,
    help='(optional) manually specify checkpoint path'
)
args = arg_parser.parse_args()

# ---------------- Load Config ----------------
# Parse the YAML config and hand it to the project-wide factory object.
with open(args.cfg, 'r') as config_stream:
    config_params = yaml.load(config_stream, Loader=yaml.FullLoader)

builder = ConfigBuilder(**config_params)

# ---------------- Build Model ----------------
# Instantiate the model from the config and move it onto the best device.
logger.info('Building models ...')
model = builder.get_model()
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)

# ---------------- Build Dataloader ----------------
logger.info('Building dataloaders ...')
test_dataloader = builder.get_dataloader(split='test')

# ---------------- Load Checkpoint ----------------
logger.info('Checking checkpoints ...')
stats_dir = builder.get_stats_dir()

# A path given via --checkpoint takes precedence over the default location.
if args.checkpoint:
    ckpt_path = args.checkpoint
else:
    ckpt_path = os.path.join(stats_dir, 'checkpoint.tar')

# Fail fast when no checkpoint exists at the resolved path.
if not os.path.isfile(ckpt_path):
    raise FileNotFoundError(f'❌ No checkpoint found at {ckpt_path}')

logger.info(f"Loading checkpoint: {ckpt_path}")
checkpoint = torch.load(ckpt_path, map_location=device)
try:
    model.load_state_dict(checkpoint['model_state_dict'], strict=True)
    logger.info(f"✅ Checkpoint successfully loaded (epoch {checkpoint['epoch']}).")
except RuntimeError as e:
    # Fall back to a tolerant load so partially-matching weights still apply.
    logger.warning("⚠️ Strict loading failed, attempting non-strict load (some layers may mismatch).")
    model.load_state_dict(checkpoint['model_state_dict'], strict=False)
    logger.warning(str(e))

# ---------------- Metrics ----------------
metrics = builder.get_metrics()

# ---------------- Test Function ----------------
def test():
    """Run a single evaluation pass over the test split.

    For each batch, predicts a depth map, rescales it from the normalized
    [0, 1] range back to metric units using the per-sample ``depth_min`` /
    ``depth_max`` stored in the batch, and accumulates evaluation metrics.
    Also logs timing statistics (total / per-batch / per-image / FPS).

    Returns:
        The object produced by ``metrics.get_results()``.
    """
    logger.info('Start testing process.')
    model.eval()
    metrics.clear()
    running_time = []
    total_images = 0  # total number of images processed across all batches

    with tqdm(test_dataloader) as pbar:
        for data_dict in pbar:
            batch_size = data_dict['rgb'].size(0)
            total_images += batch_size

            data_dict = to_device(data_dict, device)
            with torch.no_grad():
                # CUDA kernels launch asynchronously; synchronize before and
                # after the forward pass so perf_counter() measures the real
                # compute time rather than just the kernel-launch overhead.
                if device.type == 'cuda':
                    torch.cuda.synchronize(device)
                time_start = perf_counter()
                res = model(data_dict['rgb'], data_dict['depth'])
                if device.type == 'cuda':
                    torch.cuda.synchronize(device)
                time_end = perf_counter()

                # Undo the normalization: pred * (max - min) + min.
                depth_scale = data_dict['depth_max'] - data_dict['depth_min']
                res = res * depth_scale.reshape(-1, 1, 1) + data_dict['depth_min'].reshape(-1, 1, 1)
                data_dict['pred'] = res
                _ = metrics.evaluate_batch(data_dict, record=True)

            duration = time_end - time_start
            pbar.set_description('Batch Time: {:.4f}s'.format(duration))
            running_time.append(duration)

    # Guard against an empty dataloader: the original code would raise
    # ZeroDivisionError on total_images and np.mean([]) returns NaN.
    if running_time and total_images > 0:
        total_time = float(np.sum(running_time))
        avg_batch_time = float(np.mean(running_time))
        avg_time_per_image = total_time / total_images
        # Avoid division by zero for pathologically fast (timer-resolution) runs.
        fps = total_images / total_time if total_time > 0 else float('inf')

        logger.info('=' * 50)
        logger.info('推理时间统计:')
        logger.info(f'总测试图片数: {total_images}')
        logger.info(f'总推理时间: {total_time:.4f}s')
        logger.info(f'平均每批次时间: {avg_batch_time:.4f}s')
        logger.info(f'平均每张图片时间: {avg_time_per_image:.4f}s')
        logger.info(f'推理速度: {fps:.2f} FPS')
        logger.info('=' * 50)
    else:
        logger.warning('Test dataloader produced no batches; skipping timing statistics.')

    metrics_result = metrics.get_results()
    metrics.display_results()
    return metrics_result

# ---------------- Run ----------------
# Entry point: run a single evaluation pass when executed as a script.
if __name__ == '__main__':
    test()


# """
# Testing scripts.
#
# Authors: Hongjie Fang.
# """
# import os
# import yaml
# import torch
# import logging
# import warnings
# import argparse
# import numpy as np
# import torch.nn as nn
# from tqdm import tqdm
# from utils.logger import ColoredLogger
# from utils.builder import ConfigBuilder
# from utils.functions import to_device
# from time import perf_counter
#
#
# logging.setLoggerClass(ColoredLogger)
# logger = logging.getLogger(__name__)
# warnings.simplefilter("ignore", UserWarning)
#
# parser = argparse.ArgumentParser()
# parser.add_argument(
#     '--cfg', '-c',
#     default = os.path.join('configs', 'default.yaml'),
#     help = 'path to the configuration file',
#     type = str
# )
# args = parser.parse_args();
# cfg_filename = args.cfg
#
# with open(cfg_filename, 'r') as cfg_file:
#     cfg_params = yaml.load(cfg_file, Loader = yaml.FullLoader)
#
# builder = ConfigBuilder(**cfg_params)
#
# logger.info('Building models ...')
#
# model = builder.get_model()
#
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# model.to(device)
#
# logger.info('Building dataloaders ...')
# test_dataloader = builder.get_dataloader(split = 'test')
#
# logger.info('Checking checkpoints ...')
# stats_dir = builder.get_stats_dir()
# checkpoint_file = os.path.join(stats_dir, 'checkpoint.tar')
# if os.path.isfile(checkpoint_file):
#     checkpoint = torch.load(checkpoint_file,weights_only=False)
#     model.load_state_dict(checkpoint['model_state_dict'])
#     start_epoch = checkpoint['epoch']
#     logger.info("Checkpoint {} (epoch {}) loaded.".format(checkpoint_file, start_epoch))
# else:
#     raise FileNotFoundError('No checkpoint.')
#
# metrics = builder.get_metrics()
#
#
#
# def test():
#     logger.info('Start testing process.')
#     model.eval()
#     metrics.clear()
#     running_time = []
#     with tqdm(test_dataloader) as pbar:
#         for data_dict in pbar:
#             data_dict = to_device(data_dict, device)
#             with torch.no_grad():
#                 time_start = perf_counter()
#                 res = model(data_dict['rgb'], data_dict['depth'])
#                 time_end = perf_counter()
#                 depth_scale = data_dict['depth_max'] - data_dict['depth_min']
#                 res = res * depth_scale.reshape(-1, 1, 1) + data_dict['depth_min'].reshape(-1, 1, 1)
#                 data_dict['pred'] = res
#                 _ = metrics.evaluate_batch(data_dict, record = True)
#             duration = time_end - time_start
#             pbar.set_description('Time: {:.4f}s'.format(duration))
#             running_time.append(duration)
#     avg_running_time = np.stack(running_time).mean()
#     logger.info('Finish testing process, average running time: {:.4f}s'.format(avg_running_time))
#     metrics_result = metrics.get_results()
#     metrics.display_results()
#     return metrics_result
#
#
# if __name__ == '__main__':
#     test()
#
