#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sys
import argparse
import torch
import numpy as np
import glob
from tqdm import tqdm
import time
import h5py
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import json

# Configure matplotlib so the Chinese text used in figure titles renders correctly.
import matplotlib
matplotlib.rcParams['font.family'] = ['SimHei', 'Microsoft YaHei', 'sans-serif']  # prefer SimHei / Microsoft YaHei
matplotlib.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts
# Fall back gracefully if no suitable Chinese font is installed on this system.
try:
    plt.rcParams['font.sans-serif'] = ['SimHei']  # font used for Chinese labels
except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt still propagate
    print("警告: 无法设置中文字体，可能导致中文显示为乱码")

# 导入模型定义
from transformer_denoising_model import PointCloudTransformerDenoiser
from denoise_pointcloud import PointCloudDenoiser

class PointCloudEvaluator:
    """Evaluate a point-cloud denoising model on a dataset.

    Wraps a ``PointCloudDenoiser`` and computes standard denoising metrics:
    Chamfer distance, an approximate Earth Mover's Distance (greedy
    matching), mean point-to-point distance, and F-Score.
    """

    def __init__(self, model_path, device='auto'):
        """
        Point-cloud evaluator.

        Args:
            model_path: Path to the pretrained model checkpoint.
            device: Device to run on ('cuda', 'cpu' or 'auto').
        """
        # Keep the checkpoint path so evaluation results can report it.
        self.model_path = model_path
        # The denoiser loads the model internally.
        self.denoiser = PointCloudDenoiser(model_path, device)
        self.device = self.denoiser.device
        self.model = self.denoiser.model

    def chamfer_distance(self, x, y):
        """
        Compute the (squared) Chamfer distance between two point clouds.

        Args:
            x: Point cloud 1, shape [B, N, 3].
            y: Point cloud 2, shape [B, M, 3] (M may differ from N).

        Returns:
            Scalar tensor: batch mean of the symmetric Chamfer distance.
        """
        x = x.unsqueeze(2)  # [B, N, 1, 3]
        y = y.unsqueeze(1)  # [B, 1, M, 3]

        # Pairwise squared Euclidean distances.
        dist = torch.sum((x - y) ** 2, dim=-1)  # [B, N, M]

        # Nearest-neighbour distance in each direction.
        min_dist_xy = torch.min(dist, dim=2)[0]  # [B, N]
        min_dist_yx = torch.min(dist, dim=1)[0]  # [B, M]

        # Symmetric Chamfer distance per batch element, then batch mean.
        chamfer_dist = torch.mean(min_dist_xy, dim=1) + torch.mean(min_dist_yx, dim=1)  # [B]
        return torch.mean(chamfer_dist)

    def earth_mover_distance(self, x, y, iterations=50):
        """
        Estimate the Earth Mover's Distance (simplified greedy version).

        Args:
            x: Point cloud 1, shape [B, N, 3].
            y: Point cloud 2, shape [B, N, 3].
            iterations: Kept for backward compatibility; the greedy matcher
                is deterministic and single-pass, so this value is unused.

        Returns:
            Scalar tensor: mean Euclidean distance between greedily matched
            point pairs (an approximation of the true EMD).
        """
        batch_size, num_points, _ = x.shape

        # Pairwise Euclidean distances used as the assignment cost.
        cost_matrix = torch.cdist(x, y)  # [B, N, N]

        # Greedy one-to-one matching per batch element (an EMD approximation).
        indices = torch.zeros(batch_size, num_points, dtype=torch.long, device=x.device)
        for b in range(batch_size):
            indices[b] = self._greedy_match(cost_matrix[b])

        # Gather the matched points of y in the order of x.
        matched_y = torch.stack([y[b, indices[b]] for b in range(batch_size)])

        # Mean Euclidean distance over all matched pairs.
        return torch.mean(torch.sqrt(torch.sum((x - matched_y) ** 2, dim=2)))

    def _greedy_match(self, cost_matrix):
        """
        Greedy one-to-one matching (EMD approximation).

        Repeatedly picks the globally cheapest unmatched (x, y) pair.

        Args:
            cost_matrix: Cost matrix [N, N].

        Returns:
            Long tensor [N]: for each x index, the matched y index.
        """
        num_points = cost_matrix.shape[0]

        # Perf fix: the original cloned the full NxN matrix on every loop
        # iteration. Clone once and mask matched rows/columns in place,
        # which yields the exact same greedy assignment.
        masked_cost = cost_matrix.clone()
        y_indices = torch.zeros(num_points, dtype=torch.long, device=cost_matrix.device)
        inf = float('inf')

        for _ in range(num_points):
            # Globally cheapest remaining pair (ties resolve to the first
            # flat index, matching torch.argmin on the original clone).
            flat_idx = int(torch.argmin(masked_cost.view(-1)))
            x_idx, y_idx = divmod(flat_idx, num_points)

            y_indices[x_idx] = y_idx

            # Mark both points as used.
            masked_cost[x_idx, :] = inf
            masked_cost[:, y_idx] = inf

        return y_indices

    def point_to_point_distance(self, x, y):
        """
        Compute the mean point-to-point Euclidean distance.

        Assumes x[i, j] corresponds to y[i, j] (same ordering).

        Args:
            x: Point cloud 1, shape [B, N, 3].
            y: Point cloud 2, shape [B, N, 3].

        Returns:
            Scalar tensor: mean Euclidean distance over all points.
        """
        return torch.mean(torch.sqrt(torch.sum((x - y) ** 2, dim=2)))

    def f_score(self, denoised, clean, threshold=0.01):
        """
        Compute the F-Score between denoised and clean point clouds.

        Args:
            denoised: Denoised point clouds [B, N, 3].
            clean: Clean reference point clouds [B, N, 3].
            threshold: Distance below which a point counts as "close".

        Returns:
            Scalar tensor: batch mean F-Score.
        """
        batch_size, num_points, _ = denoised.shape

        f_scores = []
        for b in range(batch_size):
            # Pairwise distances between denoised and clean points.
            dist_matrix = torch.cdist(denoised[b], clean[b])  # [N, N]

            # Precision: fraction of denoised points within threshold of a clean point.
            min_dist_denoise_to_clean = torch.min(dist_matrix, dim=1)[0]  # [N]
            precision = torch.mean((min_dist_denoise_to_clean < threshold).float())

            # Recall: fraction of clean points within threshold of a denoised point.
            min_dist_clean_to_denoise = torch.min(dist_matrix, dim=0)[0]  # [N]
            recall = torch.mean((min_dist_clean_to_denoise < threshold).float())

            # Harmonic mean; guard against 0/0 when nothing is close.
            if precision + recall > 0:
                f_score = 2 * precision * recall / (precision + recall)
            else:
                f_score = torch.tensor(0.0).to(denoised.device)

            f_scores.append(f_score)

        return torch.mean(torch.stack(f_scores))

    def evaluate_dataset(self, h5_file, batch_size=32, cd_threshold=0.01):
        """
        Evaluate the model's performance on a dataset.

        Args:
            h5_file: H5 file with 'clean_points' and 'noisy_points' datasets.
            batch_size: Batch size for inference.
            cd_threshold: Threshold used by the F-Score.

        Returns:
            Tuple of (results dict, best sample index, worst sample index,
            clean point clouds, noisy point clouds).

        Raises:
            ValueError: If the dataset contains no samples.
        """
        # Load the data (an optional 'labels' dataset is ignored — it was
        # never used by the evaluation).
        with h5py.File(h5_file, 'r') as f:
            clean_points = torch.tensor(f['clean_points'][:], dtype=torch.float32)
            noisy_points = torch.tensor(f['noisy_points'][:], dtype=torch.float32)

        num_samples = len(clean_points)
        if num_samples == 0:
            raise ValueError(f"empty dataset: {h5_file}")

        self.model.eval()

        # Running totals, weighted by the number of samples per batch.
        total_chamfer_dist = 0.0
        total_emd = 0.0
        total_p2p_dist = 0.0
        total_f_score = 0.0
        total_inference_time = 0.0

        # Best / worst samples by per-sample Chamfer distance.
        best_idx = -1
        worst_idx = -1
        best_cd = float('inf')
        worst_cd = 0.0

        num_batches = (num_samples + batch_size - 1) // batch_size

        print(f"评估数据集: {h5_file}")
        print(f"样本数量: {num_samples}")

        with torch.no_grad():
            for i in tqdm(range(num_batches), desc="评估中"):
                start_idx = i * batch_size
                end_idx = min((i + 1) * batch_size, num_samples)
                batch_count = end_idx - start_idx

                batch_noisy = noisy_points[start_idx:end_idx].to(self.device)
                batch_clean = clean_points[start_idx:end_idx].to(self.device)

                # Denoise; the timing was previously measured and discarded —
                # it is now accumulated and reported.
                start_time = time.time()
                batch_denoised = self.model(batch_noisy)
                total_inference_time += time.time() - start_time

                # Batch metrics.
                batch_cd = self.chamfer_distance(batch_denoised, batch_clean).item()
                batch_emd = self.earth_mover_distance(batch_denoised, batch_clean).item()
                batch_p2p = self.point_to_point_distance(batch_denoised, batch_clean).item()
                batch_f_score = self.f_score(batch_denoised, batch_clean, threshold=cd_threshold).item()

                total_chamfer_dist += batch_cd * batch_count
                total_emd += batch_emd * batch_count
                total_p2p_dist += batch_p2p * batch_count
                total_f_score += batch_f_score * batch_count

                # Per-sample Chamfer distance to locate best/worst samples.
                for j in range(batch_count):
                    idx = start_idx + j
                    sample_cd = self.chamfer_distance(
                        batch_denoised[j].unsqueeze(0),
                        batch_clean[j].unsqueeze(0)
                    ).item()

                    if sample_cd < best_cd:
                        best_cd = sample_cd
                        best_idx = idx
                    if sample_cd > worst_cd:
                        worst_cd = sample_cd
                        worst_idx = idx

        avg_chamfer_dist = total_chamfer_dist / num_samples
        avg_emd = total_emd / num_samples
        avg_p2p_dist = total_p2p_dist / num_samples
        avg_f_score = total_f_score / num_samples

        results = {
            "chamfer_distance": avg_chamfer_dist,
            "earth_movers_distance": avg_emd,
            "point_to_point_distance": avg_p2p_dist,
            "f_score": avg_f_score,
            "best_sample_idx": int(best_idx),
            "best_sample_cd": float(best_cd),
            "worst_sample_idx": int(worst_idx),
            "worst_sample_cd": float(worst_cd),
            "num_samples": num_samples,
            # Bug fix: this key previously held the model *class name*
            # (via the private `_get_name()`); it now holds the actual
            # checkpoint path, with the class name reported separately.
            "model_path": self.model_path,
            "model_name": type(self.model).__name__,
            "avg_inference_time": total_inference_time / num_samples,
        }

        print("\n评估结果:")
        print(f"Chamfer Distance: {avg_chamfer_dist:.6f}")
        print(f"Earth Mover's Distance: {avg_emd:.6f}")
        print(f"Point-to-Point Distance: {avg_p2p_dist:.6f}")
        print(f"F-Score (threshold={cd_threshold}): {avg_f_score:.6f}")
        print(f"最佳样本: {best_idx}, CD: {best_cd:.6f}")
        print(f"最差样本: {worst_idx}, CD: {worst_cd:.6f}")

        return results, best_idx, worst_idx, clean_points, noisy_points

    def visualize_sample(self, clean_points, noisy_points, sample_idx, is_best=True, save_path=None):
        """
        Visualize one sample: noisy input, denoised output and clean reference.

        Args:
            clean_points: All clean point clouds.
            noisy_points: All noisy point clouds.
            sample_idx: Index of the sample to visualize.
            is_best: Whether this is the best-scoring sample (title only).
            save_path: Optional path to save the figure.
        """
        # Pick the requested sample.
        clean = clean_points[sample_idx].to(self.device)
        noisy = noisy_points[sample_idx].to(self.device)

        # Denoise (add/remove the batch dimension around the forward pass).
        with torch.no_grad():
            denoised = self.model(noisy.unsqueeze(0))[0]

        # Convert to numpy for plotting.
        clean_np = clean.cpu().numpy()
        noisy_np = noisy.cpu().numpy()
        denoised_np = denoised.cpu().numpy()

        fig = plt.figure(figsize=(15, 5))

        # Noisy point cloud.
        ax1 = fig.add_subplot(131, projection='3d')
        ax1.scatter(noisy_np[:, 0], noisy_np[:, 1], noisy_np[:, 2], c='red', s=1)
        ax1.set_title('噪声点云')

        # Denoised point cloud.
        ax2 = fig.add_subplot(132, projection='3d')
        ax2.scatter(denoised_np[:, 0], denoised_np[:, 1], denoised_np[:, 2], c='green', s=1)
        ax2.set_title('去噪点云')

        # Clean point cloud.
        ax3 = fig.add_subplot(133, projection='3d')
        ax3.scatter(clean_np[:, 0], clean_np[:, 1], clean_np[:, 2], c='blue', s=1)
        ax3.set_title('干净点云')

        type_str = "最佳" if is_best else "最差"
        fig.suptitle(f"{type_str}样本 (索引: {sample_idx})")

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=200, bbox_inches='tight')
            print(f"图像已保存至: {save_path}")

        plt.show()


def main():
    """CLI entry point: parse arguments, evaluate the model, save and visualize results."""
    parser = argparse.ArgumentParser(description="点云Transformer去噪模型评估")

    # Basic arguments.
    parser.add_argument('--model_path', type=str, required=True,
                        help='预训练模型路径')
    parser.add_argument('--device', type=str, default='auto',
                        choices=['auto', 'cuda', 'cpu'],
                        help='运行设备 (auto/cuda/cpu)')

    # Evaluation arguments.
    parser.add_argument('--noise_type', type=str, default='gaussian',
                       help='噪声类型: gaussian, outlier, mixed')
    parser.add_argument('--noise_level', type=str, default='0.05',
                       help='噪声级别')
    parser.add_argument('--test_data', type=str, default=None,
                       help='测试数据集路径 (h5文件)')
    parser.add_argument('--data_dir', type=str, default='./data/modelnet_noisy',
                       help='数据目录')
    parser.add_argument('--batch_size', type=int, default=32,
                       help='评估批次大小')
    parser.add_argument('--cd_threshold', type=float, default=0.01,
                       help='F-Score阈值')

    # Result arguments.
    # Bug fix: `action='store_true'` combined with `default=True` made the
    # flag impossible to turn off. Keep --save_results for compatibility and
    # add an explicit opt-out flag sharing the same destination.
    parser.add_argument('--save_results', dest='save_results', action='store_true',
                       default=True, help='保存评估结果')
    parser.add_argument('--no_save_results', dest='save_results', action='store_false',
                       help='不保存评估结果')
    parser.add_argument('--results_dir', type=str, default=None,
                       help='评估结果保存目录')
    parser.add_argument('--vis_best', action='store_true',
                       help='可视化表现最好的样本')
    parser.add_argument('--vis_worst', action='store_true',
                       help='可视化表现最差的样本')
    parser.add_argument('--num_vis_samples', type=int, default=5,
                       help='可视化样本数量')

    args = parser.parse_args()

    # Resolve the test-data path: an explicit --test_data wins; otherwise
    # build the default path from the noise type and level.
    if args.test_data is None:
        test_data_path = os.path.join(args.data_dir, f"{args.noise_type}_{args.noise_level}", "test_denoising.h5")
    else:
        test_data_path = args.test_data

    # Fail early with a clear message instead of an h5py traceback.
    if not os.path.exists(test_data_path):
        print(f"错误: 测试数据文件不存在: {test_data_path}")
        sys.exit(1)

    # Resolve the results directory.
    if args.results_dir is None:
        results_dir = os.path.join('./results/evaluation', f"{args.noise_type}_{args.noise_level}")
    else:
        results_dir = args.results_dir

    os.makedirs(results_dir, exist_ok=True)

    # Build the evaluator and run the evaluation.
    evaluator = PointCloudEvaluator(args.model_path, args.device)
    results, best_idx, worst_idx, clean_points, noisy_points = evaluator.evaluate_dataset(
        test_data_path,
        batch_size=args.batch_size,
        cd_threshold=args.cd_threshold
    )

    if args.save_results:
        _save_results(args, results, results_dir, test_data_path)

    # Visualize the best / worst samples (directory created once, on demand).
    if args.vis_best or args.vis_worst:
        vis_dir = os.path.join(results_dir, 'visualization')
        os.makedirs(vis_dir, exist_ok=True)

        if args.vis_best:
            evaluator.visualize_sample(
                clean_points,
                noisy_points,
                best_idx,
                is_best=True,
                save_path=os.path.join(vis_dir, 'best_sample.png')
            )

        if args.vis_worst:
            evaluator.visualize_sample(
                clean_points,
                noisy_points,
                worst_idx,
                is_best=False,
                save_path=os.path.join(vis_dir, 'worst_sample.png')
            )

    print("评估完成!")


def _save_results(args, results, results_dir, test_data_path):
    """Write the evaluation results as JSON and as a human-readable text file."""
    # Machine-readable JSON.
    results_file = os.path.join(results_dir, 'evaluation_results.json')
    with open(results_file, 'w') as f:
        json.dump(results, f, indent=2)
    print(f"评估结果已保存至: {results_file}")

    # Human-readable plain-text summary.
    txt_results_file = os.path.join(results_dir, 'evaluation_results.txt')
    with open(txt_results_file, 'w', encoding='utf-8') as f:
        f.write("点云去噪模型评估结果\n")
        f.write("=" * 50 + "\n\n")
        f.write(f"模型路径: {args.model_path}\n")
        f.write(f"噪声类型: {args.noise_type}\n")
        f.write(f"噪声级别: {args.noise_level}\n")
        f.write(f"测试数据: {test_data_path}\n")
        f.write(f"样本数量: {results['num_samples']}\n\n")

        f.write("评估指标:\n")
        f.write(f"Chamfer Distance: {results['chamfer_distance']:.6f}\n")
        f.write(f"Earth Mover's Distance: {results['earth_movers_distance']:.6f}\n")
        f.write(f"Point-to-Point Distance: {results['point_to_point_distance']:.6f}\n")
        f.write(f"F-Score (threshold={args.cd_threshold}): {results['f_score']:.6f}\n\n")

        f.write("样本信息:\n")
        f.write(f"最佳样本索引: {results['best_sample_idx']}, Chamfer Distance: {results['best_sample_cd']:.6f}\n")
        f.write(f"最差样本索引: {results['worst_sample_idx']}, Chamfer Distance: {results['worst_sample_cd']:.6f}\n\n")

        f.write("评估时间: " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\n")

    print(f"评估结果文本版已保存至: {txt_results_file}")


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main() 