#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sys
import argparse
import torch
import numpy as np
import glob
from tqdm import tqdm
import time
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pyntcloud import PyntCloud

# Import the model definition
from transformer_denoising_model import PointCloudTransformerDenoiser

class PointCloudDenoiser:
    """Inference-time point cloud denoiser.

    Wraps a pretrained ``PointCloudTransformerDenoiser``: loads the
    checkpoint once, then denoises single point cloud files or whole
    directories, with optional before/after visualization.
    """

    def __init__(self, model_path, device='auto'):
        """
        Args:
            model_path: path to the pretrained checkpoint file
            device: execution device ('cuda', 'cpu' or 'auto')
        """
        # Resolve the execution device; 'auto' prefers CUDA when available.
        if device == 'auto':
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = torch.device(device)

        print(f"使用设备: {self.device}")

        # Load the pretrained network and freeze it in evaluation mode
        # (disables dropout for deterministic inference).
        self.model = self._load_model(model_path)
        self.model.eval()

    def _load_model(self, model_path):
        """
        Load the pretrained model from a checkpoint file.

        Args:
            model_path: checkpoint path

        Returns:
            The model with restored weights, moved to ``self.device``.

        Raises:
            FileNotFoundError: if the checkpoint does not exist.
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件 {model_path} 不存在")

        print(f"加载模型: {model_path}")

        # Hyper-parameters must match the ones used at training time.
        model = PointCloudTransformerDenoiser(
            point_dim=3,
            embed_dim=128,
            depth=4,
            heads=4,
            mlp_dim=256,
            dropout=0.1
        ).to(self.device)

        # SECURITY NOTE: torch.load unpickles arbitrary objects — only load
        # checkpoints obtained from trusted sources.
        checkpoint = torch.load(model_path, map_location=self.device)
        model.load_state_dict(checkpoint['model_state_dict'])

        print(f"模型加载成功! (验证损失: {checkpoint.get('best_val_loss', 'N/A')})")
        return model

    def _load_point_cloud(self, file_path):
        """
        Load a point cloud file.

        Args:
            file_path: path to the point cloud file

        Returns:
            ``(points, cloud)``: xyz coordinates as an (N, 3) numpy array and
            the PyntCloud object, or ``(None, None)`` on failure.
        """
        try:
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"文件 {file_path} 不存在")

            cloud = PyntCloud.from_file(file_path)
            points = cloud.points[['x', 'y', 'z']].values

            return points, cloud
        except Exception as e:
            # Best-effort loader: report the problem and signal failure
            # to the caller instead of aborting a batch run.
            print(f"加载点云文件 {file_path} 时出错: {e}")
            return None, None

    def _save_point_cloud(self, file_path, points, original_cloud=None):
        """
        Save a point cloud to a file.

        Args:
            file_path: output file path
            points: (N, 3) numpy array of xyz coordinates
            original_cloud: original PyntCloud object (used to carry over
                non-xyz attributes when the point count is unchanged)
        """
        try:
            # BUGFIX: os.makedirs('') raises FileNotFoundError, so only create
            # the directory when the path actually has a directory component.
            out_dir = os.path.dirname(file_path)
            if out_dir:
                os.makedirs(out_dir, exist_ok=True)

            df = pd.DataFrame(data=points, columns=['x', 'y', 'z'])

            # Copy extra per-point attributes (color, normals, ...) only when
            # the point count still matches the original cloud.
            if original_cloud is not None and len(original_cloud.points) == len(points):
                for col in original_cloud.points.columns:
                    if col not in ('x', 'y', 'z'):
                        df[col] = original_cloud.points[col].values

            cloud = PyntCloud(df)
            cloud.to_file(file_path)
            print(f"点云已保存至: {file_path}")
        except Exception as e:
            print(f"保存点云文件 {file_path} 时出错: {e}")

    def _preprocess_points(self, points, target_num_points=2048):
        """
        Normalize and resample a point cloud for the network.

        Centers the cloud at the origin, scales it into the unit sphere and
        resamples it to exactly ``target_num_points`` points.

        Args:
            points: (N, 3) numpy array of xyz coordinates
            target_num_points: number of points the model expects
                (default 2048, matching the training configuration)

        Returns:
            ``(tensor, center, scale)``: a float32 tensor of shape
            (target_num_points, 3), plus the centroid and scale factor
            needed to undo the normalization later.
        """
        center = np.mean(points, axis=0)
        points = points - center
        scale = np.max(np.linalg.norm(points, axis=1))
        # BUGFIX: a degenerate cloud (all points identical) has scale 0 and
        # would produce NaNs on division; fall back to a neutral scale.
        if scale == 0:
            scale = 1.0
        normalized_points = points / scale

        if len(normalized_points) > target_num_points:
            # Too many points: random subsample without replacement.
            indices = np.random.choice(len(normalized_points), target_num_points, replace=False)
            sampled_points = normalized_points[indices]
        elif len(normalized_points) < target_num_points:
            # Too few points: repeat them until the target count is reached.
            repeat_factor = int(np.ceil(target_num_points / len(normalized_points)))
            repeated_points = np.repeat(normalized_points, repeat_factor, axis=0)
            sampled_points = repeated_points[:target_num_points]
        else:
            sampled_points = normalized_points

        return torch.tensor(sampled_points, dtype=torch.float32), center, scale

    def _postprocess_points(self, denoised_points, center, scale, original_points_shape):
        """
        Undo normalization and restore the original point count.

        Args:
            denoised_points: denoised point tensor of shape (M, 3)
            center: centroid of the original cloud
            scale: scale factor of the original cloud
            original_points_shape: shape of the original cloud, (N, 3)

        Returns:
            (N, 3) numpy array in the original coordinate frame.
        """
        # detach() makes this safe even when called outside torch.no_grad().
        points = denoised_points.detach().cpu().numpy()

        # Undo the unit-sphere normalization.
        points = points * scale + center

        # Resample back to the original point count if it differs.
        if len(points) != original_points_shape[0]:
            if len(points) > original_points_shape[0]:
                # Random subsample down to the original count.
                indices = np.random.choice(len(points), original_points_shape[0], replace=False)
                points = points[indices]
            else:
                # Pad by repeating points up to the original count.
                repeat_factor = int(np.ceil(original_points_shape[0] / len(points)))
                repeated_points = np.repeat(points, repeat_factor, axis=0)
                points = repeated_points[:original_points_shape[0]]

        return points

    def denoise_file(self, input_file, output_file):
        """
        Denoise a single point cloud file.

        Args:
            input_file: input point cloud path
            output_file: output point cloud path

        Returns:
            True on success, False if the input could not be loaded.
        """
        points, original_cloud = self._load_point_cloud(input_file)
        if points is None:
            return False

        original_shape = points.shape

        # Normalize / resample to the fixed size the network expects.
        normalized_points, center, scale = self._preprocess_points(points)

        # Add the batch dimension and move to the model's device.
        batch_points = normalized_points.unsqueeze(0).to(self.device)

        # Forward pass without gradient tracking.
        with torch.no_grad():
            start_time = time.time()
            denoised_batch_points = self.model(batch_points)
            end_time = time.time()

        print(f"去噪用时: {end_time - start_time:.4f} 秒")

        # Map back to the original coordinate frame and point count.
        denoised_points = self._postprocess_points(
            denoised_batch_points[0],
            center,
            scale,
            original_shape
        )

        self._save_point_cloud(output_file, denoised_points, original_cloud)

        return True

    def denoise_directory(self, input_dir, output_dir, batch_size=1):
        """
        Denoise every point cloud file found in a directory.

        Args:
            input_dir: input directory
            output_dir: output directory
            batch_size: kept for interface compatibility; files are
                currently denoised one at a time.

        Returns:
            Number of files processed successfully.
        """
        os.makedirs(output_dir, exist_ok=True)

        # Collect supported point cloud files (non-recursive).
        point_cloud_files = []
        for ext in ('*.ply', '*.pcd', '*.xyz', '*.off', '*.obj'):
            point_cloud_files.extend(glob.glob(os.path.join(input_dir, ext)))
        # Sort for a deterministic processing order across platforms.
        point_cloud_files.sort()

        if not point_cloud_files:
            print(f"在 {input_dir} 中未找到点云文件")
            return 0

        print(f"找到 {len(point_cloud_files)} 个点云文件，开始批量去噪...")

        success_count = 0
        for i, input_file in enumerate(tqdm(point_cloud_files)):
            # Mirror the input layout under the output directory.
            rel_path = os.path.relpath(input_file, input_dir)
            output_file = os.path.join(output_dir, rel_path)

            if self.denoise_file(input_file, output_file):
                success_count += 1

            # Periodic progress report (every 10 files and at the end).
            if (i + 1) % 10 == 0 or (i + 1) == len(point_cloud_files):
                print(f"已处理: {i+1}/{len(point_cloud_files)} 文件")

        print(f"批量去噪完成！成功处理 {success_count}/{len(point_cloud_files)} 个文件")
        return success_count

    def visualize_comparison(self, input_file, output_file, title=None, save_path=None, point_size=1.0):
        """
        Show a side-by-side 3D scatter comparison of the noisy input and
        the denoised output.

        Args:
            input_file: input (noisy) point cloud path
            output_file: output (denoised) point cloud path
            title: optional figure title
            save_path: optional path to save the figure as an image
            point_size: scatter point size
        """
        input_points, _ = self._load_point_cloud(input_file)
        output_points, _ = self._load_point_cloud(output_file)

        if input_points is None or output_points is None:
            print("无法加载点云进行可视化")
            return

        fig = plt.figure(figsize=(15, 7))

        # Left panel: original noisy cloud, colored by height (z).
        ax1 = fig.add_subplot(121, projection='3d')
        ax1.scatter(input_points[:, 0], input_points[:, 1], input_points[:, 2],
                    c=input_points[:, 2], cmap='viridis', s=point_size)
        ax1.set_title('原始噪声点云')
        ax1.set_xlabel('X')
        ax1.set_ylabel('Y')
        ax1.set_zlabel('Z')

        # Right panel: denoised cloud.
        ax2 = fig.add_subplot(122, projection='3d')
        ax2.scatter(output_points[:, 0], output_points[:, 1], output_points[:, 2],
                    c=output_points[:, 2], cmap='plasma', s=point_size)
        ax2.set_title('去噪后点云')
        ax2.set_xlabel('X')
        ax2.set_ylabel('Y')
        ax2.set_zlabel('Z')

        if title:
            fig.suptitle(title)
        else:
            fig.suptitle(f'点云去噪对比 - {os.path.basename(input_file)}')

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=200, bbox_inches='tight')
            print(f"对比图已保存至：{save_path}")

        plt.show()


def _build_arg_parser():
    """Build the CLI argument parser for the denoising tool."""
    parser = argparse.ArgumentParser(description="点云Transformer去噪工具")

    # Core options
    parser.add_argument('--model_path', type=str, required=True,
                        help='预训练模型路径')
    parser.add_argument('--device', type=str, default='auto',
                        choices=['auto', 'cuda', 'cpu'],
                        help='运行设备 (auto/cuda/cpu)')

    # Single-file mode
    parser.add_argument('--input_file', type=str, default=None,
                        help='输入点云文件路径 (单文件模式)')
    parser.add_argument('--output_file', type=str, default=None,
                        help='输出去噪点云文件路径 (单文件模式)')

    # Batch (directory) mode
    parser.add_argument('--input_dir', type=str, default=None,
                        help='输入点云目录 (批处理模式)')
    parser.add_argument('--output_dir', type=str, default=None,
                        help='输出去噪点云目录 (批处理模式)')
    parser.add_argument('--batch_size', type=int, default=1,
                        help='批处理大小')

    # Visualization
    parser.add_argument('--visualize', action='store_true',
                        help='可视化去噪结果')
    parser.add_argument('--save_comparison', action='store_true',
                        help='保存对比可视化')
    parser.add_argument('--point_size', type=float, default=1.0,
                        help='可视化时的点大小')

    return parser


def _run_single_file(denoiser, args):
    """Denoise one file and optionally visualize the before/after pair."""
    print(f"处理单个点云文件: {args.input_file}")
    success = denoiser.denoise_file(args.input_file, args.output_file)

    if success and (args.visualize or args.save_comparison):
        save_path = None
        if args.save_comparison:
            # Store comparison images in a 'visualization' folder next to
            # the denoised output.
            vis_dir = os.path.join(os.path.dirname(args.output_file), "visualization")
            os.makedirs(vis_dir, exist_ok=True)
            base_name = os.path.splitext(os.path.basename(args.input_file))[0]
            save_path = os.path.join(vis_dir, f"{base_name}_comparison.png")

        denoiser.visualize_comparison(
            args.input_file,
            args.output_file,
            save_path=save_path,
            point_size=args.point_size
        )


def _run_batch(denoiser, args):
    """Denoise every point cloud in a directory; optionally save comparisons."""
    print(f"批量处理点云文件: {args.input_dir} -> {args.output_dir}")
    success_count = denoiser.denoise_directory(
        args.input_dir,
        args.output_dir,
        args.batch_size
    )

    if success_count > 0 and args.save_comparison:
        # Generate comparison figures for the first 5 processed files only,
        # to keep batch runs fast.
        vis_dir = os.path.join(args.output_dir, "visualization")
        os.makedirs(vis_dir, exist_ok=True)

        processed_files = []
        for ext in ['*.ply', '*.pcd', '*.xyz']:
            processed_files.extend(glob.glob(os.path.join(args.output_dir, ext)))

        for i, output_file in enumerate(processed_files[:5]):
            if os.path.exists(output_file):
                # Recover the matching input file from the mirrored layout.
                rel_path = os.path.relpath(output_file, args.output_dir)
                input_file = os.path.join(args.input_dir, rel_path)

                if os.path.exists(input_file):
                    base_name = os.path.splitext(os.path.basename(input_file))[0]
                    save_path = os.path.join(vis_dir, f"{base_name}_comparison.png")

                    denoiser.visualize_comparison(
                        input_file,
                        output_file,
                        save_path=save_path,
                        point_size=args.point_size
                    )


def main():
    """CLI entry point: parse arguments and run single-file or batch mode."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    # At least one of the two modes must be selected. When both are given,
    # single-file mode takes precedence (matches historical behavior).
    single_file_mode = args.input_file is not None
    batch_mode = args.input_dir is not None

    if not single_file_mode and not batch_mode:
        parser.error("请指定 --input_file 或 --input_dir")

    if single_file_mode and args.output_file is None:
        # Default output: 'denoised_' prefix next to the input file.
        input_path = Path(args.input_file)
        args.output_file = str(input_path.parent / f"denoised_{input_path.name}")

    if batch_mode and args.output_dir is None:
        # Default output: 'denoised_output' subdirectory of the input dir.
        args.output_dir = os.path.join(args.input_dir, "denoised_output")

    denoiser = PointCloudDenoiser(args.model_path, args.device)

    if single_file_mode:
        _run_single_file(denoiser, args)
    elif batch_mode:
        _run_batch(denoiser, args)

    print("处理完成!")


# Script entry point: run the CLI only when executed directly.
if __name__ == "__main__":
    main() 