#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import h5py
from tqdm import tqdm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import argparse
from torch.utils.data import Dataset, DataLoader
import time
import datetime

# Fix random seeds so results are reproducible across runs.
torch.manual_seed(42)
np.random.seed(42)

# Dataset of paired clean / noisy point clouds for denoising.
class PointCloudDenoisingDataset(Dataset):
    def __init__(self, h5_file):
        """Load a point-cloud denoising dataset from an HDF5 file.

        Args:
            h5_file: Path to an H5 file containing 'clean_points',
                'noisy_points' and 'labels' datasets.
        """
        self.h5_file = h5_file

        # Read everything into memory up front; the file handle is closed
        # as soon as the tensors have been built.
        with h5py.File(h5_file, 'r') as f:
            self.clean_points = torch.tensor(f['clean_points'][:], dtype=torch.float32)
            self.noisy_points = torch.tensor(f['noisy_points'][:], dtype=torch.float32)
            self.labels = torch.tensor(f['labels'][:], dtype=torch.long)

        print(f"加载数据集: {h5_file}")
        print(f"  - 样本数量: {len(self.clean_points)}")
        print(f"  - 点云形状: {self.clean_points.shape}")

    def __len__(self):
        # Number of samples = size of the first tensor dimension.
        return self.clean_points.shape[0]

    def __getitem__(self, idx):
        # One sample: ground-truth cloud, its corrupted version, class label.
        return {
            'clean': self.clean_points[idx],
            'noisy': self.noisy_points[idx],
            'label': self.labels[idx],
        }

# Multi-head scaled dot-product self-attention.
class SelfAttention(nn.Module):
    def __init__(self, dim, heads=4, dim_head=64, dropout=0.):
        """Multi-head self-attention block.

        Args:
            dim: Input feature dimension.
            heads: Number of attention heads.
            dim_head: Dimension of each head.
            dropout: Dropout rate applied after the output projection.
        """
        super().__init__()
        inner_dim = heads * dim_head
        # The output projection is only needed when the concatenated head
        # outputs do not already match the input dimension.
        needs_projection = heads != 1 or dim_head != dim

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.attend = nn.Softmax(dim=-1)
        # Single linear layer produces Q, K and V in one pass.
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)

        if needs_projection:
            self.to_out = nn.Sequential(
                nn.Linear(inner_dim, dim),
                nn.Dropout(dropout),
            )
        else:
            self.to_out = nn.Identity()

    def forward(self, x):
        batch, num_tokens, _ = x.shape
        # Split into per-head [batch, heads, tokens, dim_head] tensors.
        q, k, v = (
            t.reshape(batch, num_tokens, self.heads, -1).transpose(1, 2)
            for t in self.to_qkv(x).chunk(3, dim=-1)
        )

        # Scaled dot-product attention weights.
        weights = self.attend(q @ k.transpose(-1, -2) * self.scale)

        # Weighted sum of values, heads merged back into one dimension.
        merged = (weights @ v).transpose(1, 2).reshape(batch, num_tokens, -1)
        return self.to_out(merged)

# Pre-norm Transformer encoder layer: attention then feed-forward MLP,
# each with a residual connection.
class TransformerEncoderLayer(nn.Module):
    def __init__(self, dim, heads=4, dim_head=64, mlp_dim=256, dropout=0.):
        """Single Transformer encoder layer.

        Args:
            dim: Input feature dimension.
            heads: Number of attention heads.
            dim_head: Dimension of each head.
            mlp_dim: Hidden dimension of the feed-forward MLP.
            dropout: Dropout rate.
        """
        super().__init__()

        self.attention = SelfAttention(dim, heads, dim_head, dropout)
        self.norm1 = nn.LayerNorm(dim)

        # Position-wise feed-forward network.
        self.mlp = nn.Sequential(
            nn.Linear(dim, mlp_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(mlp_dim, dim),
            nn.Dropout(dropout),
        )
        self.norm2 = nn.LayerNorm(dim)

    def forward(self, x):
        # Pre-norm residual blocks: normalize, transform, add back.
        attended = x + self.attention(self.norm1(x))
        return attended + self.mlp(self.norm2(attended))

# Transformer-based point-cloud denoising model.
class PointCloudTransformerDenoiser(nn.Module):
    def __init__(self, point_dim=3, embed_dim=128, depth=4, heads=4, mlp_dim=256, dropout=0.1):
        """Transformer-based point-cloud denoiser.

        Args:
            point_dim: Coordinate dimension per point (usually 3 for xyz).
            embed_dim: Embedding dimension.
            depth: Number of Transformer encoder layers.
            heads: Number of attention heads.
            mlp_dim: Hidden dimension of the encoder MLPs.
            dropout: Dropout rate.
        """
        super().__init__()

        half_dim = embed_dim // 2

        # Per-point feature embedding (applied independently to each point).
        self.point_embedding = nn.Sequential(
            nn.Linear(point_dim, half_dim),
            nn.ReLU(),
            nn.Linear(half_dim, embed_dim),
        )

        # Stack of Transformer encoder layers operating on point tokens.
        self.transformer_encoders = nn.ModuleList(
            TransformerEncoderLayer(embed_dim, heads, embed_dim // heads, mlp_dim, dropout)
            for _ in range(depth)
        )

        # Project features back to coordinates (the denoised points).
        self.point_reconstruction = nn.Sequential(
            nn.Linear(embed_dim, half_dim),
            nn.ReLU(),
            nn.Linear(half_dim, point_dim),
        )

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Xavier-uniform weights and zero biases for every linear layer.
        if not isinstance(m, nn.Linear):
            return
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)

    def forward(self, points):
        """Denoise a batch of point clouds.

        Args:
            points: Noisy point clouds of shape [batch_size, num_points, point_dim].

        Returns:
            Denoised point clouds of the same shape.
        """
        features = self.point_embedding(points)
        for layer in self.transformer_encoders:
            features = layer(features)
        return self.point_reconstruction(features)

# 点云去噪训练器
class PointCloudDenoisingTrainer:
    def __init__(self, model, device, train_loader, val_loader, learning_rate=1e-4, weight_decay=1e-5):
        """
        点云去噪训练器
        
        Args:
            model: 去噪模型
            device: 训练设备 ('cuda' 或 'cpu')
            train_loader: 训练数据加载器
            val_loader: 验证数据加载器
            learning_rate: 学习率
            weight_decay: 权重衰减
        """
        self.model = model
        self.device = device
        self.train_loader = train_loader
        self.val_loader = val_loader
        
        # 优化器和损失函数
        self.optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode='min', factor=0.5, patience=5, min_lr=1e-6, verbose=True
        )
        
        # 创建保存目录
        self.save_dir = os.path.join('./results', 'transformer_denoising_' + datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
        os.makedirs(self.save_dir, exist_ok=True)
        
        # 记录训练过程
        self.train_losses = []
        self.val_losses = []
        self.best_val_loss = float('inf')
    
    def chamfer_distance(self, x, y):
        """
        计算Chamfer距离
        
        Args:
            x: 点云 1, 形状为 [B, N, 3]
            y: 点云 2, 形状为 [B, N, 3]
            
        Returns:
            Chamfer距离
        """
        x = x.unsqueeze(2)  # [B, N, 1, 3]
        y = y.unsqueeze(1)  # [B, 1, N, 3]
        
        # 计算欧氏距离
        dist = torch.sum((x - y) ** 2, dim=-1)  # [B, N, N]
        
        # 计算最小距离
        min_dist_xy = torch.min(dist, dim=2)[0]  # [B, N]
        min_dist_yx = torch.min(dist, dim=1)[0]  # [B, N]
        
        # 计算Chamfer距离
        chamfer_dist = torch.mean(min_dist_xy, dim=1) + torch.mean(min_dist_yx, dim=1)  # [B]
        
        return torch.mean(chamfer_dist)
    
    def train_epoch(self, epoch):
        """
        训练一个epoch
        
        Args:
            epoch: 当前epoch
            
        Returns:
            平均训练损失
        """
        self.model.train()
        total_loss = 0.0
        
        pbar = tqdm(self.train_loader, desc=f"Epoch {epoch+1} Training")
        for batch in pbar:
            # 获取数据
            noisy_points = batch['noisy'].to(self.device)
            clean_points = batch['clean'].to(self.device)
            
            # 前向传播
            self.optimizer.zero_grad()
            denoised_points = self.model(noisy_points)
            
            # 计算损失
            chamfer_loss = self.chamfer_distance(denoised_points, clean_points)
            l2_loss = F.mse_loss(denoised_points, clean_points)
            
            # 总损失 (组合Chamfer距离和L2距离)
            loss = chamfer_loss + 0.5 * l2_loss
            
            # 反向传播
            loss.backward()
            self.optimizer.step()
            
            # 更新进度条
            total_loss += loss.item()
            pbar.set_postfix(loss=f"{loss.item():.4f}")
        
        avg_loss = total_loss / len(self.train_loader)
        self.train_losses.append(avg_loss)
        
        return avg_loss
    
    def validate(self, epoch):
        """
        在验证集上评估模型
        
        Args:
            epoch: 当前epoch
            
        Returns:
            平均验证损失
        """
        self.model.eval()
        total_loss = 0.0
        
        with torch.no_grad():
            pbar = tqdm(self.val_loader, desc=f"Epoch {epoch+1} Validation")
            for batch in pbar:
                # 获取数据
                noisy_points = batch['noisy'].to(self.device)
                clean_points = batch['clean'].to(self.device)
                
                # 前向传播
                denoised_points = self.model(noisy_points)
                
                # 计算损失
                chamfer_loss = self.chamfer_distance(denoised_points, clean_points)
                l2_loss = F.mse_loss(denoised_points, clean_points)
                
                # 总损失
                loss = chamfer_loss + 0.5 * l2_loss
                
                # 更新进度条
                total_loss += loss.item()
                pbar.set_postfix(loss=f"{loss.item():.4f}")
        
        avg_loss = total_loss / len(self.val_loader)
        self.val_losses.append(avg_loss)
        
        # 更新学习率
        self.scheduler.step(avg_loss)
        
        # 保存最佳模型
        if avg_loss < self.best_val_loss:
            self.best_val_loss = avg_loss
            self.save_model(os.path.join(self.save_dir, 'best_model.pth'))
            print(f"保存最佳模型，验证损失: {avg_loss:.4f}")
        
        return avg_loss
    
    def train(self, num_epochs):
        """
        训练模型
        
        Args:
            num_epochs: 训练轮数
        """
        print(f"开始训练，总共 {num_epochs} 个epochs...")
        
        # 记录开始时间
        start_time = time.time()
        
        for epoch in range(num_epochs):
            # 训练一个epoch
            train_loss = self.train_epoch(epoch)
            
            # 验证
            val_loss = self.validate(epoch)
            
            # 打印进度
            print(f"Epoch {epoch+1}/{num_epochs} - 训练损失: {train_loss:.4f}, 验证损失: {val_loss:.4f}")
            
            # 保存当前模型
            if (epoch + 1) % 10 == 0:
                self.save_model(os.path.join(self.save_dir, f'model_epoch_{epoch+1}.pth'))
        
        # 保存最终模型
        self.save_model(os.path.join(self.save_dir, 'final_model.pth'))
        
        # 计算训练时间
        train_time = time.time() - start_time
        print(f"训练完成! 总用时: {train_time/60:.2f} 分钟")
        
        # 绘制训练曲线
        self.plot_training_curves()
    
    def save_model(self, path):
        """
        保存模型
        
        Args:
            path: 保存路径
        """
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'train_losses': self.train_losses,
            'val_losses': self.val_losses,
            'best_val_loss': self.best_val_loss
        }, path)
    
    def load_model(self, path):
        """
        加载模型
        
        Args:
            path: 模型路径
        """
        checkpoint = torch.load(path)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        self.train_losses = checkpoint['train_losses']
        self.val_losses = checkpoint['val_losses']
        self.best_val_loss = checkpoint['best_val_loss']
    
    def plot_training_curves(self):
        """绘制训练曲线"""
        plt.figure(figsize=(12, 5))
        
        plt.subplot(1, 2, 1)
        plt.plot(self.train_losses, label='训练损失')
        plt.plot(self.val_losses, label='验证损失')
        plt.title('训练和验证损失')
        plt.xlabel('Epoch')
        plt.ylabel('损失')
        plt.legend()
        plt.grid(True)
        
        plt.subplot(1, 2, 2)
        plt.plot(self.train_losses, label='训练损失')
        plt.plot(self.val_losses, label='验证损失')
        plt.title('训练和验证损失 (对数尺度)')
        plt.xlabel('Epoch')
        plt.ylabel('损失 (log)')
        plt.yscale('log')
        plt.legend()
        plt.grid(True)
        
        plt.tight_layout()
        plt.savefig(os.path.join(self.save_dir, 'training_curves.png'), dpi=200)
        plt.show()

# Renders side-by-side clean / noisy / denoised point clouds.
class PointCloudVisualizer:
    def __init__(self, model, device):
        """Visualizer for denoising results.

        Args:
            model: Trained denoising model.
            device: Device to run inference on ('cuda' or 'cpu').
        """
        self.model = model
        self.device = device
        # Inference only: switch to eval mode once, up front.
        self.model.eval()

    def visualize_results(self, h5_file, num_samples=5, save_dir='./results/visualization'):
        """Render denoising results for randomly chosen test samples.

        Args:
            h5_file: H5 file holding the test set.
            num_samples: Number of samples to render.
            save_dir: Directory the figures are written to.
        """
        os.makedirs(save_dir, exist_ok=True)

        # Load the test clouds.
        with h5py.File(h5_file, 'r') as f:
            clean_points = torch.tensor(f['clean_points'][:], dtype=torch.float32)
            noisy_points = torch.tensor(f['noisy_points'][:], dtype=torch.float32)
            labels = f['labels'][:]

        # Optional "index: name" mapping stored next to the dataset file.
        categories = {}
        category_file = os.path.join(os.path.dirname(h5_file), "categories.txt")
        if os.path.exists(category_file):
            with open(category_file, 'r') as f:
                for line in f:
                    parts = line.strip().split(': ')
                    if len(parts) == 2:
                        categories[int(parts[0])] = parts[1]

        # Pick random samples without replacement.
        total = len(clean_points)
        indices = np.random.choice(total, min(num_samples, total), replace=False)

        for i, idx in enumerate(indices):
            clean = clean_points[idx].unsqueeze(0).to(self.device)
            noisy = noisy_points[idx].unsqueeze(0).to(self.device)
            # NOTE(review): assumes labels have shape (N, 1) — verify
            # against the dataset generator.
            label = labels[idx][0]
            category_name = categories.get(int(label), f"类别 {int(label)}")

            # Run the denoiser.
            with torch.no_grad():
                denoised = self.model(noisy)

            # One panel per cloud: (points, color, title).
            panels = [
                (clean.squeeze(0).cpu().numpy(), 'blue', f'原始点云 ({category_name})'),
                (noisy.squeeze(0).cpu().numpy(), 'red', f'带噪声点云'),
                (denoised.squeeze(0).cpu().numpy(), 'green', f'去噪点云'),
            ]

            fig = plt.figure(figsize=(15, 5))
            for col, (pts, color, title) in enumerate(panels):
                ax = fig.add_subplot(131 + col, projection='3d')
                ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=2, c=color, alpha=0.8)
                ax.set_title(title, fontsize=12)
                ax.set_xlabel('X', fontsize=10)
                ax.set_ylabel('Y', fontsize=10)
                ax.set_zlabel('Z', fontsize=10)
                ax.view_init(elev=30, azim=45)
                ax.set_axis_off()

            plt.tight_layout()
            plt.savefig(os.path.join(save_dir, f'result_sample_{i}_{category_name}.png'), dpi=200, bbox_inches='tight')
            plt.close()

            print(f"处理样本 {i+1}/{num_samples}，类别: {category_name}")

# Script entry point: build data/model/trainer and dispatch on --mode.
def main(args):
    """Run training and/or visualization depending on ``args.mode``.

    Args:
        args: Parsed command line arguments (see the argparse setup below).

    Raises:
        ValueError: If mode 'vis' is requested without ``--model_path``.
    """
    # Device selection; --cpu forces CPU even when CUDA is available.
    device = torch.device('cuda' if torch.cuda.is_available() and not args.cpu else 'cpu')
    print(f"使用设备: {device}")

    # Datasets and loaders.
    train_dataset = PointCloudDenoisingDataset(args.train_data)
    val_dataset = PointCloudDenoisingDataset(args.val_data)

    # Pinned host memory only helps when copying to a CUDA device.
    pin_memory = device.type == 'cuda'
    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=pin_memory
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=pin_memory
    )

    # Model.
    model = PointCloudTransformerDenoiser(
        point_dim=3,
        embed_dim=args.embed_dim,
        depth=args.depth,
        heads=args.heads,
        mlp_dim=args.mlp_dim,
        dropout=args.dropout
    ).to(device)
    print(f"模型参数数量: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")

    # Trainer.
    trainer = PointCloudDenoisingTrainer(
        model=model,
        device=device,
        train_loader=train_loader,
        val_loader=val_loader,
        learning_rate=args.learning_rate,
        weight_decay=args.weight_decay
    )

    def run_visualization():
        # Shared by the 'vis' and 'both' modes.
        visualizer = PointCloudVisualizer(model, device)
        visualizer.visualize_results(
            args.test_data,
            num_samples=args.vis_samples,
            save_dir=os.path.join('./results/visualization', os.path.basename(os.path.dirname(args.test_data)))
        )

    if args.mode == 'train':
        trainer.train(args.epochs)
    elif args.mode == 'vis':
        # Fail fast with a clear message instead of crashing inside
        # torch.load when the default model_path (None) slips through.
        if args.model_path is None:
            raise ValueError("--model_path is required when --mode=vis")
        trainer.load_model(args.model_path)
        run_visualization()
    elif args.mode == 'both':
        trainer.train(args.epochs)
        run_visualization()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="点云Transformer去噪模型")
    
    # 数据集参数
    parser.add_argument('--train_data', type=str, default='./data/modelnet_noisy/gaussian_0.05/train_denoising.h5', help='训练数据集路径')
    parser.add_argument('--val_data', type=str, default='./data/modelnet_noisy/gaussian_0.05/test_denoising.h5', help='验证数据集路径')
    parser.add_argument('--test_data', type=str, default='./data/modelnet_noisy/gaussian_0.05/test_denoising.h5', help='测试数据集路径')
    
    # 模型参数
    parser.add_argument('--embed_dim', type=int, default=128, help='嵌入维度')
    parser.add_argument('--depth', type=int, default=4, help='Transformer编码器层数')
    parser.add_argument('--heads', type=int, default=4, help='注意力头数')
    parser.add_argument('--mlp_dim', type=int, default=256, help='MLP隐藏层维度')
    parser.add_argument('--dropout', type=float, default=0.1, help='Dropout比率')
    
    # 训练参数
    parser.add_argument('--batch_size', type=int, default=16, help='批次大小')
    parser.add_argument('--epochs', type=int, default=100, help='训练轮数')
    parser.add_argument('--learning_rate', type=float, default=1e-4, help='学习率')
    parser.add_argument('--weight_decay', type=float, default=1e-5, help='权重衰减')
    
    # 其他参数
    parser.add_argument('--mode', type=str, choices=['train', 'vis', 'both'], default='both', help='运行模式: 训练/可视化/训练+可视化')
    parser.add_argument('--model_path', type=str, default=None, help='模型路径 (用于可视化)')
    parser.add_argument('--vis_samples', type=int, default=10, help='可视化的样本数量')
    parser.add_argument('--cpu', action='store_true', help='强制使用CPU')
    
    args = parser.parse_args()
    
    # 创建结果目录
    os.makedirs('./results', exist_ok=True)
    os.makedirs('./results/visualization', exist_ok=True)
    
    # 运行
    main(args) 