import laspy
import numpy as np
import open3d as o3d
from sklearn.cluster import DBSCAN
from sklearn.ensemble import RandomForestClassifier
from scipy.spatial import KDTree
import warnings
import os
# Globally silence all warnings (e.g. sklearn/open3d deprecation noise).
# NOTE(review): this also hides genuinely useful warnings — confirm intended.
warnings.filterwarnings("ignore")

def load_las_file(file_path):
    """
    Load a LAS file and return its point cloud data.

    Parameters
    ----------
    file_path : str
        Path to the .las file.

    Returns
    -------
    tuple
        (points, intensity, classification): ``points`` is an (N, 3) array
        of XYZ coordinates; ``intensity`` and ``classification`` are
        length-N arrays, zero-filled when the attribute is absent.
    """
    print(f"正在加载LAS文件: {file_path}")
    las = laspy.read(file_path)

    # Stack the per-axis coordinate arrays into one (N, 3) matrix.
    points = np.column_stack((las.x, las.y, las.z))
    n = len(points)

    # Intensity and classification are optional LAS attributes.
    intensity = np.array(las.intensity) if hasattr(las, 'intensity') else np.zeros(n)
    classification = np.array(las.classification) if hasattr(las, 'classification') else np.zeros(n)

    print(f"加载了 {n} 个点")
    return points, intensity, classification

def estimate_local_terrain(points, search_radius=15.0, min_neighbors=50):
    """
    Estimate the local ground elevation around every point.

    For each point, all neighbors within ``search_radius`` (XY plane only)
    are collected and the 5th height percentile of that neighborhood is
    used as the local ground estimate. Points with fewer than
    ``min_neighbors`` neighbors fall back to the global 10th height
    percentile.

    Parameters
    ----------
    points : ndarray of shape (N, 3)
        XYZ coordinates.
    search_radius : float
        Neighbor search radius in the XY plane.
    min_neighbors : int
        Minimum neighbor count before falling back to the global estimate.

    Returns
    -------
    ndarray of shape (N,)
        Estimated ground height for each input point.
    """
    print("估计局部地形...")
    tree = KDTree(points[:, :2])  # build the tree on XY coordinates only

    # Hoisted out of the loop: the original recomputed this percentile
    # over ALL points for every sparse point (O(n log n) per point).
    global_ground = np.percentile(points[:, 2], 10)

    local_ground_levels = np.zeros(len(points))

    for i in range(len(points)):
        # Indices of all points within the search radius (XY plane).
        neighbors_idx = tree.query_ball_point(points[i, :2], search_radius)

        if len(neighbors_idx) < min_neighbors:
            # Too few neighbors: use the global lowest-10% height estimate.
            local_ground_levels[i] = global_ground
        else:
            # Use the 5th height percentile of the neighborhood as ground.
            local_ground_levels[i] = np.percentile(points[neighbors_idx][:, 2], 5)

    return local_ground_levels

def preprocess_point_cloud(points, intensity, voxel_size=0.5):
    """
    Preprocess the point cloud: voxel down-sampling and normal estimation.

    When a non-zero intensity field is present, it is normalized to [0, 1]
    and stored as grayscale colors on the cloud before down-sampling.

    Parameters
    ----------
    points : ndarray of shape (N, 3)
        XYZ coordinates.
    intensity : ndarray of shape (N,)
        Per-point intensity; ignored when all zeros.
    voxel_size : float
        Voxel edge length for down-sampling (also scales the normal
        estimation radius).

    Returns
    -------
    open3d.geometry.PointCloud
        Down-sampled cloud with estimated normals.
    """
    print("预处理点云...")
    # Build the Open3D point cloud object.
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)

    # If intensity is available, encode it as grayscale colors.
    if np.any(intensity):
        print("存在强度信息，强度范围为{}-{}".format(np.min(intensity), np.max(intensity)))
        # Bug fix: guard against a constant (max == min) intensity field,
        # which previously divided by zero and produced NaN colors.
        intensity_range = np.max(intensity) - np.min(intensity)
        if intensity_range > 0:
            intensity_normalized = (intensity - np.min(intensity)) / intensity_range
        else:
            intensity_normalized = np.zeros(len(intensity))
        # Grayscale: replicate the normalized intensity across R, G and B.
        colors = np.zeros((points.shape[0], 3))
        colors[:, 0] = intensity_normalized  # R
        colors[:, 1] = intensity_normalized  # G
        colors[:, 2] = intensity_normalized  # B
        pcd.colors = o3d.utility.Vector3dVector(colors)

    # Voxel down-sampling.
    downpcd = pcd.voxel_down_sample(voxel_size=voxel_size)
    print(f"下采样后点数: {len(downpcd.points)}")

    # Estimate normals with a hybrid radius/kNN search scaled to the voxel size.
    downpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size*2, max_nn=30))

    return downpcd

def extract_advanced_features(pcd, intensity, local_ground_levels, k=50):
    """
    Extract a 16-dimensional feature vector per point, including the
    height relative to the local terrain and tree/pond discriminators.

    Parameters
    ----------
    pcd : open3d.geometry.PointCloud
        Cloud with normals already estimated.
    intensity : ndarray
        Per-point intensity; ignored when its length differs from the
        point count.
    local_ground_levels : ndarray of shape (N,)
        Local ground height estimate per point.
    k : int
        Number of nearest neighbors used for local statistics.

    Returns
    -------
    ndarray of shape (N, 16)
        Per-point feature matrix.
    """
    print("提取高级特征...")
    points = np.asarray(pcd.points)
    normals = np.asarray(pcd.normals)

    # KD-tree for nearest-neighbor queries.
    pcd_tree = o3d.geometry.KDTreeFlann(pcd)

    num_points = points.shape[0]
    features = np.zeros((num_points, 16))

    # Hoisted loop invariant: whether intensity aligns with the points.
    has_intensity = len(intensity) == len(points)

    for i in range(num_points):
        # 1. Height relative to local terrain (most important feature!).
        features[i, 0] = points[i, 2] - local_ground_levels[i]

        # 2. Intensity of the current point (0 when unavailable).
        features[i, 1] = intensity[i] if has_intensity else 0

        # 3. Z component of the normal (verticality).
        features[i, 2] = normals[i, 2]

        # Query k nearest neighbors (returned count is not needed).
        _, idx, _ = pcd_tree.search_knn_vector_3d(pcd.points[i], k)

        neighbor_points = points[idx]
        neighbor_normals = normals[idx]

        # 4. Local height variation.
        features[i, 3] = np.std(neighbor_points[:, 2])

        # 5. Local height range.
        features[i, 4] = np.max(neighbor_points[:, 2]) - np.min(neighbor_points[:, 2])

        # 6. Local density (points per neighborhood disc area).
        if len(idx) > 1:
            distances = np.linalg.norm(neighbor_points - points[i], axis=1)
            features[i, 5] = len(idx) / (np.pi * (np.max(distances)) ** 2 + 1e-6)
        else:
            features[i, 5] = 0

        # 7. Local variation of the normals' Z components.
        features[i, 6] = np.std(neighbor_normals[:, 2])

        # 8. Horizontal spread of the neighborhood around its XY centroid.
        neighbor_xy = neighbor_points[:, :2]
        centroid_xy = np.mean(neighbor_xy, axis=0)
        features[i, 7] = np.mean(np.linalg.norm(neighbor_xy - centroid_xy, axis=1))

        # 9. Distance from the point to the local 3D centroid.
        centroid = np.mean(neighbor_points, axis=0)
        features[i, 8] = np.linalg.norm(points[i] - centroid)

        # 10. Local curvature via covariance eigenvalues (smallest/largest ratio).
        centered = neighbor_points - centroid
        cov_matrix = centered.T @ centered / len(centered)
        eigenvalues = np.linalg.eigvalsh(cov_matrix)
        eigenvalues_sorted = np.sort(eigenvalues)[::-1]
        if eigenvalues_sorted[0] > 0:
            features[i, 9] = eigenvalues_sorted[2] / eigenvalues_sorted[0]
        else:
            features[i, 9] = 0

        # 11. Local slope: mean |dz/dxy| towards the neighbors.
        xy_distances = np.linalg.norm(neighbor_xy - points[i, :2], axis=1)
        z_differences = neighbor_points[:, 2] - points[i, 2]
        valid_mask = xy_distances > 0
        if np.any(valid_mask):
            # valid_mask guarantees at least one slope, so no extra guard needed.
            slopes = np.abs(z_differences[valid_mask] / xy_distances[valid_mask])
            features[i, 10] = np.mean(slopes)
        else:
            features[i, 10] = 0

        # 12. Local roughness: residual std of a least-squares plane fit.
        if len(idx) >= 5:
            A = np.column_stack([neighbor_xy, np.ones(len(neighbor_xy))])
            try:
                coeffs, _, _, _ = np.linalg.lstsq(A, neighbor_points[:, 2], rcond=None)
                features[i, 11] = np.std(neighbor_points[:, 2] - A @ coeffs)
            except np.linalg.LinAlgError:
                # Bug fix: catch only the fit failure; the original bare
                # `except:` also swallowed KeyboardInterrupt/SystemExit.
                features[i, 11] = 0
        else:
            features[i, 11] = 0

        # --- Additional features (13-16) ---

        # 13. Normal direction disorder (key tree-vs-building discriminator).
        normal_angles = np.arccos(np.clip(np.abs(neighbor_normals[:, 2]), 0, 1))
        features[i, 12] = np.std(normal_angles)

        # 14. Vertical extent ratio (Z std / XY std): high for trees, ~1 for buildings.
        xy_std = np.std(neighbor_points[:, :2], axis=0).mean() + 1e-6
        z_std = np.std(neighbor_points[:, 2])
        features[i, 13] = z_std / xy_std

        # 15. Point density by bounding-box volume (low for ponds).
        bbox_vol = np.prod(np.max(neighbor_points, axis=0) - np.min(neighbor_points, axis=0)) + 1e-6
        features[i, 14] = len(neighbor_points) / bbox_vol

        # 16. Intensity coefficient of variation (water surfaces may stand out).
        if has_intensity:
            neighbor_intensity = intensity[idx]
            mean_i = np.mean(neighbor_intensity)
            features[i, 15] = np.std(neighbor_intensity) / mean_i if mean_i > 1e-6 else 0
        else:
            features[i, 15] = 0

    return features

def generate_pseudo_labels(features, local_ground_levels, intensity):
    """
    Generate rule-based pseudo labels used to train the classifier.

    Label encoding: 0=Building, 1=Road, 2=Tree, 3=Pond, -1=unknown.
    Road points must additionally satisfy intensity > 20000.

    Parameters
    ----------
    features : ndarray of shape (N, 16)
        Feature matrix from extract_advanced_features.
    local_ground_levels : ndarray
        Unused here; kept for interface symmetry with the caller.
    intensity : ndarray of shape (N,)
        Per-point intensity values.

    Returns
    -------
    ndarray of shape (N,)
        Integer pseudo labels.
    """
    print("生成伪标签用于训练...")

    # Named views onto the feature columns the rules rely on.
    relative_height = features[:, 0]
    roughness = features[:, 11]
    vertical_ratio = features[:, 13]
    point_density = features[:, 14]
    normal_disorder = features[:, 12]
    normal_z_component = features[:, 2]
    height_variation = features[:, 3]

    labels = np.full(len(features), -1)  # everything starts as "unknown"

    # Pond: near the ground, extremely sparse and fairly smooth.
    is_pond = (
        (relative_height < 0.3)
        & (point_density < np.percentile(point_density, 10))
        & (roughness < np.percentile(roughness, 30))
    )
    labels[is_pond] = 3

    # Road: low, dense, smooth, flat and highly reflective (intensity > 20000).
    is_road = (
        (relative_height < 0.5)
        & (point_density > np.percentile(point_density, 50))
        & (roughness < np.percentile(roughness, 30))
        & (height_variation < np.percentile(height_variation, 40))
        & (intensity > 20000)
    )
    labels[is_road & (labels == -1)] = 1

    # Tree: elevated, rough, vertically stretched, with disordered normals.
    is_tree = (
        (relative_height > 0.8)
        & (roughness > np.percentile(roughness, 70))
        & (vertical_ratio > 1.2)
        & (normal_disorder > np.percentile(normal_disorder, 60))
    )
    labels[is_tree & (labels == -1)] = 2

    # Building: well above ground, near-vertical normals, relatively smooth.
    is_building = (
        (relative_height > 1.5)
        & (normal_z_component > 0.6)
        & (roughness < np.percentile(roughness, 60))
    )
    labels[is_building & (labels == -1)] = 0

    print(f"  生成的标签统计: B:{np.sum(labels==0)}, R:{np.sum(labels==1)}, T:{np.sum(labels==2)}, P:{np.sum(labels==3)}, U:{np.sum(labels==-1)}")
    return labels

def classify_points_with_rf(features, local_ground_levels, intensity):
    """
    Four-class segmentation using a random forest trained on pseudo labels.

    A rule-based labeling pass provides the training set; the forest then
    predicts labels for the remaining (unknown) points.

    Parameters
    ----------
    features : ndarray of shape (N, 16)
        Per-point feature matrix.
    local_ground_levels : ndarray of shape (N,)
        Local ground estimates (forwarded to the pseudo-label rules).
    intensity : ndarray of shape (N,)
        Per-point intensity (used by the road rule).

    Returns
    -------
    ndarray of shape (N,)
        Labels: 0=Building, 1=Road, 2=Tree, 3=Pond, -1=unclassified.
    """
    print("训练并应用随机森林分类器...")
    pseudo_labels = generate_pseudo_labels(features, local_ground_levels, intensity)

    # Samples that received a rule-based label form the training set.
    train_mask = pseudo_labels != -1
    if np.sum(train_mask) < 10:  # too few samples to train on
        print("  有标签样本太少，回退到规则分类")
        return pseudo_labels

    X_train = features[train_mask]
    y_train = pseudo_labels[train_mask]

    # Train the classifier.
    rf_clf = RandomForestClassifier(n_estimators=100, random_state=42, max_depth=10)
    rf_clf.fit(X_train, y_train)

    all_labels = np.full(len(features), -1)
    # Bug fix: keep the pseudo labels themselves for the training samples.
    # The original re-predicted X_train, which does NOT guarantee the
    # training labels stay unchanged (contrary to its own comment).
    all_labels[train_mask] = y_train

    # Predict only the unlabeled samples.
    unlab_mask = ~train_mask
    if np.any(unlab_mask):
        all_labels[unlab_mask] = rf_clf.predict(features[unlab_mask])

    print(f"  最终预测标签统计: B:{np.sum(all_labels==0)}, R:{np.sum(all_labels==1)}, T:{np.sum(all_labels==2)}, P:{np.sum(all_labels==3)}, U:{np.sum(all_labels==-1)}")
    return all_labels

def post_process_labels(points, labels, min_component_size=20, min_pond_area_sqm=10.0):
    """
    Post-process labels: remove small connected components per class with
    DBSCAN and drop pond components whose footprint is too small.

    Parameters
    ----------
    points : ndarray of shape (N, 3)
        Down-sampled point coordinates.
    labels : ndarray of shape (N,)
        Class labels; modified in place and also returned.
    min_component_size : int
        Minimum number of points for a component to survive.
    min_pond_area_sqm : float
        Minimum pond bounding-box area (square meters, XY plane).

    Returns
    -------
    ndarray of shape (N,)
        The (mutated) label array.
    """
    print("后处理：去除小连通组件和过滤小面积池塘...")
    unique_labels = np.unique(labels)
    unique_labels = unique_labels[unique_labels != -1]  # skip unclassified points

    for class_id in unique_labels:
        class_mask = (labels == class_id)
        class_points = points[class_mask]
        original_indices = np.where(class_mask)[0]

        # The whole class is smaller than one valid component.
        if len(class_points) < min_component_size:
            labels[class_mask] = -1
            continue

        clustering = DBSCAN(eps=2.0, min_samples=min_component_size).fit(class_points)
        comp_labels = clustering.labels_

        # Bug fix: with min_samples == min_component_size, DBSCAN never
        # emits clusters smaller than min_component_size — the small
        # components this function is meant to remove come back as noise
        # (comp_labels == -1). The original skipped them, leaving them
        # classified; mark them unclassified instead.
        noise_mask = comp_labels == -1
        labels[original_indices[noise_mask]] = -1

        for comp_id in set(comp_labels):
            if comp_id == -1:
                continue  # noise already handled above
            comp_member_mask = comp_labels == comp_id
            comp_point_indices = original_indices[comp_member_mask]

            # Defensive size check (normally unreachable, see note above).
            if len(comp_point_indices) < min_component_size:
                labels[comp_point_indices] = -1
                continue

            # Pond components must also cover a minimum footprint area
            # (axis-aligned XY bounding box).
            if class_id == 3:
                comp_points_xy = class_points[comp_member_mask][:, :2]
                min_x, min_y = np.min(comp_points_xy, axis=0)
                max_x, max_y = np.max(comp_points_xy, axis=0)
                area = (max_x - min_x) * (max_y - min_y)

                if area < min_pond_area_sqm:
                    labels[comp_point_indices] = -1

    print(f"  后处理后标签统计: B:{np.sum(labels==0)}, R:{np.sum(labels==1)}, T:{np.sum(labels==2)}, P:{np.sum(labels==3)}, U:{np.sum(labels==-1)}")
    return labels

def visualize_results(pcd, labels):
    """
    Render the four-class segmentation result in an Open3D window.

    Buildings are drawn red, roads dark gray, trees green, ponds blue and
    unclassified points light gray.

    Parameters
    ----------
    pcd : open3d.geometry.PointCloud
        The cloud to color and display (colors are overwritten).
    labels : ndarray
        Per-point class labels (0-3 or -1).
    """
    print("可视化结果...")

    palette = {
        0: [1.0, 0.0, 0.0],   # building - red
        1: [0.3, 0.3, 0.3],   # road - dark gray
        2: [0.0, 1.0, 0.0],   # tree - green
        3: [0.0, 0.0, 1.0],   # pond - blue
        -1: [0.7, 0.7, 0.7],  # unclassified - light gray
    }
    fallback = [0.5, 0.5, 0.5]  # any unexpected label value

    # One RGB triple per point, looked up from the palette.
    colors = np.array([palette.get(lbl, fallback) for lbl in labels], dtype=float).reshape(-1, 3)

    pcd.colors = o3d.utility.Vector3dVector(colors)

    o3d.visualization.draw_geometries(
        [pcd],
        window_name="四类地物分割结果 (建筑物-红, 道路-灰, 树木-绿, 池塘-蓝)",
        width=1200,
        height=800,
        point_show_normal=False,
    )

def main(las_file_path):
    """
    Run the full segmentation pipeline on one LAS file.

    Steps: load -> preprocess/down-sample -> (cached) terrain estimation and
    feature extraction -> random-forest four-class segmentation ->
    post-processing -> visualization.

    Parameters
    ----------
    las_file_path : str
        Path to the input .las file.
    """
    # Cache file names (written to the current working directory).
    # NOTE(review): the caches are not keyed on the input file or the
    # parameters, so results from a previous run on a different file would
    # be silently reused — confirm this is intended.
    features_cache_file = "features.npz"
    ground_cache_file = "local_ground_levels.npy"

    # 1. Load the LAS file
    points, intensity, classification = load_las_file(las_file_path)
    
    # 2. Preprocess the point cloud (voxel down-sampling + normals)
    pcd = preprocess_point_cloud(points, intensity, voxel_size=0.5)
    downsampled_points = np.asarray(pcd.points)
    # NOTE(review): voxel down-sampling reorders and merges points, so
    # truncating the original intensity array does NOT align it with the
    # down-sampled points — presumably a bug; verify the mapping.
    downsampled_intensity = intensity[:len(downsampled_points)] # keep intensity length matched to the down-sampled points

    # 3. Load features and local ground heights from cache, or compute them
    if os.path.exists(features_cache_file) and os.path.exists(ground_cache_file):
        print(f"从缓存加载特征: {features_cache_file} 和 {ground_cache_file}")
        loaded_data = np.load(features_cache_file)
        features = loaded_data['features']
        local_ground_levels = np.load(ground_cache_file)
    else:
        print("缓存文件不存在，开始计算特征和局部地面高度...")
        # 3a. Estimate the local terrain (fixes a bug in the original code)
        local_ground_levels = estimate_local_terrain(downsampled_points)
        np.save(ground_cache_file, local_ground_levels)
        print(f"局部地面高度已保存到 {ground_cache_file}")

        # 3b. Extract advanced features (extended to 16 dimensions)
        features = extract_advanced_features(pcd, downsampled_intensity, local_ground_levels, k=50)
        np.savez_compressed(features_cache_file, features=features)
        print(f"特征已保存到 {features_cache_file}")

    # 4. Four-class segmentation with the random forest (core improvement)
    #    Note: downsampled_intensity feeds the road intensity rule
    labels = classify_points_with_rf(features, local_ground_levels, downsampled_intensity)
    
    # 5. Post-processing (remove small noise blobs, filter small-area ponds)
    #    Adds the min_pond_area_sqm parameter
    labels = post_process_labels(downsampled_points, labels, min_component_size=20, min_pond_area_sqm=10.0)
    
    # 6. Visualize the result
    visualize_results(pcd, labels)
    
    print("处理完成!")

if __name__ == "__main__":
    import sys

    # Default input path; may be overridden by the first CLI argument,
    # e.g. `python script.py /path/to/cloud.las`.
    las_file_path = sys.argv[1] if len(sys.argv) > 1 else "/mnt/d/temp_files/part_pointCloud.las"
    main(las_file_path)