import numpy as np
import pandas as pd
import rasterio
import geopandas as gpd
from rasterio.mask import mask
from matplotlib import rcParams
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from shapely.geometry import Point  # 添加这行导入
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import os

# Configure matplotlib so CJK text in plot titles below renders correctly.
rcParams['font.sans-serif'] = ['Noto Sans CJK JP']  # a font with CJK glyph coverage
rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with CJK fonts

def read_raster(filepath):
    """Open a raster file and return (band-1 array, affine geotransform)."""
    src = rasterio.open(filepath)
    try:
        band = src.read(1)
        geotransform = src.transform
    finally:
        # Mirror the context-manager behavior: close the dataset even on error.
        src.close()
    return band, geotransform

# def random_points_within_mask(mask_path, n_points=10000):
#     """在陕西省范围内生成随机点"""
#     # 读取陕西省边界
#     gdf = gpd.read_file(mask_path)
    
#     # 获取边界范围
#     bounds = gdf.total_bounds
    
#     # 生成随机点
#     points = []
#     while len(points) < n_points:
#         x = np.random.uniform(bounds[0], bounds[2])
#         y = np.random.uniform(bounds[1], bounds[3])
#         point = Point(x, y)
#         if gdf.contains(point).any():
#             points.append((x, y))
    
#     return np.array(points)

def extract_values_at_points(points, raster_data, transform):
    """Sample raster values at point coordinates, guarding against out-of-bounds points.

    Parameters
    ----------
    points : np.ndarray, shape (n, 2)
        Point coordinates as (x, y) columns in the raster's CRS.
    raster_data : np.ndarray
        2-D raster band.
    transform : affine.Affine
        Affine geotransform of the raster.

    Returns
    -------
    np.ndarray, shape (n,)
        Raster value at each point; NaN where the point falls outside the grid.
    """
    rows, cols = rasterio.transform.rowcol(transform, points[:, 0], points[:, 1])
    # BUGFIX: rowcol() returns plain Python lists in some rasterio versions;
    # boolean-mask indexing below requires ndarrays, so coerce explicitly.
    rows = np.asarray(rows)
    cols = np.asarray(cols)

    # Mask of points whose (row, col) lies inside the raster grid.
    valid_mask = (
        (rows >= 0) &
        (rows < raster_data.shape[0]) &
        (cols >= 0) &
        (cols < raster_data.shape[1])
    )

    values = np.full(len(points), np.nan)  # out-of-bounds points stay NaN
    values[valid_mask] = raster_data[rows[valid_mask], cols[valid_mask]]

    return values

def random_points_within_mask(mask_path, raster_data, transform, n_points=10000, max_attempts=50000):
    """Draw random points inside the Shaanxi boundary that also hit valid raster cells.

    Rejection-samples uniform points in the shapefile's bounding box until
    `n_points` have been accepted or `max_attempts` draws are exhausted.
    A point is accepted when it lies inside the boundary polygon(s) AND maps
    to an in-bounds, non-NaN cell of `raster_data`.

    Returns an (n, 2) array of accepted (x, y) coordinates.
    """
    boundary = gpd.read_file(mask_path)
    xmin, ymin, xmax, ymax = boundary.total_bounds

    n_rows, n_cols = raster_data.shape
    accepted = []

    for _ in range(max_attempts):
        if len(accepted) >= n_points:
            break

        x = np.random.uniform(xmin, xmax)
        y = np.random.uniform(ymin, ymax)

        # Reject points outside the provincial boundary.
        if not boundary.contains(Point(x, y)).any():
            continue

        # Reject points that fall off the raster grid or on a NaN cell.
        row, col = rasterio.transform.rowcol(transform, x, y)
        if 0 <= row < n_rows and 0 <= col < n_cols and not np.isnan(raster_data[row, col]):
            accepted.append((x, y))

    if len(accepted) < n_points:
        print(f"警告：只生成了 {len(accepted)} 个有效点，少于请求的 {n_points} 个点")

    return np.array(accepted)

def resample_raster(src_data, src_transform, dst_transform, dst_shape,
                    src_crs='EPSG:4326', dst_crs='EPSG:4326'):
    """Resample `src_data` onto the destination grid via bilinear reprojection.

    Parameters
    ----------
    src_data : np.ndarray
        2-D source raster band.
    src_transform, dst_transform : affine.Affine
        Geotransforms of the source and destination grids.
    dst_shape : tuple[int, int]
        (rows, cols) of the destination grid.
    src_crs, dst_crs : str, optional
        CRS of source/destination (default 'EPSG:4326', as before).

    Returns
    -------
    np.ndarray
        The resampled band on the destination grid.
    """
    # BUGFIX: `rasterio.warp` is a submodule that is NOT loaded by a bare
    # `import rasterio` (only rasterio.mask is imported at the top of this
    # file), so `rasterio.warp.reproject` raised AttributeError. Import it
    # explicitly here.
    from rasterio.warp import Resampling, reproject

    resampled, _ = reproject(
        source=src_data,
        src_transform=src_transform,
        src_crs=src_crs,
        destination=np.zeros(dst_shape),
        dst_transform=dst_transform,
        dst_crs=dst_crs,
        resampling=Resampling.bilinear,
    )
    return resampled
def clean_raster_data(data, no_data_value):
    """Cast a raster band to float32 (so NaN is representable) and mask the no-data sentinel.

    Returns a new array; the input is not modified.
    """
    # float32 keeps memory low while still supporting NaN as the fill value.
    as_float = data.astype(np.float32)
    return np.where(as_float == no_data_value, np.float32(np.nan), as_float)


# 主程序
def main():
    # 1. 读取数据并处理no_data值
    path = r'/home/gsr/Documents/代做外包/SRTM地形'
    
    # 读取并清理DEM数据
    dem_data, dem_transform = read_raster(os.path.join(path,'SRTMDEM','srtm_dem.tif'))
    dem_data = clean_raster_data(dem_data, 32767)#65535)
    
    # 读取并清理坡度数据
    # slope_data, slope_transform = read_raster(os.path.join(path,'SRTMSLOPE','srtm_slope.tif'))
    # slope_data = clean_raster_data(slope_data, -128)
    slope_data, slope_transform = read_raster(os.path.join(path,'Slope_csj.tif'))
    slope_data = clean_raster_data(slope_data, -128)
    
    # 读取并清理坡向数据
    aspect_data, aspect_transform = read_raster(os.path.join(path,'SRTMSPECT','srtm_spect.tif'))
    aspect_data = clean_raster_data(aspect_data, 15)
    
    # 读取20年NPP数据并处理
    # npp_data = []
    # for year in range(2002, 2022+1):
    #     data, npp_transform = read_raster(os.path.join('/home/gsr/Documents/代做外包/mean_npp',
    #                                    f"MOD17A3HGF.061_Npp_500m_doy{year}001_aid0001.tif"))
    #     # 清理NPP数据
    #     data = clean_raster_data(data, 32767)
        
    #     # 重采样到DEM分辨率
    #     resampled_data = resample_raster(
    #         data,
    #         npp_transform,
    #         dem_transform,
    #         dem_data.shape
    #     )
    #     npp_data.append(resampled_data)
    #     print(f"Year {year} NPP resampled shape:", resampled_data.shape)
    
    # # 计算20年平均值（忽略NaN值）
    # npp_mean = np.nanmean(np.array(npp_data), axis=0)
    npp_sum = np.zeros_like(dem_data, dtype=np.float32)
    valid_count = np.zeros_like(dem_data, dtype=np.uint16)

    for year in range(2002, 2023):
        data, npp_transform = read_raster(os.path.join('/home/gsr/Documents/代做外包/mean_npp',
                                    f"MOD17A3HGF.061_Npp_500m_doy{year}001_aid0001.tif"))
        data = clean_raster_data(data, 32767)
        resampled_data = resample_raster(
            data, npp_transform, dem_transform, dem_data.shape
        )
        resampled_data/=10
        mask_valid = ~np.isnan(resampled_data)
        npp_sum[mask_valid] += resampled_data[mask_valid]
        valid_count[mask_valid] += 1
        del data, resampled_data  # ✅ 释放内存

    npp_mean = np.full_like(dem_data, np.nan, dtype=np.float32)
    valid_pixels = valid_count > 0
    npp_mean[valid_pixels] = npp_sum[valid_pixels] / valid_count[valid_pixels]
    del npp_sum, valid_count  # ✅ 释放中间变量

    print("数据统计信息：")
    print(f"DEM 有效像元数: {np.sum(~np.isnan(dem_data))}")
    print(f"坡度有效像元数: {np.sum(~np.isnan(slope_data))}")
    print(f"坡向有效像元数: {np.sum(~np.isnan(aspect_data))}")
    print(f"NPP平均值有效像元数: {np.sum(~np.isnan(npp_mean))}")

    # 2. 生成随机点
    points = random_points_within_mask("/home/gsr/Documents/代做外包/shannxi_area/shanxi.shp", 
                                    dem_data, dem_transform, n_points=10000)

    # 3. 提取各点位置的值
    elevation = extract_values_at_points(points, dem_data, dem_transform)
    slope = extract_values_at_points(points, slope_data, slope_transform)
    aspect = extract_values_at_points(points, aspect_data, aspect_transform)
    npp = extract_values_at_points(points, npp_mean, dem_transform)

    # 4. 创建数据框
    # df = pd.DataFrame({
    #     'elevation': elevation,
    #     'slope': slope,
    #     'aspect': aspect,
    #     'npp': npp
    # })
    df = pd.DataFrame({
        'elevation': elevation,
        'slope': slope,
        'aspect': aspect,
        'npp': npp
    }).astype(np.float32).dropna()

    # df = df.dropna()
    
    print(f"有效样本点数量: {len(df)}")
    # 5. 相关性分析
    corr_matrix = df.corr(method='pearson')
    
    from scipy.stats import pearsonr

    # 构造空矩阵
    p_values = pd.DataFrame(index=df.columns, columns=df.columns)

    # 双重循环计算每对变量的 p 值
    for col1 in df.columns:
        for col2 in df.columns:
            if col1 == col2:
                p_values.loc[col1, col2] = 0  # 自相关设为 0
            else:
                _, p = pearsonr(df[col1], df[col2])
                p_values.loc[col1, col2] = p

    # 转换为 float 类型
    p_values = p_values.astype(float)

    # 可视化 p 值矩阵
    plt.figure(figsize=(10, 8))
    sns.heatmap(p_values, annot=True, cmap='YlGnBu', vmin=0, vmax=1)
    plt.title('变量间Pearson相关的 p 值')
    plt.savefig('p_value_matrix.png', dpi=300, bbox_inches='tight')
    plt.close('all')

    # 绘制相关系数热力图
    plt.figure(figsize=(10, 8))
    sns.heatmap(corr_matrix, 
                annot=True,
                cmap='RdBu_r',
                vmin=-1, vmax=1)
    plt.title('变量间Pearson相关系数')
    plt.savefig('r_value_correlation_matrix.png', dpi=300, bbox_inches='tight')
    plt.close('all')

    # # 6. 建立回归模型
    # X_elevation = df['elevation'].values.reshape(-1, 1)
    # X_slope = df['slope'].values.reshape(-1, 1)
    # y = df['npp'].values

    # # 海拔模型
    # model_elevation = LinearRegression()
    # model_elevation.fit(X_elevation, y)
    # y_pred_elevation = model_elevation.predict(X_elevation)

    # # 坡度模型
    # model_slope = LinearRegression()
    # model_slope.fit(X_slope, y)
    # y_pred_slope = model_slope.predict(X_slope)

    # # 7. 误差分析
    # def print_metrics(y_true, y_pred, model_name):
    #     mae = mean_absolute_error(y_true, y_pred)
    #     rmse = np.sqrt(mean_squared_error(y_true, y_pred))
    #     r2 = r2_score(y_true, y_pred)
    #     print(f"\n{model_name}模型评估指标：")
    #     print(f"MAE: {mae:.4f}")
    #     print(f"RMSE: {rmse:.4f}")
    #     print(f"R²: {r2:.4f}")

    # print_metrics(y, y_pred_elevation, "海拔高度")
    # print_metrics(y, y_pred_slope, "坡度")

    # # 8. 可视化回归结果
    # fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))

    # ax1.scatter(X_elevation, y, alpha=0.5)
    # ax1.plot(X_elevation, y_pred_elevation, color='red')
    # ax1.set_xlabel('海拔高度 (m)')
    # ax1.set_ylabel('NPP')
    # ax1.set_title('NPP与海拔高度的关系')

    # ax2.scatter(X_slope, y, alpha=0.5)
    # ax2.plot(X_slope, y_pred_slope, color='red')
    # ax2.set_xlabel('坡度 (°)')
    # ax2.set_ylabel('NPP')
    # ax2.set_title('NPP与坡度的关系')

    # plt.tight_layout()
    # plt.savefig('regression_analysis.png', dpi=300, bbox_inches='tight')
    # plt.close()

if __name__ == "__main__":
    main()