# -*- coding:utf-8 -*-
from datetime import datetime
import psutil
import pandas as pd
import pyarrow as pa
from pandarallel import pandarallel
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from gwlsa.datasets import calc_max_neighbours, find_neighbours
from gwlsa.datasets import split_train_val_test
from utils.general_utils import timer
from utils.landslide_utils import read_gpkg_addColumns, save_df_tofile, load_df_fromfile
from utils.landslide_utils import nestedDataframes_to_dictList
import gwlsa_settings as GS
from gwlsa_settings import net_params, split_params

import warnings
warnings.filterwarnings('ignore', message="You are on Windows. If you detect any issue with pandarallel, be sure you checked out the Troubleshooting page:")

# [Non-parallel] version of scale_findNeighbours. Very slow — [NOT recommended]; prefer the parallel version below.
def scale_findNeighbours(gdf, buffer_distance:int, resolution:int,
                         spatial_columns:list, x_columns:list, id_column='id',
                         test_ratio=0.2, val_ratio=0.2,
                         scale_fn="minmax_scale", x_include_pointSelf=True):
    """Scale features, attach per-point neighbour tables, and split the data.

    Non-parallel version — very slow, NOT recommended (use
    scale_findNeighbours_parallel instead).

    Pipeline: scale the feature columns, add a constant bias column ('xb'),
    find every point's neighbours within `buffer_distance`, min-max scale the
    point-to-neighbour distances, zero-pad each neighbour table to a fixed
    length, drop the geometry columns, and split into train/val/test.

    Args:
        gdf: GeoDataFrame with 'geometry', 'posX', 'posY', the columns named in
            `spatial_columns` / `x_columns` / `id_column`, and pre-created
            object columns 'neighbours' and 'distances'. Mutated in place and
            also returned.
        buffer_distance: neighbour-search radius (same units as coordinates).
        resolution: raster resolution, used to compute the max neighbour count.
        spatial_columns: [x_coordinate_column, y_coordinate_column] names.
        x_columns: feature column names to scale and keep per neighbour.
        id_column: unique point-id column name.
        test_ratio: fraction of rows for the test split.
        val_ratio: fraction of rows for the validation split.
        scale_fn: 'minmax_scale' or 'standard_scale'.
        x_include_pointSelf: keep the point's own row in its neighbour table.

    Returns:
        (gdf, train_df, val_df, test_df)

    Raises:
        ValueError: on an unknown `scale_fn`, or when a point has more
            neighbours than calc_max_neighbours allows (inconsistent settings).
    """
    # step 1: scale the feature (X) columns
    if scale_fn == "minmax_scale":
        scaler_x = MinMaxScaler()
    elif scale_fn == "standard_scale":
        scaler_x = StandardScaler()
    else:
        # Fail fast instead of hitting an opaque NameError on scaler_x below.
        raise ValueError(f"Unsupported scale_fn: {scale_fn!r} "
                         "(expected 'minmax_scale' or 'standard_scale')")
    gdf[x_columns] = scaler_x.fit_transform(gdf[x_columns])

    # step 2: add a constant-1.0 column — the bias term b in Wx + b.
    gdf['xb'] = 1.0

    # step 3: find each point's neighbours
    max_num_neighbours = calc_max_neighbours(buffer_distance, resolution)
    print(f'max_num_neighbours:{max_num_neighbours}')
    for idx, row in gdf.iterrows():
        target_x_utm = row[spatial_columns[0]]
        target_y_utm = row[spatial_columns[1]]
        neighbours = find_neighbours(target_x_utm, target_y_utm, gdf, buffer_distance).copy()
        query_point = neighbours['geometry'].loc[idx]
        neighbours['distance'] = neighbours['geometry'].apply(
            lambda geom: geom.distance(query_point)
        )

        # Min-max scale the point-to-neighbour distances. This is a fresh
        # per-point scaler, independent of the X-column scaler above.
        dist_scaler = MinMaxScaler()
        neighbours['distance'] = dist_scaler.fit_transform(
            neighbours['distance'].values.reshape(-1, 1)
        )

        # Zero-pad the neighbour table up to max_num_neighbours so every point
        # yields a fixed-length sequence; a varying input length would break
        # the network's input layer.
        num_rows_to_fill = max_num_neighbours - len(neighbours)
        if num_rows_to_fill > 0:
            empty_rows = pd.DataFrame([[0] * len(neighbours.columns)] * num_rows_to_fill,
                                      columns=neighbours.columns)
            neighbours = pd.concat([neighbours, empty_rows], ignore_index=True)
        elif num_rows_to_fill < 0:
            raise ValueError('请检查gnnwr_settings.py中resolution和max_distance参数的设置是否正确！！！')
        # Optionally drop the point itself from its own neighbour table.
        if x_include_pointSelf:
            other_rows = neighbours
        else:
            other_rows = neighbours[neighbours.index != idx]
        gdf.at[idx, 'neighbours'] = other_rows.loc[:, [id_column] + x_columns + ['xb']]
        gdf.at[idx, 'distances'] = other_rows.loc[:, [id_column, 'distance']]

    # Neighbours are stored; the geometry/position columns are no longer needed.
    gdf.drop('geometry', axis=1, inplace=True)
    gdf.drop('posX', axis=1, inplace=True)
    gdf.drop('posY', axis=1, inplace=True)

    # NOTE(review): unlike the parallel version, this one does NOT run
    # nestedDataframes_to_dictList on the result — confirm whether that is intended.

    print('开始分割数据......')
    # step 4: split the data into train / validation / test sets
    train_df, val_df, test_df = split_train_val_test(gdf, test_ratio, val_ratio)

    return gdf, train_df, val_df, test_df

# [Parallel] version of scale_findNeighbours — recommended.
def scale_findNeighbours_parallel(gdf, buffer_distance:int, resolution:int,
                         spatial_columns:list, x_columns:list, id_column='id',
                         test_ratio=0.2, val_ratio=0.2,
                         scale_fn="minmax_scale", x_include_pointSelf=True):
    """Parallel version of scale_findNeighbours — recommended.

    Same pipeline as the serial version (scale features, add bias column,
    find neighbours, scale distances, zero-pad, split), but the per-point
    neighbour search runs across worker processes via pandarallel, and the
    nested DataFrames are converted to a serializable dict-list format.

    Args:
        gdf: GeoDataFrame with 'geometry', 'posX', 'posY', the columns named in
            `spatial_columns` / `x_columns` / `id_column`. Mutated in place
            and also returned (after nested-frame conversion).
        buffer_distance: neighbour-search radius (same units as coordinates).
        resolution: raster resolution, used to compute the max neighbour count.
        spatial_columns: [x_coordinate_column, y_coordinate_column] names.
        x_columns: feature column names to scale and keep per neighbour.
        id_column: unique point-id column name.
        test_ratio: fraction of rows for the test split.
        val_ratio: fraction of rows for the validation split.
        scale_fn: 'minmax_scale' or 'standard_scale'.
        x_include_pointSelf: keep the point's own row in its neighbour table.

    Returns:
        (gdf, train_df, val_df, test_df)

    Raises:
        ValueError: on an unknown `scale_fn`, or when a point has more
            neighbours than calc_max_neighbours allows (inconsistent settings).
    """
    # Initialize pandarallel.
    # nb_workers: number of worker processes; defaults to the OS core count.
    # verbose: log verbosity (0 = silent, 1 = warnings only, 2 = all logs).
    logic_workers = psutil.cpu_count(logical=True)  # total logical processors
    use_workers = int(logic_workers * 0.8)  # leave some headroom for the OS
    pandarallel.initialize(progress_bar=True, verbose=0, nb_workers=use_workers)

    print('step 1: 开始缩放数据......')
    # step 1: scale the feature (X) columns
    if scale_fn == "minmax_scale":
        scaler_x = MinMaxScaler()
    elif scale_fn == "standard_scale":
        scaler_x = StandardScaler()
    else:
        # Fail fast instead of hitting an opaque NameError on scaler_x below.
        raise ValueError(f"Unsupported scale_fn: {scale_fn!r} "
                         "(expected 'minmax_scale' or 'standard_scale')")
    gdf[x_columns] = scaler_x.fit_transform(gdf[x_columns])

    # step 2: add a constant-1.0 column — the bias term b in Wx + b.
    gdf['xb'] = 1.0

    # step 3: find each point's neighbours
    print('step 3.1: 找出最大邻居数......')
    max_num_neighbours = calc_max_neighbours(buffer_distance, resolution)
    print(f'max_num_neighbours:{max_num_neighbours}')

    def process_neighbours(row, gdf, spatial_columns, buffer_distance, max_num_neighbours, id_column, x_columns,
                           x_include_pointSelf):
        # Local imports so the function is self-contained when pickled to
        # pandarallel worker processes (required on Windows spawn).
        import pandas as pd
        from sklearn.preprocessing import MinMaxScaler
        from gwlsa.datasets import find_neighbours

        target_x_utm = row[spatial_columns[0]]
        target_y_utm = row[spatial_columns[1]]
        neighbours = find_neighbours(target_x_utm, target_y_utm, gdf, buffer_distance).copy()
        query_point = neighbours['geometry'].loc[row.name]
        neighbours['distance'] = neighbours['geometry'].apply(lambda geom: geom.distance(query_point))

        # Min-max scale the point-to-neighbour distances (fresh per-point scaler,
        # independent of the X-column scaler above).
        dist_scaler = MinMaxScaler()
        neighbours['distance'] = dist_scaler.fit_transform(
            neighbours['distance'].values.reshape(-1, 1)
        )

        # Zero-pad up to max_num_neighbours so every point yields a
        # fixed-length sequence for the network.
        num_rows_to_fill = max_num_neighbours - len(neighbours)
        if num_rows_to_fill > 0:
            empty_rows = pd.DataFrame([[0] * len(neighbours.columns)] * num_rows_to_fill, columns=neighbours.columns)
            neighbours = pd.concat([neighbours, empty_rows], ignore_index=True)
        elif num_rows_to_fill < 0:
            raise ValueError('请检查gnnwr_settings.py中resolution和max_distance参数的设置是否正确！！！')
        # Optionally drop the point itself from its own neighbour table.
        if x_include_pointSelf:
            other_rows = neighbours
        else:
            other_rows = neighbours[neighbours.index != row.name]
        final_neighbours = other_rows.loc[:, [id_column] + x_columns + ['xb']]
        final_distances = other_rows.loc[:, [id_column, 'distance']]
        return final_neighbours, final_distances

    # Parallelize the neighbour search and distance computation across workers.
    print('step 3.2: 开始查找邻居......')
    final_neighbours_distances = gdf.parallel_apply(
        lambda row: process_neighbours(row, gdf, spatial_columns, buffer_distance, max_num_neighbours, id_column, x_columns, x_include_pointSelf),
        axis=1
    )
    # Unpack the (neighbours, distances) tuples into two new columns.
    gdf['neighbours'] = final_neighbours_distances.apply(lambda x: x[0])
    gdf['distances'] = final_neighbours_distances.apply(lambda x: x[1])

    # Neighbours are stored; the geometry/position columns are no longer needed.
    gdf.drop('geometry', axis=1, inplace=True)
    gdf.drop('posX', axis=1, inplace=True)
    gdf.drop('posY', axis=1, inplace=True)

    # Convert the nested DataFrames to a serializable format.
    gdf = nestedDataframes_to_dictList(gdf)

    print('step 4：开始分割数据......')
    # step 4: split the data into train / validation / test sets
    train_df, val_df, test_df = split_train_val_test(gdf, test_ratio, val_ratio)

    return gdf, train_df, val_df, test_df

def get_arrow_schema(column_names):
    """Build the list of (name, type) pyarrow schema fields for the split data.

    Plain columns map to pa.int64() (the id column) or pa.float64()
    (everything else, including the y column); the nested 'neighbours' and
    'distances' columns become list<struct> fields, and 'geometry' is skipped
    because it is dropped before saving.

    Args:
        column_names: iterable of dataframe column names.

    Returns:
        list of (column_name, pyarrow.DataType) tuples suitable for pa.schema().
    """
    # 1. Map plain columns: id -> int64, every other scalar column -> float64.
    # (The original comment mentioned int16 for Y, but the code has always
    # written float64 — the y column is treated like any other feature here.)
    type_mapping = {}
    for col in column_names:
        if col in ('neighbours', 'distances', 'geometry'):
            continue
        type_mapping[col] = pa.int64() if col == GS.id_column else pa.float64()

    # 2. Convert the mapping to field tuples; each neighbour row carries the
    # same scalar columns, so the struct reuses schema_fields as built so far.
    schema_fields = list(type_mapping.items())
    neighbours_field = ('neighbours', pa.list_(pa.struct(schema_fields)))
    distances_field = ('distances', pa.list_(pa.struct([
        ('id', pa.int64()),
        ('distance', pa.float64()),
    ])))
    schema_fields.append(neighbours_field)
    schema_fields.append(distances_field)
    return schema_fields

@timer
def exec_split(save_or_load=0):
    """Build (and save) or load the train/validation/test splits.

    Args:
        save_or_load: 0 = read the raw GPKG, run the parallel scale/neighbour
            pipeline and save the splits; 1 = load previously saved splits.

    Returns:
        (train_df, val_df, test_df)

    Raises:
        ValueError: if save_or_load is neither 0 nor 1 (the original silently
            did nothing in that case, hiding misconfiguration).
    """
    if save_or_load == 0:
        # max_data_id_value is produced by the reader but not needed here.
        max_data_id_value, gdf = read_gpkg_addColumns(net_params['raw_data_path'],
                                               id_column=net_params['id_column'],
                                               layer_name=net_params['layer_name'],
                                               csv_encoding=net_params['csv_encoding'],
                                               added_columns=['distances', 'neighbours'])
        gdf, train_df, val_df, test_df = scale_findNeighbours_parallel(gdf, buffer_distance=net_params['max_distance'],
                                                                       resolution=net_params['resolution'],
                                                                       spatial_columns=net_params['spatial_column_names'],
                                                                       x_columns=net_params['x_column_names'],
                                                                       id_column=net_params['id_column'],
                                                                       test_ratio=net_params['test_ratio'],
                                                                       val_ratio=net_params['valid_ratio'])

        # Build the pyarrow schema from the processed columns for parquet output.
        column_names = gdf.columns
        schema_fields = get_arrow_schema(column_names)
        custom_schema = pa.schema(schema_fields)

        save_df_tofile(split_params['split_gpkg_saved_dir'], net_params['layer_name'],
                       train_df, val_df, test_df,
                       net_params['max_distance'], net_params['resolution'],
                       file_format=net_params['data_load_format'],
                       parquet_schema=custom_schema)
        return train_df, val_df, test_df

    elif save_or_load == 1:
        train_df, val_df, test_df = load_df_fromfile(net_params['data_load_dir'],
                                                     buffer_distance=net_params['max_distance'],
                                                     resolution=net_params['resolution'],
                                                     file_format=net_params['data_load_format'])
        # BUGFIX: the loaded splits were previously discarded; return them.
        return train_df, val_df, test_df

    raise ValueError(f'save_or_load must be 0 (save) or 1 (load), got {save_or_load!r}')

if __name__ == '__main__':
    # Print the start timestamp, e.g.: 当前时间： 2025-09-24 22:56:46
    current_time = datetime.now()
    print("当前时间：", current_time.strftime("%Y-%m-%d %H:%M:%S"))
    # save_or_load: 0 = save, 1 = load
    exec_split(save_or_load=0)
