# -*- coding:utf-8 -*-
import os
from datetime import datetime
import platform
import torch.nn as nn
import psutil

def get_num_workers():
    """Return the number of DataLoader worker processes to use.

    Uses 80% of the machine's logical CPU count, but never fewer than 1:
    ``int(n * 0.8)`` truncates to 0 on a single-core machine, and
    ``psutil.cpu_count()`` can return ``None`` when the count cannot be
    determined (which would previously raise a TypeError here).
    """
    logical_cpus = psutil.cpu_count(logical=True)  # total logical processors
    if logical_cpus is None:
        # psutil could not determine the count; fall back to the stdlib.
        logical_cpus = os.cpu_count() or 1
    return max(1, int(logical_cpus * 0.8))

def get_settingsFile_dir():
    """Return the absolute path of the directory containing this settings file."""
    return os.path.dirname(os.path.abspath(__file__))

def get_out_subFolder():
    """Build the output sub-folder name: ``D{buffer}_res{resolution}_YYYYMMDD``.

    Reads the module-level ``buffer_distance`` and ``resolution`` settings and
    appends today's date so each run writes into a dated folder.
    """
    today = datetime.now().strftime("%Y%m%d")
    return f'D{buffer_distance}_res{resolution}_{today}'
########***********【Step 1】: raw (original) input data***********#############
# Full path of the original GeoPackage file and the layer to read from it.
raw_data_path = get_settingsFile_dir() + '/data/RasterUnits20220905.gpkg'
layer_name = "RasterUnits20220905"
# Comma-separated predictor (X) column names in the layer.
column_strings = 'NDVI_2010,LU_2013,Factor_坡,Factor__1,Factor_斜,Factor_地,Factor__12,Factor__13,Aster_DEM_,' \
                  'Factor_流,Factor__14,Factor_河,Factor_水,Factor_工,Factor_断,Factor__15,Factor__16,RainTmp_20'
# Target (label) column: whether the unit is a landslide.
y_column_name = 'y_isLandsl'
# Columns holding each unit's spatial coordinates.
spatial_column_names = ['posX', 'posY']
# Unique identifier column.
id_column = 'id'

# Alternative dataset (Wanzhou), kept for quick switching — uncomment to use.
# raw_data_path = get_settingsFile_dir() + '/data/万州RasterUnits20250930.gpkg'
# layer_name = "万州RasterUnits20250930"
# column_strings = 'A1,A10,A11,A12,A13,A14,A2,A3,A4,A5,A6,A7,A8,A9,B1,B2,C1,C2,C3,C4,C5,C6,C7,C8,' \
#                   'Earthquake,Land_use,NDVI,NDWI,Rainfall'
# y_column_name = 'y_isLandsl'
# spatial_column_names = ['posX', 'posY']
# id_column = 'ID'

buffer_distance = 32  # neighbourhood buffer distance (presumably metres, matching the CRS — TODO confirm)
resolution = 30       # raster cell resolution
# The 18 factors above were selected with VIF < 7.5.
x_column_names = column_strings.split(',')
#----------------------------------------------------------------------


########***********【Step 2】: data preparation [data_preparer.py] — split raw data into train/val/test***********#############
split_params = dict(
    # Directory receiving the train/val/test splits of the original gpkg data.
    split_gpkg_saved_dir=get_settingsFile_dir() + '/data/data_split_csv',
    # Sub-folder name used when splitting the raw csv into train/val/test sets.
    split_csv_saved_dir=f'{layer_name}val0.2test0.2',
)
#----------------------------------------------------------------------

#######*********gwlsa [training parameters] and [load-model] prediction settings*********###############
#-------------DATA_LOAD_DIR may contain pkl or csv data, already split into train/val/test-----
num_workers = get_num_workers()

# Per-machine output/data locations; the hostnames identify the author's workstations.
_hostname = platform.node()
if _hostname == 'Desktop-AD12400':
    # Onda 12400 desktop (DESKTOP-CBRG2H1).
    gwlsa_train_outputs = 'C:/tmp/neighborhood-enhanced-gwlsa/results(predCSV)'
    DATA_LOAD_DIR = get_settingsFile_dir() + '/data/data_split_csv/RasterUnits20220905_20240915_025555_D32res30'
elif _hostname == 'DESKTOP-EKK62VH':
    # Huanan X99 workstation.
    gwlsa_train_outputs = 'C:/tmp/neighborhood-enhanced-gwlsa-outputs'
    DATA_LOAD_DIR = get_settingsFile_dir() + '/data/data_split_csv/RasterUnits20220905_20250923_214521_D32res30'
    # The X99 box only has 96 GB RAM; fewer loader threads eases memory pressure.
    num_workers = 6
elif _hostname == 'DESKTOP-3OROMHA':
    # Office AMD 5800 machine.
    gwlsa_train_outputs = 'F:/tmp/neighborhood-enhanced-gwlsa-outputs'
    DATA_LOAD_DIR = get_settingsFile_dir() + '/data/data_split_csv/RasterUnits20220905_20251009_154641_D32res30'
    num_workers = 10
else:
    # Anything else is assumed to be a cloud compute server.
    gwlsa_train_outputs = get_settingsFile_dir() + '/tmp'
    DATA_LOAD_DIR = get_settingsFile_dir() + '/data/data_split_csv/RasterUnits20220905_20250905_000537_D32res30'
    # Adjust to the cloud server's actual CPU count as needed.
    num_workers = 4


#######*********gwlsa, DNN, LSTM, LR_RF [data loading] settings*********###############
#-------------DATA_LOAD_DIR may contain pkl or csv data, already split into train/val/test-----
# When reading from csv, the data must already be split into the three files below.
TRAIN_CSV_FILENAME_LIST = ['train.csv']  # training set
VAL_CSV_FILENAME_LIST = ['val.csv']      # validation set
TEST_CSV_FILENAME_LIST = ['test.csv']    # test set
#----------------------------------------------------------------------


#######*********DNN, LSTM, LR_RF [output directory] settings*********###############
out_subFolder = get_out_subFolder()
# Root of all run outputs; each category below gets a dated sub-folder.
_outputs_root = get_settingsFile_dir() + '/outputs'
# Prediction result csv files.
PREDICTED_Y_DIR = f'{_outputs_root}/results(predCSV)/{out_subFolder}'
# fpr/tpr (ROC) values.
FPR_TPR_DIR = f'{_outputs_root}/results(fprTpr)/{out_subFolder}'
# Trained model checkpoints (effective for DNN and LSTM).
SAVED_MODEL_DIR = f'{_outputs_root}/saved_models/{out_subFolder}'
# Saved plot images.
SAVED_PLOTS = f'{_outputs_root}/results(pics)/{out_subFolder}'

# Columns to drop when running DNN, LSTM, LR_RF.
DROP_LIST = [None]
DROP_LIST_DICT = {"None": "Base"}
#####################################################

# Network training / prediction hyper-parameters shared by the model scripts.
net_params = {
        'data_load_dir': DATA_LOAD_DIR,  # directory the train/val/test sets are loaded from during training
        'data_load_format': "parquet",  # format of the train/val/test data in that directory: pkl, parquet or csv
        'raw_data_path': raw_data_path,  # full path of the original gpkg data
        'layer_name': layer_name,  # layer name inside the original gpkg data
        'csv_encoding': 'utf-8',
        'max_distance': buffer_distance,  # neighbourhood buffer distance
        'resolution': resolution,  # raster cell resolution
        'x_column_names': x_column_names,
        'y_column_name': y_column_name,
        'spatial_column_names': spatial_column_names,
        'id_column': id_column,
        'num_workers': num_workers,  # DataLoader worker processes (per-host value chosen above)
        'max_epochs': 300,
        'test_ratio': 0.2,
        'valid_ratio': 0.2,
        'batch_size': 256,
        'max_val_size': 1024,
        'max_test_size': 1024,
        'train_outputs': gwlsa_train_outputs,  # all model results produced during training go to this directory
        'dense_layers': [512, 512, 512, 512],  # hidden-layer widths
        'lr': 0.01,
        'optimizer': "Adagrad",
        'activate_func': nn.PReLU(init=0.1),
        'drop_out': 0.2,
        'show_train_metrics': False,  # print evaluation metrics (AUC, R2, AIC, ...) during training
        'show_bestAIC_result': True,  # print the accuracy of the model with the best AIC
        'show_bestAUC_result': False,  # print the accuracy of the model with the best AUC
        'show_bestR2_result': False,  # print the accuracy of the model with the best R2
        'load_model_path': r'F:\tmp\neighborhood-enhanced-gwlsa-outputs\gwlsa20250928_234516\20250928_234516.bestAIC.pkl',  # file suffix must match 'model_postfix' below
        'model_postfix': '.bestAIC.pkl'
    }

if __name__ == '__main__':
    # Quick sanity check of the environment helpers when run directly.
    for helper in (get_settingsFile_dir, get_num_workers):
        print(helper())