""""
随机什么衡量特征重要性
"""
# import pandas as pd
# from sklearn.ensemble import RandomForestRegressor
# from sklearn.model_selection import train_test_split
#
# # 读取预处理后的数据
# df = pd.read_csv('processed_data_with_bands.csv')
#
# # 指定特征列和目标列
# # 请根据实际情况替换 'cloudsat_cbh' 为你的目标变量名称
# features = ['fy_cth_log', 'fy_crf_scaled', 'fy_ctt_scaled', 'fy_ctp_scaled', 'fy_olr_scaled',
#             'fy_lat_sin', 'fy_lat_cos', 'fy_lon_sin', 'fy_lon_cos',
#             'fy_band1_scaled', 'fy_band2_scaled', 'fy_band5_scaled', 'fy_band6_scaled', 'fy_band7_scaled',
#             'fy_band12_scaled', 'fy_band13_scaled']
# target = 'cloudsat_cbh'  # 替换为实际的目标变量列名
#
# X = df[features]
# y = df[target]
#
# # 划分训练集和测试集
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
#
# # 使用随机森林模型
# model = RandomForestRegressor(n_estimators=100, random_state=42)
# model.fit(X_train, y_train)
#
# # 提取特征重要性
# importances = model.feature_importances_
# feature_importances = pd.Series(importances, index=features)
#
# # 按重要性排序
# feature_importances = feature_importances.sort_values(ascending=False)
# print(feature_importances)
#
#
# # 根据重要性进行筛选
# # 假设我们保留重要性大于某个阈值的特征（如大于0.05）
# important_features = feature_importances[feature_importances > 0.05].index
# print("保留的重要特征:", important_features)
#
# # 使用这些重要特征进行后续建模
# X_train_filtered = X_train[important_features]
# X_test_filtered = X_test[important_features]

""""
"""
import pandas as pd
from xgboost import XGBRegressor  # use XGBClassifier instead for classification tasks
from sklearn.model_selection import train_test_split

# Path to the preprocessed input data.
DATA_PATH = 'processed_data_with_bands.csv'

# Feature columns fed to the model; adjust to match the actual dataset.
FEATURES = ['fy_cth_log', 'fy_crf_scaled', 'fy_ctt_scaled', 'fy_ctp_scaled', 'fy_olr_scaled',
            'fy_lat_sin', 'fy_lat_cos', 'fy_lon_sin', 'fy_lon_cos',
            'fy_band1_scaled', 'fy_band2_scaled', 'fy_band5_scaled', 'fy_band6_scaled', 'fy_band7_scaled',
            'fy_band12_scaled', 'fy_band13_scaled']

# Target column; replace 'cloudsat_cbh' with the actual target variable name.
TARGET = 'cloudsat_cbh'


def compute_feature_importances(df, features=FEATURES, target=TARGET, *,
                                test_size=0.2, n_estimators=100, random_state=42):
    """Fit an XGBoost regressor and return its feature importances.

    Splits *df* into train/test partitions, fits an ``XGBRegressor`` on the
    training partition only, and returns the learned importances.

    Parameters
    ----------
    df : pandas.DataFrame
        Preprocessed data containing all *features* columns and *target*.
    features : list[str]
        Names of the input feature columns.
    target : str
        Name of the target column.
    test_size : float
        Fraction of rows held out from fitting (kept for a consistent split
        with downstream modelling; the held-out part is not used here).
    n_estimators : int
        Number of boosting rounds for the XGBoost model.
    random_state : int
        Seed for both the split and the model, for reproducibility.

    Returns
    -------
    pandas.Series
        Importance per feature, indexed by feature name, sorted descending.
    """
    X = df[features]
    y = df[target]

    # The test partition is intentionally unused here — the split only ensures
    # importances are estimated on the same training rows later models will see.
    X_train, _X_test, y_train, _y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state)

    model = XGBRegressor(n_estimators=n_estimators, random_state=random_state)
    model.fit(X_train, y_train)

    return (pd.Series(model.feature_importances_, index=features)
              .sort_values(ascending=False))


def main():
    """Load the preprocessed data, compute and print feature importances."""
    df = pd.read_csv(DATA_PATH)
    importances = compute_feature_importances(df)
    print(importances)

    # Optional: filter features by an importance threshold, e.g.
    # important_features = importances[importances > 0.05].index
    # print("保留的重要特征:", important_features)


if __name__ == "__main__":
    main()