













# Features flagged as important (presumably from a prior feature-importance
# analysis — confirm against the feature-selection step upstream).
important_features = [
    'Sound_Mean_x_S5_CO2',
    'Sound_Max',
    'Light_Mean_x_Sound_Mean',
    'Light_Max',
    'Sound_Var',
    'Light_Mean',
    'Light_Var',
    'Sound_Mean',
    'Temp_Mean_x_Light_Mean',
    'Light_Mean_x_S5_CO2',
]

# Scale the important features and the remaining features separately:
# standardization (zero mean, unit variance) for the important ones,
# min-max scaling into [0, 1] for everything else.
important_scaler = StandardScaler()
other_scaler = MinMaxScaler(feature_range=(0, 1))













# Build sliding-window sequences: each sample is `sequence_length`
# consecutive rows of X_scaled; the target is the y value that
# immediately follows the window.
X_sequences = []
y_sequences = []

for i in range(len(X_scaled) - sequence_length):
    X_sequences.append(X_scaled[i:i+sequence_length])
    y_sequences.append(y_scaled[i+sequence_length])


def create_lstm_model(input_shape, lstm_units=64, dropout_rate=0.2):
    """Build and compile a bidirectional-LSTM regression model.

    Args:
        input_shape: shape of one input sequence, (timesteps, n_features).
        lstm_units: units in the first (bidirectional) LSTM layer;
            the second LSTM layer uses half as many.
        dropout_rate: dropout applied after each LSTM layer.

    Returns:
        A compiled Keras ``Sequential`` model with a single linear output,
        optimized with Adam (lr=0.001) on mean squared error.
    """
    model = Sequential()

    # Bidirectional LSTM to capture temporal patterns in both directions.
    model.add(Bidirectional(LSTM(units=lstm_units, return_sequences=True), input_shape=input_shape))
    model.add(Dropout(dropout_rate))

    # Second, smaller LSTM layer; fewer layers/units to limit overfitting.
    model.add(LSTM(units=lstm_units // 2))
    model.add(Dropout(dropout_rate))

    # Single-value regression output.
    model.add(Dense(units=1))

    # Compile the model.
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    model.compile(optimizer=optimizer, loss='mean_squared_error')

    return model


# Early stopping with a generous patience so training is not cut off too
# soon; the weights from the best val_loss epoch are restored at the end.
early_stopping = EarlyStopping(
    monitor='val_loss',
    patience=20,
    restore_best_weights=True
)
# Reduce the learning rate when validation loss plateaus.
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.2,
    patience=10,
    min_lr=0.0001
)

# FIX: the original code nested the model definition, the callbacks, and the
# training call inside the sequence-building `for` loop (re-executed every
# iteration) and never instantiated `model` before calling `model.fit`.
# Build the model once here; input_shape = (timesteps, n_features).
# NOTE(review): assumes X_train is a 3-D array (samples, timesteps,
# features) produced from X_sequences upstream — confirm.
model = create_lstm_model((X_train.shape[1], X_train.shape[2]))

# Train with early stopping and LR scheduling on a 20% validation split.
history = model.fit(
    X_train, y_train,
    epochs=150,
    batch_size=32,
    validation_split=0.2,
    callbacks=[early_stopping, reduce_lr],
    verbose=1
)

# Drop time-related columns (if present) — they are not model features.
time_cols = [c for c in ('Date', 'Time', 'DateTime') if c in df.columns]
if time_cols:
    df = df.drop(columns=time_cols)

# Separate the target variable from the feature matrix.
y = df['Room_Occupancy_Count']
X = df.drop(columns=['Room_Occupancy_Count'])



# Random-forest regressor configuration.
rf_params = dict(
    n_estimators=200,      # 200 decision trees in the ensemble
    max_depth=None,        # no depth limit — trees grow fully
    min_samples_split=5,   # a node needs >= 5 samples to split further
    min_samples_leaf=2,    # each leaf keeps at least 2 samples
    max_features='sqrt',   # sqrt(n_features) candidate features per split
    bootstrap=True,        # sample rows with replacement for each tree
    random_state=42,       # fixed seed for reproducibility
    n_jobs=-1,             # train on all available CPU cores
)
model = RandomForestRegressor(**rf_params)



# Fit the random forest on the training split.
model.fit(X_train, y_train)

# Predict on the held-out test set.
y_pred = model.predict(X_test)

# Regression metrics on the test set.
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
rmse = np.sqrt(mse)

# Rank features by their importance in the fitted forest
# (for the feature-importance visualization).
feature_importance = model.feature_importances_
importance_df = (
    pd.DataFrame({'Feature': feature_names, 'Importance': feature_importance})
      .sort_values('Importance', ascending=False)
)

# Drop time-related columns (if present) before modelling.
time_cols = [c for c in ('Date', 'Time', 'DateTime') if c in df.columns]
if time_cols:
    df = df.drop(columns=time_cols)

# Separate the target variable from the feature matrix.
y = df['Room_Occupancy_Count']
X = df.drop(columns=['Room_Occupancy_Count'])

# Train/test split stratified on a 5-bin discretization of the target,
# so both splits see a similar occupancy distribution.
y_binned = pd.cut(y, bins=5, labels=False)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y_binned
)




# XGBoost regressor. `early_stopping_rounds` stops boosting once the
# eval-set metric has not improved for 20 consecutive rounds.
model = xgb.XGBRegressor(
    objective='reg:squarederror',  # regression, optimizing squared error
    n_estimators=200,              # up to 200 boosting rounds
    max_depth=8,                   # maximum tree depth
    learning_rate=0.05,            # small step size for steady learning
    subsample=0.9,                 # each tree sees 90% of the rows
    colsample_bytree=0.9,          # each tree sees 90% of the columns
    gamma=0.1,                     # minimum loss reduction to split a node
    min_child_weight=3,            # minimum instance-weight sum per leaf
    reg_alpha=0.01,                # L1 regularization
    reg_lambda=1,                  # L2 regularization
    random_state=42,               # fixed seed for reproducibility
    n_jobs=-1,                     # use all CPU cores
    eval_metric='rmse',            # metric monitored for early stopping
    early_stopping_rounds=20       # early-stopping patience
)

# FIX: the original passed the *training* data as eval_set, so early
# stopping monitored training RMSE and could never detect overfitting.
# Hold out 10% of the training data as a proper validation set instead
# (the test set stays untouched to avoid leakage into model selection).
X_tr, X_val, y_tr, y_val = train_test_split(
    X_train, y_train, test_size=0.1, random_state=42
)

# Train with early stopping on the validation split.
model.fit(
    X_tr,
    y_tr,
    eval_set=[(X_val, y_val)],
    verbose=False
)


# Predict on the held-out test set.
y_pred = model.predict(X_test)

# Regression metrics on the test set.
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
rmse = np.sqrt(mse)














