
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
#from catboost import CatBoostRegressor
import lightgbm as lgb
import xgboost as xgb
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import VotingRegressor
from sklearn.svm import SVR

# Load the dataset from CSV.
file_path = 'data.csv'
data = pd.read_csv(file_path)

# Inspect the first few rows to confirm the expected column layout.
print(data.head())

# Prepare features and target.
# NOTE(review): the original comment said the advertiser ID and
# optimization ID are "the first two columns", but iloc[:, 3:-1]
# actually drops the first THREE columns — confirm whether 2:-1
# was intended or whether there is a third ID/index column.
X = data.iloc[:, 3:-1]  # feature columns (first three and last column excluded)
y = data.iloc[:, -1]    # target variable (score) — last column

# Train/test split: 80/20, fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# XGBoost regressor: fit on the training split, evaluate on the held-out split.
model_xgb = xgb.XGBRegressor(n_estimators=100, learning_rate=0.1, random_state=42)
model_xgb.fit(X_train, y_train)

# Report mean absolute error on the test set.
mae_xgb = mean_absolute_error(y_test, model_xgb.predict(X_test))
print(f'XGBoost MAE: {mae_xgb}')

# LightGBM regressor: same protocol as the other models — fit, predict, MAE.
model_lgb = lgb.LGBMRegressor(n_estimators=100, learning_rate=0.1, random_state=42)
model_lgb.fit(X_train, y_train)

# Report mean absolute error on the test set.
mae_lgb = mean_absolute_error(y_test, model_lgb.predict(X_test))
print(f'LightGBM MAE: {mae_lgb}')

# CatBoost regressor.
# BUG FIX: `CatBoostRegressor` was used here although its import at the top
# of the file is commented out, so this line raised NameError at runtime.
# Import it locally and skip this model gracefully when the optional
# third-party catboost package is not installed.
try:
    from catboost import CatBoostRegressor
except ImportError:
    print('catboost is not installed; skipping the CatBoost model.')
else:
    cat_model = CatBoostRegressor(iterations=100, learning_rate=0.1, depth=6, random_seed=42, verbose=0)
    cat_model.fit(X_train, y_train)
    y_pred_cat = cat_model.predict(X_test)

    # Report mean absolute error on the test set.
    mae_cat = mean_absolute_error(y_test, y_pred_cat)
    print(f'CatBoost MAE: {mae_cat}')

# Heterogeneous voting ensemble: averages the predictions of four base
# regressors (linear, shallow tree, k-NN, linear SVR).
base_estimators = [
    ('lr', LinearRegression()),
    ('tree', DecisionTreeRegressor(max_depth=4, random_state=42)),
    ('knn', KNeighborsRegressor(n_neighbors=15)),
    ('svm', SVR(kernel='linear', C=0.1)),
]
voting_reg = VotingRegressor(estimators=base_estimators)

# Fit the ensemble and evaluate on the held-out split.
voting_reg.fit(X_train, y_train)
mae_voting = mean_absolute_error(y_test, voting_reg.predict(X_test))
print(f'Voting Regressor MAE: {mae_voting}')

# Random-forest baseline: fit, predict, and report test-set MAE.
# (`rf_model` is persisted to disk at the end of the script — keep the name.)
rf_model = RandomForestRegressor(n_estimators=100, random_state=42)
rf_model.fit(X_train, y_train)
rf_predictions = rf_model.predict(X_test)
mae_rf = mean_absolute_error(y_test, rf_predictions)
print(f'Random Forest MAE: {mae_rf}')

# Gradient-boosting baseline: fit, predict, and report test-set MAE.
# (`gb_model` is persisted to disk at the end of the script — keep the name.)
gb_model = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1, random_state=42)
gb_model.fit(X_train, y_train)
gb_predictions = gb_model.predict(X_test)
mae_gb = mean_absolute_error(y_test, gb_predictions)
print(f'Gradient Boosting MAE: {mae_gb}')

# Persist the fitted sklearn ensemble models for later reuse.
import joblib

for fitted_model, output_path in [
    (rf_model, 'random_forest_model.pkl'),
    (gb_model, 'gradient_boosting_model.pkl'),
]:
    joblib.dump(fitted_model, output_path)
