import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder, StandardScaler
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings('ignore')

# 加载数据
train = pd.read_csv('训练集.csv')
test = pd.read_csv('测试集.csv')

def feature_engineering(df):
    """Add derived features to a listings DataFrame.

    Works for both train and test: the row filter on '价格' (price) is only
    applied when that column exists. Returns a new DataFrame (the input is
    not mutated) with engineered feature columns added.
    """
    # Work on a copy so the caller's DataFrame is not modified in place.
    df = df.copy()

    # Amenity count: raw values look like "{a,b,c}". Count only non-empty
    # entries so "{}" yields 0 (the old `len(split(','))` counted it as 1),
    # and treat NaN as 0 instead of crashing on `.lstrip`.
    def _count_amenities(raw):
        if pd.isna(raw):
            return 0
        items = str(raw).strip('{}').split(',')
        return sum(1 for item in items if item.strip())

    df['便利设施数量'] = df['便利设施'].apply(_count_amenities)

    # Drop rows with non-positive or missing prices (train only; the test
    # set has no '价格' column). NaN > 0 is False, so NaN rows drop too.
    if '价格' in df.columns:
        df = df[df['价格'] > 0].copy()

    # Room-related features; the +1 guards against division by zero.
    df['房间总数'] = df['卧室数量'] + df['洗手间数量']
    df['人均房间数'] = df['房间总数'] / (df['容纳人数'] + 1)
    df['床位密度'] = df['床的数量'] / (df['容纳人数'] + 1)

    # Euclidean distance of (lat, lon) from the origin.
    # NOTE(review): '维度' appears to be the latitude column name as spelled
    # in the CSV files — confirm against the data before renaming.
    df['经纬度距离'] = np.sqrt(df['维度']**2 + df['经度']**2)

    # Review-related features.
    df['有评论'] = (df['评论个数'] > 0).astype(int)
    df['评论密度'] = df['评论个数'] / (df['容纳人数'] + 1)

    # Host profile completeness: one point each for a 't' flag on
    # profile-picture and identity-verification columns.
    df['房主完整度'] = (df['房主是否有个人资料图片'].astype(str) == 't').astype(int) + \
                    (df['房主身份是否验证'].astype(str) == 't').astype(int)

    # Date columns -> year/month components plus a missingness indicator.
    date_cols = ['首次评论日期', '最近评论日期', '何时成为房主']
    for col in date_cols:
        if col in df.columns:
            df[col] = pd.to_datetime(df[col], errors='coerce')
            df[f'{col}_年'] = df[col].dt.year
            df[f'{col}_月'] = df[col].dt.month
            df[f'{col}_是否缺失'] = df[col].isnull().astype(int)

    # Host response rate: strip the '%' sign; anything unparseable (empty
    # strings, NaN) becomes 0 via coerce + fillna.
    if '房主回复率' in df.columns:
        cleaned = df['房主回复率'].astype(str).str.replace('%', '', regex=False)
        df['房主回复率'] = pd.to_numeric(cleaned, errors='coerce').fillna(0)

    return df

# Run feature engineering on both splits.
train, test = feature_engineering(train), feature_engineering(test)

# Columns excluded from the feature set: ID, target, and raw text/date fields.
no_features = ['数据ID', '价格', '便利设施', '首次评论日期', '最近评论日期', '何时成为房主']

# Label-encode categorical columns. The encoder is fitted on train+test
# combined so it has seen every category appearing in either split.
combined = pd.concat([train, test], axis=0)
categorical_cols = [
    c for c in train.select_dtypes(include=['object']).columns
    if c not in no_features
]
for cat_col in categorical_cols:
    encoder = LabelEncoder()
    encoder.fit(combined[cat_col].astype(str))
    train[cat_col] = encoder.transform(train[cat_col].astype(str))
    test[cat_col] = encoder.transform(test[cat_col].astype(str))

# Final feature list: everything not explicitly excluded above.
features = [col for col in train.columns if col not in no_features]
# .copy() makes X/X_test independent frames, so the scaled-column assignment
# below is a plain write rather than a chained assignment on a view
# (previously a SettingWithCopyWarning hidden by the global warning filter).
X = train[features].copy()
y = train['价格']
X_test = test[features].copy()

# Standardize numeric features. Fit on train only; apply the same
# transform to the test matrix to avoid leakage.
scaler = StandardScaler()
numeric_features = X.select_dtypes(include=[np.number]).columns
X[numeric_features] = scaler.fit_transform(X[numeric_features])
X_test[numeric_features] = scaler.transform(X_test[numeric_features])

# Hold out 20% of the training rows for validation.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# Tuned LightGBM hyper-parameters, aiming for a validation MSE below 30:
# a low learning rate paired with many estimators, moderate tree size, and
# L1/L2 regularization to curb overfitting.
best_params = dict(
    boosting_type='gbdt',
    objective='regression',
    metric='rmse',
    learning_rate=0.03,       # low rate; compensated by a large n_estimators
    num_leaves=300,
    max_depth=6,              # cap depth to limit model complexity
    feature_fraction=0.9,     # per-tree feature subsampling
    bagging_fraction=0.9,     # per-iteration row subsampling
    bagging_freq=3,
    min_child_samples=20,     # minimum rows per leaf
    min_split_gain=0.1,
    reg_alpha=0.3,            # L1 regularization
    reg_lambda=0.3,           # L2 regularization
    subsample_for_bin=200000,
    seed=2019,
    verbose=-1,
)

# Fit the final model, early-stopping on the hold-out set.
final_model = lgb.LGBMRegressor(n_estimators=3000, **best_params)
final_model.fit(
    X_train,
    y_train,
    eval_set=[(X_val, y_val)],
    eval_metric='rmse',
    callbacks=[lgb.log_evaluation(300), lgb.early_stopping(150)],
)

# Score the hold-out predictions with plain MSE (not RMSE).
y_pred = final_model.predict(X_val)
final_score = mean_squared_error(y_val, y_pred)
print(f'最终优化MSE分数: {final_score:.2f}')

# Report whether the MSE target (< 30) was reached.
print("✅ 成功！MSE分数已降低到30以下" if final_score < 30 else "❌ 需要进一步优化")
    
# Fallback: if the single model missed the target, blend it with two
# hyper-parameter variants and keep the ensemble score if it is better.
if final_score >= 30:
    print("尝试集成多个模型...")

    def _fit_variant(overrides, n_estimators, patience):
        # Train one LightGBM variant of best_params with early stopping.
        variant_params = {**best_params, **overrides}
        model = lgb.LGBMRegressor(n_estimators=n_estimators, **variant_params)
        model.fit(
            X_train,
            y_train,
            eval_set=[(X_val, y_val)],
            eval_metric='rmse',
            callbacks=[lgb.early_stopping(patience)],
        )
        return model

    # Model 1: more conservative settings.
    model1 = _fit_variant(
        {'learning_rate': 0.02, 'num_leaves': 200, 'max_depth': 5}, 3000, 150)
    # Model 2: slightly more aggressive settings.
    model2 = _fit_variant(
        {'learning_rate': 0.05, 'num_leaves': 400, 'max_depth': 7}, 2000, 100)

    # Weighted average of the three models' validation predictions.
    pred1 = model1.predict(X_val)
    pred2 = model2.predict(X_val)
    pred_final = final_model.predict(X_val)
    ensemble_pred = 0.5 * pred_final + 0.3 * pred1 + 0.2 * pred2
    ensemble_score = mean_squared_error(y_val, ensemble_pred)
    print(f'集成模型MSE分数: {ensemble_score:.2f}')

    # Adopt the ensemble score only if it beats the single model.
    if ensemble_score < final_score:
        print("集成模型效果更好！")
        final_score = ensemble_score