import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings('ignore')

# Load the raw datasets ("训练集.csv" = training set, "测试集.csv" = test set).
train = pd.read_csv('训练集.csv')
test = pd.read_csv('测试集.csv')

# 更好的特征工程
def _count_amenities(value):
    """Count comma-separated amenities in a brace-wrapped string like "{wifi,tv}".

    Returns 0 for missing values (NaN) and for an empty "{}" — the original
    lambda crashed on NaN (no ``.lstrip`` on float) and counted "{}" as 1.
    """
    if pd.isna(value):
        return 0
    inner = str(value).lstrip('{').rstrip('}').strip()
    return len(inner.split(',')) if inner else 0


def feature_engineering(df):
    """Derive model features in place on a copy of *df* and return it.

    Works on both train and test frames: price filtering, host response rate
    cleanup and date expansion are applied only when the columns exist.
    """
    # Number of listed amenities (robust to NaN / empty sets).
    df['便利设施数量'] = df['便利设施'].apply(_count_amenities)

    # Target-side cleanup (training set only): drop rows with non-positive price.
    if '价格' in df.columns:
        df = df[df['价格'] > 0].copy()

    # Room-related ratios; +1 in denominators avoids division by zero.
    df['房间总数'] = df['卧室数量'] + df['洗手间数量']
    df['人均房间数'] = df['房间总数'] / (df['容纳人数'] + 1)
    df['床位密度'] = df['床的数量'] / (df['容纳人数'] + 1)

    # Geographic feature: Euclidean norm of (latitude, longitude).
    # NOTE(review): '维度' looks like a misspelling of '纬度' (latitude) but is
    # kept as-is because it must match the CSV column name.
    df['经纬度距离'] = np.sqrt(df['维度']**2 + df['经度']**2)

    # Review-related features.
    df['有评论'] = (df['评论个数'] > 0).astype(int)
    df['评论密度'] = df['评论个数'] / (df['容纳人数'] + 1)

    # Host profile completeness: profile picture present + identity verified
    # (source columns hold 't'/'f' flags).
    df['房主完整度'] = (df['房主是否有个人资料图片'].astype(str) == 't').astype(int) + \
                    (df['房主身份是否验证'].astype(str) == 't').astype(int)

    # Expand date columns into year/month plus a missingness indicator.
    date_cols = ['首次评论日期', '最近评论日期', '何时成为房主']
    for col in date_cols:
        if col in df.columns:
            df[col] = pd.to_datetime(df[col], errors='coerce')
            df[f'{col}_年'] = df[col].dt.year
            df[f'{col}_月'] = df[col].dt.month
            df[f'{col}_是否缺失'] = df[col].isnull().astype(int)

    # Host response rate: strip '%', map empty strings to 0, coerce the rest.
    if '房主回复率' in df.columns:
        df['房主回复率'] = df['房主回复率'].astype(str).str.replace('%', '').replace('', '0')
        df['房主回复率'] = pd.to_numeric(df['房主回复率'], errors='coerce').fillna(0)

    return df

# Apply feature engineering to both splits. The training split also has its
# price<=0 rows dropped inside feature_engineering, so the shape printed below
# already reflects outlier removal — the original second print repeated the
# exact same shape under a misleading "after outlier removal" label.
train = feature_engineering(train)
test = feature_engineering(test)

print(f"特征工程后训练集形状: {train.shape}")

# Columns excluded from modeling: the row ID, the target, and the raw
# text/date columns that were already expanded into derived features.
no_features = ['数据ID', '价格', '便利设施', '首次评论日期', '最近评论日期', '何时成为房主']

# Label-encode the remaining object columns. The encoder is fitted on the
# concatenation of train and test so both splits share one consistent
# category-to-integer mapping.
data = pd.concat([train, test], axis=0)
categorical_cols = [c for c in train.select_dtypes(include=['object']).columns
                    if c not in no_features]
for col in categorical_cols:
    encoder = LabelEncoder().fit(data[col].astype(str))
    train[col] = encoder.transform(train[col].astype(str))
    test[col] = encoder.transform(test[col].astype(str))

# Everything not explicitly excluded is a model feature.
features = [col for col in train.columns if col not in no_features]
print(f"使用的特征数量: {len(features)}")

# Assemble model matrices. Explicit .copy() so the scaled values below are
# written into standalone DataFrames rather than views of train/test —
# without it the assignment triggers pandas' SettingWithCopy behavior
# (currently hidden by the warnings filter) and may be silently lost.
X = train[features].copy()
y = train['价格']
X_test = test[features].copy()

# Standardize numeric features; the same fitted scaler is reused on the test
# set so both splits share identical scaling statistics.
# NOTE(review): fitting happens before the train/validation split, so the
# validation fold leaks into the scaler's mean/std — minor for a quick test.
scaler = StandardScaler()
numeric_features = X.select_dtypes(include=[np.number]).columns
X[numeric_features] = scaler.fit_transform(X[numeric_features])
X_test[numeric_features] = scaler.transform(X_test[numeric_features])

# Hold out 20% for validation; fixed seed keeps the split reproducible.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# Tuned LightGBM hyperparameters.
params = {
    'learning_rate': 0.05,  # lower learning rate, compensated by more boosting rounds
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': 'rmse',
    'feature_fraction': 0.8,  # column subsampling per tree
    'bagging_fraction': 0.8,  # row subsampling
    'bagging_freq': 5,  # re-sample rows every 5 iterations
    'num_leaves': 500,  # NOTE(review): with max_depth=8 a tree has at most 2**8=256 leaves, so 500 never binds — confirm intent
    'verbose': -1,
    'max_depth': 8,  # cap tree depth
    'seed': 2019,
    'reg_alpha': 0.1,  # L1 regularization
    'reg_lambda': 0.1,  # L2 regularization
}

# Train with early stopping on the held-out validation fold: up to 2000 trees,
# stop after 100 rounds without RMSE improvement, log progress every 200 rounds.
model = lgb.LGBMRegressor(**params, n_estimators=2000)
model.fit(X_train, y_train, eval_set=[(X_val, y_val)], eval_metric='rmse', 
          callbacks=[lgb.log_evaluation(200), lgb.early_stopping(100)])

# Validation score. NOTE(review): this is plain MSE while training monitors
# RMSE — the printed number is the square of the early-stopping metric.
y_pred = model.predict(X_val)
score = mean_squared_error(y_val, y_pred)
print(f'优化后MSE分数: {score:.2f}')

# Rank the inputs by the trained model's importance scores, highest first,
# and display the ten most influential features.
feature_importance = (
    pd.DataFrame({'feature': features,
                  'importance': model.feature_importances_})
    .sort_values('importance', ascending=False)
)

print("\n前10个重要特征:")
print(feature_importance.head(10))