# %% [markdown]
# ## Import the required packages

# %%
import time
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder

# %% [markdown]
# ## Load the data

# %%
# File names are Chinese for "training set" / "test set".
train = pd.read_csv('训练集.csv')
test = pd.read_csv('测试集.csv')

# %%
# Inspect the column names (bare expression: rendered by the notebook).
train.columns

# %%
# Print dataset sizes as (rows, columns).
print(train.shape,test.shape)

# %%
# Missing-value count per column: total rows minus non-null count.
train.shape[0]-train.count()

# %%
# Inspect column dtypes to spot which columns still need encoding.
train.dtypes

# %%
# Kernel-density plot of the target column ('价格' = price).
train['价格'].plot.kde()

# %% [markdown]
# ## Data preprocessing  
# ### Categorical-variable preprocessing

# %%
# Which columns are still raw strings?
train.select_dtypes(include=['object']).columns

# %%
def _amenity_count(raw):
    """Count comma-separated items inside a '{...}'-wrapped amenities string."""
    return len(raw.lstrip('{').rstrip('}').split(','))

# New numeric feature '便利设施数量' (= amenity count) derived from the
# raw '便利设施' (= amenities) text column.
train['便利设施数量'] = train['便利设施'].map(_amenity_count)
test['便利设施数量'] = test['便利设施'].map(_amenity_count)

train['便利设施数量'].head()

# %%
# Columns excluded from model inputs: row ID, target price, raw amenities text.
no_features = ['数据ID', '价格', '便利设施']

# %%
# Simple ordinal label-encoding for the remaining string columns.
# The encoder is fit on the concatenation of train and test so that
# categories appearing only in the test set still receive a code.
data = pd.concat([train, test], axis=0)
for col in train.select_dtypes(include=['object']).columns:
    if col in no_features:
        continue
    encoder = LabelEncoder()
    encoder.fit(data[col].astype(str))
    train[col] = encoder.transform(train[col].astype(str))
    test[col] = encoder.transform(test[col].astype(str))

# %%
# Peek at the encoded frame.
train.head(2)

# %%
# Model input columns: everything not excluded above.
features = [c for c in train.columns if c not in no_features]
features

# %%
X = train[features]        # training inputs
y = train['价格']          # training target (price)
X_test = test[features]    # test inputs

# %% [markdown]
# ## Model training  
# 
# - Define a LightGBM regression model  
# - Train with 5-fold cross-validation  
# 

# %%
# Fixed-seed 5-fold split so the CV partition is reproducible.
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=True,random_state=1314)

# %%
# Hand-me-down hyper-parameters (lit. "ancestral params") reused across projects.
params = {
    'learning_rate': 0.1,
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': 'mae',              # validation metric reported during training
    'feature_fraction': 0.6,      # column subsampling per tree
    'bagging_fraction': 0.8,      # row subsampling
    'bagging_freq': 5,            # re-bag every 5 iterations
    'num_leaves': 1000,           # very large -> high-capacity trees
    'verbose': -1,                # silence LightGBM's per-iteration logging
    'max_depth': -1,              # no depth limit
    'seed': 2019,
    # 'n_jobs': -1,
    # 'device': 'gpu',
    # 'gpu_device_id': 0,
}




# %%
# 5-fold CV training. Builds:
#   oof        - out-of-fold predictions for every training row (for scoring)
#   prediction - test-set prediction averaged over the fold models
oof = np.zeros(len(X))
prediction = np.zeros(len(X_test))
for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
    # X already equals train[features]; the original re-selected X[features]
    # redundantly. .iloc is used for y too so indexing is positional.
    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
    y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
    model = lgb.LGBMRegressor(**params, n_estimators=50000, n_jobs=-1)
    # NOTE: the `verbose=` and `early_stopping_rounds=` keyword arguments to
    # fit() were removed in LightGBM 4.x; the callbacks API below is the
    # supported equivalent (same behavior: stop after 200 rounds without
    # improvement, log every 50 iterations).
    model.fit(
        X_train, y_train,
        eval_set=[(X_train, y_train), (X_valid, y_valid)],
        eval_metric='rmse',  # reported alongside params' 'mae' — TODO confirm intended
        callbacks=[
            lgb.early_stopping(stopping_rounds=200),
            lgb.log_evaluation(period=50),
        ],
    )
    # predict() uses the best iteration by default after early stopping;
    # num_iteration is passed explicitly for the test-set prediction as before.
    oof[valid_index] = model.predict(X_valid)
    prediction += model.predict(X_test, num_iteration=model.best_iteration_)
prediction /= n_fold

# %%
# Out-of-fold evaluation: MSE between the true prices and the stacked
# validation-fold predictions. (Argument order follows the sklearn
# convention (y_true, y_pred); MSE is symmetric so the value is unchanged.)
from sklearn.metrics import mean_squared_error

score = mean_squared_error(train['价格'].values, oof)
score

# %% [markdown]
# ## Submit results

# %%
# Write the averaged test predictions into the submission file.
test['价格'] = prediction
# NOTE(review): the original wrote 'sub.csv'.format(score) — a no-op, since the
# string has no '{}' placeholder; presumably 'sub_{}.csv'.format(score) was
# intended. The plain filename is kept to preserve the original output path.
test[['数据ID', '价格']].to_csv('sub.csv', index=False)

# %%
# Sanity-check the submission frame.
test[['数据ID', '价格']].head()

# %% [markdown]
# ## TODO  
# - Engineer finer-grained features  
# - Try different models  
# - Model ensembling / blending  
# - ...


