import lightgbm as lgb
import pandas as pd
import numpy as np
from xgboost import XGBClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import GradientBoostingClassifier


def gbdt_model(train_data, y, test_data):
    """Derive one-hot encoded GBDT leaf-index features for train and test sets.

    Fits a GradientBoostingClassifier on the training data, maps every sample
    to the indices of the leaves it falls into, and one-hot encodes those leaf
    indices (the classic GBDT + LR feature-engineering scheme).

    Parameters
    ----------
    train_data : pandas.DataFrame
        Training features; its index is carried over to the returned frame.
    y : array-like
        Training labels.
    test_data : pandas.DataFrame
        Test features; its index is carried over to the returned frame.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        One-hot leaf-index feature frames for the train and test sets.
    """
    gbdt = GradientBoostingClassifier(
        learning_rate=0.1,
        n_estimators=10,
        max_depth=5,
        subsample=0.8,
        random_state=5,
    )
    gbdt.fit(train_data, y)

    # apply() returns shape (n_samples, n_estimators, n_classes_per_stage);
    # flatten each sample's leaf indices into one row.  reshape(n, -1) is
    # equivalent to the previous np.squeeze for binary classification, but
    # also handles multiclass, where squeeze would have left a 3-D array
    # that OneHotEncoder cannot consume.
    X_train_leaves = gbdt.apply(train_data).reshape(len(train_data), -1)
    X_test_leaves = gbdt.apply(test_data).reshape(len(test_data), -1)

    # Fit the encoder on train + test leaves together so leaf indices that
    # appear only in the test set do not raise "unknown category" errors
    # at transform time.
    encoder = OneHotEncoder()
    encoder.fit(np.concatenate((X_train_leaves, X_test_leaves), axis=0))

    train_onehot = encoder.transform(X_train_leaves).toarray()
    test_onehot = encoder.transform(X_test_leaves).toarray()

    gbdt_onehot_train = pd.DataFrame(train_onehot, index=train_data.index)
    gbdt_onehot_test = pd.DataFrame(test_onehot, index=test_data.index)

    return gbdt_onehot_train, gbdt_onehot_test