import os
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression

# Full set of ad-click feature columns available in the training CSV.
all_feature_columns = ["time", "click_times", "creative_id", "industry",
                       "advertiser_id","product_category","product_id",
                       "ad_id"]

# Feature columns actually fed to the model.
# NOTE(review): currently identical to all_feature_columns — confirm whether
# two separate lists are intentional (e.g. for future feature selection).
feature_column = ["time", "click_times", "creative_id", "industry",
                       "advertiser_id","product_category","product_id",
                       "ad_id"]

# Prediction targets present in the data; train() uses index 1 ("gender").
target_columns = ["age", "gender"]

# Absolute path to the preliminary-round click log joined with user/ad info.
# NOTE(review): hard-coded host-specific path — verify it exists before running.
train_dir = "/home/datanfs/macong_data/tencent_data/train_preliminary/click_uid_ad.csv"

def train():
    """Train a LightGBM binary classifier (target: gender) with 5-fold CV.

    Reads the click data from the module-level ``train_dir`` CSV, runs
    5-fold cross validation over ``feature_column``, and prints per-fold
    f1/accuracy plus the mean validation logloss and accuracy.

    Raises:
        FileNotFoundError: if ``train_dir`` does not exist.
    """
    # Model setup: predict gender (target_columns[1]) as a binary task.
    target_column = target_columns[1]
    params = {
        'boosting_type': 'gbdt',
        'objective': 'binary',
        'metric': 'binary_logloss',
        'num_leaves': 32,
        'learning_rate': 0.05,
        'feature_fraction': 0.9,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'verbose': 1
    }

    # Print basic run info. (Chinese banners kept verbatim; they say:
    # "basic LightGBM, target is gender, binary classification",
    # "model basic info", "missing values are NOT imputed".)
    print("### 基础LightGbm,预测目标是性别，二分类 ###")
    print("### 模型基本信息")
    print("### 不填不缺失值")
    print("feature:", feature_column)
    print("label:", target_column)
    print("max_iter:", 1000)
    print("n_jobs:", -1)
    print("k_fold:", 5)
    print("params:", params)
    # Fail fast with the correct exception type when the input file is
    # missing (the original raised FileExistsError, which means the opposite).
    if not os.path.exists(train_dir):
        print(train_dir, "not exist")
        raise FileNotFoundError(train_dir)
    train_df = pd.read_csv(train_dir)

    # Missing values are intentionally left as-is (see banner above).
    kf = KFold(n_splits=5, shuffle=True, random_state=42)
    X = np.array(train_df[feature_column])
    y = np.array(train_df[target_column])

    xx_logloss = []
    xx_acc = []

    for k, (train_in, test_in) in enumerate(kf.split(X, y)):
        X_train, X_test = X[train_in], X[test_in]
        y_train, y_test = y[train_in], y[test_in]
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        # NOTE(review): early_stopping_rounds/verbose_eval kwargs were removed
        # in LightGBM 4.x in favor of lgb.early_stopping()/lgb.log_evaluation()
        # callbacks — confirm the installed LightGBM version supports them.
        gbm = lgb.train(params,
                        lgb_train,
                        num_boost_round=5000,
                        valid_sets=lgb_eval,
                        early_stopping_rounds=50,
                        verbose_eval=50,
                        )
        # Predict once per fold and threshold probabilities at 0.5
        # (the original ran the expensive predict() twice).
        y_pred = np.where(
            gbm.predict(X_test, num_iteration=gbm.best_iteration) > 0.5, 1, 0)
        print("f1:", f1_score(y_test, y_pred))
        acc_s = accuracy_score(y_test, y_pred)
        print("acc:", acc_s)

        xx_logloss.append(gbm.best_score['valid_0']['binary_logloss'])
        xx_acc.append(acc_s)
    # Python lists have no .mean(); the original crashed here with
    # AttributeError — use np.mean instead.
    print("logloss mean:", np.mean(xx_logloss))
    print("acc_score mean:", np.mean(xx_acc))

def unit_test():
    """Smoke test: run one full end-to-end training pass.

    Not an isolated unit test — it reads the real CSV at ``train_dir`` and
    trains the model via train().
    """
    print("### processing unit test ###")
    train()


if __name__ == '__main__':
    # Script entry point: run the smoke test (trains end-to-end).
    unit_test()