import model
import pandas as pd
import pickle
from process import columns3
import lightgbm as lgb
from sklearn.model_selection import cross_val_predict,KFold,cross_validate
from sklearn.metrics import mean_absolute_error,r2_score
from sklearn.utils import shuffle

from sklearn.ensemble import RandomForestRegressor
from lightgbm import LGBMRegressor

def run_training_cv(df, category_fields, numeric_fields, yfield, rs, regressor_name, **kwargs):
    """Cross-validate a regressor on *df*, print CV metrics, then fit on all rows.

    Parameters
    ----------
    df : pandas.DataFrame containing both the feature columns and the target.
    category_fields : list of categorical feature column names.
    numeric_fields : list of numeric feature column names.
    yfield : name of the target column.
    rs : random seed used to shuffle the training rows.
    regressor_name : regressor class (e.g. LGBMRegressor); instantiated once
        for cross-validation and once for the final fit.
    **kwargs : forwarded to the regressor constructor.

    Returns
    -------
    tuple of (fitted regressor, onehot_encoder) — the encoder is required to
    transform prediction-time data consistently (see run_predict).
    """
    x_part = df[category_fields + numeric_fields]
    # x_part already holds exactly these columns; no need to re-select with .loc.
    data_train, onehot_encoder = model.convert_traindf_to_matrix(
        x_part, category_fields, numeric_fields)
    target_train = df.loc[:, yfield].values

    data_train, target_train = shuffle(data_train, target_train, random_state=rs)

    # Out-of-fold predictions give an honest estimate of generalization error.
    y_cv = cross_val_predict(regressor_name(**kwargs), data_train, target_train, cv=5)
    # BUG FIX: sklearn metrics take (y_true, y_pred). mean_absolute_error is
    # symmetric, but r2_score is not — the original swapped order reported a
    # wrong R^2.
    mae = mean_absolute_error(target_train, y_cv)
    r2 = r2_score(target_train, y_cv)
    print("\navg : mae:{}\tr2:{}".format(mae, r2))

    # Refit on the full (shuffled) training set for the model we return.
    regressor = regressor_name(**kwargs)
    regressor.fit(data_train, target_train)

    return regressor, onehot_encoder


def run_predict(df_predict, category_fields, numeric_fields, onehot_encoder, *regressors):
    """Average predictions from one or more fitted regressors over *df_predict*.

    Writes an integer 'predict' column onto *df_predict* (equal-weight mean of
    all regressors, truncated via astype(int)), prints MAE/R^2 against the
    'remain_seconds' column, and returns the mutated frame.

    Parameters
    ----------
    df_predict : pandas.DataFrame with the feature columns and 'remain_seconds'.
    category_fields / numeric_fields : feature column name lists.
    onehot_encoder : encoder produced by the matching training call, so
        categorical encoding matches what the regressors were fit on.
    *regressors : one or more fitted regressors exposing .predict().
    """
    x_pred = model.convert_testdf_to_matrix(
        df_predict[category_fields + numeric_fields],
        category_fields, numeric_fields, onehot_encoder)

    df_predict.loc[:, 'predict'] = 0
    for regressor in regressors:
        y_pred = regressor.predict(x_pred)
        # Equal-weight ensemble average across all supplied regressors.
        df_predict.loc[:, 'predict'] += y_pred / len(regressors)
    df_predict.loc[:, 'predict'] = df_predict['predict'].astype(int)

    # BUG FIX: sklearn metrics take (y_true, y_pred); r2_score is not
    # symmetric, so the original argument order reported a wrong R^2.
    mae = mean_absolute_error(df_predict['remain_seconds'], df_predict['predict'].values)
    r2 = r2_score(df_predict['remain_seconds'], df_predict['predict'].values)
    print("avg : mae:{}\tr2:{}".format(mae, r2))

    return df_predict

def convert_df_2_lgbm(df, category_fields):
    """Encode *category_fields* of *df* in-place for LightGBM consumption.

    Fits a category mapping with model.fit_onehots and applies it with
    model.transform_onehots — callers subsequently read "<col>_oh" columns,
    so the transform presumably adds encoded columns under that suffix.
    """
    mapping = model.fit_onehots(df, category_fields)
    model.transform_onehots(df, mapping)


def run_lgbm_cv(df, category_fields, numeric_fields):
    """Encode categorical columns in-place, then run LightGBM 4-fold CV.

    Prints the raw CV results, the best boosting-round count (length of the
    metric history when early stopping cuts it off), and the best CV l2 score.

    Parameters
    ----------
    df : pandas.DataFrame with feature columns and 'remain_seconds' target.
    category_fields / numeric_fields : feature column name lists.
    """
    convert_df_2_lgbm(df, category_fields)
    oh_category_fields = [x + "_oh" for x in category_fields]

    # Removed dead code: an unused df[:5000]/df[5000:] split left over from a
    # commented-out train/eval experiment — CV uses the whole frame.
    lgb_train = lgb.Dataset(
        df[oh_category_fields + numeric_fields],
        label=df['remain_seconds'],
        categorical_feature=list(range(len(oh_category_fields))))

    params = {
        'task': 'train',
        'objective': 'regression',  # objective function
        'metric': {'l2', 'auc'},  # evaluation metrics
        'num_leaves': 30,  # leaf count per tree
        'learning_rate': 0.1,
        'n_estimators': 500,
        'verbose': 1,  # <0 fatal only, =0 errors/warnings, >0 info

        'feature_fraction': 0.9,  # feature subsample ratio per tree
        'bagging_fraction': 0.8,  # row subsample ratio per tree
        'bagging_freq': 5,  # perform bagging every k iterations
    }
    print('Start training...')
    cv_results = lgb.cv(params, lgb_train, nfold=4, metrics='mse',
                        early_stopping_rounds=100)
    print(cv_results)
    # 'mse' is LightGBM's alias for l2, so results are keyed 'l2-mean'.
    print('best n_estimators:', len(cv_results['l2-mean']))
    print('best cv score:', pd.Series(cv_results['l2-mean']).min())


def run_lgbm(df_train, df_test, category_fields, numeric_fields):
    """Train a LightGBM booster on *df_train* and print test-set MAE.

    The categorical mapping is fit on *df_train* only and applied in-place to
    both frames, so train and test share one encoding.
    """
    encoder_map = model.fit_onehots(df_train, category_fields)
    for frame in (df_train, df_test):
        model.transform_onehots(frame, encoder_map)
    encoded_cols = [name + "_oh" for name in category_fields]

    params = {
        'task': 'train',
        'objective': 'regression',  # objective function
        'metric': {'l2', 'auc'},  # evaluation metrics
        'num_leaves': 31,  # leaf count per tree
        'learning_rate': 0.1,
        'n_estimators': 300,
        'verbose': 1,  # <0 fatal only, =0 errors/warnings, >0 info

        'feature_fraction': 0.9,  # feature subsample ratio per tree
        'bagging_fraction': 0.8,  # row subsample ratio per tree
        'bagging_freq': 5,  # perform bagging every k iterations
    }

    feature_cols = encoded_cols + numeric_fields
    train_set = lgb.Dataset(df_train[feature_cols], label=df_train['remain_seconds'],)
    booster = lgb.train(params, train_set, num_boost_round=30,
                        categorical_feature=list(range(len(encoded_cols))))
    predictions = booster.predict(df_test[feature_cols])
    print(mean_absolute_error(predictions, df_test['remain_seconds']))


def train_predict(category_fields, numeric_fields):
    """Fit LightGBM on train_out.csv and write test_out.csv predictions to predict.csv.

    The categorical mapping is fit on the training frame only and applied
    in-place to both frames; the output CSV carries the ground truth, the
    prediction, and the identifying columns.
    """
    train_df = pd.read_csv("train_out.csv")
    predict_df = pd.read_csv("test_out.csv")
    print("train : {} ".format(len(train_df)))
    mapping = model.fit_onehots(train_df, category_fields)
    model.transform_onehots(train_df, mapping)
    model.transform_onehots(predict_df, mapping)
    encoded_cols = [name + "_oh" for name in category_fields]

    params = {
        'task': 'train',
        'objective': 'regression',  # objective function
        'metric': {'l2', 'auc'},  # evaluation metrics
        'num_leaves': 31,  # leaf count per tree
        'learning_rate': 0.1,
        'n_estimators': 300,
        'verbose': 1,  # <0 fatal only, =0 errors/warnings, >0 info

        'feature_fraction': 0.9,  # feature subsample ratio per tree
        'bagging_fraction': 0.8,  # row subsample ratio per tree
        'bagging_freq': 5,  # perform bagging every k iterations
    }

    feature_cols = encoded_cols + numeric_fields
    train_set = lgb.Dataset(train_df[feature_cols], label=train_df['remain_seconds'],)
    booster = lgb.train(params, train_set, num_boost_round=30,
                        categorical_feature=list(range(len(encoded_cols))))

    predict_df.loc[:, 'predict'] = booster.predict(predict_df[feature_cols])
    predict_df[['remain_seconds', "predict", "loadingOrder", "timestamp"]].to_csv("predict.csv", index=False)


def manual_test(category_fields, numeric_fields):
    """Load train/eval CSVs, keep eval rows with dst_anchor == 1, run run_lgbm.

    Both frames are re-labelled with the shared `columns3` schema from
    process.py. The training frame is intentionally left unfiltered (a
    dst_anchor filter on it was disabled), so its count is printed twice.
    """
    train_df = pd.read_csv("train_out.csv")
    train_df.columns = columns3
    print("train : {}".format(len(train_df)), end="\t")
    print(" > {}".format(len(train_df)))

    test_df = pd.read_csv("eval_out.csv")
    test_df.columns = columns3
    print("test : {}".format(len(test_df)), end="\t")
    test_df = test_df[test_df['dst_anchor'] == 1]
    print(" > {}".format(len(test_df)))

    run_lgbm(train_df, test_df, category_fields, numeric_fields)


if __name__ == "__main__":
    # Feature column definitions shared by the training/prediction pipeline.
    category_fields = [
        "anchor", "dst_anchor", "carrierName", "vesselMMSI",
        "block_start", "block_current", "block_end",
    ]
    numeric_fields = [
        "speed1", "relative_direction", "relative_distance",
        "relative_x", "relative_y", "abs_x", "abs_y", "sum_distance",
    ]

    train_predict(category_fields, numeric_fields)