import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

from sklearn.model_selection import train_test_split
import lightgbm as lgb


def get_pd_merged(
        summary_path: str = r"C:\Users\Administrator\PycharmProjects\PythonProject1\data\bwic.xlsx",
        detail_path: str = r"C:\Users\Administrator\PycharmProjects\PythonProject1\data\bid.xlsx",
) -> pd.DataFrame:
    """Load the BWIC summary and bid detail workbooks and inner-join them.

    Parameters
    ----------
    summary_path : str
        Path to the BWIC summary Excel file; must contain a 'bwic_id' column.
    detail_path : str
        Path to the bid detail Excel file; must contain 'bwic_id' and 'bid_id'.

    Returns
    -------
    pd.DataFrame
        Merged frame with the join/id columns dropped. The detail frame's
        original row position survives as an 'index' column (added by
        reset_index) and is removed later by reset_train_df.
    """
    pd.set_option('display.max_columns', None)
    df_summary = pd.read_excel(summary_path)
    df_detail = pd.read_excel(detail_path)
    # Optionally restrict to winning bids only:
    # df_detail = df_detail[df_detail['bid_successful'] == 'Y']
    df_detail.reset_index(inplace=True)  # keep original row order as 'index'
    df_res = df_summary.merge(df_detail, how='inner', on='bwic_id')
    # The id columns are join keys, not features.
    df_res.drop(['bwic_id', 'bid_id'], axis=1, inplace=True)
    print(f"df_detail count num: {df_detail.shape[0]}")
    return df_res


def reset_train_df(df: pd.DataFrame) -> pd.DataFrame:
    """Prepare the merged frame for training.

    Removes the bookkeeping 'index' column (introduced by reset_index in
    get_pd_merged) and converts the known categorical feature columns to
    the pandas 'category' dtype so LightGBM can handle them natively.
    The input frame is not modified; a new frame is returned.
    """
    categorical_cols = (
        'cusip', 'sett_country', 'created_by',
        'request_cp', 'client_id', 'bid_successful',
    )
    prepared = df.drop('index', axis=1)
    prepared = prepared.astype({col: 'category' for col in categorical_cols})
    return prepared


def get_param() -> dict:
    """Return the LightGBM hyper-parameter set used for training.

    Regression objective (MSE) with moderate leaf count, a small learning
    rate, and row/column subsampling for regularization; verbosity is
    suppressed.
    """
    params = dict(
        objective="mean_squared_error",
        num_leaves=60,
        max_depth=-1,          # unlimited depth; num_leaves is the constraint
        learning_rate=0.01,
        bagging_fraction=0.9,  # row subsample per bagging round
        feature_fraction=0.9,  # column subsample per tree
        bagging_freq=5,
        bagging_seed=2018,
        verbosity=-1,
    )
    return params


if __name__ == '__main__':
    # End-to-end pipeline: load & merge data, train a LightGBM regressor
    # for 'starting_amount', evaluate on a held-out split, plot results,
    # and round-trip the model through disk.
    target = "starting_amount"
    df_data = get_pd_merged()
    df_data = reset_train_df(df_data)
    print(df_data)

    # Fixed seed so the train/test partition is reproducible across runs.
    train, test = train_test_split(df_data, random_state=42)
    raw_train, raw_test = train, test
    train_x = train.drop([target], axis=1)
    test_x = test.drop([target], axis=1)
    # 1-D labels (Series, not single-column DataFrames) — the shape
    # LightGBM expects and avoids a column-vector warning.
    train_y = train[target]
    test_y = test[target]
    params = get_param()

    train_ds1 = lgb.Dataset(train_x, train_y)
    # The validation Dataset must reference the training Dataset so both
    # share the same feature binning; without it metrics can be skewed.
    test_ds1 = lgb.Dataset(test_x, test_y, reference=train_ds1)

    # Early stopping guards the large round budget (lr=0.01 needs many
    # rounds, but 10000 blind rounds would badly overfit the train set).
    gbm = lgb.train(
        params,
        train_ds1,
        num_boost_round=10000,
        valid_sets=[train_ds1, test_ds1],
        callbacks=[
            lgb.early_stopping(stopping_rounds=100),
            lgb.log_evaluation(period=500),
        ],
    )

    # Predict with the best iteration found by early stopping.
    predicted_y = gbm.predict(test_x, num_iteration=gbm.best_iteration)
    expected_y = test_y

    rmse = np.sqrt(mean_squared_error(expected_y, predicted_y))
    mae = mean_absolute_error(expected_y, predicted_y)
    r2 = r2_score(expected_y, predicted_y)

    print(f"rmse: {rmse}, mae: {mae}, r2: {r2}")
    gbm.save_model('lightgbm_model.txt')

    # True vs. predicted target on the held-out set.
    plt.figure(figsize=(20, 10))
    plt.plot(np.arange(len(test_x)), test_y, color='r', label='true y')
    plt.plot(np.arange(len(test_x)), predicted_y, color='g', label='pred y')
    plt.title('test_y & predict_y')
    plt.legend(loc='upper right')
    plt.tight_layout()
    plt.show()

    # Index importances by the actual feature columns. The previous
    # list(train.columns)[:-1] silently misaligned names whenever the
    # target was not the last column.
    feat_imp = pd.Series(gbm.feature_importance(), index=train_x.columns)
    feat_imp.nlargest(100).plot(kind='barh', figsize=(10, 10))
    print(f"feat_imp: {feat_imp}")

    # Round-trip check: reload the saved model and predict again.
    load_model = lgb.Booster(model_file='lightgbm_model.txt')
    res = load_model.predict(test_x)
    print(f"res: {res}")

    output = pd.DataFrame({'cusip': list(raw_test['cusip']), "starting_amount": list(res)})
    print(output)
