import pandas as pd
from matplotlib import pyplot
import xgboost as xgb
from xgboost import plot_importance
import os.path as path

from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split

from data_preprocessing import generate_feature
from score import cal_score_tianchi

# from tqdm import tqdm

# Root folder holding the competition data dumps (relative to this script).
data_base = '../data'

train_set_base = path.join(data_base, 'zhaopin_round1_train_20190716')
test_set_base = path.join(data_base, 'zhaopin_round1_test_20190716')

# True  -> hold out part of the train users and score locally;
# False -> train on everything and write a submission file.
# (Dispatched at the bottom of this module.)
local_cross_validation = False

# Read Train Set

# Users
train_set_user = pd.read_table(path.join(train_set_base, 'table1_user'))
# Jobs. error_bad_lines=False silently skips malformed rows.
# NOTE(review): this flag was removed in pandas 2.0 (replaced by
# on_bad_lines) — keep pandas < 2.0 or migrate when upgrading.
job_information = pd.read_table(path.join(train_set_base, 'table2_jd'), error_bad_lines=False)
# Actions
train_set_action = pd.read_table(path.join(train_set_base, 'table3_action'))

# Read Test Set

# Users
test_set_user = pd.read_table(path.join(test_set_base, 'user_ToBePredicted'))
# Target List (space-delimited (user_id, jd_no) exposure pairs to be scored)
test_set_action = pd.read_table(path.join(data_base, 'zhaopin_round1_user_exposure_B_20190819'), delimiter=' ')

# Train labels: copy 'satisfied' into the shared target column; test rows
# get a 0.0 placeholder (real scores are predicted later).
train_set_action['satisfied_score'] = train_set_action['satisfied']
test_set_action['satisfied_score'] = float(0.0)

# Merge Tables

# train_set_action INNER JOIN train_set_user ON user_id INNER JOIN job_information ON jd_no
train_table = pd.merge(train_set_action, train_set_user, how='inner', on=['user_id'])
train_table = pd.merge(train_table, job_information, how='inner', on=['jd_no'])

# test_set_action INNER JOIN test_set_user ON user_id INNER JOIN job_information ON jd_no
# NOTE(review): inner joins drop exposure pairs whose user or job is
# missing from the side tables; do_predict backfills them with a sentinel.
test_table = pd.merge(test_set_action, test_set_user, how='inner', on=['user_id'])
test_table = pd.merge(test_table, job_information, how='inner', on=['jd_no'])

# Drop unused / useless columns
test_table.drop(['company_name', 'max_edu_level', 'is_mangerial', 'resume_language_required'], axis=1, inplace=True)
train_table.drop(['company_name', 'max_edu_level', 'is_mangerial', 'resume_language_required'], axis=1, inplace=True)

# print(train_set_action[['satisfied_score']].describe())

# Derived features from data_preprocessing (applied to both frames).
test_table = generate_feature(test_table)
train_table = generate_feature(train_table)

# Concatenated train+test, used only to fit the label encoders over the
# full category vocabulary (see generate_x_and_y_pair).
all_data_set_table = pd.concat([train_table, test_table])
target_name = 'satisfied_score'


def generate_x_and_y_pair(*data_frames):
    """Turn merged frames into (X, y) pairs ready for xgb.DMatrix.

    Categorical columns are label-encoded with encoders fitted on
    ``all_data_set_table`` (train + test concatenated) so the same string
    always maps to the same integer code in every frame, including labels
    that only occur in the test set.

    :param data_frames: merged user/job/action frames.
    :return: list of ``(x, y)`` tuples in input order; ``y`` is ``None``
             for frames without the target column (i.e. the test set).
    """
    result = []
    category_features = ['city_eq_jd_city_1','city_eq_jd_city_2','city_eq_jd_city_3',
                         'work_years_satisfied','salary_more_than_desire','salary_less_than_desire',
                         'salary_more_than_cur','is_travel','city', 'cur_industry_id', 'cur_jd_type', 'cur_salary',
                         'cur_salary_id']
    using_features = ['desire_jd_city_cnt','work_years','desire_salary','min_years','cur_salary','experience_num',
                      'min_edu_level','cur_degree_id','require_nums','max_salary','min_salary','birthday',
                      'desire_jd_type_score','job_match_score','degree_distance','curr_jd_type_score']

    # Fit one encoder per categorical feature on the full data set so the
    # test frame never raises "unseen label" at transform time.
    # (print() form is valid in both Python 2 and 3; the old `print x`
    # statement was a SyntaxError under Python 3.)
    print('Fit label encoder')
    label_dict = {}
    for feature in category_features:
        label_encoder = LabelEncoder()
        label_encoder.fit(all_data_set_table[feature].astype(str))
        label_dict[feature] = label_encoder

    # De-duplicate while preserving order ('cur_salary' appears in both
    # lists). A deterministic column order keeps the model's feature order
    # stable across runs, unlike list(set(...)) under hash randomization.
    using_features.extend(category_features)
    seen = set()
    using_features = [f for f in using_features if not (f in seen or seen.add(f))]

    for data_frame_index, data_frame in enumerate(data_frames):
        # .copy() gives an independent frame so the encoded assignments
        # below do not trigger pandas' SettingWithCopyWarning.
        x = data_frame[using_features].copy()
        if target_name in data_frame.columns:
            y = data_frame[[target_name]]
        else:
            y = None

        for feature in category_features:
            x[feature] = label_dict[feature].transform(x[feature].astype(str))

        print('Generated pair ' + str(data_frame_index))

        result.append((x, y))
    return result


# XGBoost booster configuration shared by both training paths.
params = {
            'booster': 'gbtree',
            'objective': 'binary:logistic',  # binary target -> probability scores
            'eta': 0.025,                    # learning rate
            'max_depth': 8,
            'eval_metric': 'logloss',
            'seed': 508,                     # fixed seed for reproducibility
            'missing': -1,                   # NOTE(review): 'missing' is a DMatrix argument, not a train param — confirm it has any effect here
            'silent': 1                      # NOTE(review): deprecated in xgboost >= 1.0 (use 'verbosity')
        }

# Upper bound on boosting rounds; early stopping may end training sooner.
max_rounds = 250


def train_xgb_model(xgb_train):
    """Train an XGBoost booster on *xgb_train* using the module-level
    ``params`` and ``max_rounds``.

    NOTE(review): the same DMatrix is watched under both the 'train' and
    'test' labels, so early stopping monitors training loss — presumably
    intentional here; confirm before tightening.
    """
    evals = [(xgb_train, 'train'), (xgb_train, 'test')]
    return xgb.train(params, xgb_train, max_rounds, evals,
                     early_stopping_rounds=15)


def do_cross_validation():
    """Hold out ~20% of train users, train on the rest, and report a
    local score via ``cal_score_tianchi``; also plots feature importance.

    Reads/writes the user split as CSVs under '../result/' so the same
    split can be reused across runs (the directory is assumed to exist).
    """

    # True: reuse a previously saved user split; False: create a fresh one.
    using_split_user_file = True

    if not using_split_user_file:
        user_ids = train_set_user['user_id'].unique()
        train_users,  validate_users = train_test_split(user_ids, test_size = 0.2 )
        train_user_frame = pd.DataFrame(train_users)
        validate_user_frame = pd.DataFrame(validate_users)
        train_user_frame.to_csv('../result/train_users.csv',header=False,index=False)
        validate_user_frame.to_csv('../result/validate_users.csv',header=False,index=False)
    else:
        train_user_frame = pd.read_csv('../result/train_users.csv',names=['user_id'])
        validate_user_frame = pd.read_csv('../result/validate_users.csv',names=['user_id'])
        train_users = train_user_frame['user_id']
        validate_users = validate_user_frame['user_id']

    # Split rows by user so no user appears in both train and validation.
    local_train = train_table[(train_table['user_id'].isin(train_users))].reset_index(drop=True)
    local_validate = train_table[(train_table['user_id'].isin(validate_users))].reset_index(drop=True)
    result = generate_x_and_y_pair(local_train, local_validate)
    t_x, t_y = result[0]
    test_x, test_y = result[1]
    xgb_train = xgb.DMatrix(t_x, t_y)
    xgb_test = xgb.DMatrix(test_x)

    model = train_xgb_model(xgb_train)
    predict_score = model.predict(xgb_test)

    # NOTE(review): this is a view of local_validate; assigning 'score'
    # below triggers pandas' SettingWithCopyWarning — a .copy() would fix it.
    submit = local_validate[['user_id', 'jd_no']]
    submit['score'] = predict_score
    submit = submit.reset_index(drop=True)
    # Left-join predictions back onto the full action table; rows without a
    # prediction (users outside the validation split) get a sentinel score
    # so they rank last.
    result = pd.merge(train_set_action, submit, how='left', on=['user_id', 'jd_no'])
    result.fillna(-100000, inplace=True)

    # Rank each user's jobs by predicted score (descending) before scoring.
    result = result.groupby('user_id', as_index=False).apply(lambda x: x.sort_values('score', ascending=False))
    cal_score_tianchi(result)
    plot_importance(model)
    pyplot.show()


def do_predict():
    """Train on the full train set, score the test exposure list, and
    write the ranked submission to '../result/submission.csv'.

    Side effects: trains an XGBoost model and writes one CSV file; the
    '../result/' directory is assumed to exist.
    """
    pairs = generate_x_and_y_pair(train_table, test_table)
    train_x, train_y = pairs[0]
    test_x, _ = pairs[1]
    xgb_train = xgb.DMatrix(train_x, train_y)
    xgb_test = xgb.DMatrix(test_x)

    model = train_xgb_model(xgb_train)
    predict_score = model.predict(xgb_test)

    # .copy() so assigning 'score' below writes to an independent frame
    # rather than a view of test_table (avoids SettingWithCopyWarning and
    # a potentially lost write).
    submit = test_table[['user_id','jd_no']].copy()
    submit['score'] = predict_score
    submit = submit.reset_index(drop=True)
    # Attach predictions to the full exposure list; pairs dropped by the
    # inner joins get a sentinel score so they sort to the bottom.
    result = pd.merge(test_set_action,submit,how='left',on=['user_id','jd_no'])
    result.fillna(-100000,inplace=True)

    # Rank each user's jobs by predicted score (descending), then emit the
    # ordered (user_id, jd_no) pairs, de-duplicated, as the submission.
    result = result.groupby('user_id',as_index=False).apply(lambda x:x.sort_values('score',ascending=False))
    result[['user_id','jd_no']].drop_duplicates().to_csv('../result/submission.csv',index=False)


# Entry point: importing this module immediately trains/predicts.
# NOTE(review): consider guarding with ``if __name__ == '__main__':``.
if local_cross_validation:
    do_cross_validation()
else:
    do_predict()