import os
import pandas as pd

# Resolve all data paths relative to this script's directory so the script
# works regardless of the current working directory.
home = os.path.abspath(os.path.dirname(__file__))
# IDIOM FIX: build paths with os.path.join instead of string concatenation
# (values are identical on POSIX, where the original '+' form was used).
raw_data_path = os.path.join(home, 'data', 'initial.csv')
feature_selected_file_name = '/data/feature_selected.csv'
feature_selected_path = os.path.join(home, 'data', 'feature_selected.csv')

# Columns dropped up front as not useful for the downstream model.
# NOTE(review): these look like Lending Club loan fields; many appear to be
# identifiers (id, member_id, url), free text (desc, emp_title, title), or
# post-origination information (payment/recovery/settlement/hardship fields)
# that would leak the outcome — but the selection rationale is not recorded
# here, so confirm against the original feature analysis before changing it.
useless_columns = [
    "acc_now_delinq",
    "application_type",
    "annual_inc_joint",
    "all_util",
    "bc_open_to_buy",
    "collection_recovery_fee",
    "collections_12_mths_ex_med",
    "chargeoff_within_12_mths",
    "desc",
    "delinq_amnt",
    "dti_joint",
    "disbursement_method",
    "debt_settlement_flag",
    "debt_settlement_flag_date",
    "earliest_cr_line",
    "emp_title",
    "fico_range_high",
    "fico_range_low",
    "funded_amnt",
    "funded_amnt_inv",
    "hardship_payoff_balance_amount",
    "hardship_last_payment_amount",
    "hardship_flag",
    "hardship_type",
    "hardship_reason",
    "hardship_status",
    "deferral_term",
    "hardship_amount",
    "hardship_start_date",
    "hardship_end_date",
    "payment_plan_start_date",
    "hardship_length",
    "hardship_dpd",
    "hardship_loan_status",
    "id",
    "il_util",
    "installment",
    "int_rate",
    "issue_d",
    "initial_list_status",
    "inq_fi",
    "inq_last_12m",
    "last_credit_pull_d",
    "last_pymnt_d",
    "loan_amnt",
    "max_bal_bc",
    "member_id",
    "mths_since_last_major_derog",
    "mths_since_recent_bc_dlq",
    "mths_since_recent_revol_delinq",
    "mths_since_last_delinq",
    "mths_since_last_record",
    "mths_since_rcnt_il",
    "next_pymnt_d",
    "open_acc_6m",
    "open_act_il",
    "open_il_12m",
    "open_il_24m",
    "out_prncp",
    "out_prncp_inv",
    "orig_projected_additional_accrued_interest",
    "policy_code",
    "pub_rec",
    "pub_rec_bankruptcies",
    "purpose",
    "pymnt_plan",
    "recoveries",
    "revol_bal_joint",
    "sec_app_fico_range_low",
    "sec_app_fico_range_high",
    "sec_app_earliest_cr_line",
    "sec_app_inq_last_6mths",
    "sec_app_mort_acc",
    "sec_app_open_acc",
    "sec_app_revol_util",
    "sec_app_open_act_il",
    "sec_app_num_rev_accts",
    "sec_app_chargeoff_within_12_mths",
    "sec_app_collections_12_mths_ex_med",
    "sec_app_mths_since_last_major_derog",
    "settlement_status",
    "settlement_date",
    "settlement_amount",
    "settlement_percentage",
    "settlement_term",
    "sub_grade",
    "tax_liens",
    "term",
    "title",
    "total_cu_tl",
    "total_bal_il",
    "open_rv_12m",
    "open_rv_24m",
    "total_pymnt",
    "total_pymnt_inv",
    "total_rec_int",
    "total_rec_late_fee",
    "total_rec_prncp",
    "total_rev_hi_lim",
    "url",
    "verification_status_joint",
    "zip_code"
]


def get_unique_value_columns(l_data):
    """Return the column names whose non-null values are all identical.

    A column with no non-null values at all does NOT qualify (it has zero
    distinct values, not one); such columns are handled by the null-rate
    filter instead.
    """
    return [name for name in l_data.columns
            if l_data[name].nunique(dropna=True) == 1]


def get_too_many_null_columns(l_data, rate=0.3):
    """Return names of columns whose fraction of nulls exceeds *rate*.

    *rate* defaults to 0.3, i.e. columns more than 30% null are flagged.
    """
    # isnull().mean() is the per-column null fraction (sum of nulls / row count).
    null_fraction = l_data.isnull().mean()
    return list(null_fraction[null_fraction > rate].index)


# process null value
def processData(data):
    """Fill null values in *data* in place and return it.

    - 'emp_length': NaNs are filled with the mode '10+ years', then the
      textual buckets are mapped to integer codes 0..10 ('< 1 year' -> 0,
      '1 year' -> 1, ..., '10+ years' -> 10).
    - every non-object (numeric) column containing NaNs is filled with its
      column mean rounded to 2 decimal places.
    """
    years = ['< 1 year', '1 year', '2 years', '3 years', '4 years', '5 years', '6 years',
             '7 years', '8 years', '9 years', '10+ years']
    # Fill emp_length with the mode ('10+ years').
    print(data['emp_length'].value_counts())
    # BUG FIX: the original body referenced the global `loans_data` instead of
    # the `data` parameter, and re-read the column from the global on every
    # loop iteration — it only worked because `data` aliased the global.
    # A single dict-based replace maps every bucket in one pass.
    year_to_code = {label: code for code, label in enumerate(years)}
    data['emp_length'] = data['emp_length'].fillna('10+ years').replace(year_to_code)
    # sub_grade  home_ownership verification_status loan_status addr_state

    # Numeric columns: fill NaNs with the column mean (rounded to 2 decimals).
    for col in data.columns.values:
        if data[col].dtype != object and data[col].isnull().sum() > 0:
            data[col] = data[col].fillna(round(data[col].mean(), 2))
    return data


if __name__ == '__main__':
    loans_data = pd.read_csv(raw_data_path)
    # Drop the hand-picked irrelevant columns. errors='ignore' keeps the
    # pipeline running if the input CSV lacks some of the listed columns.
    print(useless_columns)
    if useless_columns:
        loans_data = loans_data.drop(useless_columns, axis=1, errors='ignore')
    # Drop columns that are mostly null (> 30% by default).
    too_many_null_columns = get_too_many_null_columns(loans_data)
    print(too_many_null_columns)
    if too_many_null_columns:
        loans_data = loans_data.drop(too_many_null_columns, axis=1)
    # Drop columns that carry a single constant value.
    unique_value_columns = get_unique_value_columns(loans_data)
    print(unique_value_columns)
    if unique_value_columns:
        loans_data = loans_data.drop(unique_value_columns, axis=1)
    # BUG FIX: drop_duplicates() returns a new DataFrame; the original call
    # discarded the result, so duplicate rows were never actually removed.
    loans_data = loans_data.drop_duplicates()
    # NOTE(review): processData() is defined above but never invoked here —
    # null filling is presumably done in a later pipeline step; confirm.
    # Save the feature-selected data.
    loans_data.to_csv(feature_selected_path, index=False)
