import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer  # 用来填补缺失值
from sklearn.model_selection import GridSearchCV
import find_feature

from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier



# Dataset file locations (relative to this script's working directory).
train_file = "../dataset/train.csv"        # training samples with labels
test_file = "../dataset/test.csv"          # samples to predict
res_file = "../dataset/res.csv"            # prediction output
tmp_file = "../dataset/tmp.csv"            # scratch file for describe() stats
importance_file = "../dataset/important.txt"  # feature-importance ranking

def main():
    """End-to-end training pipeline for the loan-default classifier.

    Reads the train/test CSVs, cleans the data (inf replacement, artificial
    missing-value injection followed by mean imputation), standardizes the
    features, trains a random forest, and writes the feature-importance
    ranking and the test-set predictions to disk.
    """
    # Load the data.
    train_data = pd.read_csv(train_file)
    test_data = pd.read_csv(test_file)
    # Extract the target label.
    train_label = train_data['loan default']

    # Outlier handling: inf values were observed in the raw data; replace
    # them with 1.12 (constant chosen by manual inspection).
    train_data.replace(np.inf, 1.12, inplace=True)
    test_data.replace(np.inf, 1.12, inplace=True)

    # Exploratory plots for feature selection (run manually when needed):
    # find_feature.ageFeature(train_data)
    # find_feature.everyFeature(train_data, 'average age')

    # Drop the id column (no predictive value) and the label column.
    train_data = train_data.drop('personal id', axis=1)
    test_data = test_data.drop('personal id', axis=1)
    train_data = train_data.drop('loan default', axis=1)

    # Record sample and feature counts (expected roughly 120000 x 50).
    n_samples = train_data.shape[0]
    n_features = train_data.shape[1]
    print(n_samples, n_features)

    # Dump descriptive statistics to the scratch file for inspection.
    # Fix: to_csv opens and closes the file itself -- the previous read-mode
    # open() around this call was useless and crashed with FileNotFoundError
    # when the scratch file did not exist yet.
    train_data.describe(percentiles=[.1, .2, .3, .4, .5, .6, .7, .8, .9, .99]).to_csv(tmp_file)

    # Artificially erase 25% of the cells (fixed seed for reproducibility),
    # then restore them via mean imputation.
    rnd = np.random.RandomState(1)  # deterministic RNG
    n_samples_missing = int(n_samples * n_features * 0.25)  # number of cells to erase
    # Random row and column indices of the cells to blank out.
    missing_samples_list = rnd.randint(0, n_samples, n_samples_missing)
    missing_features_list = rnd.randint(0, n_features, n_samples_missing)
    # Work on a numpy copy so NaNs can be written cell-wise.
    X_train = np.array(train_data)
    X_train[missing_samples_list, missing_features_list] = np.nan
    x_missing = pd.DataFrame(X_train)

    # Fill the NaNs with each column's mean.
    imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
    x_missing_mean = imp_mean.fit_transform(x_missing)

    # Rebuild the DataFrame, preserving the original column names.
    colnames = train_data.columns.values.tolist()
    train_data = pd.DataFrame(x_missing_mean, index=list(range(x_missing_mean.shape[0])), columns=colnames)
    print(train_data.columns.values.tolist())

    # Refresh the statistics file after imputation (same fix as above:
    # no manual open() around to_csv).
    train_data.describe(percentiles=[.1, .2, .3, .4, .5, .6, .7, .8, .9, .99]).to_csv(tmp_file)

    # Columns judged uninformative (by eye, or from the exploratory plots).
    # NOTE(review): this list is built but never applied -- dropping these
    # columns would change the model's inputs, so it is kept only as a record
    # of the manual analysis. Apply with train_data.drop(list_drop, axis=1)
    # (and the same on test_data) if the pruning is actually intended.
    list_drop = [
        # obviously irrelevant to the prediction target:
        'mobile number flag',
        'id card flag',
        'disbursed date',
        'sub account tenure',
        # judged weakly related from the plotted distributions:
        'manufacturer id',
        'employment type',
        'year of birth',
        'staff code id',
    ]

    # Standardize the features. Fix: the scaler must be fit on the training
    # data only and then applied to the test data, so both splits share the
    # same mean/variance. The previous fit_transform on test_data fit a
    # second, different scaler, making the two splits inconsistent.
    scaler = StandardScaler()
    scalered_train_data = scaler.fit_transform(train_data)
    scalered_test_data = scaler.transform(test_data)

    clf = RandomForestClassifier(n_estimators=500,
                                 # criterion='entropy',
                                 oob_score=True,
                                 max_features=40,
                                 min_samples_split=2,
                                 min_samples_leaf=40,
                                 n_jobs=-1,
                                 class_weight='balanced_subsample',
                                 bootstrap=True)

    # Hyper-parameter grid search (run manually when tuning):
    # parameterset = [{'max_features': [7, 10, 15, 20, 25, 30, 40],
    #                  'min_samples_leaf': [40, 50, 60, 100, 150, 200]}]
    # rf = GridSearchCV(estimator=clf, param_grid=parameterset, cv=5, n_jobs=-1)
    # rf.fit(scalered_train_data, train_label)
    # print('Best parameters:')
    # print(rf.best_params_)
    clf.fit(scalered_train_data, train_label)

    # Persist the feature-importance ranking (highest first). A context
    # manager guarantees the file is closed even if write() raises.
    with open(importance_file, 'w') as important_f:
        important_f.write(
            str(sorted(zip(map(lambda x: round(x, 4), clf.feature_importances_),
                           train_data.columns), reverse=True)))

    # Predict on the test set and write the results.
    result = clf.predict(scalered_test_data)
    pd.DataFrame({'result': result}).to_csv(res_file)



# Script entry point: run the full pipeline only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()