# -*- coding: utf-8 -*-
"""
Created on Fri Oct 12 21:46:18 2018

@author: HP
"""
import pandas as pd
import numpy as np
import time
import re
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
import datetime
from my_ort import ORT

# Orthogonal-array (experimental-design) generator; project-local helper class
ort = ORT()

# Evaluation metric
def rmse(y_true, y_pred):
    """Root-mean-squared error of predictions against ground truth.

    Generalised from the original: `np.asarray` accepts any array-like
    (plain lists included), and `np.mean` averages over *all* elements,
    whereas the old `sum(...)/len(y_pred)` divided by the first axis only
    for multi-dimensional input. Behaviour on 1-D numpy arrays is unchanged.
    """
    diff = np.asarray(y_pred) - np.asarray(y_true)
    return np.sqrt(np.mean(diff ** 2))
# For cv use: make_scorer(rmse, greater_is_better=False)

def save_res(res_arr, filename=''):
    """Write a timestamped Kaggle submission CSV under ./res/.

    `res_arr` holds log-space predictions; they are exp()-ed back to the
    price scale.  The Id column is taken from the test set's index.
    """
    test_ids = pd.read_csv('../input/test.csv', index_col=0).index
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H_%M")
    submission = pd.DataFrame({'Id': test_ids, 'SalePrice': np.exp(res_arr)})
    submission.to_csv('./res/' + filename + '%s.csv' % stamp, index=False)

class AdjustParam(object):
    '''
    Orthogonal-array hyper-parameter tuner.

    Attributes populated by run_ort:
    model:     estimator rebuilt with the best level found for each factor
    ort_res:   the design table with per-trial CV score / std / fit time
    param_res: per-factor-level mean results, best level first within a factor
    '''

    # result columns appended to the design table by run_ort
    _RES_COLS = ['ort_res', 'ort_res_std', 'ort_train_time']

    def __init__(self, X, y, scoring='neg_mean_squared_error', cv=5):
        # X, y    : training data handed straight to cross_val_score
        # scoring : sklearn scoring string.  run_ort post-processes scores as
        #           sqrt(-score) (RMSE), so it assumes a negated-squared-error
        #           style scorer.  BUGFIX: scoring/cv were previously stored
        #           but ignored (hard-coded inside run_ort); the default is
        #           now the value that was hard-coded, so behaviour for the
        #           existing callers is unchanged.
        # cv      : number of cross-validation folds
        self.X = X
        self.y = y
        self.scoring = scoring
        self.cv = cv
        self.model = None
        self.ort_res = None
        self.param_res = None

    def run_ort(self, model, df_params):
        """Run every trial row of *df_params* through cross-validation.

        model     : template estimator.  Its repr() is patched with regex
                    substitutions ("name=value") and re-built via eval().
                    NOTE(review): this only works while every tuned parameter
                    appears in repr(model); newer sklearn versions hide
                    defaults from the repr -- confirm for the estimators used.
        df_params : DataFrame with one column per parameter (factor) and one
                    row per orthogonal-design trial; the result columns are
                    appended in place.
        """
        model_str = str(model)
        k = 0
        n = len(df_params.index)
        for c in self._RES_COLS:
            df_params[c] = 0
        param_cols = df_params.columns[:-3]   # the original factor columns
        for i in df_params.index:
            k += 1
            params_li = [p + '=' + str(df_params.loc[i, p]) for p in param_cols]
            param_str = ', '.join(params_li)
            print(param_str)
            # splice each "name=value" pair into the estimator repr
            for re_p in params_li:
                p_name = re_p.split('=')[0]
                model_str = re.sub('%s=[^,)]*' % p_name, re_p, model_str)
            # eval() is acceptable here: the string is our own estimator repr,
            # never untrusted input
            model_ = eval(model_str)
            t1 = time.time()
            # BUGFIX: honour the cv/scoring given to __init__ (previously
            # hard-coded to cv=5, scoring='neg_mean_squared_error').
            cv_score = cross_val_score(model_, self.X, self.y, cv=self.cv,
                                       n_jobs=-1, scoring=self.scoring)
            err = np.sqrt(-cv_score)          # per-fold RMSE
            res = np.mean(err)
            # BUGFIX: the std was taken over the raw (negative) scores while
            # the mean was taken over RMSEs -- use the same units for both.
            res_std = np.std(err)
            t2 = int(time.time() - t1)
            print('res: %f, time: %d, num: %d/%d' % (res, t2, k, n))
            df_params.loc[i, 'ort_res'] = res
            df_params.loc[i, 'ort_res_std'] = res_std
            df_params.loc[i, 'ort_train_time'] = t2
        self.ort_res = df_params
        # For every factor pick the level (index *label*) with the lowest
        # mean CV error.  BUGFIX: Series.argmin was used for its long-removed
        # "return the label" behaviour; idxmin is the correct call.
        res_li = [df_params.groupby(p).ort_res.mean().idxmin() for p in param_cols]
        param_opt = ['='.join((p, str(v))) for p, v in zip(param_cols, res_li)]
        # Rebuild the estimator with the optimal level of every factor.
        for re_p in param_opt:
            p_name = re_p.split('=')[0]
            model_str = re.sub('%s=[^,)]*' % p_name, re_p, model_str)
        self.model = eval(model_str)

        # Per-level summary table, best level first within each factor.
        # BUGFIX: restrict the groupby-mean to the numeric result columns;
        # mean() over the whole frame raises on non-numeric factor columns
        # in modern pandas.
        temp_li = []
        for p in param_cols:
            temp = (df_params.groupby(p)[self._RES_COLS].mean()
                    .sort_values(self._RES_COLS))
            temp = temp.rename(index=lambda x: str(p) + '=' + str(x))
            temp_li.append(temp)
        self.param_res = pd.concat(temp_li)

#%%


# Cap how much of a DataFrame gets printed to the console.
for _opt, _val in (('display.max_columns', 80),
                   ('display.max_rows', 100),
                   ('display.width', 65)):
    pd.set_option(_opt, _val)
# Load the raw competition data; the first CSV column is the row Id.
train_file = pd.read_csv('../input/train.csv', index_col=0)
test_file = pd.read_csv('../input/test.csv', index_col=0)


# Hold out 20% of the training rows as a fixed validation set.
training, validation = train_test_split(train_file, test_size=0.2, random_state=17)

#%%
# Unify input formats: features = every column but the last, target = the
# last column (SalePrice); the models are trained on log-price.
training_X = training.iloc[:,:-1]
training_y = training.iloc[:,-1]
training_y_log = np.log(training_y)
validation_X = validation.iloc[:,:-1]
validation_y = validation.iloc[:,-1]
validation_y_log = np.log(validation_y)
test_X = test_file
##############################
# BUGFIX: the alternative split below is disabled.  It referenced
# `train_test` and `ntrain`, which are not defined anywhere in this file
# (NameError at runtime), it clobbered the variables prepared just above,
# and `train_file.iloc[validation_X.index,-1]` fed index *labels* into
# positional .iloc indexing.  Kept for reference only:
#training_X,validation_X= train_test_split(train_test[:ntrain],test_size=0.2, random_state=17)
#training_y = train_file.loc[training_X.index].iloc[:,-1]
#validation_y = train_file.iloc[validation_X.index,-1]
#test_X = train_test[ntrain:]
##############################


# Drop columns that are mostly missing
from my_data_describe import my_data_describe
# project-local helper; presumably returns one row per column with at least
# `na_rate` (missing-value fraction) and `type` fields -- TODO confirm
df_describe = my_data_describe(training_X)
# keep only columns with fewer than 20% missing values
low_na = df_describe[df_describe.na_rate<0.20].index

training_X = training_X[low_na]
validation_X = validation_X[low_na]
test_X = test_X[low_na]

#from my_boxplot import my_boxplot,my_boxplot_na
#my_boxplot(training_X.FireplaceQu, np.log(training_y))

# my_boxplot_na can be used when a continuous variable contains missing values
#my_boxplot_na(training_X.Fence    , np.log(training_y))
#training_X.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
#training_X.Functional.isnull().sum()

#%% dummies: all three splits must be encoded together,
# because get_dummies may produce different columns per split
def all_dummies(training, validation, test):
    """One-hot encode three DataFrames and conform them to the training columns.

    pd.get_dummies is applied to each frame independently, so a category seen
    in only one split produces mismatched columns.  Validation and test are
    then aligned to the training columns: dummy columns missing from a split
    are added as all-zero, extra ones are dropped, and the column order is
    made identical to training's.

    Returns (training_d, validation_d, test_d).
    """
    training_d = pd.get_dummies(training)
    validation_d = pd.get_dummies(validation)
    test_d = pd.get_dummies(test)

    # BUGFIX/idiom: the original added/dropped columns while iterating over
    # the very column Index it was mutating (fragile, and O(n^2));
    # reindex does add-missing-as-0 / drop-extra / reorder in one step.
    validation_d = validation_d.reindex(columns=training_d.columns, fill_value=0)
    test_d = test_d.reindex(columns=training_d.columns, fill_value=0)
    return (training_d, validation_d, test_d)

#%% Dummy-encode the three splits against a common column set, then
# mean-impute whatever missing values remain (per-split column means).
training_X_process, validation_X_process, test_X_process = (
    frame.fillna(frame.mean())
    for frame in all_dummies(training_X, validation_X, test_X)
)

#%% Model: RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor

# n_jobs: number of parallel workers; -1 = one per CPU core
rf_model = RandomForestRegressor(n_jobs=-1, oob_score=True, random_state=134)
# candidate levels for the orthogonal experiment: 3 factors x 3 levels
rf_params = {'max_depth': [20,30,40], 'n_estimators': [500,600,700], 'max_features': [60,70,80]}  
# project-local ORT helpers; seeSets presumably previews available designs
# for 3 factors and genSets builds the trial DataFrame -- TODO confirm
ort.seeSets(3,see_num=100)
rf_params_df=ort.genSets(rf_params,mode=2)

rf = AdjustParam(training_X_process, training_y_log, 'neg_mean_squared_error')
rf.run_ort(model=rf_model, df_params=rf_params_df)
rf_model_opt = rf.model
rf_ort_res = rf.ort_res
rf_param_res = rf.param_res

# NOTE(review): the automatically selected rf.model above is immediately
# overwritten by this hand-picked configuration.
rf_model_opt = RandomForestRegressor(max_depth=30, n_estimators=700, max_features=80)
rf_model_opt.fit(training_X_process,training_y_log)
validation_y_log_predict = rf_model_opt.predict(validation_X_process)
rmse(validation_y_log, validation_y_log_predict)
#0.12635671064993356

#%% Model: XGBRegressor
from xgboost.sklearn import XGBRegressor

################## Tuning ranges: build a suitable parameter DataFrame
# 4 factors x 4 levels each
xgb_params = {'learning_rate': [0.06,0.08,0.1,0.12], 'n_estimators': [650,700,750,800], 'max_depth': [3,4,5,6], 'colsample_bytree':[0.55,0.6,0.65,0.7]}  
ort.seeSets(4,see_num=40)
xgb_params_df = ort.genSets(xgb_params,mode=2)
######################################################
# Training
xgb_model = XGBRegressor(n_jobs=-1)
xgb = AdjustParam(training_X_process, training_y_log, 'neg_mean_squared_error')
xgb.run_ort(model=xgb_model, df_params=xgb_params_df)
#xgb_model_opt = xgb.model
xgb_ort_res = xgb.ort_res
xgb_param_res = xgb.param_res
################## Parameter choice: prefer levels with small range AND small variance
# bare expression -- only meaningful when run interactively as a notebook cell
xgb_param_res
xgb_model_opt = XGBRegressor(learning_rate=0.06, max_depth=3,n_estimators=650, colsample_bytree=0.6)
######################################################
# Validate the chosen model on the hold-out split
xgb_model_opt.fit(training_X_process,training_y_log)
validation_y_log_predict = xgb_model_opt.predict(validation_X_process)
rmse(validation_y_log, validation_y_log_predict)

# Final prediction: refit on train+validation, predict test, save submission
xgb_model_opt.fit(pd.concat([training_X_process,validation_X_process]), pd.concat([training_y_log,validation_y_log]))
test_predict = xgb_model_opt.predict(test_X_process)
save_res(test_predict)


# NOTE(review): xgb_model was never fitted in this script and test_X is the
# raw (non-encoded) frame -- this line looks like a leftover and would raise.
xgb_model.predict(test_X)

# Categorical-to-numeric exploration (obj_li is not used further below)
df_obj = df_describe[df_describe.type=='object']
df_obj_m = df_obj[df_obj.na_rate < 0.4]
obj_li = df_obj_m.index

#%%
# Validation RMSE by preprocessing strategy:
# plain dummies + mean imputation
# validation-rf  : 0.13579013062998008
# validation-xgb : 0.12186200689055419
# with feature engineering
#0.12251914001052963
# drop high-NA columns, model-impute low-NA ones: 0.12001810400276995
# directly drop extremely skewed features: 0.1203
