import pandas as pd#数据处理
import matplotlib.pyplot as plt #plot.scatter画散点图
import seaborn as sns#画热力图/箱型图
import numpy as np#数据处理
from scipy.stats import skew#求偏度
import warnings
warnings.filterwarnings("ignore")#忽略警告

from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone#sklearn基础函数
from sklearn.preprocessing import LabelEncoder#转换
from sklearn.preprocessing import RobustScaler, StandardScaler#编码
from sklearn.preprocessing import Imputer#处理缺失值
from sklearn.metrics import mean_squared_error#均方误差
from sklearn.pipeline import Pipeline, make_pipeline#流水线
from sklearn.decomposition import PCA, KernelPCA#PCA改变维度技术
from sklearn.model_selection import cross_val_score,KFold,GridSearchCV#交叉搜索、交叉搜索、调参
from pandas import DataFrame,Series
#以下都是不同的机器学习算法
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet, SGDRegressor, BayesianRidge
from sklearn.kernel_ridge import KernelRidge
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.svm import SVR, LinearSVR

#from scipy.stats import norm
#from scipy import stats
dimension=380#number of principal components kept by the PCA step later in the script
def del_data(a,va,b="SalePrice",vb=2500000):
    """Drop outlier rows from the global `train` frame in place.

    Removes every row where column `a` exceeds `va` while column `b`
    stays below `vb` (defaults target implausibly cheap large houses).
    """
    outliers = (train[a] > va) & (train[b] < vb)
    train.drop(index=train.index[outliers], inplace=True)
def basic_cond(seq='SalePrice'):
    """Print summary statistics for one column of the global `train` frame.

    Shows describe(), skewness, kurtosis, then the frame's shape.
    """
    column = train[seq]
    print(column.describe())  # distribution summary
    print(column.skew())      # skewness
    print(column.kurt())      # kurtosis
    print(train.shape)        # (rows, columns)
def sca_plt(a,b='SalePrice'):
    """Scatter-plot feature `a` against column `b` of the global `train` frame."""
    frame = pd.concat([train[b], train[a]], axis=1)
    frame.plot.scatter(x=a, y=b, ylim=(0,800000))
    plt.show()
def box_plt(a,b='SalePrice',lim=800000):
    """Box-plot column `b` of the global `train` frame grouped by feature `a`.

    `lim` is currently unused (the y-axis clamp was commented out in the
    original); the parameter is kept for interface compatibility.
    """
    frame = pd.concat([train[b], train[a]], axis=1)
    sns.boxplot(x=a, y=b, data=frame)
    #fig.axis(ymin=0, ymax=lim);
    plt.show()
def cor_mat(seq='SalePrice'):
    """Heat-map the k features of the global `train` frame most correlated with `seq`."""
    k = 8  # number of top-correlated features to show
    top_cols = train.corr().nlargest(k, seq)[seq].index
    cm = np.corrcoef(train[top_cols].values.T)
    sns.heatmap(cm, annot=True, annot_kws={'size': k},
                yticklabels=top_cols.values, xticklabels=top_cols.values)
    plt.show()
def lack():
    """Print per-column missing-value counts of the global `combine` frame,
    restricted to columns that actually have gaps, in descending order."""
    null_counts = combine.isnull().sum()
    print(null_counts[null_counts > 0].sort_values(ascending=False))
def fill():
    """Impute missing values in the global `combine` frame in place.

    Area/count columns get 0, absence-style quality columns get the
    string "None", a few categoricals get their column mode, and a set
    of numeric-but-categorical columns is finally cast to str.
    """
    zero_cols = ["MasVnrArea", "BsmtUnfSF", "TotalBsmtSF", "GarageCars", "BsmtFinSF2", "BsmtFinSF1", "GarageArea"]
    for col in zero_cols:
        combine[col].fillna(0, inplace=True)
    none_cols = ["PoolQC", "MiscFeature", "Alley", "Fence", "FireplaceQu", "GarageQual", "GarageCond", "GarageFinish", "GarageYrBlt", "GarageType", "BsmtExposure", "BsmtCond", "BsmtQual", "BsmtFinType2", "BsmtFinType1", "MasVnrType"]
    for col in none_cols:
        combine[col].fillna("None", inplace=True)
    mode_cols = ["MSZoning", "BsmtFullBath", "BsmtHalfBath", "Utilities", "Functional", "Electrical", "KitchenQual", "SaleType", "Exterior1st", "Exterior2nd"]
    for col in mode_cols:
        combine[col].fillna(combine[col].mode()[0], inplace=True)
    # these numeric columns are really categories -- stringify them so
    # later get_dummies treats them as labels
    str_cols = ["MSSubClass", "BsmtFullBath", "BsmtHalfBath", "HalfBath", "BedroomAbvGr", "KitchenAbvGr", "MoSold", "YrSold", "YearBuilt", "YearRemodAdd", "LowQualFinSF", "GarageYrBlt"]
    for col in str_cols:
        combine[col] = combine[col].astype(str)
def fill_area():
    """Fill missing LotFrontage in the global `combine` frame with the median
    LotFrontage of the row's LotArea decile (adds a temporary 'LotAreaCut'
    bucket column that the main script drops later)."""
    combine['LotAreaCut'] = pd.qcut(combine.LotArea, 10)
    by_bucket = combine.groupby(['LotAreaCut'])['LotFrontage']
    combine['LotFrontage'] = by_bucket.transform(lambda s: s.fillna(s.median()))
def ob_group(x,y='LotFrontage'):
    """Print mean/median/count of column `y` grouped by column `x`
    of the global `combine` frame."""
    stats = combine.groupby([x])[[y]].agg(['mean', 'median', 'count'])
    print(stats)
def num_group(x,y='LotFrontage',inter=10):
    """Bucket the global `combine.LotArea` into `inter` quantile bins stored as
    column `x`, then print mean/median/count of `y` per bin."""
    combine[x] = pd.qcut(combine.LotArea, inter)
    stats = combine.groupby([x])[[y]].agg(['mean', 'median', 'count'])
    print(stats)
def do_map(x,y='SalePrice'):
    """Print median/mean/count of `y` grouped by `x`, first casting the
    numeric-but-categorical columns of the global `combine` to str
    (used to design the ordinal maps in map_values)."""
    as_str = ["MSSubClass","BsmtFullBath","BsmtHalfBath","HalfBath","BedroomAbvGr","KitchenAbvGr","MoSold","YrSold","YearBuilt","YearRemodAdd","LowQualFinSF","GarageYrBlt"]
    for col in as_str:
        combine[col] = combine[col].astype(str)
    print(combine.groupby([x])[[y]].agg(['median','mean','count']))
def map_values():
    """Ordinal-encode selected categorical columns of the global `combine`.

    For each mapped column a new "o<Name>" column is added; the integer
    levels group categories by similar SalePrice behaviour (levels were
    chosen by inspecting do_map output).  Original columns are left in
    place.  Prints "Got it" when done.
    """
    combine["oMSSubClass"] = combine.MSSubClass.map({'180':1, 
                                        '30':2, '45':2, 
                                        '190':3, '50':3, '90':3, 
                                        '85':4, '40':4, '160':4, 
                                        '70':5, '20':5, '75':5, '80':5, '150':5,
                                        '120': 6, '60':6})    
    combine["oMSZoning"] = combine.MSZoning.map({'C (all)':1, 'RH':2, 'RM':2, 'RL':3, 'FV':4})    
    combine["oNeighborhood"] = combine.Neighborhood.map({'MeadowV':1,
                                               'IDOTRR':2, 'BrDale':2,
                                               'OldTown':3, 'Edwards':3, 'BrkSide':3,
                                               'Sawyer':4, 'Blueste':4, 'SWISU':4, 'NAmes':4,
                                               'NPkVill':5, 'Mitchel':5,
                                               'SawyerW':6, 'Gilbert':6, 'NWAmes':6,
                                               'Blmngtn':7, 'CollgCr':7, 'ClearCr':7, 'Crawfor':7,
                                               'Veenker':8, 'Somerst':8, 'Timber':8,
                                               'StoneBr':9,
                                               'NoRidge':10, 'NridgHt':10})    
    combine["oCondition1"] = combine.Condition1.map({'Artery':1,
                                           'Feedr':2, 'RRAe':2,
                                           'Norm':3, 'RRAn':3,
                                           'PosN':4, 'RRNe':4,
                                           'PosA':5 ,'RRNn':5})    
    combine["oBldgType"] = combine.BldgType.map({'2fmCon':1, 'Duplex':1, 'Twnhs':1, '1Fam':2, 'TwnhsE':2})    
    combine["oHouseStyle"] = combine.HouseStyle.map({'1.5Unf':1, 
                                           '1.5Fin':2, '2.5Unf':2, 'SFoyer':2, 
                                           '1Story':3, 'SLvl':3,
                                           '2Story':4, '2.5Fin':4})  
    combine["oExterior1st"] = combine.Exterior1st.map({'BrkComm':1,
                                             'AsphShn':2, 'CBlock':2, 'AsbShng':2,
                                             'WdShing':3, 'Wd Sdng':3, 'MetalSd':3, 'Stucco':3, 'HdBoard':3,
                                             'BrkFace':4, 'Plywood':4,
                                             'VinylSd':5,
                                             'CemntBd':6,
                                             'Stone':7, 'ImStucc':7})    
    combine["oMasVnrType"] = combine.MasVnrType.map({'BrkCmn':1, 'None':1, 'BrkFace':2, 'Stone':3})    
    combine["oExterQual"] = combine.ExterQual.map({'Fa':1, 'TA':2, 'Gd':3, 'Ex':4})    
    combine["oFoundation"] = combine.Foundation.map({'Slab':1, 
                                           'BrkTil':2, 'CBlock':2, 'Stone':2,
                                           'Wood':3, 'PConc':4})    
    combine["oBsmtQual"] = combine.BsmtQual.map({'Fa':2, 'None':1, 'TA':3, 'Gd':4, 'Ex':5})    
    combine["oBsmtExposure"] = combine.BsmtExposure.map({'None':1, 'No':2, 'Av':3, 'Mn':3, 'Gd':4})   
    combine["oHeating"] = combine.Heating.map({'Floor':1, 'Grav':1, 'Wall':2, 'OthW':3, 'GasW':4, 'GasA':5})    
    combine["oHeatingQC"] = combine.HeatingQC.map({'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})    
    combine["oKitchenQual"] = combine.KitchenQual.map({'Fa':1, 'TA':2, 'Gd':3, 'Ex':4})    
    combine["oFunctional"] = combine.Functional.map({'Maj2':1, 'Maj1':2, 'Min1':2, 'Min2':2, 'Mod':2, 'Sev':2, 'Typ':3})    
    combine["oFireplaceQu"] = combine.FireplaceQu.map({'None':1, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})  
    combine["oGarageType"] = combine.GarageType.map({'CarPort':1, 'None':1,
                                           'Detchd':2,
                                           '2Types':3, 'Basment':3,
                                           'Attchd':4, 'BuiltIn':5})    
    combine["oGarageFinish"] = combine.GarageFinish.map({'None':1, 'Unf':2, 'RFn':3, 'Fin':4})    
    combine["oPavedDrive"] = combine.PavedDrive.map({'N':1, 'P':2, 'Y':3})    
    combine["oSaleType"] = combine.SaleType.map({'COD':1, 'ConLD':1, 'ConLI':1, 'ConLw':1, 'Oth':1, 'WD':1,
                                       'CWD':2, 'Con':3, 'New':3})    
    combine["oSaleCondition"] = combine.SaleCondition.map({'AdjLand':1, 'Abnorml':2, 'Alloca':2, 'Family':2, 'Normal':3, 'Partial':4})             
    print("Got it")
def pshape(X):
    """Print the shape tuple of an array-like (debugging helper)."""
    dims = X.shape
    print(dims)
class labelenc(BaseEstimator, TransformerMixin):
    """Label-encode the year-like columns so later steps treat them as
    ordinal integers rather than raw year strings."""
    def __init__(self):
        pass

    def fit(self, X, y=None):
        # stateless transformer -- nothing to learn
        return self

    def transform(self, X):
        # a fresh LabelEncoder per column gives the same result as the
        # original's single reused instance (fit_transform refits each time)
        for col in ("YearBuilt", "YearRemodAdd", "GarageYrBlt"):
            X[col] = LabelEncoder().fit_transform(X[col])
        return X
class skew_dummies(BaseEstimator, TransformerMixin):
    """Log1p-transform numeric columns whose absolute skewness reaches a
    threshold, then one-hot encode all object columns."""
    def __init__(self, skew=0.5):
        self.skew = skew  # absolute-skewness threshold

    def fit(self, X, y=None):
        # stateless transformer -- nothing to learn
        return self

    def transform(self, X):
        numeric = X.select_dtypes(exclude=["object"])  # numeric features only
        skewness = numeric.apply(lambda col: skew(col))
        heavy = skewness[abs(skewness) >= self.skew].index
        X[heavy] = np.log1p(X[heavy])  # damp long right tails
        return pd.get_dummies(X)  # one-hot the remaining object columns
def show_important(FI_lasso):
    """Print coefficients sorted by importance and bar-plot the non-zero ones.

    `FI_lasso` is a DataFrame with a "Feature Importance" column.
    """
    print(FI_lasso.sort_values("Feature Importance", ascending=False))
    nonzero = FI_lasso[FI_lasso["Feature Importance"] != 0]
    nonzero.sort_values("Feature Importance").plot(kind="barh")  # horizontal bars
    plt.xticks(rotation=90)  # rotate tick labels
    plt.show()
class add_feature(BaseEstimator, TransformerMixin):
    """Feature-construction transformer.

    additional=1 adds only the two aggregate-area features; any other
    value additionally adds the interaction and porch/room aggregates.

    Bug fix: the original placed `return X` inside the `else` branch
    only, so `additional=1` returned None and broke any pipeline that
    used it.
    """
    def __init__(self, additional=1):
        self.additional = additional

    def fit(self, X, y=None):
        # stateless transformer -- nothing to learn
        return self

    def transform(self, X):
        # aggregate-area features are added in both modes (the original
        # duplicated these two lines in each branch)
        X["TotalHouse"] = X["TotalBsmtSF"] + X["1stFlrSF"] + X["2ndFlrSF"]
        X["TotalArea"] = X["TotalBsmtSF"] + X["1stFlrSF"] + X["2ndFlrSF"] + X["GarageArea"]

        if self.additional != 1:
            # interaction features; the "+_"/"-_" name prefixes are kept
            # from the original (they are just labels, not operations)
            X["+_TotalHouse_OverallQual"] = X["TotalHouse"] * X["OverallQual"]
            X["+_GrLivArea_OverallQual"] = X["GrLivArea"] * X["OverallQual"]
            X["+_oMSZoning_TotalHouse"] = X["oMSZoning"] * X["TotalHouse"]
            X["+_oMSZoning_OverallQual"] = X["oMSZoning"] + X["OverallQual"]
            X["+_oMSZoning_YearBuilt"] = X["oMSZoning"] + X["YearBuilt"]
            X["+_oNeighborhood_TotalHouse"] = X["oNeighborhood"] * X["TotalHouse"]
            X["+_oNeighborhood_OverallQual"] = X["oNeighborhood"] + X["OverallQual"]
            X["+_oNeighborhood_YearBuilt"] = X["oNeighborhood"] + X["YearBuilt"]
            X["+_BsmtFinSF1_OverallQual"] = X["BsmtFinSF1"] * X["OverallQual"]

            X["-_oFunctional_TotalHouse"] = X["oFunctional"] * X["TotalHouse"]
            X["-_oFunctional_OverallQual"] = X["oFunctional"] + X["OverallQual"]
            X["-_LotArea_OverallQual"] = X["LotArea"] * X["OverallQual"]
            X["-_TotalHouse_LotArea"] = X["TotalHouse"] + X["LotArea"]
            X["-_oCondition1_TotalHouse"] = X["oCondition1"] * X["TotalHouse"]
            X["-_oCondition1_OverallQual"] = X["oCondition1"] + X["OverallQual"]

            # simple sums of related areas/rooms
            X["Bsmt"] = X["BsmtFinSF1"] + X["BsmtFinSF2"] + X["BsmtUnfSF"]
            X["Rooms"] = X["FullBath"] + X["TotRmsAbvGrd"]
            X["PorchArea"] = X["OpenPorchSF"] + X["EnclosedPorch"] + X["3SsnPorch"] + X["ScreenPorch"]
            X["TotalPlace"] = X["TotalBsmtSF"] + X["1stFlrSF"] + X["2ndFlrSF"] + X["GarageArea"] + X["OpenPorchSF"] + X["EnclosedPorch"] + X["3SsnPorch"] + X["ScreenPorch"]

        return X
def rmse_cv(model,X,y):
    """Return the 5-fold cross-validated RMSE scores of `model` on (X, y)."""
    neg_mse = cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=5)
    return np.sqrt(-neg_mse)
class grid():
    """Thin wrapper around GridSearchCV that reports RMSE instead of neg-MSE."""
    def __init__(self, model):
        self.model = model

    def grid_get(self, X, y, param_grid):
        """Run a 5-fold grid search and print the best params with their RMSE."""
        search = GridSearchCV(self.model, param_grid, cv=5, scoring="neg_mean_squared_error")
        search.fit(X, y)
        # convert every fold-mean score from neg-MSE to RMSE (kept for inspection)
        search.cv_results_['mean_test_score'] = np.sqrt(-search.cv_results_['mean_test_score'])
        print(search.best_params_, np.sqrt(-search.best_score_))
        #print(pd.DataFrame(search.cv_results_)[['params','std_test_score','mean_test_score']])
def B_modeling():
    """Cross-validate a zoo of baseline regressors on the global X_scaled/y_log.

    NOTE: the format string prints the score *std* first and the *mean*
    second -- this matches the original output order exactly.
    """
    candidates = [
        Ridge(),
        Lasso(alpha=0.01, max_iter=10000),
        RandomForestRegressor(),
        GradientBoostingRegressor(),
        SVR(),
        LinearSVR(),
        ElasticNet(alpha=0.001, max_iter=10000),
        SGDRegressor(max_iter=1000, tol=1e-3),
        BayesianRidge(),
        KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5),
        ExtraTreesRegressor(),
    ]
    labels = ["Ridge", "Lasso", "RF", "GBR", "SVR", "LinSVR", "Ela", "SGD", "Bay", "Ker", "Extra"]
    for label, reg in zip(labels, candidates):
        cv_rmse = rmse_cv(reg, X_scaled, y_log)
        print("{}: {:.6f}, {:.4f}".format(label, cv_rmse.std(), cv_rmse.mean()))
def evaluation():
    """Grid-search hyper-parameters for each candidate model on the global
    X_scaled / y_log and print the best setting (and RMSE) per model."""
    grid(Lasso()).grid_get(X_scaled,y_log,{'alpha': [0.0004,0.0005,0.0007,0.0006,0.0009,0.0008],'max_iter':[10000]})
    grid(Ridge()).grid_get(X_scaled,y_log,{'alpha':[35,40,45,50,55,60,65,70,80,90]})
    grid(SVR()).grid_get(X_scaled,y_log,{'C':[11,12,13,14,15],'kernel':["rbf"],"gamma":[0.0003,0.0004],"epsilon":[0.008,0.009]})
    kr_params = {'alpha':[0.2,0.3,0.4,0.5], 'kernel':["polynomial"], 'degree':[3],'coef0':[0.8,1,1.2]}
    grid(KernelRidge()).grid_get(X_scaled,y_log,kr_params)
    grid(ElasticNet()).grid_get(X_scaled,y_log,{'alpha':[0.0005,0.0008,0.004,0.005],'l1_ratio':[0.08,0.1,0.3,0.5,0.7],'max_iter':[10000]})
def ptype(a):
    """Print the type of `a` (debugging helper)."""
    kind = type(a)
    print(kind)
class AverageWeight(BaseEstimator, RegressorMixin):
    """Weighted-average ensemble of pre-configured regressors.

    Parameters
    ----------
    mod : list of estimators, cloned and fitted at fit() time
    weight : per-model weights, same length and order as `mod`
    """
    def __init__(self, mod, weight):
        self.mod = mod
        self.weight = weight

    def fit(self, X, y):
        """Fit a clone of every base model on (X, y)."""
        self.models_ = [clone(m) for m in self.mod]
        for model in self.models_:
            model.fit(X, y)
        return self

    def predict(self, X):
        """Return the weighted average of the base models' predictions.

        Vectorized replacement for the original per-sample Python loop:
        same values, single numpy dot product.  Unlike the original's
        zip (which silently truncated), this requires len(weight) to
        equal the number of models.
        """
        # pred has shape (n_models, n_samples)
        pred = np.array([model.predict(X) for model in self.models_])
        return pred.T.dot(np.asarray(self.weight))
class stacking(BaseEstimator, RegressorMixin, TransformerMixin):
    """Out-of-fold stacking ensemble.

    Each base model in `mod` is fitted on 5 CV folds; its out-of-fold
    predictions become the meta-features on which `meta_model` is trained.

    NOTE(review): X and y are indexed with integer index arrays
    (X[train_index]), so they must be numpy arrays, not DataFrames --
    the main script passes Imputer output, which satisfies this.
    """
    def __init__(self,mod,meta_model):
        self.mod = mod
        self.meta_model = meta_model
        # fixed seed so fit() and get_oof() use identical folds
        self.kf = KFold(n_splits=5, random_state=42, shuffle=True)
        
    def fit(self,X,y):
        """Fit every base model per fold, then the meta model on OOF predictions."""
        self.saved_model = [list() for i in self.mod]  # 5 fitted clones per base model
        oof_train = np.zeros((X.shape[0], len(self.mod)))
        
        for i,model in enumerate(self.mod):
            for train_index, val_index in self.kf.split(X,y):
                renew_model = clone(model)
                renew_model.fit(X[train_index], y[train_index])
                self.saved_model[i].append(renew_model)
                # each validation row gets exactly one out-of-fold prediction
                oof_train[val_index,i] = renew_model.predict(X[val_index])
        
        self.meta_model.fit(oof_train,y)
        return self
    
    def predict(self,X):
        """Average each base model's 5 fold-model predictions, then apply the meta model."""
        whole_test = np.column_stack([np.column_stack(model.predict(X) for model in single_model).mean(axis=1) 
                                      for single_model in self.saved_model]) 
        return self.meta_model.predict(whole_test)
    
    def get_oof(self,X,y,test_X):
        """Return (train OOF features, test features) without fitting the meta model.

        Test predictions are averaged over the 5 fold models of each base model.
        """
        oof = np.zeros((X.shape[0],len(self.mod)))
        test_single = np.zeros((test_X.shape[0],5))  # per-fold test predictions, reused per model
        test_mean = np.zeros((test_X.shape[0],len(self.mod)))
        for i,model in enumerate(self.mod):
            for j, (train_index,val_index) in enumerate(self.kf.split(X,y)):
                clone_model = clone(model)
                clone_model.fit(X[train_index],y[train_index])
                oof[val_index,i] = clone_model.predict(X[val_index])
                test_single[:,j] = clone_model.predict(test_X)
            test_mean[:,i] = test_single.mean(axis=1)
        return oof, test_mean

#1. Data import#
# Load the training and test CSVs (hard-coded local paths)
train=pd.read_csv('E:/PROGRAMING/KAGGLE/House Prices Advanced Regression Techniques/DATA/train.csv')
test=pd.read_csv('E:/PROGRAMING/KAGGLE/House Prices Advanced Regression Techniques/DATA/test.csv')
del_data("GrLivArea",3500)#drop training outliers: very large GrLivArea with low SalePrice
combine=pd.concat([train,test], ignore_index=True,sort=True)#stack train+test so preprocessing runs on both at once
combine.drop(['Id'],axis=1, inplace=True)#Id carries no predictive signal
#print(train.columns)#list the available features
#-------------------#

#2. Basic profile of the target variable#
#basic_cond()#print the training-set SalePrice summary stats, skewness/kurtosis and frame shape
#-------------------#


#3. Visualising the key data#
#cor_mat()#heat map of the features most correlated with SalePrice
#sca_plt('GrLivArea')#scatter plot of a numeric feature vs. SalePrice
#box_plt('YearBuilt')#box plot of a discrete feature vs. SalePrice (original comment mislabelled it a scatter plot)
#-------------------#


#4. Data filling / cleaning#
#lack()#print missing-value counts in descending order
fill_area()#fill LotFrontage with per-LotArea-decile medians
fill()#fill the rest (0 / "None" / column mode), then stringify categorical-ish numerics
#-------------------#

#5. Feature engineering#
#do_map('Neighborhood')#inspect how a feature relates to SalePrice to design the ordinal maps
map_values()#ordinal-encode categoricals: adds 22 new "o*" columns
combine.drop("LotAreaCut",axis=1,inplace=True)#drop the temporary bucketing column from fill_area()
combine.drop(['SalePrice'],axis=1,inplace=True)#drop the target from the feature frame
#-------------------#

#6. Build the preprocessing Pipeline#
pipe = Pipeline([
    ('labenc', labelenc()),#label-encode the year columns
    ('skew_dummies', skew_dummies(skew=1)),#log1p heavily-skewed numerics, one-hot the rest
    ])#assemble the pipeline
combine2 = combine.copy()#work on a copy of the combined frame
data_pipe = pipe.fit_transform(combine2)#encoded, one-hot-expanded feature frame


# The next four statements split the processed frame back into train / test
n_train=train.shape[0]#number of training rows
X = data_pipe[:n_train]#training features
test_X = data_pipe[n_train:]#test features
y= train.SalePrice#training target

scaler = RobustScaler()#median/IQR scaling, less sensitive to outliers
X_scaled = scaler.fit(X).transform(X)#scale the training features
test_X_scaled = scaler.transform(test_X)#scale the test features (result is a numpy array)
y_log = np.log(train.SalePrice)#log-transform the target
#-------------------#


#7. Feature combination#
'''
lasso=Lasso(alpha=0.001)#定义一个含参的LASSO回归
lasso.fit(X_scaled,y_log)#使用LASSO进行训练模型
FI_lasso = pd.DataFrame({"Feature Importance":lasso.coef_}, index=data_pipe.columns)#对特征进行排序，按照其重要性
show_important(FI_lasso)
'''
ridge=Ridge(alpha=60)#a parameterized ridge regression
ridge.fit(X_scaled,y_log)#fit on the first-pass features
FI_lasso = pd.DataFrame({"Feature Importance":ridge.coef_}, index=data_pipe.columns)#feature importances (variable name kept from the lasso variant above for compatibility)
#show_important(FI_lasso)
pipe = Pipeline([
    ('labenc', labelenc()),#label-encode the year columns
    ('add_feature', add_feature(additional=2)),#construct interaction/aggregate features
    ('skew_dummies', skew_dummies(skew=1)),#log1p skewed numerics, one-hot the rest
    ])#second pipeline, now with feature construction
combine_pipe = pipe.fit_transform(combine)#rerun preprocessing with the extra features


n_train=train.shape[0]#number of training rows
# BUG FIX: the original sliced `data_pipe` here, so `combine_pipe` -- the
# whole feature-construction pipeline above -- was computed and then
# silently ignored.  Slice the new frame instead.
X = combine_pipe[:n_train]#training features (with constructed features)
test_X = combine_pipe[n_train:]#test features
y= train.SalePrice#training target

X_scaled = scaler.fit(X).transform(X)#refit the scaler on the new feature set
test_X_scaled = scaler.transform(test_X)#scale the test features
y_log = np.log(train.SalePrice)#log-transform the target

pca = PCA(n_components=dimension)#PCA dimensionality reduction (component count is tunable)
X_scaled=pca.fit_transform(X_scaled)#fit PCA on the training features
test_X_scaled = pca.transform(test_X_scaled)#apply the same projection to the test features
#-------------------#

#8. Baseline modelling & evaluation#
#B_modeling()#cross-validate the baseline model zoo
#evaluation()#grid-search hyper-parameters per model
#-------------------#

#9. Ensemble modelling#
# Base models with the hyper-parameters found by evaluation()
lasso = Lasso(alpha=0.0005,max_iter=10000)
ridge = Ridge(alpha=60)
svr = SVR(gamma= 0.0004,kernel='rbf',C=13,epsilon=0.009)
ker = KernelRidge(alpha=0.2 ,kernel='polynomial',degree=3 , coef0=0.8)
ela = ElasticNet(alpha=0.005,l1_ratio=0.08,max_iter=10000)
bay = BayesianRidge()
mod = [lasso,ridge,svr,ker,ela,bay]#the tuned base models
a = Imputer().fit_transform(X_scaled)#impute any remaining NaNs in the features (NOT standardization)
b = Imputer().fit_transform(y_log.values.reshape(-1,1)).ravel()#impute the target (reshaped to 2-D for Imputer, then flattened)

#9.1 Ensemble - weighted averaging#
w1 = 0.02
w2 = 0.2
w3 = 0.25
w4 = 0.3
w5 = 0.03
w6 = 0.2
weight=[w1,w2,w3,w4,w5,w6]#six-model weights; NOTE(review): unused -- the average below hard-codes a two-model [0.55, 0.45] split

weight_avg = AverageWeight(mod = [svr,ker],weight=[0.55,0.45])#two-model weighted average
score = rmse_cv(weight_avg,X_scaled,y_log)#cross-validated RMSE of the ensemble
print(score.mean())#report the mean fold RMSE
#-------------------#

#9.2 Ensemble - stacking#
stack_model = stacking(mod=[svr,ker,ela,bay],meta_model=ker)#stacked ensemble with kernel ridge as meta model

# Out-of-fold predictions become extra columns appended to the original features
X_train_stack, X_test_stack = stack_model.get_oof(a,b,test_X_scaled)
X_train_add = np.hstack((a,X_train_stack))
X_test_add = np.hstack((test_X_scaled,X_test_stack))

score=rmse_cv(stack_model,X_train_add,b)#CV RMSE of the stack on the augmented features
print(score.mean())
#-------------------#


#10. Submission#
print("try")

#10.1 Weighted average#
weight_avg.fit(a,b)
pred = np.exp(weight_avg.predict(test_X_scaled))#invert the log transform applied to the target
result=pd.DataFrame({'Id':test.Id, 'SalePrice':pred})
result.to_csv("submission_weight.csv",index=False)
#-------------------#

#10.2 Stacking#
stack_model.fit(a,b)
pred = np.exp(stack_model.predict(test_X_scaled))#NOTE(review): fitted on `a` (no stacked columns) and predicted on test_X_scaled -- confirm this matches the intended use of X_train_add/X_test_add above
result=pd.DataFrame({'Id':test.Id, 'SalePrice':pred})
result.to_csv("submission_stacking.csv",index=False)
#-------------------#
