
# coding: utf-8

# # AMES_House数据集特征工程

# In[167]:


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')


# In[168]:


# Load the raw Ames housing data. `train` carries the SalePrice target;
# `test` is the prediction set (no target column — confirmed by test.info() below).
train = pd.read_csv('Ames_House_train.csv')
test = pd.read_csv('Ames_House_test.csv')

# Quick peek at the first rows (display-only in a notebook).
train.head()
#test.head()


# In[169]:


# Column overview: dtypes and non-null counts for the training set.
train.info()


# Some features have very many missing values and are candidates for removal; others
# can be imputed. "object" dtype columns are text and will be encoded as categories later.

# In[170]:


# Summary statistics for the numeric training columns.
train.describe()


# In[171]:


test.info()


# Comparing with train: the dtypes differ slightly, and test has one column
# fewer — it lacks the SalePrice target.

# In[172]:


# Distribution of the target variable.
fig = plt.figure()
# seaborn.distplot was deprecated in 0.11 and removed in 0.14;
# histplot(..., kde=True) is the supported equivalent.
sns.histplot(train.SalePrice, bins=50, kde=True)
plt.xlabel('SalePr')
plt.show()
# The plot shows a clearly skewed distribution with a long tail,
# as is typical for price data.


# In[173]:


# Scatter of SalePrice against row position to eyeball outliers.
plt.scatter(range(train.shape[0]),train['SalePrice'].values,color='red')


# Most prices fall between 100,000 and 300,000; points above 400,000 are treated
# as outliers and removed below.

# In[174]:


# Drop outlier rows. NOTE: this keeps the original (now non-contiguous) index.
train = train[train.SalePrice<400000]


# In[175]:


# Re-check the target distribution after outlier removal.
fig = plt.figure()
# distplot was removed from seaborn (deprecated since 0.11); histplot replaces it.
sns.histplot(train.SalePrice, bins=50, kde=True)
plt.xlabel('SalePr')
plt.show()


# In[176]:


# Find which features have missing values: columns with a modest number of NaNs
# are returned for imputation; columns with too many are flagged for deletion.
def null_df(df, threshold=1000):
    """Split columns with missing values into impute vs. drop candidates.

    Columns with between 1 and ``threshold - 1`` missing values are returned
    for imputation; columns with ``threshold`` or more are only reported as
    deletion candidates.  (The original version used strict ``> 1000``, so a
    column with exactly 1000 missing values was silently ignored by both
    branches — ``>=`` closes that gap.)

    Parameters
    ----------
    df : pandas.DataFrame
    threshold : int, default 1000
        Missing-value count at or above which a column is suggested for removal.

    Returns
    -------
    list of str
        Names of columns that have some, but fewer than `threshold`, NaNs.
    """
    num_null = df.isnull().sum()
    null_list = []
    for col in num_null.index:
        n = num_null[col]
        if 0 < n < threshold:
            null_list.append(col)
        elif n >= threshold:
            # Too sparse to impute sensibly — recommend dropping the column.
            print("这些特征建议删除:" + col)
    return null_list

# Union of the impute-candidate columns across train and test.
# NOTE: set() removes duplicates but leaves the order unspecified.
list1=list(set(null_df(train)+null_df(test)))
print(list1)


# Handle the missing values of the features printed above.

# In[177]:


def process_missvalue(df):
    """Impute or drop missing values in an Ames housing DataFrame.

    Strategy, per column group:
    - columns that are mostly missing (Alley, PoolQC, Fence, MiscFeature)
      are dropped outright;
    - categorical columns where NaN means "house has no such feature"
      are filled with 'No';
    - numeric size/count columns where NaN means the feature is absent
      are filled with 0;
    - a few categoricals get a fixed fallback value ('WD', 'None', 'TA',
      'Typ', 'AllPub' — presumably the modal / "typical" category);
    - everything else is forward-filled.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame containing all of the columns handled below (KeyError otherwise).

    Returns
    -------
    pandas.DataFrame
        A new frame (the ``drop`` makes a copy, so the input is not mutated).
    """
    # Columns with too many NaNs to impute (flagged by null_df above).
    df = df.drop(['Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)

    # NaN in these categoricals means the house lacks the feature.
    no_cols = ['BsmtFinType1', 'BsmtFinType2', 'GarageCond', 'GarageQual',
               'BsmtCond', 'FireplaceQu', 'GarageFinish', 'GarageType',
               'BsmtQual', 'BsmtExposure']
    for col in no_cols:
        df[col] = df[col].fillna('No')

    # NaN in these numeric columns means "feature absent" -> zero size/count.
    zero_cols = ['BsmtUnfSF', 'BsmtFullBath', 'BsmtFinSF1', 'GarageArea',
                 'MasVnrArea', 'BsmtFinSF2', 'LotFrontage', 'BsmtHalfBath',
                 'GarageCars']
    for col in zero_cols:
        df[col] = df[col].fillna(0)

    # Fixed fallback categories.
    const_fill = {'SaleType': 'WD', 'MasVnrType': 'None', 'KitchenQual': 'TA',
                  'Functional': 'Typ', 'Utilities': 'AllPub'}
    for col, value in const_fill.items():
        df[col] = df[col].fillna(value)

    # No obvious default: propagate the previous row's value.
    # .ffill() replaces fillna(method='ffill'), which is deprecated
    # (removed in pandas 3.0).
    ffill_cols = ['Exterior2nd', 'Electrical', 'MSZoning', 'GarageYrBlt',
                  'Exterior1st', 'TotalBsmtSF']
    for col in ffill_cols:
        df[col] = df[col].ffill()

    return df
# Apply the same imputation to both splits.
train = process_missvalue(train)
test= process_missvalue(test)


# In[178]:


# Separate out the text-typed ("object") columns of each split, then inspect
# every categorical column's distinct values and their frequencies.
train_obj = train.select_dtypes(include=['object'])
test_obj = test.select_dtypes(include=['object'])

for col, series in train_obj.items():
    print('\n%s特征的不同取值以及次数' % col)
    print(series.value_counts())


# Assigning a code to every text value by hand would be tedious, so the next
# cell builds the value-to-code mapping automatically.

# In[179]:



# Build an integer code for every distinct value of each text column:
# 'No' (feature absent) is always coded 0, other values get 1, 2, ...
# Fixes vs. the original:
# - `val is 'No'` compared object identity, not equality — unreliable for
#   strings (and a SyntaxWarning on modern Python) — now `==`;
# - iterating a bare set made the assigned codes non-deterministic between
#   runs; sorting first makes the encoding reproducible.
obj_dict = dict()
for col in train_obj.columns:
    obj_dict[col] = dict()
    i = 1
    for val in sorted(set(train_obj[col]), key=str):
        if val == 'No':
            obj_dict[col][val] = 0
        else:
            obj_dict[col][val] = i
        # NOTE: the counter advances even on the 'No' branch, leaving a gap in
        # the code sequence — kept to stay close to the original scheme.
        i += 1
print(obj_dict['LandContour']['Bnk'])


# Now encode the categorical features with these codes.

# In[180]:


def obj2number(df, obj_dict):
    """Map the text categories in *df* to integer codes.

    *obj_dict* has the shape ``{column: {value: code}}`` (as built in the
    previous cell); ``DataFrame.replace`` applies each per-column mapping and
    returns a new frame.
    """
    return df.replace(obj_dict)
# Encode both splits with the mapping learned from the training columns.
train_obj = obj2number(train_obj,obj_dict)
test_obj = obj2number(test_obj,obj_dict)
train_obj.info()             


# Every feature is numeric now; the (former) categorical part gets one-hot encoded next.

# In[181]:


# The remaining columns, i.e. everything that was not in the categorical set.
train_num = train.drop(train_obj.columns,axis=1)
test_num = test.drop(train_obj.columns,axis=1)


# In[182]:


# Marker column used to split the rows apart again after a joint get_dummies:
# 1 = training row, 2 = test row.
train_obj['train']=1
test_obj['train']=2


# In[186]:


# Stack train and test so that both receive an identical set of dummy columns.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported replacement (same row order, indexes preserved).
all_num = pd.concat([train_obj, test_obj])
all_cat=pd.get_dummies(all_num,columns=all_num.columns)
all_cat.head()


# Combine the categorical parts of train and test, one-hot encode them together,
# split them apart again, and finally rejoin each with its numeric features.

# In[193]:


# Recover the two splits via the dummy marker columns (train_1/train_2 came from
# one-hot encoding the 'train' flag above), then drop the markers.
train_cat=all_cat[all_cat.train_1==1].drop(['train_1','train_2'],axis=1)
test_cat=all_cat[all_cat.train_2==1].drop(['train_1','train_2'],axis=1)


# In[199]:


# Column-wise join of the one-hot block with the numeric block for each split.
# NOTE(review): concat on axis=1 aligns rows by index — this relies on
# train_cat/train_num (and test_cat/test_num) sharing identical indexes,
# which holds here because each pair derives from the same source frame.
train_fin=pd.concat([train_cat,train_num],axis = 1, ignore_index=False)
test_fin=pd.concat([test_cat,test_num],axis = 1, ignore_index=False)


# In[201]:


# Sanity check on the final feature-matrix dimensions.
test_fin.shape


# In[202]:


# Persist the engineered feature sets for the modelling notebook.
train_fin.to_csv('train_fin_ztt.csv',index=False)
test_fin.to_csv('test_fin_ztt.csv',index=False)


# Finally, save the processed training and test sets to disk.
