# coding:utf-8
# __user__ = hiicy redldw
# __time__ = 2019/8/3
# __file__ = house
# __desc__ = house price predict
import numpy as np
import pandas as pd
import torch.nn as nn
import torch
from sklearn.feature_selection import VarianceThreshold
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.cluster as clu
from sklearn.preprocessing import StandardScaler
from actual import get_top_n_features,from_Blend_model

# Print full numpy arrays instead of truncating. BUG FIX: threshold must be
# a non-negative count (or np.inf); -np.inf raises ValueError on modern numpy.
np.set_printoptions(threshold=np.inf)
fl = r"f:\Resources\kdata\house\train.csv"
fj = r"f:\Resources\kdata\house\test.csv"
data = pd.read_csv(fl)
n_train = len(data)  # remember where the train rows end after concatenation
df = pd.read_csv(fj)
teidx = df.Id  # keep test-set Ids for the submission file
n_test = len(df)
# Stack train and test so every preprocessing step is applied to both at once.
data = pd.concat([data, df], axis=0, sort=False, ignore_index=True)
# BUG FIX: DataFrame.info() prints directly and returns None; wrapping it in
# print() emitted an extra "None" line.
data.info()
print('----------\n', data.describe(), '\n-----------\n')
# REW: a handy trick for statistics work: combine get_dummies with a
# discretizer such as cut, e.g. pd.get_dummies(pd.cut(data, bins)).

# print(df.isnull().sum().sort_values())
# TODO:REW: missing-value ratio per column (percent), highest first, top 30.
all_data_na = (data.isnull().sum() / len(data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio': all_data_na})
# BUG FIX: head() returns a new frame; the original discarded the result,
# so the summary was never displayed. Print it explicitly.
print(missing_data.head(20))

# One-hot encode Alley (NaN means "no alley access"), then drop columns
# judged uninformative for the price target.
Alley = data['Alley'].fillna(value="no_access")
d = pd.get_dummies(Alley, prefix='Alley')
data.drop(labels=['Alley', 'PoolArea', 'PoolQC', 'Fence', 'SaleType', "YrSold",
                  'MiscFeature', 'PavedDrive', 'GarageYrBlt', 'SaleCondition'], axis=1, inplace=True)
# BUG FIX: DataFrame.join returns a NEW frame; the original discarded the
# result, so the Alley dummy columns were never actually added.
data = data.join(d)
# BUG FIX: info() prints itself and returns None; don't wrap it in print().
print('-' * 20)
data.info()

# Fill categorical gaps with the most frequent value.
# BUG FIX: chained `data['X'].fillna(..., inplace=True)` operates on a column
# selection and is a silent no-op under pandas copy-on-write — use plain
# assignment instead. Also, Series.mode() can return SEVERAL values, in which
# case .item() raises ValueError; .iloc[0] takes the first mode safely.
data['Exterior1st'] = data['Exterior1st'].fillna(data['Exterior1st'].mode().iloc[0])
data['Exterior2nd'] = data['Exterior2nd'].fillna(data['Exterior2nd'].mode().iloc[0])
data['MSZoning'] = data['MSZoning'].fillna(data['MSZoning'].mode().iloc[0])
# Interpolation method notes:
# - 'quadratic' suits data whose growth rate accelerates;
# - 'pchip' suits data shaped like a cumulative distribution;
# - 'akima' gives a smooth fill, which is what we want here.
data['LotFrontage'] = data['LotFrontage'].interpolate(method='akima')
# Ordinal encoding for Utilities (pd.factorize would also work, but would
# lose the natural quality ordering).
utils = {"AllPub": 4, "NoSewr": 3, "NoSeWa": 2, "ELO": 1, np.nan: 0}
data['Utilities'] = data['Utilities'].map(utils)
data['MasVnrType'] = pd.factorize(data['MasVnrType'])[0]
data['MasVnrArea'] = data['MasVnrArea'].fillna(0)
# bsmtqual = [0,70,80,90,100]  # REW: binning alternative — neither equal-frequency nor equal-width
# pd.cut(data['BsmtQual'],bsmtqual)
# factorize assigns integer codes (NaN becomes -1).
data['BsmtQual'] = pd.factorize(data['BsmtQual'])[0]
data['BsmtCond'] = pd.factorize(data['BsmtCond'])[0]  # 1,2,3,4 discretization
data['BsmtExposure'] = pd.factorize(data['BsmtExposure'])[0]
data['BsmtFinType1'] = pd.factorize(data['BsmtFinType1'])[0]


def check_outlier(x):
    """Detect outliers with the 3-sigma rule.

    A point is an outlier when its ABSOLUTE deviation from the mean exceeds
    three standard deviations. BUG FIX: the original only tested
    `i - mean > 3 * std`, so low-side outliers were silently missed.

    Args:
        x: iterable of numeric values.

    Returns:
        list of the outlying values (also printed, as before; callers that
        ignored the previous None return are unaffected).
    """
    mean = np.mean(x)
    print(mean)
    std = np.std(x)
    print(std)
    outliers = [i for i in x if abs(i - mean) > 3 * std]
    print(outliers)
    return outliers

# TODO: feature construction
# (somewhat redundant)
# Forward-fill basement areas. BUG FIX: plain assignment instead of chained
# inplace=True (a silent no-op under pandas copy-on-write), and .ffill() /
# .bfill() instead of the deprecated fillna(method='pad'/'backfill').
data['BsmtFinSF1'] = data['BsmtFinSF1'].ffill()
data['BsmtFinType2'] = pd.factorize(data['BsmtFinType2'])[0]
data['BsmtFinSF2'] = data['BsmtFinSF2'].ffill()
data['BsmtUnfSF'] = data['BsmtUnfSF'].ffill()
data['TotalBsmtSF'] = data['TotalBsmtSF'].ffill()
# Most-frequent-value imputation; .iloc[0] instead of .item() so that a
# multi-modal column does not raise ValueError.
data['Electrical'] = data['Electrical'].fillna(data['Electrical'].mode().iloc[0])
data['BsmtFullBath'] = data['BsmtFullBath'].fillna(data['BsmtFullBath'].mode().iloc[0])
data['BsmtHalfBath'] = data['BsmtHalfBath'].fillna(data['BsmtHalfBath'].mode().iloc[0])
# Ordinal quality scales (higher = better).
kicqual = {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1}
data['KitchenQual'] = data['KitchenQual'].map(kicqual)
data['KitchenQual'] = data['KitchenQual'].fillna(data['KitchenQual'].median())
functional = {"Sal": 1, "Sev": 2, "Maj2": 3, "Maj1": 4, "Mod": 5, "Min2": 6, "Min1": 7, "Typ": 8}
data['Functional'] = data['Functional'].map(functional)
data['Functional'] = data['Functional'].fillna(data['Functional'].mode().iloc[0])
fireplacequ = {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, np.nan: 0}
data['FireplaceQu'] = data['FireplaceQu'].map(fireplacequ)
GarageType = {np.nan: -1, "Detchd": 1, "CarPort": 2, "BuiltIn": 3, "Basment": 4, "Attchd": 5, "2Types": 6}
data['GarageType'] = data['GarageType'].map(GarageType)
data['GarageFinish'] = pd.factorize(data['GarageFinish'])[0]
data['GarageCars'] = data['GarageCars'].bfill()
data['GarageArea'] = data['GarageArea'].bfill()
data['GarageQual'] = data['GarageQual'].map(fireplacequ)
data['GarageCond'] = data['GarageCond'].map(fireplacequ)

# Nominal categorical columns to integer-encode via pd.factorize.
need_direve = ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'LandContour', 'LotConfig', 'LandSlope',
               'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle',
               'RoofMatl', 'Exterior1st', 'Exterior2nd', 'ExterQual', 'ExterCond', 'Foundation',
               'Heating', 'HeatingQC', 'CentralAir', 'Electrical']

# Feature-vs-target analysis: group by the FEATURE and plot mean SalePrice.
data[['Utilities', 'SalePrice']].groupby('Utilities').mean().plot.bar()
# BUG FIX: the original grouped by 'SalePrice' (the target), which yields one
# bar per distinct price — group by the feature, as in the sibling plots.
data[['BsmtQual', 'SalePrice']].groupby('BsmtQual').mean().plot.bar()
data[['BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'SalePrice']].groupby(
    ['BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1']).mean().plot.bar()
for column in need_direve:
    data[column] = pd.factorize(data[column])[0]
idx = data.pop('Id')  # Id is an identifier, not a predictive feature
labels = data.pop("SalePrice")
labels = labels[:n_train].values  # test rows carry NaN SalePrice; keep only the train targets

#  Feature relevance
# Variance-threshold selection: drop near-constant columns.
selector = VarianceThreshold(0.16)
filtered = selector.fit_transform(data)
kept_columns = data.columns[selector.get_support()]
data = pd.DataFrame(filtered, index=data.index, columns=kept_columns)
print(data.columns)
# Heatmap of pairwise feature-feature correlations.
correlation = pd.DataFrame(
    data[['MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'LotShape', 'LandContour','BsmtCond','FireplaceQu',
          'LotConfig','OpenPorchSF','MiscVal', 'Neighborhood', 'Condition1','HeatingQC', 'LowQualFinSF','Electrical']])
fig = plt.figure(figsize=(15, 10))
plt.title('Pearson Correaltion of Feature', y=1.05, size=15)
sns.heatmap(correlation.astype(float).corr(), linewidths=0.2, vmax=1.0, square=True,
            cmap=plt.cm.viridis, linecolor='white', annot=True)
# features_top_n, features_importance = get_top_n_features(data[:n_train],labels,top_n_features=40)  # REW: model-based extraction of important features
# data = data.loc[:,features_top_n]
print(data.describe())
# Standardize the continuous columns to zero mean / unit variance.
stander_list = ['LotArea','YearBuilt','YearRemodAdd','1stFlrSF','GrLivArea',
                'MasVnrArea','BsmtUnfSF','TotalBsmtSF','2ndFlrSF','GarageArea',
                'WoodDeckSF','OpenPorchSF','EnclosedPorch','3SsnPorch','ScreenPorch','MiscVal']
data[stander_list] = StandardScaler().fit_transform(data[stander_list])

# todo: compute a score via cross-validation
# def rmsle_cv(model):
#     kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(train.values)
#     rmse= np.sqrt(-cross_val_score(model, train.values, y_train, scoring="neg_mean_squared_error", cv = kf))
#     return(rmse)

# Split the fully preprocessed frame back into its train/test halves.
x_train, x_test = data[:n_train].values, data[n_train:].values
# TODO: scale rmse with np.log1p
# Replace the raw features with distances to 20 k-means cluster centres,
# fitting the clustering on the training rows only.
cluster_model = clu.KMeans(n_clusters=20)
x_train = cluster_model.fit_transform(x_train)
x_test = cluster_model.transform(x_test)
from_Blend_model(x_train, labels, x_test, teidx)
plt.show()
# todo: merge similar features via clustering to reduce the variable count