#!/usr/bin/env python3

'''
    @author : walker
    @time : 2019/10/2
    @description : 北京房价预测
'''

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from config import *
from analysis_house_data import *
from sklearn.model_selection import train_test_split
# Cross-validation is used to mitigate overfitting to any single split
from sklearn.model_selection import KFold
#引入决策树回归模型
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import make_scorer
#通过GridSearchCV找到最优深度参数
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import r2_score
import visuals as vs



def read_data():
    """Load the housing CSV, derive a per-unit price feature, and
    reorder the columns.

    Param:
        (none) — the CSV location comes from ``data_url`` in config.
    Returns:
        hd: DataFrame of house details with an added 'PerPrice' column.
    """
    raw = pd.read_csv(data_url)
    # Quick inspection helpers (enable while exploring the data):
    # print(raw.info())      # missing-value overview
    # print(raw.describe())  # medians, quartiles, etc.
    # print(raw.head())

    hd = raw.copy()
    # New feature: average price per unit of size
    hd['PerPrice'] = raw['Price'] / raw['Size']
    # Rearrange columns so the price-related ones come last
    ordered = ['Region', 'District', 'Garden', 'Layout', 'Floor', 'Year',
               'Size', 'Elevator', 'Direction', 'Renovation', 'PerPrice', 'Price']
    hd = pd.DataFrame(hd, columns=ordered)
    # print(hd.head())
    return hd

def direct_func(x):
    """Normalize a house-direction string into its canonical form.

    Param:
        x: raw direction string, e.g. '南北'. Must be a str.
    Returns:
        'no' when the string contains repeated characters (malformed);
        otherwise the canonical form looked up via the d_list_* tables
        from config, or x unchanged when it is already canonical.
    Raises:
        TypeError: if x is not a string.
    """
    if not isinstance(x, str):
        raise TypeError('direction must be a string, got %r' % type(x))
    x = x.strip()
    x_len = len(x)
    x_list = pd.unique(list(x))
    # A repeated character means the entry is malformed — mark for removal
    if x_len != len(x_list):
        return 'no'

    # Short-circuiting `and` replaces the original bitwise `&`; both
    # operands are plain bools, so behavior is identical but clearer.
    if x_len == 2 and x not in d_list_two:
        # Canonical two-char form is the reversed pair
        return x[1] + x[0]
    elif x_len == 3 and x not in d_list_three:
        for candidate in d_list_three:
            if x_list[0] in candidate and x_list[1] in candidate and x_list[2] in candidate:
                return candidate
        # NOTE(review): no canonical three-char form matched; the original
        # implicitly returned None here, which is preserved — confirm
        # whether 'no' (treated as invalid downstream) was intended.
        return None
    elif x_len == 4 and x not in d_list_four:
        return d_list_four[0]
    else:
        return x

def analysis_and_plt(house_data):
    """Run every per-feature analysis plot for the Beijing housing data.

    Params:
        house_data: cleaned housing DataFrame.
    Returns:
        None — each analysis step renders its own plot.
    """
    analyzer = analysis_house_data(house_data)
    # One analysis step per feature: region, size, layout, renovation
    # state, elevator availability, build year, and floor.
    steps = (
        analyzer.region_analysis,
        analyzer.size_analysis,
        analyzer.layout_analysis,
        analyzer.renovation_analysis,
        analyzer.elevator_analysis,
        analyzer.years_analysis,
        analyzer.floor_analysis,
    )
    for step in steps:
        step()

def one_hot_encoder(df, nan_as_category = True):
    """One-hot encode every object-dtype column of *df*.

    Params:
        df: input DataFrame.
        nan_as_category: when True, NaN gets its own dummy column.
    Returns:
        (encoded_df, new_columns): the encoded DataFrame and the list of
        dummy columns that were added by the encoding.
    """
    before = list(df.columns)
    categorical = [col for col in df.columns if df[col].dtype == 'object']
    encoded = pd.get_dummies(df, columns=categorical, dummy_na=nan_as_category)
    added = [col for col in encoded.columns if col not in before]
    return encoded, added

def fit_model(x, y):
    """Grid-search a decision-tree regressor over max_depth 1..12.

    Param:
        x: feature matrix.
        y: target values.
    Returns:
        The best estimator found by the grid search.
    """
    # 10-fold shuffled cross-validation mitigates overfitting to any
    # single train/validation split
    folds = KFold(10, shuffle=True)
    tree = DecisionTreeRegressor()
    search_space = {'max_depth': list(range(1, 13))}
    scorer = make_scorer(performance_metric)
    searcher = GridSearchCV(estimator=tree, param_grid=search_space,
                            scoring=scorer, cv=folds)
    # Run the grid search on x, y (fit returns the searcher itself)
    searcher.fit(x, y)
    return searcher.best_estimator_

def performance_metric(y_true, y_predict):
    """Score predictions against the ground truth.

    Param:
        y_true: correct target values.
        y_predict: predicted target values.
    Returns:
        The coefficient of determination (R^2) of the predictions.
    """
    return r2_score(y_true, y_predict)

def main():
    """End-to-end pipeline: load, clean, feature-engineer, one-hot
    encode, and fit a decision-tree price model for Beijing housing."""
    # Load the data and basic info
    house_data = read_data()

    # Pre-processing: exclude villas and abnormally large listings.
    # .copy() avoids pandas chained-assignment warnings on later writes.
    house_data = house_data[(house_data['Layout'] != '叠拼别墅') & (house_data['Size'] < 1000)].copy()

    # Elevator imputation: above 6 floors buildings generally have an
    # elevator, at or below 6 floors they generally do not.
    house_data.loc[(house_data['Floor'] > 6) & (house_data['Elevator'].isnull()),'Elevator'] = '有电梯'
    # BUG FIX: the original assigned '有电梯' in this branch too, which
    # contradicts the imputation rule stated above — low floors without
    # elevator info are filled as '无电梯'.
    house_data.loc[(house_data['Floor'] <= 6) & (house_data['Elevator'].isnull()),'Elevator'] = '无电梯'

    # Per-feature analysis plots (enable while exploring the data)
    # analysis_and_plt(house_data)

    # Keep only layouts of the "N室M厅" form (raw strings avoid invalid
    # escape-sequence warnings in the regex patterns)
    house_data = house_data.loc[house_data['Layout'].str.extract(r'^\d(.*?)\d.*?', expand=False) == '室'].copy()
    # New features extracted from Layout: room count and hall count
    house_data['Layout_room_num'] = house_data['Layout'].str.extract(r'(^\d).*', expand=False).astype('int64')
    house_data['Layout_hall_num'] = house_data['Layout'].str.extract(r'^\d.*?(\d).*', expand=False).astype('int64')
    # print(house_data['Layout_hall_num'].value_counts())

    # Normalize direction strings and drop invalid entries
    house_data['Direction'] = house_data['Direction'].apply(direct_func)
    house_data = house_data.loc[(house_data['Direction'] != 'no') & (house_data['Direction'] != 'nan')].copy()

    # Derived features
    # Total number of rooms (halls included)
    house_data['Layout_total_num'] = house_data['Layout_room_num'] + house_data['Layout_hall_num']
    # Average size per room
    house_data['Size_room_ratio'] = house_data['Size'] / house_data['Layout_total_num']

    # Bin 'Year' into 8 quantile-based buckets
    house_data['Year'] = pd.qcut(house_data['Year'], 8).astype('object')
    # print(house_data['Year'].value_counts())

    # Drop features that no longer carry useful signal
    house_data = house_data.drop(['District','Layout','Garden','PerPrice'], axis=1)

    # One-hot encode the remaining categorical features
    house_data, df_cat = one_hot_encoder(house_data)

    # (Optional) correlation heatmap to spot heavily-overlapping features
    # colormap = plt.cm.RdBu
    # plt.figure(figsize=(20,20))
    # sns.heatmap(house_data.corr(),linewidths=0.1,vmax=1.0, square=True, cmap=colormap, linecolor='white', annot=True)
    # plt.show()

    # Split features and labels
    features = np.array(house_data.drop('Price', axis=1))
    prices = np.array(house_data['Price'])

    # Hold out 20% of the data for testing
    features_train, features_test, prices_train, prices_test = train_test_split(
        features, prices, test_size=0.2, random_state=0)

    # Model analysis, then grid-search for the optimal depth
    vs.ModelLearning(features_train, prices_train)
    vs.ModelComplexity(features_train, prices_train)
    optimal_reg = fit_model(features_train, prices_train)

    # Report the optimal 'max_depth' parameter
    print("最理想的参数'max_depth'是：", format(optimal_reg.get_params()['max_depth']))

    # Evaluate on the held-out test set
    predict_value = optimal_reg.predict(features_test)
    r2 = performance_metric(prices_test, predict_value)
    print("最优模型在测试数据上 R^2 分数 {:,.2f}。".format(r2))


# Script entry point: run the full prediction pipeline when executed directly.
if __name__ == '__main__':
    main()
