import os
import pandas as pd
from pandas.plotting import scatter_matrix
import numpy as np
import hashlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.model_selection import train_test_split,StratifiedShuffleSplit
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelBinarizer
from sklearn.base import BaseEstimator,TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import FeatureUnion
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import  GridSearchCV,RandomizedSearchCV
from scipy.stats import randint
'''
Notes and takeaways:
1. During data collection, talk to the people gathering the data to understand what each field means.
2. Visualize the data to spot capped maximum values and heavy-tailed distributions.
3. Prefer stratified sampling when building train and test sets so they represent the whole dataset.
4. Use the Pearson correlation coefficient to find related features and visualize the strongest pairs.
5. When preparing data for ML, separate predictors from labels, clean the data, and use transformation pipelines to save time.
6. To pick the best model, compare linear regression, decision trees and random forests with RMSE and cross-validation; the random forest wins here, then tune it with grid search and randomized search to find the best hyperparameters.
7. Analyze the best model and evaluate the final system on the test set.
8. Launch, monitor and maintain: check live performance regularly to catch outages, and retrain the model on fresh data periodically.
'''
# Read the raw dataset from disk
def load_housing_data(housing_path = './', filename = 'housing.csv'):
    """Load the housing dataset from a CSV file.

    Args:
        housing_path: directory that contains the CSV file.
        filename: name of the CSV file (defaults to 'housing.csv',
            matching the original hard-coded behavior).

    Returns:
        pandas.DataFrame with the raw housing data.
    """
    csv_path = os.path.join(housing_path, filename)
    return pd.read_csv(csv_path)

housing = load_housing_data()
# print(housing.head(5))
# print(housing.shape)
# Quick description of the dataset: each attribute's dtype and non-null count
# print(housing.info())

# How many categories exist and how many districts fall into each one
# print(housing['ocean_proximity'].value_counts())
# Summary statistics of the numerical attributes
# print(housing.describe())

# House age and house value were capped; capped districts should be re-collected or removed
# Heavy tail (tall head, long tail): transform the data toward a bell-shaped
# (normal) distribution with mean 0 and standard deviation 1
# For the income feature we need to find out how the data was scaled
# housing.hist(figsize=(20,15),bins=50)
# plt.show()

# 创建训练集和测试集
# 训练集用于模型训练占整个数据集的80%，测试占20%
# def split_train_test(data,test_ratio):
#     # 对原来的数组进行重新洗牌，随机打乱原来的元素顺序
#     np.random.seed(1)
#     indices = np.random.permutation(len(data))
#     test_set_size = int(len(data)*test_ratio)
#     test_indices = indices[:test_set_size]
#     train_indices = indices[test_set_size:]
#     return data.iloc[train_indices],data.iloc[test_indices]
#
# train_set,test_set = split_train_test(housing,0.2)
# print(len(train_set))
# print(len(test_set))

# 对样本设置唯一的标识符，对标识符取hash的最后一个字节，值小于等于51，256*20%，放入测试集
# hash值相同数值可能不同，hash值不同数值一定不同
#
# def test_set_check(identifier,test_ratio,hash = hashlib.md5):
#     # 返回摘要，作为二进制数据字符串
#     return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
#
# def split_train_set_by_id(data,test_ratio,id_column):
#     ids = data[id_column]
#     in_test_set = ids.apply(lambda id_:test_set_check(id_,test_ratio))
#     return data.loc[-in_test_set],data.loc[in_test_set]
#
# # 使用行索引为标识符
# housing_with_id = housing.reset_index()
# print(housing_with_id.head(5))

# train_set,test_set = split_train_set_by_id(housing_with_id,0.2,'index')
# print(test_set.tail(5))

# 基于行索引不能中间插入和删除数据，只能末尾插入，否则行索引会变
# # 寻找稳定特征创建唯一标识符
# housing_with_id['id'] = housing['longitude'] * 1000 + housing['latitude']
# train_set,test_set = split_train_set_by_id(housing_with_id,0.2,'id')
# # print(test_set.head(5))
#
# train_set,test_set = train_test_split(housing,test_size=0.2,random_state=42)
# # print(train_set.head(5))

# 从数据可视化中探索数据
#分层采样
# housing['median_income'].hist()
# plt.show()
# 希望测试集能够代表整个数据集各种不同类型的收入
# 创建收入类型属性
# 不应该数据分层太多，但每一曾应该有足够的数据量
# 将收入中位数除以1.5，限制收入类别数量，使用ceil取整，得到离散类别，将大于5的列别合并为类别5
# housing['income_cat'] = np.ceil(housing['median_income']/1.5)
# print(housing['income_cat'].head(20))
# housing['income_cat'].where(housing['income_cat'] < 5 , 5.0 , inplace=True)
# print(housing['income_cat'].head(20))
# Bucket the continuous median income into discrete category labels
housing['income_cat'] = pd.cut(housing['median_income'],bins=[0.,1.5,3.0,4.5,6.,np.inf],labels=[1,2,3,4,5])
# print(housing['income_cat'].head(20))
# print(housing['income_cat'].value_counts())
# Histogram of the income categories
# housing['income_cat'].hist()
# plt.show()
# Stratified sampling based on the income category
# (n_splits=1: a single train/test split; fixed random_state for reproducibility)
split = StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index,test_index in split.split(housing,housing['income_cat']):
    strat_train_set = housing.loc[train_index]
    strat_test_set = housing.loc[test_index]

# print(strat_train_set.shape)
# print(strat_train_set.head(20))

# 查看所有住房数据收入类别比例分布，收入类别百分比
# 分层抽样测试集合
# print(strat_test_set['income_cat'].value_counts()/len(strat_test_set))
# 完整数据集合
# print(housing['income_cat'].value_counts()/len(housing))
#
# def income_cat_proportions(data):
#     return data['income_cat'].value_counts()/len(data)
#
# train_set,test_set = train_test_split(housing,test_size=0.2,random_state=42)
#
# compare_prope = pd.DataFrame({
#     '全部数据':income_cat_proportions(housing),
#     '分层抽样':income_cat_proportions(strat_test_set),
#     '随机抽样':income_cat_proportions(test_set),
# }).sort_index()
# compare_prope['随机. %error'] = 100 * compare_prope['随机抽样']/compare_prope['全部数据'] - 100
# compare_prope['分层. %error'] = 100 * compare_prope['分层抽样']/compare_prope['全部数据'] - 100
# # print(compare_prope)

#将地理数据可视化
#建立一个各区域的分布图，以便于数据可视化
# california_img = mpimg.imread('./california.png')
# housing.plot(kind = 'scatter',x='longitude',y='latitude',alpha = 0.4,
#              s=housing['population']/100,label='population',figsize=(10,7),
#              c='median_house_value',cmap = plt.get_cmap('jet'),colorbar=False,
#              sharex = False)
#
# plt.imshow(california_img,extent=[-124.55,-113.80,32.45,42.05],alpha=0.5,
#            cmap=plt.get_cmap('jet'))
# plt.ylabel('Latitude',fontsize=14)
# plt.xlabel('Longitude',fontsize=14)
#
# prices = housing['median_house_value']
# tick_values = np.linspace(prices.min(),prices.max(),11)
# cbar = plt.colorbar()
# cbar.ax.set_yticklabels(['$%dk'%(round(v/1000)) for v in tick_values],fontsize=14)
# cbar.set_label('Median House Value',fontsize = 16)
# plt.legend(fontsize=16)
# plt.show()

# 从图片中告诉我们，房屋价格与地理位置靠海，和人口密度息息相关
# 寻找特征相关性
# corr（）计算出每对特征之间的相关系数，称为皮尔逊相关系数
# corr_matrix = housing.corr()
# print(corr_matrix)
# print(corr_matrix['median_house_value'].sort_values(ascending=False))
# 相关系数范围从-1变化到1，越接近1，表示越强的正相关，比如当收入的中位数上升时，房价中位数也趋于上升
# 当系数接近-1，则表示有强烈的负相关，纬度和房价中位数之间出现轻微的负相关，越往北走，房价倾向于下降
# 相关系数仅检测线性相关性，如果x上升，y上升或者下降，可能会彻底遗漏非线性相关性，例如如果x接近于0，y上升
# 使用padans scatter_matrix 函数，可以绘制特征之间的相关性

# attributes=['median_house_value','median_income','total_rooms','housing_median_age']
# scatter_matrix(housing[attributes],figsize=(12,8))
# plt.show()
# housing.plot(kind = 'scatter',x = 'median_income',y = 'median_house_value',alpha = 0.1)
# plt.axis([0,16,0,550000])
# plt.show()
# 50万美元是一条清晰的线，被设置上限，45万，35万，28万也有虚线，可能是异常值，可以在后期把数据删除掉

# Experiment with combinations of existing attributes
housing['rooms_per_household'] = housing['total_rooms']/housing['households']
housing['bedroom_per_room'] = housing['total_bedrooms']/housing['total_rooms']
housing['population_per_household'] = housing['population']/housing['households']
# numeric_only=True: 'ocean_proximity' (text) and 'income_cat' (categorical)
# cannot take part in a Pearson correlation; without this flag pandas >= 2.0
# raises a TypeError instead of silently dropping the non-numeric columns.
corr_matrix = housing.corr(numeric_only=True)
# print(corr_matrix['median_house_value'].sort_values(ascending=False))

# Prepare the data for machine learning algorithms
# We already have a stratified train/test split that represents the full dataset
# Separate the predictors from the labels (drop() returns a copy, the
# stratified train set itself is untouched)
housing_labels = strat_train_set['median_house_value'].copy()
housing = strat_train_set.drop('median_house_value',axis = 1)
# print(housing.head(5))
# print(housing.info())
# Data cleaning
# Most ML algorithms cannot work with missing features; total_bedrooms has gaps.
# Three options:
#   1. drop the affected districts
#   2. drop the whole attribute
#   3. fill the missing entries with some value (0, the mean or the median)
# rows = housing[housing.isnull().any(axis=1)]
# print(rows)
# Option 1: drop the districts
# rows.dropna(subset=['total_bedrooms'])
# Option 2: drop the attribute
# rows.drop('total_bedrooms',axis=1)
# Option 3: fill the missing values
# Create an imputer instance that replaces missing values with the attribute's median
# imputer = SimpleImputer(strategy = 'median')
# fit() adapts the imputer to the training data; medians only exist for
# numeric columns, so the text attribute is dropped first
housing_num = housing.drop('ocean_proximity',axis = 1)
# imputer.fit(housing_num)
# print(imputer.statistics_)
# print(housing_num.median().values)
# X = imputer.transform(housing_num)
# housing_tr = pd.DataFrame(X,columns=housing_num.columns,index=housing.index)
# print(housing_tr)
# print(housing_tr.info())
# scikit-learn design principles:
# Consistency
#     Estimators: fit() performs the estimation (e.g. any learning algorithm)
#     Transformers: transform() converts a dataset; fit_transform() estimates
#         then transforms (e.g. LabelBinarizer)
#     Predictors: predict() makes predictions on new data; score() measures
#         prediction quality on a test set
#     Inspection: learned parameters are exposed through public attributes,
#         e.g. imputer.statistics_
housing_cat = housing['ocean_proximity']
# print(housing_cat.head(10))
# print(housing_cat)
# print(housing[['ocean_proximity']].value_counts())
# encoder  = LabelEncoder()
# housing_cat = housing['ocean_proximity']
# housing_cat_encoder = encoder.fit_transform(housing_cat)
# print(housing_cat_encoder)
# print(encoder.classes_)
# ML algorithms assume two nearby numbers are more similar than two distant
# ones (e.g. 0 and 1 look closer than 0 and 4), which is wrong for unordered
# categories. One-hot encoding fixes this: when the category is '<1H OCEAN'
# the first attribute is 1 and the rest 0; when it is 'INLAND' another
# attribute is 1, etc. 1 is "hot", 0 is "cold" - hence "one-hot".
# OneHotEncoder's fit_transform needs a 2-D array, so the encoded column
# must be reshaped first
# encoder = OneHotEncoder()
# housing_cat_hot = encoder.fit_transform(housing_cat_encoder.reshape(-1,1))
# print(housing_cat_hot)
# Convert from a sparse matrix to a dense array
# print(housing_cat_hot.toarray())
# LabelBinarizer does both steps (text categories -> integers -> one-hot) at once
encoder=LabelBinarizer()
housing_cat_1hot = encoder.fit_transform(housing_cat)
# print(housing_cat_1hot)
# To output a sparse matrix instead:
# encoder=LabelBinarizer(sparse_output=True)
# Custom transformers
# Column positions of the attributes combined by CombinedAttributesAdder below
room_ix,bedroom_ix,population_ix,household_ix = [list(housing.columns).index(col) for col in ('total_rooms','total_bedrooms','population','households')]
# print(room_ix,bedroom_ix,population_ix,household_ix)
class CombinedAttributesAdder(BaseEstimator,TransformerMixin):
    """Append engineered ratio features to a numeric housing array.

    Adds rooms-per-household and population-per-household columns and,
    optionally, bedrooms-per-room. Column positions are taken from the
    module-level *_ix indices computed from the training DataFrame.
    """
    def __init__(self,add_bedroom_per_room = True):
        # Hyperparameter: whether to also add the bedrooms/rooms ratio.
        self.add_bedroom_per_room = add_bedroom_per_room

    def fit(self,X,y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self,X,y=None):
        households = X[:,household_ix]
        extra_cols = [X[:,room_ix] / households,
                      X[:,population_ix] / households]
        if self.add_bedroom_per_room:
            extra_cols.append(X[:,bedroom_ix] / X[:,room_ix])
        # Same column order as the manual np.c_ concatenation: X first,
        # then the engineered ratios in declaration order.
        return np.column_stack([X] + extra_cols)


# Try the transformer without the bedrooms/rooms ratio
attr_addr = CombinedAttributesAdder(add_bedroom_per_room=False)
housing_extra_attribs = attr_addr.transform(housing.values)
housing_tr = pd.DataFrame(housing_extra_attribs)
# print(housing_tr.head())
# Rebuild a DataFrame with proper column names and the original row index
housing_extra_attribs = pd.DataFrame(housing_extra_attribs,
                                     columns=list(housing.columns)+["rooms_per_household","population_per_household"],
                                     index=housing.index)
# print(housing_extra_attribs.head())

#特征缩放
#转换流水线

# housing_num_tr = num_pipline.fit_transform(housing_num)

class DataFrameSelector(BaseEstimator,TransformerMixin):
    """Select a fixed subset of DataFrame columns as a NumPy array.

    Lets a numeric or categorical pipeline start directly from the full
    DataFrame by extracting only the attributes it should process.
    """
    def __init__(self,attribute_names):
        # Column names to pull out of the incoming DataFrame.
        self.attribute_names = attribute_names

    def fit(self,X,y=None):
        # Nothing to learn; the selection is fixed at construction time.
        return self

    def transform(self,X):
        selected = X[self.attribute_names]
        return selected.values

# Names of the numeric attributes (everything except ocean_proximity)
num_attribs = list(housing_num)
# print(num_attribs)
# Numeric pipeline: select columns -> fill missing values with the median
# -> append combined attributes -> standardize to zero mean / unit variance
num_pipline = Pipeline([
    ('selector',DataFrameSelector(num_attribs)),
    ('imputer',SimpleImputer(strategy='median')),
    ('attriba_adder',CombinedAttributesAdder()),
    ('std_scaler',StandardScaler())
])

class MyLableBinarizer(TransformerMixin):
    """Pipeline-friendly wrapper around sklearn's LabelBinarizer.

    LabelBinarizer.fit/transform accept a single argument, so it cannot be
    used directly inside a Pipeline (which passes X and y). This wrapper
    accepts and ignores y, delegating everything else to the real encoder.
    """
    def __init__(self,*args,**kwargs):
        # Forward any constructor options (e.g. sparse_output) unchanged.
        self.encoder = LabelBinarizer(*args,**kwargs)

    def fit(self,x,y=0):
        # y is ignored; only the category column matters.
        self.encoder.fit(x)
        return self

    def transform(self,x,y=0):
        # y is ignored here as well.
        return self.encoder.transform(x)

# Categorical pipeline: select the text column and one-hot encode it
cat_attribs = ['ocean_proximity']
cat_pipline = Pipeline([
    ('selector',DataFrameSelector(cat_attribs)),
    ('LabelBinarizer',MyLableBinarizer()),
])
# Run both sub-pipelines and concatenate their outputs side by side
full_pipline = FeatureUnion(transformer_list=[
    ('num_pipline',num_pipline),
    ('cat_pipline',cat_pipline),
]
)

# print(full_pipline.fit_transform(housing))
housing_prepared = pd.DataFrame(full_pipline.fit_transform(housing))
# print(housing_finally.head())
# print(housing_finally.head())

# 选择和训练模型
#获得了数据
#数据探索
#对训练集和测试集进行拆分
#编写了转换数据流水线
#自动清理和准备机器学习算法的数据

# df = pd.DataFrame(housing_prepared)
# 训练模型和评估训练集
# lin_reg = LinearRegression()
# lin_reg.fit(housing_prepared,housing_labels)
# 预测数据
# some_data = housing.iloc[:5]
# some_labels = housing_labels.iloc[:5]
#
# some_data_prepared = full_pipline.transform(some_data)
# print('Predictions:',lin_reg.predict(some_data_prepared))

# housing_predictions = lin_reg.predict(housing_prepared)
# lin_mse = mean_squared_error(housing_labels,housing_predictions)
# print(lin_mse)
# lin_rmse = np.sqrt(lin_mse)
# print(lin_rmse)
# 决策树
# tree_reg = DecisionTreeRegressor(random_state=42)
# tree_reg.fit(housing_prepared,housing_labels)
#
# housing_predictions = tree_reg.predict(housing_prepared)
# tree_mse = mean_squared_error(housing_labels,housing_predictions)
# tree_rmse = np.sqrt(tree_mse)
# print(tree_rmse)
# 完美可能是这个模型对数据严重过度拟合了，如何确认？轻易不要启动测试集，拿训练集中的一部分用于训练，另一部分用于模型的验证

# 使用交叉验证来更好的进行评估
#使用train_test_split函数将训练集分为较小的训练集和验证集，然后根据这些较小的训练集来训练模型，并对其进行评估
#sklearn的交叉验证，将训练集随机分割成10个不同的子集，每个子集称为一个折叠，对模型进行10次训练和评估，每次挑选一个折叠进行评估，另外9个进行训练

# scores = cross_val_score(tree_reg,housing_prepared,housing_labels,
#                          scoring="neg_mean_squared_error",cv=10)
# tree_rmse_scores = np.sqrt(-scores)
# print(tree_rmse_scores.mean(),tree_rmse_scores.std())

# 线性交叉验证
# line_scores = cross_val_score(lin_reg,housing_prepared,housing_labels,
#                               scoring="neg_mean_squared_error",cv=10)
# line_rmse_scores = np.sqrt(-line_scores)
# print(line_rmse_scores.mean(),line_rmse_scores.std())
# Random forest model
forest_reg = RandomForestRegressor(n_estimators=10,random_state=42)
forest_reg.fit(housing_prepared,housing_labels)
housing_predictions = forest_reg.predict(housing_prepared)
# Training-set RMSE (optimistic: evaluated on the same data the model was fit on)
forest_mse = mean_squared_error(housing_labels,housing_predictions)
forest_rmse = np.sqrt(forest_mse)
# print(forest_rmse)
# Cross-validation gives a more honest estimate
# forest_scores = cross_val_score(forest_reg,housing_prepared,housing_labels,
#                               scoring="neg_mean_squared_error",cv=10)
# forest_rmse_scores = np.sqrt(-forest_scores)
# print(forest_rmse_scores.mean(),forest_rmse_scores.std())

#模型调参和网格搜索
#1.手动调整超参数，找到很好的组合很困难
#2.使用GridSearchCV替你进行搜素，告诉它,进行试验的超参数是什么，和需要尝试的值，它会使用交叉验证评估所有超参数的可能组合

# Hyperparameter grid: first dict tries 3x4 = 12 combinations, the second
# (with bootstrap off) adds 2x3 = 6 more.
param_grid = [
    # The original grid listed 10 twice ([3,10,10]); a duplicate entry is a
    # wasted CV run - the intended ladder of forest sizes is 3, 10, 30.
    {'n_estimators':[3,10,30],'max_features':[2,4,6,8]},
    {'bootstrap':[False],'n_estimators':[3,10],'max_features':[2,3,4]},
]
forest_reg = RandomForestRegressor(random_state=42)
# 5-fold CV on every combination; keep the train scores for diagnostics
grid_search = GridSearchCV(forest_reg,param_grid,cv=5,
                           scoring='neg_mean_squared_error',return_train_score=True)
grid_search.fit(housing_prepared,housing_labels)
# print(grid_search.best_params_)
# grid_search.best_estimator_
# cvres = grid_search.cv_results_
# for mean_score,params in zip(cvres['mean_test_score'],cvres['params']):
#     print(np.sqrt(-mean_score),params)

#数据准备步骤也可以当作超参数来处理，网络搜素会自动查找是否添加你不确定的特征，比如是否使用转换器combinedAttre的超参数
#也可以使用它自动寻找处理问题的最佳方法，比如异常值，缺失特征，特征选择等。
#随机搜索
# param_distribs = {
#     'n_estimators':randint(low=1,high=200),
#     'max_features':randint(low=1,high=8),
# }
# forest_reg = RandomForestRegressor(random_state=42)
# rnd_search = RandomizedSearchCV(forest_reg,param_distributions=param_distribs,
#                                 n_iter=10,cv=5,scoring='neg_mean_squared_error',random_state=42)
# rnd_search.fit(housing_prepared,housing_labels)
# print(rnd_search)
# Analyze the best model
# Relative importance of each prepared feature according to the winning forest
feature_importances = grid_search.best_estimator_.feature_importances_
# print(feature_importances)
# extra_attribs = ['room_per_household','population_per_household','bedroom_per_room']
# cat_one_hot_attribs = list(encoder.classes_)
# attributes = num_attribs+extra_attribs+cat_one_hot_attribs
# sorted(zip(feature_importances,attributes),reverse=True)
def indices_of_top_k(arr,k):
    """Return the positions of the k largest values in arr, sorted ascending."""
    values = np.array(arr)
    # argpartition puts the k largest entries (unordered) in the last k slots
    top_k_unordered = np.argpartition(values,-k)[-k:]
    return np.sort(top_k_unordered)
class TopFeatureSelector(BaseEstimator,TransformerMixin):
    """Keep only the k most important feature columns.

    Importance scores come from an already-trained model (e.g. the best
    random forest); fit() records which column indices to keep and
    transform() slices them out of the prepared array.
    """
    def __init__(self,feature_importances,k):
        self.feature_importances = feature_importances
        self.k = k

    def fit(self,X,y=None):
        # Inlined top-k lookup: positions of the k largest importance
        # scores, returned in ascending column order.
        scores = np.array(self.feature_importances)
        self.feature_indices = np.sort(np.argpartition(scores,-self.k)[-self.k:])
        return self

    def transform(self,X):
        # Column-slice the array down to the selected features.
        return X[:,self.feature_indices]

# Full pipeline: preparation -> keep the top-k features -> final model.
prepare_select_and_predict_pipeline = Pipeline([
    ('preparation',full_pipline),
    ('feature_selection',TopFeatureSelector(feature_importances,k=5)),
    # best_params_ is the dict of winning hyperparameters. The original code
    # unpacked best_estimator_ (an estimator object, not a mapping), which
    # raises a TypeError when expanded with **.
    ('final_model',RandomForestRegressor(**grid_search.best_params_))
])

prepare_select_and_predict_pipeline.fit(housing,housing_labels)

# Sanity check: predict a handful of training districts
some_data = housing.iloc[:4]
some_labels = housing_labels[:4]
print('预测值：\t',prepare_select_and_predict_pipeline.predict(some_data))
print('实际值：\t',list(some_labels))

# Treat a preparation step's option as a hyperparameter to search over.
# Nested pipeline parameters are joined with DOUBLE underscores and must
# match the actual step names ('preparation' -> 'num_pipline' ->
# 'attriba_adder') and the transformer's argument name
# ('add_bedroom_per_room'); the original single-underscore, misspelled key
# would make GridSearchCV.fit fail with an invalid-parameter error.
param_grid=[{
    'preparation__num_pipline__attriba_adder__add_bedroom_per_room':[True,False]
}]
grid_search_prep = GridSearchCV(prepare_select_and_predict_pipeline,param_grid,cv=5,scoring='neg_mean_squared_error',verbose=2,n_jobs=2)
print(grid_search_prep)
# 通过测试集评估系统
#从测试集中获取预测图和标签
#运行full_pipline来转换数据
#在测试集上评估最终模型
# final_model = grid_search.best_estimator_
# X_test = strat_test_set.drop('median_house_value',axis=1)
# y_test = strat_test_set['median_house_value'].copy()
# X_test_prepared = full_pipline.transform(X_test)
# # df = pd.DataFrame(X_test_prepared)
# # print(df.head())
# final_predictions = final_model.predict(X_test_prepared)
# final_mse = mean_squared_error(y_test,final_predictions)
# final_rmse = np.sqrt(final_mse)
# print(final_rmse)

#项目启动阶段
#展示解决方案 学习了什么
#什么有用
#什么没有用
#基于什么假设
#以及系统的限制有哪些
#制作漂亮的演示文稿，例如收入中位数是预测房价的首要指标

#启动，监控和维护系统
#为生产环境做好准备，将生产数据源接入系统
#编写监控代码，定期检查系统的实时性能，性能下降时触发警报，系统崩溃和性能退化
#时间推移，模型会渐渐腐坏，定期使用新数据训练模型

#评估系统性能
#需要对系统的预测结果进行抽样评估，需要人工分析，分析师可能是专家、平台工作人员，都需要将人工评估的流水线接入你的系统
#评估输入系统的数据质量
#使用新鲜数据顶起训练你的模型，最多六个月