
import pandas as pd
import seaborn as sns
import scipy as sp
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from sklearn import cluster
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
import os
import xlrd
from sklearn import cluster
from sklearn.neighbors import kneighbors_graph
from sklearn import metrics
from matplotlib import cm
from pylab import mpl
from sklearn.model_selection import cross_val_predict
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot
from sklearn import decomposition
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,AdaBoostClassifier)
from sklearn import linear_model
from sklearn import model_selection
from sklearn import tree
from sklearn.externals.six import StringIO
from scipy.cluster.vq import kmeans2
from scipy.spatial.distance import pdist, squareform
from functions import *

sns.set(color_codes=True)

print('********************** 加载数据 **********************')
# NOTE(review): hard-coded absolute Windows path — consider making this configurable.
csvFile = r'C:\Codes\PyProject\modelXYZ\04-犯罪相关\code\BeijingCrime_2016.txt'
raw_data = pd.read_csv(csvFile)
# print(raw_data.head(10))
print(list(raw_data))

print('********************** 基本信息 **********************')
# 'BuildZone' is constant (all 1) in this dataset, so it is not used as a feature.
# Area in square kilometres.
# `scipy.array` was a deprecated alias for `numpy.array` and has been removed
# from modern SciPy releases — use numpy directly.
area = np.array(raw_data['KmArea'])
# Log-transform to reduce skew (vectorized instead of a per-element list
# comprehension), then Gaussian (z-score) standardization.
area_log = np.log(area)
area_log_nor = GaussianNormalization(area_log, np.mean(area_log), np.std(area_log))
# drawHist(area_log_nor,'KmArea log')
data_nor = pd.DataFrame(area_log_nor, columns=["area"])

print('********************** 人口信息 **********************')
# Population-related columns (full group, for reference); only the subsets
# below are actually transformed.
arr_popu = ['HomeP', 'WorkP', 'WorkHome','Home_D', 'Work_D']
# 'HomeP', 'WorkP', 'Home_D', 'Work_D': sqrt transform, then z-score standardization.
arr_popu1 = ['HomeP', 'WorkP', 'Home_D', 'Work_D']
for col in arr_popu1:
    # np.array instead of sp.array: scipy.array is removed in modern SciPy.
    ss = np.sqrt(np.array(raw_data[col]))
    data_nor[col] = GaussianNormalization(ss, np.mean(ss), np.std(ss))

# 'WorkHome': log-then-sqrt transform, then z-score standardization.
# NOTE(review): this computes sqrt(log(x) + 1); if log(x + 1) was intended,
# the parenthesis placement is wrong (and log(x) + 1 goes negative for
# x < 1/e, making sqrt produce NaN) — confirm with the author.
WorkHome = np.sqrt(np.log(np.array(raw_data['WorkHome'])) + 1)
WorkHome_nor = GaussianNormalization(WorkHome, np.mean(WorkHome), np.std(WorkHome))
# drawHist(WorkHome_nor, 'WorkHome_nor')
data_nor['WorkHome'] = WorkHome_nor
# print(WorkHome_nor)

# Cluster the basic factors (area + population).
basic_data = data_nor.copy()
# corrmat = data_nor.corr(); print(corrmat)
# Fold the four correlated population columns into a single composite feature
# and drop the originals (correlation-based variable elimination).
popu_cols = ('HomeP', 'WorkP', 'Home_D', 'Work_D')
basic_data['homePworkP'] = sum(basic_data[c] for c in popu_cols)
for c in popu_cols:
    del basic_data[c]
# print(basic_data.head(3))
# Grade the basic factors with K-means. A silhouette sweep over k in [2, 16)
# was run offline (see version history); k = 4 scored best, so it is fixed here.
k = 4
module = KMeans(init='k-means++', n_clusters=k, n_init=10)
module.fit(basic_data)
silhoutte_score = metrics.silhouette_score(basic_data, module.labels_, metric='euclidean')
print('The silhoutte score of basic factors for %s cluster is %s' % (k, silhoutte_score))
basic_label = module.labels_
# print(basic_label)
basic_data['Basic_labels'] = basic_label
# print(basic_data.head(3))
# data1 accumulates the graded (labelled) factors used for the classifiers below.
data1 = pd.DataFrame(basic_label, columns=["basic_label"])


print('********************** 通勤信息 **********************')
# Commuting-related columns (full group, for reference).
arr_commu = ['home_commu_people', 'work_commu_people', 'home_livehome_people', 'work_livehome_people',
             'home_worknight_people', 'work_worknight_people', 'InCommu', 'ToInCommu',   'AvgCommu', 'AvgCommuW']

# Commuter counts: log(x + 1) transform, then z-score standardization.
arr_commu1 = ['home_commu_people', 'work_commu_people', 'home_livehome_people', 'work_livehome_people','home_worknight_people', 'work_worknight_people']
for col in arr_commu1:
    # np.array instead of sp.array: scipy.array is removed in modern SciPy.
    ss = np.log(np.array(raw_data[col]) + 1)
    # drawHist(ss, col)
    data_nor[col] = GaussianNormalization(ss, np.mean(ss), np.std(ss))
# 'NCommuNum': sqrt transform, then z-score standardization.
noCommu = np.sqrt(raw_data['NCommuNum'])
# drawHist(noCommu,'NCommuNum')
noCommu_nor = GaussianNormalization(noCommu, np.mean(noCommu), np.std(noCommu))
data_nor['NCommuNum'] = noCommu_nor
# The 'ToEx*' side is dropped; only the inward-commute variables are kept.
# 'InCommu': sqrt transform, then z-score standardization.
inCommu = np.sqrt(np.array(raw_data['InCommu']))
# drawHist(inCommu,'inCommu')
inCommu_nor = GaussianNormalization(inCommu, np.mean(inCommu), np.std(inCommu))
data_nor['InCommu'] = inCommu_nor
# 'ToInCommu': reflect (1.1 - x) so the log is defined, then z-score standardization.
toInCommu = np.log(1.1 - np.array(raw_data['ToInCommu']))
# drawHist(toInCommu,'ToInCommu')
toInCommu_nor = GaussianNormalization(toInCommu, np.mean(toInCommu), np.std(toInCommu))
data_nor['ToInCommu'] = toInCommu_nor
# Average commute times: z-score standardization only.
for col in ('AvgCommu', 'AvgCommuW'):
    ss = np.array(raw_data[col])
    # drawHist(ss_nor, col)
    data_nor[col] = GaussianNormalization(ss, np.mean(ss), np.std(ss))
# Cluster the commuting factors.
df_commu = pd.DataFrame(noCommu_nor, columns=["NCommuNum"])
commute_cols = ['home_commu_people', 'work_commu_people', 'home_livehome_people', 'work_livehome_people',
                'home_worknight_people', 'work_worknight_people',
                'InCommu', 'ToInCommu', 'AvgCommu', 'AvgCommuW']
for col in commute_cols:
    df_commu[col] = data_nor[col]
# print(df_commu.head(4))
# Correlation-based pruning was explored and left disabled:
#   corrmat = df_commu.corr(); corrmat.to_csv('相关性0523_通勤.csv')
#   candidate deletions: home_commu_people, work_commu_people,
#   home_livehome_people, work_livehome_people, home_worknight_people,
#   work_worknight_people, ToInCommu

# Grade the commuting factors with K-means. A silhouette sweep over k in
# [2, 15) was run offline; k = 3 scored best, so it is fixed here.
# NOTE(review): the disabled sweep scored silhouette on basic_data rather
# than df_commu — likely a copy-paste slip; fix before re-enabling it.
k = 3
module = KMeans(init='random', n_clusters=k, n_init=10)
module.fit(df_commu)
silhoutte_score = metrics.silhouette_score(df_commu, module.labels_, metric='euclidean')
print('The silhoutte score of commu factors for %s cluster is %s' % (k, silhoutte_score))
commu_label = module.labels_
# print(commu_label)
data1['commu_labels'] = commu_label
# print(data1.head(10))


print('********************** 人群信息 **********************')
arr_group = [ 'Aging', 'CarRH', 'CarRW', 'UniversityH', 'UniversityW', 'IncomeH', 'IncomeW']
# All demographic ratios: z-score standardization only.
for col in arr_group:
    # np.array instead of sp.array: scipy.array is removed in modern SciPy.
    ss = np.array(raw_data[col])
    # drawHist(ss_nor, col)
    data_nor[col] = GaussianNormalization(ss, np.mean(ss), np.std(ss))
arr_group1 = arr_group.copy()
arr_group1.remove('Aging')
df_group = pd.DataFrame(data_nor['Aging'])
for col in arr_group1:
    df_group[col] = data_nor[col]
# corrmat = df_group.corr()
# corrmat.to_csv('相关性0523_人群.csv')
# Drop the workplace-side variables and fold the remaining home-side
# variables into one composite feature ('three').
del df_group['CarRW']
del df_group['UniversityW']
del df_group['IncomeW']
df_group['three'] = df_group['CarRH'] + df_group['UniversityH'] + df_group['IncomeH']
del df_group['CarRH']
del df_group['UniversityH']
del df_group['IncomeH']
# print(df_group.head(10))

# A K-means silhouette sweep (run offline) scored poorly here, so the group
# factors are graded with the project's 2-D manual classifier instead.
# plt.scatter(df_group['Aging'],df_group['three'])
# plt.show()
lev_group = clf_sort_mix(df_group['Aging'], df_group['three'], 'popu group level')
data1['group_labels'] = lev_group

print('********************** POI信息 **********************')
# POI densities: sqrt transform, then z-score standardization.
arr_poi = ['Driving_D', 'Pubtrans_D','Hospital_D', 'School_D',  'Entertai_D', 'Convien_D', 'Eating_D',
           'Hotel_D', 'Industry_D','Finance_D', 'Govern_D', 'Science_D', 'Service_D']
for col in arr_poi:
    # np.array instead of sp.array: scipy.array is removed in modern SciPy.
    ss = np.sqrt(np.array(raw_data[col]))
    # drawHist(ss, col)
    data_nor[col] = GaussianNormalization(ss, np.mean(ss), np.std(ss))
# Sparse POI types: binarized (present/absent) rather than standardized.
arr_poi1 = ['College_D', 'Store_D']
for col in arr_poi1:
    data_nor[col] = Binary(np.array(raw_data[col]))

# Assemble every POI column into one frame for labelling.
df_poi = pd.DataFrame(data_nor['College_D'])
arr_poi2 = arr_poi + ['Store_D']
for col in arr_poi2:
    df_poi[col] = data_nor[col]
# print(df_poi.head(5))

# A K-means silhouette sweep (run offline) scored poorly on the POI frame,
# so the POI columns are grouped into four thematic composites and graded
# with the project's manual percentile classifier instead.
# The four stanzas of the original were identical apart from the column
# lists, output names, and labels — deduplicated into one data-driven loop.
poi_groups = [
    # (data1 column, clf_sort label, member columns)
    ('poiBusi',  'Busi POI Density',   ['Entertai_D', 'Eating_D', 'Hotel_D', 'Finance_D']),      # commercial
    ('poiCoven', 'Coven POI Density',  ['Driving_D', 'Pubtrans_D', 'Convien_D', 'Service_D']),   # convenience
    ('poiServe', 'Serve POI Density',  ['Hospital_D', 'School_D', 'Govern_D']),                  # public service
    ('poiOther', ' Other POI Density', ['Science_D', 'Industry_D']),  # leading space kept from original label
]
for out_col, label, members in poi_groups:
    # Sum the member densities, min-max rescale, then grade into levels with
    # the same cut points (5, 85, 15) the original per-group code used.
    total = np.zeros(len(df_poi['Entertai_D']))
    for col in members:
        total += np.array(df_poi[col])  # np.array: scipy.array is removed in modern SciPy
    scaled = [MaxMinNormalization(h, np.max(total), np.min(total)) for h in total]
    # drawHist(total, out_col)
    data1[out_col] = clf_sort(scaled, label, 5, 85, 15)


print('********************** 房价信息 **********************')
arr_re = ['re_avg', 're_max', 're_min', 're_median', 're_disper', 're_year']
# Real-estate price statistics: z-score standardization only.
for col in arr_re:
    # np.array instead of sp.array: scipy.array is removed in modern SciPy.
    ss = np.array(raw_data[col])
    # drawHist(ss, col)
    data_nor[col] = GaussianNormalization(ss, np.mean(ss), np.std(ss))
df_re = pd.DataFrame(data_nor['re_avg'])
arr_re.remove('re_avg')
for col in arr_re:
    df_re[col] = data_nor[col]
# print(df_re.head(5))
# corrmat = df_re.corr()
# corrmat.to_csv('相关性0523_房产.csv')
# Grade the real-estate factors with K-means. A silhouette sweep over k in
# [2, 15) was run offline; k = 3 scored best, so it is fixed here.
k = 3
module = KMeans(init='k-means++', n_clusters=k, n_init=10)
module.fit(df_re)
silhoutte_score = metrics.silhouette_score(df_re, module.labels_, metric='euclidean')
print('The silhoutte score of real estate factors for %s cluster is %s' % (k, silhoutte_score))
re_label = module.labels_
# print(re_label)
data1['re_label'] = re_label


print('********************** 犯罪率信息 **********************')
# Log-transform the 2016 crime rate, then z-score standardize it.
crimeRate = np.log(raw_data['CrimeRate2016'])
# drawHist(crimeRate, 'crimeRate')
crimeRate_nor = GaussianNormalization(crimeRate, np.mean(crimeRate), np.std(crimeRate))
data_nor['crimeRate'] = crimeRate_nor
# print(data_nor.head(10)); print(list(data_nor))
# Grade the standardized rate into levels with cut points (5, 85, 15) —
# this graded value is the classification target below.
crimeRate1 = clf_sort(crimeRate_nor, 'Crime Rate', 5, 85, 15)
data1['crimeRate'] = crimeRate1
# drawHist(crimeRate_nor, 'crimeRate_nor')
# plt.scatter(crimeRate_nor, crimeRate1); plt.show()
# Persist the full graded-factor table.
data1.to_csv('data_xy.csv')
print ('*************************** Random Forests ****************************')
# Parameters. The plotting constants below belong to a disabled
# decision-surface visualisation and are currently unused.
n_classes = 3
n_estimators = 100
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02  # fine step width for decision surface contours
plot_step_coarser = 0.5  # step widths for coarse classifier guesses
RANDOM_SEED = 13  # fix the seed on each iteration

# Model. NOTE(review): clf_rf is instantiated but never fitted or scored below.
clf_rf = RandomForestClassifier(n_estimators=n_estimators, min_samples_split=4)
# Features: all graded factors except the target.
data2 = data1.copy()
del data2['crimeRate']
# np.array instead of sp.array: scipy.array is removed in modern SciPy.
X = np.array(data2)
# X = np.transpose(X)
# Target: the graded crime-rate level.
y = np.array(data1['crimeRate'])
# print(y)
'''decision tree'''
# NOTE(review): the original comment said "change to entropy" but criterion
# is still 'gini' — confirm which was intended.
clf_DTs = tree.DecisionTreeClassifier(criterion='gini', max_depth=12)
clf_DTs_cr = clf_DTs.fit(X, y)
print ('Decision tree score is ', clf_DTs_cr.score(X, y))

# Export the fitted tree for Graphviz rendering.
with open("clf_DTs_cr.dot", 'w') as f:
    f = tree.export_graphviz(clf_DTs_cr, out_file=f)

print ('pls open <cmd> and input <dot -Tpng clf_DTs_cr.dot -o tree.png>')
# '''Random forest'''
print ('*************************** y分层后的线性回归 **********************')

def _ols_report(features, target, cv=5):
    """Fit plain OLS on (features, target); print and return the training R^2.

    Cross-validated predictions and scores are computed as well (kept from
    the original exploratory code) but not printed.
    Returns (fitted model, training R^2 score).
    """
    model = linear_model.LinearRegression()
    cross_val_predict(model, features, target, cv=cv)
    model.fit(features, target)
    score = model.score(features, target)
    model_selection.cross_val_score(model, features, target, cv=cv)
    # model.coef_ / model.intercept_ are available on the returned model if needed.
    print ('score is %s' % score)
    return model, score

# Regression with the stratified (graded) target.
module_LR, c = _ols_report(X, y)
print ('*************************** y不分层的线性回归 **********************')
# Regression with the continuous standardized target.
y_nor = data_nor['crimeRate']
module_LR, c = _ols_report(X, y_nor)
print ('*************************** PCA ***************************')
# PCA on all standardized (un-graded) features, with the target excluded.
# data_nor.to_csv('data_nor.csv')
df_PCA = data_nor.copy()
del df_PCA['crimeRate']
# print(np.shape(df_PCA)); print(df_PCA.head(9))

# The sklearn KernelPCA variant was explored and left disabled:
#   kpca = decomposition.KernelPCA(n_components=12, kernel='poly',
#                                  fit_inverse_transform=True)
#   X_kpca = kpca.fit_transform(df_PCA)
#   X_kpca_back = kpca.inverse_transform(X_kpca)
#   Eigvector = kpca.alphas_; Eigvalue = kpca.lambdas_

# Hand-rolled PCA via the project's helpers: take the top-10 eigen pairs,
# then keep the 8 strongest by absolute eigenvalue.
# np.array instead of sp.array: scipy.array is removed in modern SciPy.
x_all = np.array(df_PCA)
# print(np.shape(x_all))
eig_pc1 = eig_pc(x_all, 10)
# print(eig_pc1)
eig_pc_new = Abs_eig_func(eig_pc1, 8)
# Project the data onto the retained components.
X_new = np.transpose(np.dot(eig_pc_new, np.transpose(x_all)))
# print(np.shape(X_new))
np.savetxt('eig_pc_crimeRate.csv', eig_pc_new, fmt='%4f', delimiter=',')
# OLS on the PCA-projected features, once against the graded target (y) and
# once against the continuous standardized target (y_nor).
for header, target, msg in (
    ('*************************** y分层后的线性回归 - PCA **********************', y, 'score for y is %s'),
    ('*************************** y不分层后的线性回归 - PCA **********************', y_nor, 'score for y_nor is %s'),
):
    print (header)
    module_LR = linear_model.LinearRegression()
    # 10-fold cross-validated predictions/scores are computed but not printed.
    predicted_y_nor = cross_val_predict(module_LR, X_new, target, cv=10)
    module_LR.fit(X_new, target)
    a = module_LR.coef_
    b = module_LR.intercept_
    c = module_LR.score(X_new, target)
    scores = model_selection.cross_val_score(module_LR, X_new, target, cv=10)
    print (msg % c)



