from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import load_breast_cancer, load_iris
from sklearn.manifold import TSNE
from sklearn.metrics import fowlkes_mallows_score, calinski_harabasz_score
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA  # 生成规则（主成分分析，用于降维）
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVR
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, cohen_kappa_score
from sklearn.metrics import classification_report
from matplotlib import rcParams
from sklearn.metrics import roc_curve
from sklearn.linear_model import LinearRegression
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, median_absolute_error, \
    r2_score
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import linear_model
from sklearn.linear_model import Ridge

"""
# 乳腺癌信息
cancer = load_breast_cancer()
print('breast_cancer数据集的长度为:', len(cancer))
print('breast_cancer数据集的类型为:', type(cancer))

cancer_data = cancer['data']
print('breast_cancer数据集的数据为:\n', cancer_data)

cancer_target = cancer['target']  # 取出数据集的标签
print('breast_cancer数据集的标签为:\n', cancer_target)

cancer_names = cancer['feature_names']  # 取出数据集的特征名
print('breast_cancer数据集的特征名为:\n', cancer_names)

cancer_desc = cancer['DESCR']  # 取出数据集的描述信息
print('breast_cancer数据集的描述信息为:\n', cancer_desc)
print('原始数据集数据的形状为:', cancer_data.shape)
print('原始数据集标签的形状为:', cancer_target.shape)
cancer_data_train, cancer_data_test, cancer_target_train, cancer_target_test = train_test_split(
    cancer_data,
    cancer_target,
    test_size=0.2,
    random_state=42)

print('训练集数据的形状为:', cancer_data_train.shape)
print('训练集标签的形状为:', cancer_target_train.shape)
print('测试集数据的形状为:', cancer_data_test.shape)
print('测试集标签的形状为:', cancer_target_test.shape)
Scaler = MinMaxScaler().fit(cancer_data_train)  # 生成规则
cancer_trainScaler = Scaler.transform(cancer_data_train)  # 将规则应用于训练集
cancer_testScaler = Scaler.transform(cancer_data_test)  # 将规则应用于测试集
print('离差标准化前训练集数据的最小值为:', np.min(cancer_data_train))
print('离差标准化后训练集数据的最小值为:', np.min(cancer_trainScaler))
print('离差标准化前训练集数据的最大值为:', np.max(cancer_data_train))
print('离差标准化后训练集数据的最大值为:', np.max(cancer_trainScaler))
print('离差标准化前测试集数据的最小值为:', np.min(cancer_data_test))
print('离差标准化后测试集数据的最小值为:', np.min(cancer_testScaler))
print('离差标准化前测试集数据的最大值为:', np.max(cancer_data_test))
print('离差标准化后测试集数据的最大值为:', np.max(cancer_testScaler))

pca_model = PCA(n_components=10).fit(cancer_trainScaler)  # 生成规则
cancer_trainPca = pca_model.transform(cancer_trainScaler)  # 将规则应用于训练集
cancer_testPca = pca_model.transform(cancer_testScaler)  # 将规则应用于测试集
print('PCA降维前训练集数据的形状为:', cancer_trainScaler.shape)
print('PCA降维后训练集数据的形状为:', cancer_trainPca.shape)
print('PCA降维前测试集数据的形状为:', cancer_testScaler.shape)
print('PCA降维后测试集数据的形状为:', cancer_testPca.shape)

"""

"""
# 波士顿房价
boston = load_boston()
boston_data = boston['data']
boston_target = boston['target']
boston_names = boston['feature_names']
print('boston数据集数据的形状为:', boston_data.shape)
print('boston数据集标签的形状为:', boston_target.shape)
print('boston数据集特征名的形状为:', boston_names.shape)

# 使用train_test_split 划分boston数据集
boston_data_train, boston_data_test, boston_target_train, boston_target_test = \
    train_test_split(boston_data, boston_target, test_size=0.2, random_state=42)
print('训练集数据的形状为:', boston_data_train.shape)
print('训练集标签的形状为:', boston_target_train.shape)
print('测试集数据的形状为:', boston_data_test.shape)
print('测试集标签的形状为:', boston_target_test.shape)

stdScale = StandardScaler().fit(boston_data_train)
boston_trainScaler = stdScale.transform(boston_data_train)  # 将规则应用于训练集
boston_testScaler = stdScale.transform(boston_data_test)  # 将规则应用于测试集
print('标准差标准化后训练集数据的方差为:', np.var(boston_trainScaler))
print('标准差标准化后训练集数据的均值为:', np.mean(boston_trainScaler))
print('标准差标准化后测试集数据的方差为:', np.var(boston_testScaler))
print('标准差标准化后测试集数据的均值为:', np.mean(boston_testScaler))

pca = PCA(n_components=5).fit(boston_trainScaler)  # 生成规则
boston_trainPca = pca.transform(boston_trainScaler)  # 将规则应用于训练集
boston_testPca = pca.transform(boston_testScaler)  # 将规则应用于测试集
print('降维后boston数据集数据训练集的形状为:', boston_trainPca.shape)
print('降维后boston数据集数据测试集的形状为:', boston_testPca.shape)
"""

"""
# 聚类模型
iris = load_iris()
iris_data = iris['data']
iris_target = iris['target']
iris_names = iris['feature_names']
scale = MinMaxScaler().fit(iris_data)
iris_dataScale = scale.transform(iris_data)
kmeans = KMeans(n_clusters=3, random_state=123).fit(iris_dataScale)
# print('构建的K-Means模型为:\n', kmeans)
# ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']
result = kmeans.predict([[1.5, 1.5, 1.5, 1.5]])
# print("花瓣花萼长度宽度全为1.5的鸢尾花预测类别为:", result[0])
tsne = TSNE(n_components=2, init='random', random_state=177).fit(iris_data)
df = pd.DataFrame(tsne.embedding_)
# print(df)
df['labels'] = kmeans.labels_
# print(df['labels'])
df1 = df[df['labels'] == 0]
df2 = df[df['labels'] == 1]
df3 = df[df['labels'] == 2]
fig = plt.figure(figsize=(9, 6))
plt.plot(df1[0], df1[1], 'bo', df2[0], df2[1], 'r*', df3[0], df3[1], 'gD')
# plt.savefig('聚类.png')
plt.show()

for i in range(2,7):
    kmeans=KMeans(n_clusters=i,random_state=123).fit(iris_data)
    score=fowlkes_mallows_score(iris_target,kmeans.labels_)
    print('iris数据聚%d类FMI评价分值为%f'%(i,score))
fmiScore=[]
for i in range(2,15):
    kmeans=KMeans(n_clusters=i,random_state=123).fit(iris_data)
    score=fowlkes_mallows_score(iris_target,kmeans.labels_)
    fmiScore.append(score)
plt.figure(figsize=(10,6))
plt.plot(range(2,15),fmiScore,linewidth=1.5,linestyle='-')
plt.show()

for i in range(2,7):
    kmeans=KMeans(n_clusters=i,random_state=123).fit(iris_data)
    score=calinski_harabasz_score(iris_data,kmeans.labels_)
    print((i,score))
    
    
    
seeds = pd.read_csv("../data6/data 任务数据/seeds_dataset.txt", sep='\t')
print("数据集形状为:", seeds.shape)
# 处理数据
seeds_data = seeds.iloc[:, :7].values  # 数据
seeds_target = seeds.iloc[:, 7].values  # 标签
seeds_names = seeds.columns[:7].values  # 特征
stdScale = StandardScaler().fit(seeds_data)
seeds_dataScale = stdScale.transform(seeds_data)
kmeans = KMeans(n_clusters=3, random_state=42).fit(seeds_dataScale)  # use the standardized data computed above
print("构建的K-Means模型为:\n", kmeans)

for i in range(2, 7):
    kmeans = KMeans(n_clusters=i, random_state=123).fit(seeds_data)
    score = calinski_harabasz_score(seeds_data, kmeans.labels_)
    print((i, score))
"""

"""
# 分类
cancer = load_breast_cancer()
cancer_data = cancer['data']
cancer_target = cancer['target']
cancer_names = cancer['feature_names']

cancer_data_train, cancer_data_test, cancer_target_train, cancer_target_test = \
    train_test_split(cancer_data, cancer_target, test_size=0.2, random_state=22)
# 标准差标准化
stdScaler = StandardScaler().fit(cancer_data_train)
cancer_trainStd = stdScaler.transform(cancer_data_train)
cancer_testStd = stdScaler.transform(cancer_data_test)

svm = SVC().fit(cancer_trainStd, cancer_target_train)
print('建立的SVM模型为：\n', svm)
cancer_target_pred = svm.predict(cancer_testStd)
print('预测前20个结果为：\n', cancer_target_pred[:20])
print(cancer_target_test)

trueResult = np.sum(cancer_target_pred == cancer_target_test)
print('预测对的结果数目为：', trueResult)
print('预测错的结果数目为：', cancer_target_test.shape[0] - trueResult)
print('预测结果准确率为：', trueResult / cancer_target_test.shape[0])

print('使用SVM预测breast_cancer数据的准确率为：',
      accuracy_score(cancer_target_test, cancer_target_pred))
print('使用SVM预测breast_cancer数据的精确率为：',
      precision_score(cancer_target_test, cancer_target_pred))
print('使用SVM预测breast_cancer数据的召回率为：',
      recall_score(cancer_target_test, cancer_target_pred))
print('使用SVM预测breast_cancer数据的Cohens Kappa系数为：',
      cohen_kappa_score(cancer_target_test, cancer_target_pred))
print('使用SVM预测breast_cancer数据的分类报告为：', '\n',
      classification_report(cancer_target_test, cancer_target_pred))

fpr, tpr, thresholds = roc_curve(cancer_target_test, cancer_target_pred)
plt.figure(figsize=(10, 6))
plt.xlim(0, 1)
plt.ylim(0.0, 1.1)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.plot(fpr, tpr, linewidth=2, linestyle='-', color='red')
plt.show()

abalone = pd.read_csv('../data6/data 任务数据/abalone.data', sep=',')
abalone_data = abalone.iloc[:, :8]
abalone_target = abalone.iloc[:, 8]
sex = pd.get_dummies(abalone_data['sex'])
abalone_data = pd.concat([abalone_data, sex], axis=1)
abalone_data.drop('sex', axis=1, inplace=True)
abalone_train, abalone_test, abalone_target_train, abalone_target_test = \
    train_test_split(abalone_data, abalone_target,
                     test_size=0.2, random_state=42)
stdScaler = StandardScaler().fit(abalone_train)
abalone_std_train = stdScaler.transform(abalone_train)
abalone_std_test = stdScaler.transform(abalone_test)
svm_abalone = SVC().fit(abalone_std_train, abalone_target_train)
print('建立的SVM模型为：', '\n', svm_abalone)
abalone_target_pred = svm_abalone.predict(abalone_std_test)
print('abalone数据集的SVM分类报告为：\n', classification_report(abalone_target_test, abalone_target_pred))
"""

"""
# 构建并评价回归
boston = load_boston()
x = boston['data']
y = boston['target']
names = boston['feature_names']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=125)
# 线性回归
clf = LinearRegression().fit(x_train, y_train)
print('建立的LinearRegression模型为：\n', clf)

y_pred = clf.predict(x_test)
print('预测前20个结果为：\n', y_pred[:20])

rcParams['font.sans-serif'] = 'SimHei'
fig = plt.figure(figsize=(10, 6))
plt.plot(range(y_test.shape[0]), y_test, color='blue', linewidth=1.5, linestyle='-')
plt.plot(range(y_test.shape[0]), y_pred, color='red', linewidth=1.5, linestyle='-.')
plt.xlim((0, 102))
plt.ylim((0, 55))
plt.legend(['真实值', '预测值'])
# plt.savefig('../temp/聚回归类结果.png')
# plt.show()

print('Boston数据线性回归模型的平均绝对误差为：', mean_absolute_error(y_test, y_pred))
print('Boston数据线性回归模型的均方误差为：', mean_squared_error(y_test, y_pred))
print('Boston数据线性回归模型的中值绝对误差为：', median_absolute_error(y_test, y_pred))
print('Boston数据线性回归模型的可解释方差值为：', explained_variance_score(y_test, y_pred))
print('Boston数据线性回归模型的R方值为：', r2_score(y_test, y_pred))

house = pd.read_csv('../data6/data 任务数据/cal_housing.input', sep=',')
house_data = house.iloc[:, :-1]
house_target = house.iloc[:, -1]
house_names = ['longitude', 'latitude',
               'housingMedianAge', 'totalRooms',
               'totalBedrooms', 'population',
               'households', 'medianIncome']
house_train, house_test, house_target_train, house_target_test = \
    train_test_split(house_data, house_target, test_size=0.2, random_state=42)
GBR_house = GradientBoostingRegressor().fit(house_train, house_target_train)
print('建立的梯度提升回归模型为：\n', GBR_house)

house_target_pred = GBR_house.predict(house_test)
print('California housing数据梯度提升回归模型的平均绝对误差为：', mean_absolute_error(house_target_test, house_target_pred))
print('California housing数据梯度提升回归模型的均方误差为：', mean_squared_error(house_target_test, house_target_pred))
print('California housing数据梯度提升回归模型的中值绝对误差为：', median_absolute_error(house_target_test, house_target_pred))
print('California housing数据梯度提升回归模型的可解释方差值为：', explained_variance_score(house_target_test, house_target_pred))
print('California housing数据梯度提升回归模型的R方值为：', r2_score(house_target_test, house_target_pred))

x = np.array([[1, 56], [2, 104], [3, 156], [4, 200], [5, 250], [6, 300]])
y = np.array([7800, 9000, 9200, 10000, 11000, 12000])
clf = linear_model.LinearRegression()
clf.fit(x, y)
k = clf.coef_
b = clf.intercept_
x0 = np.array([[7, 170]])
y0 = clf.predict(x0)
print('回归系数：', k)
print('截距：', b)
print('预测值：', y0)

x = np.array([[1, 56], [2, 104], [3, 156], [4, 200], [5, 250], [6, 300]])
y = np.array([7800, 9000, 9200, 10000, 11000, 12000])
clf = Ridge(alpha=1.0)
clf.fit(x, y)
k = clf.coef_
b = clf.intercept_
x0 = np.array([[7, 170]])
y0 = clf.predict(x0)
print('回归系数：', k)
print('截距：', b)
print('预测值：', y0)
"""
"""
#第七章航空航天数据处理及聚类
airline_data = pd.read_csv('../data7/input 任务程序/air_data.csv', encoding='gb18030')  # 导入航空数据
print('原始数据的形状为:', airline_data.shape)  # 去除票价为空的记录
exp1 = airline_data['SUM_YR_1'].notnull()
exp2 = airline_data['SUM_YR_2'].notnull()
exp = exp1 & exp2
airline_notnull = airline_data.loc[exp, :]
print('删除缺失记录后数据的形状为:', airline_notnull.shape)
# 丢弃票价为0，或者平均折扣率为0，或总飞行公里为0的记录
index1 = airline_notnull['SUM_YR_1'] != 0
index2 = airline_notnull['SUM_YR_2'] != 0
index3 = (airline_notnull['SEG_KM_SUM'] > 0) & (airline_notnull['avg_discount'] != 0)
airline = airline_notnull[(index1 | index2) & index3]
print('删除异常记录后数据的形状为:', airline.shape)
# 选取需求特征
airline_selection = airline[['FFP_DATE', 'LOAD_TIME', 'FLIGHT_COUNT', 'LAST_TO_END', 'avg_discount', 'SEG_KM_SUM']]
# 构建L特征
L = pd.to_datetime(airline_selection['LOAD_TIME']) - pd.to_datetime(airline_selection['FFP_DATE'])
L = L.astype('str').str.split().str[0]
L = L.astype('int') / 30  # 合并特征
airline_features = pd.concat([L, airline_selection.iloc[:, 2:]], axis=1)
print('构建的LRFMC特征前5行为:\n', airline_features.head())

input = StandardScaler().fit_transform(airline_features)
np.savez('../temp/airline_scale.npz', input)
print('标准化后LRFMC五个特征为:\n', input[:5, :])

airline_scale = np.load('../temp/airline_scale.npz')['arr_0']
k = 5  # 确定聚类中心数，构建模型
kmeans_model = KMeans(n_clusters=k, random_state=123)  # n_jobs removed in scikit-learn 1.0
fit_kmeans = kmeans_model.fit(airline_scale)  # 模型训练
print(kmeans_model.cluster_centers_)  # 查看聚类中心
print(kmeans_model.labels_)  # 查看样本的类别标签
# 统计不同类别样本的数目
r1 = pd.Series(kmeans_model.labels_).value_counts()
print('最终每个类别的数目为:\n', r1)

"""

"""

#第八章
inputfile = '../data8/input 任务程序/input.csv'
input = pd.read_csv(inputfile)

print('相关系数矩阵为:', np.round(input.corr(method='pearson'), 2))

lasso = Lasso(1000)
lasso.fit(input.iloc[:, 0:13], input['y'])
print('相关系数为：', np.round(lasso.coef_, 5))

# 计算相关系数非零的个数
print('相关系数的个数为：', np.sum(lasso.coef_ != 0))

mask = lasso.coef_ != 0
print('非零个数为：', mask)

outputfile = '../temp/new_reg_data.csv'
mask = np.append(mask, True)
new_reg_data = input.iloc[:, mask]
new_reg_data.to_csv(outputfile)
print('输出数据的维度为：', new_reg_data.shape)

'''灰色预测函数'''


def GM11(x0):  # 自定义灰色预测函数
    import numpy as np
    x1 = x0.cumsum()  # 生成累加序列
    z1 = (x1[:len(x1) - 1] + x1[1:]) / 2.0  # 生成紧邻均值（MEAN）序列，比直接使用累加序列好，共 n-1 个值
    z1 = z1.reshape((len(z1), 1))
    B = np.append(-z1, np.ones_like(z1), axis=1)  # 生成 B 矩阵
    Y = x0[1:].reshape((len(x0) - 1, 1))  # Y 矩阵
    [[a], [u]] = np.dot(np.dot(np.linalg.inv(np.dot(B.T, B)), B.T), Y)  # 计算参数
    f = lambda k: (x0[0] - u / a) * np.exp(-a * (k - 1)) - (x0[0] - u / a) * np.exp(-a * (k - 2))  # 还原值
    delta = np.abs(x0 - np.array([f(i) for i in range(1, len(x0) + 1)]))  # 计算残差
    C = delta.std() / x0.std()
    P = 1.0 * (np.abs(delta - delta.mean()) < 0.6745 * x0.std()).sum() / len(x0)
    return f, a, u, x0[0], C, P  # 返回灰色预测函数、a、b、首项、方差比、小残差概率


inputfile = '../temp//new_reg_data.csv'
inputfile1 = '../data8/input 任务程序/input.csv'
new_reg_data = pd.read_csv(inputfile)
input = pd.read_csv(inputfile1)
new_reg_data.index = range(1994, 2014)
new_reg_data.loc[2014] = None
new_reg_data.loc[2015] = None
l = ['x1', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x13']
for i in l:
    f = GM11(new_reg_data.loc[range(1994, 2014), i].values)[0]
    new_reg_data.loc[2014, i] = f(len(new_reg_data) - 1)
    new_reg_data.loc[2015, i] = f(len(new_reg_data))
    new_reg_data[i] = new_reg_data[i].round(2)
outputfile = '../temp/new_reg_data_GM11.xls'
y = list(input['y'].values)
y.extend([np.nan, np.nan])
new_reg_data['y'] = y
new_reg_data.to_excel(outputfile)
print('预测结果为：', new_reg_data.loc[2014:2015:1])


inputfile = '../temp/new_reg_data_GM11.xls'  # 灰色预测后保存的路径
input = pd.read_excel(inputfile)  # 读取数据
feature = ['x1', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x13']
data_train = input.loc[range(1994, 2014)].copy()  # 取2014年前的数据建模
data_mean = data_train.mean()
data_std = data_train.std()
data_train = (data_train - data_mean) / data_std  # 数据标准化
x_train = data_train[feature].values  # 特征数据（as_matrix() removed in pandas 1.0）
y_train = data_train['y'].values  # 标签数据
linearsvr = LinearSVR()  # 
linearsvr.fit(x_train, y_train)
x = ((input[feature] - data_mean[feature]) / \
     data_std[feature]).values  # 预测，并还原结果。
input[u'y_pred'] = linearsvr.predict(x) * \
                  data_std['y'] + data_mean['y']
outputfile = '../temp/new_reg_data_GM11_revenue.xls'
input.to_excel(outputfile)
print('真实值与预测值分别为:', input[['y', 'y_pred']])
"""
"""
inputfile = '../temp/new_reg_data_GM11.xls'  # 灰色预测后保存的路径
data = pd.read_excel(inputfile)  # 读取数据
p = plt.figure(figsize=(7, 7))
ax1 = p.add_subplot(2, 1, 1)
plt.plot(range(0, 22), data['y'], color='r', linestyle='-', marker='o')
plt.xticks(range(0, 23, 2), data.index[::2])
plt.legend('y', loc='upper left')
ax2 = p.add_subplot(2, 1, 2)
plt.plot(data.index[::], data['y_pred'], color='r', linestyle='-', marker='*')
plt.xticks(data.index[::2])
plt.legend(('y_pred',))  # 后面加个逗号，可以把文字显示全
plt.show()
"""
