# coding=utf-8
import mglearn
import numpy as np
# BUG FIX: the original line was `import matplotlib as plt`, which binds the
# top-level matplotlib package to `plt`; the plotting calls used later in this
# file (plt.subplots / plt.plot, currently commented out) live in the pyplot
# submodule and would raise AttributeError. Import pyplot explicitly.
import matplotlib.pyplot as plt

# --- Load and inspect the breast-cancer classification dataset ---
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
# The Bunch object behaves like a dict; show the available keys.
print("cancer.keys(): \n{}".format(cancer.keys()))
print("Shape of cancer data: {}".format(cancer.data.shape))
# Count how many samples belong to each class label.
class_counts = dict(zip(cancer.target_names, np.bincount(cancer.target)))
print("Sample counts per class: \n{}".format(class_counts))
# feature_names: the human-readable meaning of each feature column
print("Feature names: \n{}".format(cancer.feature_names))

# --- Regression dataset: Boston house prices ---
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2; the two lines below only run on older scikit-learn versions — confirm
# the pinned sklearn version before relying on this section.
from sklearn.datasets import load_boston
boston = load_boston()
print("Data shape: {}".format(boston.data.shape))

# Extended Boston dataset provided by mglearn (adds derived features).
X, y = mglearn.datasets.load_extended_boston()
print("X shape: {}".format(X.shape))
# print(y)

# --- Behavior of different algorithms: k-NN on the forge dataset ---
# Visualize k-NN classification for 1 and 3 neighbors (plots via mglearn).
for k in (1, 3):
    mglearn.plots.plot_knn_classification(n_neighbors=k)

# --- k-NN classification with scikit-learn ---
# Step 1: split the data into a training set and a test set.
from sklearn.model_selection import train_test_split
X, y = mglearn.datasets.make_forge()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Step 2: import the estimator class and instantiate it with its parameters.
from sklearn.neighbors import KNeighborsClassifier
# Step 3: fit the classifier on the training set. For KNeighborsClassifier
# fitting simply stores the training data so neighbor distances can be
# computed at prediction time (fit() returns the estimator itself).
clf = KNeighborsClassifier(n_neighbors=3).fit(X_train, y_train)
# Step 4: predict the test data: for each test point, find its nearest
# training neighbors and take the most frequent class among them.
print("Test set prediction: {}".format(clf.predict(X_test)))
# Step 5: assess generalization by calling score() on the held-out test data.
print("Test set accuracy: {:.2f}".format(clf.score(X_test, y_test)))
# An accuracy of 0.86 means the model predicted the correct class for 86%
# of the samples in the test set.

# --- Analyzing KNeighborsClassifier: accuracy vs. number of neighbors ---
# Decision boundaries could be drawn with:
# fig, axes = plt.subplots(1, 3, figsize=(10, 3))

from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
# stratify keeps the class proportions identical in train and test splits.
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, stratify=cancer.target, random_state=66
)
training_accuracy = []
test_accuracy = []
# Sweep n_neighbors over the values 1..10.
neighbors_settings = range(1, 11)
for n_neighbors in neighbors_settings:
    # Build and fit a model for this neighbor count.
    model = KNeighborsClassifier(n_neighbors=n_neighbors)
    model.fit(X_train, y_train)
    # Record training-set accuracy.
    training_accuracy.append(model.score(X_train, y_train))
    # Record generalization (test-set) accuracy.
    test_accuracy.append(model.score(X_test, y_test))
print(training_accuracy)
print(test_accuracy)
# plt.plot(neighbors_settings, training_accuracy, label="training accuracy")
# plt.plot(neighbors_settings, test_accuracy, label="test accuracy")

# --- k-nearest-neighbors regression ---
from sklearn.neighbors import KNeighborsRegressor
X, y = mglearn.datasets.make_wave(n_samples=40)
# Split the wave dataset into a training set and a test set.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Instantiate the regressor with the number of neighbors set to 3,
# then fit it using the training data and training targets.
reg = KNeighborsRegressor(n_neighbors=3)
reg.fit(X_train, y_train)
# Predict on the test data.
print("Test set predictions: \n{}".format(reg.predict(X_test)))
# For regressors, score() reports R^2 (coefficient of determination).
print("Test set R^2: {:.2f}".format(reg.score(X_test, y_test)))
# A score of 0.83 indicates a relatively good fit.