from matplotlib import pyplot as plt
import pandas as pd

# Load the two-class sample data (expects columns: x, y, class).
data = pd.read_csv("two_class_data.csv", header=0)

# Pull out the coordinate columns and the class labels.
xs = data['x']
ys = data['y']
labels = data['class']

# Scatter plot; `c` colors each point by its class label.
plt.scatter(xs, ys, c=labels)
plt.show()


from sklearn.model_selection import train_test_split
from sklearn.linear_model import Perceptron

# Feature matrix (x, y columns) and target vector for classification.
feature = data[['x', 'y']].values
target = data['class'].values

# Hold out 30% of the samples as a test set (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    feature, target, test_size=0.3, random_state=50)
print(f"{X_train.shape} {X_test.shape} {y_train.shape} {y_test.shape}")


# Build a linear perceptron classifier.
model = Perceptron(max_iter=1000, tol=1e-3)
# Fit on the training split.
model.fit(X_train, y_train)
# Predict labels for the held-out test set.
results = model.predict(X_test)
print(results)

plt.figure(figsize=(9, 8))
# Training points in the default style, faded so test points stand out.
plt.scatter(X_train[:, 0], X_train[:, 1], alpha=0.3)
# Test points as squares, colored by their true class.
# BUG FIX: marker=',' is matplotlib's single-pixel marker; the comment said
# "square", which is marker='s'.
plt.scatter(X_test[:, 0], X_test[:, 1], marker='s', c=y_test)
# Annotate each test point with its predicted label.
for i, txt in enumerate(results):
    plt.annotate(txt, (X_test[i, 0], X_test[i, 1]))
plt.show()
# BUG FIX: the accuracy was computed but its value was discarded (bare
# expression in a script); print it so the evaluation is actually visible.
print('Perceptron accuracy:', model.score(X_test, y_test))









# Support Vector Machine classifier
from sklearn.svm import SVC

# Build the model (gamma='scale' avoids the old default-gamma warning).
model = SVC(gamma='scale')
# Fit on the training split.
model.fit(X_train, y_train)
# Predict labels for the test set.
results = model.predict(X_test)

plt.figure(figsize=(9, 8))
# Training points in the default style, faded so test points stand out.
plt.scatter(X_train[:, 0], X_train[:, 1], alpha=0.3)
# Test points as squares, colored by their true class.
# BUG FIX: marker=',' is the single-pixel marker; 's' is the intended square.
plt.scatter(X_test[:, 0], X_test[:, 1], marker='s', c=y_test)
# Annotate each test point with its predicted label.
for i, txt in enumerate(results):
    plt.annotate(txt, (X_test[i, 0], X_test[i, 1]))
# BUG FIX: this figure was built but never displayed in script mode.
plt.show()



# Use a kernel SVM to handle a non-linear, multi-class problem.
# Load the zoo dataset; column 0 is the animal name, 'type' is the label.
data = pd.read_csv('zoo.csv', header=0)
# Columns 1..16 are the animal attribute features.
feature = data.iloc[:, 1:17].values
target = data['type'].values
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    feature, target, test_size=0.3, random_state=50)
# Build and fit the RBF-kernel SVM.
model = SVC(gamma='scale')
model.fit(X_train, y_train)
# Predict the held-out samples.
results = model.predict(X_test)
# BUG FIX: the accuracy was computed but discarded; print it.
print('SVC accuracy on zoo data:', model.score(X_test, y_test))

# PCA: reduce the 16 features to 2 components so results can be visualized.

from sklearn.decomposition import PCA

# 70/30 split of the ORIGINAL features first (same seed as above).
# BUG FIX: PCA was previously fit on the entire dataset BEFORE the split,
# leaking test-set information into the transform. Fit it on the training
# portion only, then apply the same learned projection to the test set.
X_train, X_test, y_train, y_test = train_test_split(
    feature, target, test_size=0.3, random_state=50)

pca = PCA(n_components=2)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)

# Train and evaluate an SVM on the 2-D projection.
model = SVC(gamma='scale')
model.fit(X_train, y_train)
results = model.predict(X_test)
# BUG FIX: the accuracy was computed but discarded; print it.
print('SVC accuracy after PCA:', model.score(X_test, y_test))

# With only 2 dimensions the data can be drawn on a plane.
# Training points in the default style, faded.
plt.scatter(X_train[:, 0], X_train[:, 1], alpha=0.3)
# Test points as squares, colored by their true class.
# BUG FIX: marker=',' is the single-pixel marker; 's' is the intended square.
plt.scatter(X_test[:, 0], X_test[:, 1], marker='s', c=y_test)
# Annotate each test point with its predicted label.
for i, txt in enumerate(results):
    plt.annotate(txt, (X_test[i, 0], X_test[i, 1]))
plt.show()



# k-Nearest Neighbours (kNN) classification
from sklearn.neighbors import KNeighborsClassifier

# Load the three-class sample data (columns: x, y, class).
data = pd.read_csv('three_class_data.csv', header=0)
# Feature matrix and target vector.
feature = data[['x', 'y']].values
target = data['class'].values
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    feature, target, test_size=0.3, random_state=50)

# Default kNN model (k=5, uniform weights).
model = KNeighborsClassifier()
model.fit(X_train, y_train)
# Predict the held-out samples.
results = model.predict(X_test)
# BUG FIX: the accuracy was computed but discarded; print it.
print('kNN accuracy:', model.score(X_test, y_test))

import numpy as np
from matplotlib.colors import ListedColormap

# Plot the kNN decision regions as filled probability contours,
# one sequential colormap per class.
cm0 = plt.cm.Oranges
cm1 = plt.cm.Greens
cm2 = plt.cm.Reds
# BUG FIX: the data has three classes but only two colors were listed,
# which made two of the classes indistinguishable in the scatter overlay.
cm_color = ListedColormap(['red', 'yellow', 'blue'])

# Bounds of the evaluation grid, padded by 0.5 on each side.
x_min, x_max = data['x'].min() - .5, data['x'].max() + .5
y_min, y_max = data['y'].min() - .5, data['y'].max() + .5

xx, yy = np.meshgrid(np.arange(x_min, x_max, .1),
                     np.arange(y_min, y_max, .1))

# PERF FIX: predict_proba was called three times on the identical grid;
# compute it once and slice out the per-class probability columns.
proba = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z0 = proba[:, 0].reshape(xx.shape)
Z1 = proba[:, 1].reshape(xx.shape)
Z2 = proba[:, 2].reshape(xx.shape)

# Stack the three per-class probability surfaces with decreasing opacity.
plt.contourf(xx, yy, Z0, cmap=cm0, alpha=.9)
plt.contourf(xx, yy, Z1, cmap=cm1, alpha=.5)
plt.contourf(xx, yy, Z2, cmap=cm2, alpha=.4)

# Overlay the training points, and the test points with black edges.
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_color)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test,
            cmap=cm_color, edgecolors='black')
plt.show()


# Decision trees (and, below, random forests)
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import graphviz  # required to render the exported tree

# Load the classic iris dataset and fit a decision tree on all of it.
iris = datasets.load_iris()
model = DecisionTreeClassifier()
model.fit(iris.data, iris.target)

# Export the fitted tree as a DOT-language description.
dot_source = export_graphviz(
    model,
    out_file=None,
    feature_names=iris.feature_names,   # human-readable feature names
    class_names=iris.target_names,      # class label names
    filled=True,
    node_ids=True,
    rounded=True,
)

graphviz.Source(dot_source)  # render the decision tree


from sklearn.ensemble import RandomForestClassifier

# Simple holdout: first 120 iris samples for training, last 30 for testing.
X_train, y_train = iris.data[:120], iris.target[:120]
X_test, y_test = iris.data[120:], iris.target[120:]

# Compare a single decision tree against a small random forest,
# both with the same fixed seed.
model_tree = DecisionTreeClassifier(random_state=10)
model_random = RandomForestClassifier(random_state=10, n_estimators=10)

# Fit each model and measure its holdout accuracy.
model_tree.fit(X_train, y_train)
s1 = model_tree.score(X_test, y_test)

model_random.fit(X_train, y_train)
s2 = model_random.score(X_test, y_test)

print('DecisionTree:', s1)
print('RandomForest:', s2)

# Artificial neural network (multi-layer perceptron)
from sklearn.neural_network import MLPClassifier

# max_iter raised from the default so the solver can converge.
model = MLPClassifier(max_iter=1000)
model.fit(X_train, y_train)
# BUG FIX: the accuracy was computed but discarded; print it.
print('MLP accuracy:', model.score(X_test, y_test))

# http://playground.tensorflow.org/ — Google's interactive playground for simple neural-network architectures