"""
鸢尾花分类
1.Iris-setosa 山鸢尾花
2.Iris-versicolor 变色鸢尾花
3.Iris-virginic 维吉尼亚鸢尾花
"""
import numpy as np
from sklearn import svm
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier


# step1.准备数据
def iris_type(s):
    """Map a byte-string iris species name to an integer class label."""
    # np.loadtxt passes the raw field to the converter as bytes,
    # hence the b'' keys.
    labels = {
        b'Iris-setosa': 0,
        b'Iris-versicolor': 1,
        b'Iris-virginica': 2,
    }
    return labels[s]


# Load the data set; column 4 holds the species name, which the
# converter maps to an integer class label (0/1/2).
dataset = np.loadtxt('iris_data.txt', dtype=float, delimiter=',', converters={4: iris_type})
# Split off the label column: the first four columns are features (x),
# the fifth is the class label (y).
x, y = np.split(dataset, (4,), axis=1)
# Keep only the first two features so the decision regions can be
# plotted in 2-D later (presumably sepal length/width — TODO confirm
# against the data file's column order).
x = x[:, 0:2]
# 80/20 train/test split; random_state fixes the shuffle seed,
# test_size is the held-out fraction.
x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, random_state=1, test_size=0.2)


# step2.创建模型
def classifies(model_name='mlp'):
    """
    Build and return an untrained scikit-learn classifier.

    The original implementation constructed eight different models and
    rebound ``model`` each time, so only the last one (the MLP) was ever
    returned and the other seven were dead code.  This version keeps the
    same default behavior — a no-argument call still returns the MLP —
    but makes every model reachable through ``model_name``.

    :param model_name: which classifier to build; one of
        'svm', 'logistic', 'tree', 'forest', 'bayes', 'knn', 'gbm',
        'mlp' (default, matching the original return value).
    :return: an unfitted classifier instance.
    :raises KeyError: if ``model_name`` is not recognized.
    """
    factories = {
        # SVM: C is the error penalty (larger C = harder margin),
        # linear kernel, one-vs-rest multiclass decision function.
        'svm': lambda: svm.SVC(C=2, kernel='linear', decision_function_shape='ovr'),
        # Logistic regression with L1 penalty; C is the inverse of the
        # regularization strength.  liblinear supports L1 + ovr.
        'logistic': lambda: LogisticRegression(C=2, solver='liblinear', multi_class='ovr', penalty='l1'),
        # Shallow decision tree — easy to interpret.
        'tree': lambda: DecisionTreeClassifier(max_depth=3),
        # Small random-forest ensemble of decision trees.
        'forest': lambda: RandomForestClassifier(n_estimators=3),
        # Gaussian naive Bayes.
        'bayes': lambda: GaussianNB(),
        # k-nearest neighbors with k=5.
        'knn': lambda: KNeighborsClassifier(n_neighbors=5),
        # Gradient-boosting ensemble.
        'gbm': lambda: GradientBoostingClassifier(n_estimators=10),
        # Multi-layer perceptron — the model the original function
        # actually returned; max_iter raised so training converges.
        'mlp': lambda: MLPClassifier(hidden_layer_sizes=(100,), max_iter=1000, random_state=1),
    }
    return factories[model_name]()


# Instantiate the classifier (classifies() returns the MLPClassifier).
clf = classifies()


# step3.训练模型
def train(clf, x_train, y_train):
    """Fit *clf* on the training split.

    ``fit`` expects a 2-D feature matrix and a 1-D label vector, so the
    (n, 1) label column is flattened with ravel() first.
    """
    labels = y_train.ravel()
    clf.fit(x_train, labels)


# Fit the chosen classifier on the training split.
train(clf, x_train, y_train)


# step4.模型评估
def show_accuracy(a, b, tip):
    """Print the fraction of positions where *a* and *b* agree.

    Both arrays are flattened first so shapes like (n, 1) and (n,)
    compare element-wise; the mean of the boolean match array
    (True -> 1, False -> 0) is exactly the accuracy.
    """
    matches = np.equal(a.ravel(), b.ravel())
    print('%s Accuracy: %.3f' % (tip, np.mean(matches)))


def print_accuracy(clf, x_train, y_train, x_test, y_test):
    """Report the accuracy of *clf* on both the training and test splits."""
    # score() returns the mean accuracy on the given features/labels.
    print('training prediction:%.3f' % (clf.score(x_train, y_train)))
    print('test prediction:%.3f' % (clf.score(x_test, y_test)))
    # Cross-check by comparing predict() output with the true labels.
    for features, labels, tag in ((x_train, y_train, 'training data'),
                                  (x_test, y_test, 'testing data')):
        show_accuracy(clf.predict(features), labels, tag)


# Report accuracy on both the training and test splits.
print_accuracy(clf, x_train, y_train, x_test, y_test)


# step.使用模型
def draw(clf, x):
    """Plot the 2-D decision regions of *clf* over the feature range of *x*.

    Also overlays the module-level x_train / y_train (filled, colored by
    true label) and x_test (hollow circles) scatter points.
    """
    iris_feature = 'sepal length', 'sepal width', 'petal length', 'petal width'
    # Bounding box of the two plotted features.
    f1_lo, f1_hi = x[:, 0].min(), x[:, 0].max()
    f2_lo, f2_hi = x[:, 1].min(), x[:, 1].max()
    # 200x200 grid of sample points covering the box; 'ij' indexing
    # gives the same layout as np.mgrid with complex steps.
    g1, g2 = np.meshgrid(np.linspace(f1_lo, f1_hi, 200),
                         np.linspace(f2_lo, f2_hi, 200),
                         indexing='ij')
    samples = np.column_stack((g1.ravel(), g2.ravel()))
    # Predict a class for every grid point, then fold the flat result
    # back into the grid shape for pcolormesh.
    region = clf.predict(samples).reshape(g1.shape)
    # Light colors fill the decision regions; dark ones mark the
    # training points so they stand out against the background.
    cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
    cm_dark = mpl.colors.ListedColormap(['g', 'b', 'r'])
    plt.pcolormesh(g1, g2, region, cmap=cm_light)
    # Training points, colored by their true label.
    plt.scatter(x_train[:, 0], x_train[:, 1], c=np.squeeze(y_train), edgecolors='k', s=50, cmap=cm_dark)
    # Test points drawn as hollow circles layered above everything else.
    plt.scatter(x_test[:, 0], x_test[:, 1], s=120, edgecolors='k', facecolors='none', zorder=10)
    plt.xlabel(iris_feature[0], fontsize=20)
    plt.ylabel(iris_feature[1], fontsize=20)
    plt.xlim(f1_lo, f1_hi)
    plt.ylim(f2_lo, f2_hi)
    plt.title('svm in iris data classification', fontsize=20)
    plt.grid()
    plt.show()


# Visualize the decision regions over the full feature range.
draw(clf, x)