import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from matplotlib.colors import ListedColormap
from sklearn.svm import SVC
from mpl_toolkits.mplot3d import Axes3D  # 引入3D绘图工具

# Load the force dataset (the CSV has no header row).
iris = pd.read_csv('/home/jetson/hhq/PythonProject/svm2/zc_force_data.csv', header=None)
iris.columns = ['X_force', 'Y_force', 'Z_force', 'species']

# Features are the three force columns; the label is the last column.
X = iris.iloc[:, :3].values
y = iris.iloc[:, -1].values

# Encode the string class labels as integers 0..n_classes-1.
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)

# Stratified 70/30 train/test split; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1, stratify=y)

# Standardize features using statistics from the training split only,
# so no information leaks from the test set into the scaler.
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)

# Fit a linear-kernel SVM on the standardized training data.
svm = SVC(kernel='linear', C=1.0, random_state=1)
svm.fit(X_train_std, y_train)

# 可视化3D决策边界的函数
def plot_decision_regions_3d(X, y, classifier, test_idx=None, resolution=0.02):
    """Scatter-plot the samples in 3D feature space, colored by class.

    Parameters
    ----------
    X : ndarray of shape (n_samples, 3)
        Feature matrix (the three force components, typically standardized).
    y : ndarray of shape (n_samples,)
        Integer class labels (at most 5 distinct classes are supported by
        the marker/color tables below).
    classifier : fitted estimator
        Kept for API compatibility. No decision surface is drawn: the old
        implementation predicted over a dense 3D meshgrid (tens of millions
        of points at resolution=0.02 on standardized data) and then never
        plotted the result, so that computation has been removed.
    test_idx : sequence of int, optional
        Row indices of test-set samples in X to highlight. Previously this
        parameter was accepted but silently ignored.
    resolution : float
        Unused; retained for backward compatibility with existing callers.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # One marker/color per class (supports up to 5 classes).
    markers = ('s', '^', 'o', 'x', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')

    # Plot every class with its own marker and color.
    for idx, cl in enumerate(np.unique(y)):
        ax.scatter(X[y == cl, 0], X[y == cl, 1], X[y == cl, 2],
                   c=colors[idx], marker=markers[idx], label=cl,
                   edgecolor='black')

    # Highlight the held-out test samples on top of the class scatter.
    # (`is not None` rather than truthiness: an empty range is a valid,
    # deliberate "highlight nothing" argument.)
    if test_idx is not None:
        X_hold = X[test_idx, :]
        ax.scatter(X_hold[:, 0], X_hold[:, 1], X_hold[:, 2],
                   c='yellow', edgecolor='black', marker='*', s=150,
                   label='test set')

    ax.set_xlabel('X Force')
    ax.set_ylabel('Y Force')
    ax.set_zlabel('Z Force')
    ax.legend(loc='upper left')

    plt.tight_layout()
    plt.show()


# Visualize the combined (train + test) standardized data in 3D.
X_combined_std = np.vstack((X_train_std, X_test_std))  # stack rows vertically
y_combined = np.hstack((y_train, y_test))  # concatenate label vectors

# Derive the test-sample indices from the actual split sizes instead of the
# hard-coded range(105, 150), which silently assumed a 150-row dataset
# (copied from the classic iris example) and breaks for any other size.
test_idx = range(len(X_train_std), len(X_combined_std))
plot_decision_regions_3d(X_combined_std, y_combined, classifier=svm,
                         test_idx=test_idx)

# Evaluate on the held-out test set.
y_pred = svm.predict(X_test_std)
print('Misclassified samples: %d' % (y_test != y_pred).sum())  # count of errors
print('Accuracy: %.2f' % svm.score(X_test_std, y_test))  # test-set accuracy
