"""
支持向量机分类预测
linear,poly,rbf
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm

# Load the two-feature dataset; no header row, so name the columns
# explicitly: two inputs (x1, x2) and a class label (y).
data = pd.read_csv('multiple2.txt', header=None, names=['x1', 'x2', 'y'])

print(data)

# Quick look at the raw samples, colored by class label.
data.plot.scatter(x='x1', y='x2', c='y', cmap='brg')
# plt.show()

# Separate the feature matrix from the label vector.
x = data[['x1', 'x2']]
y = data['y']

# Hold out 10% of the samples for testing; fixed seed for reproducibility.
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.1, random_state=7)

# Candidate hyper-parameter grids, one dict per kernel family.
params = [
    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]},
    {'kernel': ['poly'], 'C': [1], 'degree': [2, 3]},
    {'kernel': ['rbf'], 'C': [1, 10, 100], 'gamma': [1, 0.1, 0.01]},
]

# Exhaustive grid search with 5-fold cross-validation; the fitted
# GridSearchCV object itself predicts with the best estimator found.
model = ms.GridSearchCV(svm.SVC(), params, cv=5)

model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)

# Per-class precision / recall / f1 on the held-out test split.
print(sm.classification_report(test_y, pred_test_y))

# Winning hyper-parameters, their mean CV score, and the refit estimator.
print(model.best_params_)
print(model.best_score_)
print(model.best_estimator_)
# Brute-force decision-boundary plot:
# 1. sample 200 evenly spaced values over each feature's range,
# 2. form all 200x200 = 40,000 (x1, x2) grid combinations,
# 3. classify every grid point with the trained model,
# 4. color the grid by predicted class (background), then overlay
#    the real samples colored by their true class.
x1s = np.linspace(data['x1'].min(), data['x1'].max(), 200)
x2s = np.linspace(data['x2'].min(), data['x2'].max(), 200)

# Vectorized grid construction (replaces the former nested Python loop);
# indexing='ij' keeps the original x1-outer / x2-inner point order.
g1, g2 = np.meshgrid(x1s, x2s, indexing='ij')
points = pd.DataFrame({'x1': g1.ravel(), 'x2': g2.ravel()})
# print(points)
points_label = model.predict(points)
plt.scatter(points['x1'], points['x2'], c=points_label, cmap='gray')
plt.scatter(data['x1'], data['x2'], c=data['y'], cmap='brg')
# plt.show()