# softmax 回归

import numpy as np
import matplotlib.pyplot as plt

from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve,auc,classification_report
from sklearn import metrics

# Configure matplotlib so Chinese axis/legend text and minus signs render
# correctly, then fix the RNG seed so the synthetic data is reproducible.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],   # font with Chinese glyph coverage
    'axes.unicode_minus': False,     # keep '-' rendering correct with SimHei
})

np.random.seed(2022)  # fixed seed: every run generates the same clusters

####### Training data ########################################################
# Three 2-D Gaussian clusters, 300 samples each, class labels 0/1/2.
# NOTE: the six np.random.normal calls below must stay in this order so the
# seeded RNG reproduces the same data as before.
x1 = np.random.normal([1, 1], [1, 1], [300, 2])
x2 = np.random.normal([5, 4], [1, 1], [300, 2])
x3 = np.random.normal([8, 0], [1, 1], [300, 2])
x_train = np.concatenate((x1, x2, x3), axis=0)
y_train = np.repeat([0., 1., 2.], [len(x1), len(x2), len(x3)])  # labels

####### Test data ############################################################
# Same three cluster centres, 100 samples each.
xx1 = np.random.normal([1, 1], [1, 1], [100, 2])
xx2 = np.random.normal([5, 4], [1, 1], [100, 2])
xx3 = np.random.normal([8, 0], [1, 1], [100, 2])
x_test = np.concatenate((xx1, xx2, xx3), axis=0)
print(x_test.shape)


y_test = np.repeat([0., 1., 2.], [len(xx1), len(xx2), len(xx3)])  # labels
# Persist the generated data set (optional — kept for later reuse).
np.savez('data', x_train=x_train,
         y_train=y_train, x_test=x_test, y_test=y_test)

# Visualise the distribution of the three training clusters ##################
plt.figure()
for pts, lbl, colour, mk in ((x1, '0', 'r', 'x'),
                             (x2, '1', 'g', '1'),
                             (x3, '2', 'b', 'o')):
    plt.scatter(pts[:, 0], pts[:, 1], label=lbl, c=colour, marker=mk)
plt.legend(loc='upper right')

## Build the model ###########################################################
# LogisticRegression fits the 3-class problem; the fitted model exposes one
# coefficient row and one intercept per class.
model = LogisticRegression().fit(x_train, y_train)
y_pred = model.predict(x_test)

print('coef:', model.coef_)
print('intercept:', model.intercept_)
print('error:', np.sum(y_test != y_pred) / len(y_test))  # misclassification rate

###################### Evaluate the predictions ##############################
# NOTE(review): the original script called model.predict(x_test) a second
# time here (and carried a commented-out hand-rolled softmax variant); the
# call is redundant, so the prediction computed above is reused instead.
print(np.sum(y_pred != y_test) / len(y_test))  # same misclassification rate
print(model.score(x_test, y_test))             # mean accuracy on the test set

# Detailed evaluation of the test-set predictions ############################
cm = confusion_matrix(y_test, y_pred)  # 3x3 confusion matrix

print('混淆矩阵：', cm)

# Per-class precision / recall / F1 summary.
report = classification_report(y_test, y_pred)
print('输出分类报告：', report)


from sklearn.metrics import accuracy_score, precision_score, recall_score

print('accuracy:', accuracy_score(y_test, y_pred))
print('precision:', precision_score(y_test, y_pred, average='macro'))
print('recall:', recall_score(y_test, y_pred, average='macro'))

## Plot the decision regions #################################################
plt.figure()
x_lo, y_lo = x_test.min(axis=0) - 0.5
x_hi, y_hi = x_test.max(axis=0) + 0.5

# Dense grid covering the test data, step 0.05 along both axes.
grid_x, grid_y = np.meshgrid(np.arange(x_lo, x_hi, 0.05),
                             np.arange(y_lo, y_hi, 0.05))

# Classify every grid point, then reshape back to the grid for contourf.
grid_pts = np.column_stack((grid_x.ravel(), grid_y.ravel()))
grid_labels = model.predict(grid_pts).reshape(grid_x.shape)

plt.contourf(grid_x, grid_y, grid_labels)  # filled decision regions
# Overlay the test-set clusters on top of the regions.
for pts, lbl, colour, mk in ((xx1, '0', 'r', 'x'),
                             (xx2, '1', 'w', '1'),
                             (xx3, '2', 'g', 'o')):
    plt.scatter(pts[:, 0], pts[:, 1], label=lbl, c=colour, marker=mk)
plt.legend(loc='upper right')

plt.show()