# Script setup: imports, global matplotlib styling, warning suppression,
# and a fixed RNG seed so the demo is reproducible.
import numpy as np
import os

import matplotlib
import matplotlib.pyplot as plt
# Enlarge axis-label and tick-label fonts for every figure in this script.
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
import warnings
# NOTE(review): this silences ALL warnings process-wide — fine for a demo,
# but it can hide real problems (deprecations, convergence warnings).
warnings.filterwarnings('ignore')
# Seed NumPy's legacy global RNG for reproducibility of any random ops.
np.random.seed(42)

# Build a binary classification problem from the iris dataset:
# predict whether a flower is Iris-Virginica from petal width alone.
from sklearn import datasets

iris = datasets.load_iris()  # Bunch with 'data', 'target', 'DESCR', ...
print(list(iris.keys()))

# Feature matrix: keep only the last column (petal width, cm) as a 2-D array.
X = iris.data[:, 3:]
# Binary target: 1 for Iris-Virginica (class index 2), 0 otherwise.
y = (iris.target == 2).astype(np.intc)

# print(y)

# Fit a univariate logistic regression: P(Iris-Virginica | petal width).
from sklearn.linear_model import LogisticRegression

log_res = LogisticRegression().fit(X, y)  # fit() returns the estimator itself

# Dense grid of petal widths over [0, 3] cm as a (1000, 1) column vector,
# plus the model's class probabilities at each grid point
# (column 0: not Virginica, column 1: Virginica).
X_new = np.linspace(0, 3, 1000)[:, np.newaxis]
y_proba = log_res.predict_proba(X_new)

# Visualize both class-probability curves and the decision boundary.
plt.figure(figsize=(12, 5))
# Decision boundary: the smallest grid petal width at which
# P(Iris-Virginica) first reaches 0.5.
decision_boundary = X_new[y_proba[:, 1] >= 0.5][0][0]

# Vertical dotted line marking the boundary (spans beyond the y-limits).
plt.plot([decision_boundary, decision_boundary], [-1, 2], 'k:', linewidth=2)
plt.plot(X_new, y_proba[:, 1], 'g-', label='Iris-Virginica')
plt.plot(X_new, y_proba[:, 0], 'b--', label='Not Iris-Virginica')
# Arrows showing which class each side of the boundary is assigned to.
plt.arrow(decision_boundary, 0.08, -0.3, 0, head_width=0.05, head_length=0.1, fc='b', ec='b')
plt.arrow(decision_boundary, 0.92, 0.3, 0, head_width=0.05, head_length=0.1, fc='g', ec='g')
plt.text(decision_boundary+0.02, 0.15, 'Decision Boundary', fontsize=16, color='k', ha='center')
plt.xlabel('Petal width(cm)', fontsize=16)  # fixed typo: 'Peta' -> 'Petal'
plt.ylabel('y_proba', fontsize=16)
plt.axis([0, 3, -0.02, 1.02])
plt.legend(loc='center left', fontsize=16)
plt.show()


'''
As petal width increases, the probability of Iris-Virginica increases,
while the probability of not being Iris-Virginica decreases.
'''