import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression  # logistic regression model
from sklearn.metrics import accuracy_score  # accuracy metric

# Load the exam dataset: Exam1/Exam2 scores plus a binary Pass label.
data = pd.read_csv('examdata.csv')

# Visualize the data: scatter passing vs. failing students on the two scores.
pass_mask = data['Pass'] == 1
plt.figure(figsize=(10, 10))
passed = plt.scatter(data['Exam1'][pass_mask], data['Exam2'][pass_mask])
failed = plt.scatter(data['Exam1'][~pass_mask], data['Exam2'][~pass_mask])
plt.title('Exam1 and Exam2')
plt.xlabel('Exam1')
plt.ylabel('Exam2')
# Name the two scatter groups in the legend.
plt.legend((passed, failed), ('Pass', 'Fail'))
# plt.show()

# Train the model: logistic regression on both exam scores.
X = data.drop(columns=['Pass'])
Y = data['Pass']
# Keep the individual score columns for plotting the decision boundary later.
exam1 = data['Exam1']
exam2 = data['Exam2']

# Sanity-check the dimensions.
# print(X.shape)
# print(Y.shape)

model = LogisticRegression()
model.fit(X, Y)

# Predictions and evaluation on the training set.
Y_pred = model.predict(X)
# print(accuracy_score(Y, Y_pred))

# Predict whether a student scoring 70 on Exam1 and 65 on Exam2 passes.
# Pass a DataFrame with the training column names: predicting on a bare
# nested list makes sklearn emit an "X does not have valid feature names"
# warning because the model was fitted on a named-column DataFrame.
y_test_predict = model.predict(pd.DataFrame({'Exam1': [70], 'Exam2': [65]}))
# print(y_test_predict)

# Inspect the decision boundary: theta0 + theta1*x1 + theta2*x2 = 0.
# print(model.coef_)
# print(model.intercept_)
theta0 = model.intercept_[0]
theta1, theta2 = model.coef_[0]
print(theta0, theta1, theta2)
# Solve the boundary equation for Exam2 as a function of Exam1.
exam2_new = -(theta0 + theta1 * exam1) / theta2
# print(exam2_new)
plt.plot(exam1, exam2_new)
plt.show()
