#!/usr/bin/env python3
# https://blog.csdn.net/weixin_46344368/article/details/105904589
# -*- coding: utf-8 -*-
##############################################
# @Author: DengLibin 榆霖
# @Date: Create in 2022-03-21 14:39:05
# @Description: Logistic regression prediction of exam pass/fail
##############################################

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score


def run(data_path='examdata.csv'):
    """Fit first- and second-order logistic regression models on exam data.

    Loads a CSV with columns ``Exam1``, ``Exam2`` and a binary ``Pass``
    label, trains a linear and a quadratic-feature logistic regression,
    prints their training accuracies, plots both decision boundaries over
    a scatter of the data, and prints the linear model's prediction for
    the sample (Exam1=89, Exam2=34).

    Parameters
    ----------
    data_path : str
        Path to the exam-data CSV file (default ``'examdata.csv'``).
    """
    data = pd.read_csv(data_path)

    # Scatter plot: distinguish passed from failed samples.
    plt.figure()
    mask = data.loc[:, 'Pass'] == 1  # True where the student passed
    passed = plt.scatter(data.loc[:, 'Exam1'][mask], data.loc[:, 'Exam2'][mask])
    failed = plt.scatter(data.loc[:, 'Exam1'][~mask], data.loc[:, 'Exam2'][~mask])  # ~mask inverts the filter
    plt.title('Exam1-Exam2')
    plt.xlabel('Exam1')
    plt.ylabel('Exam2')
    plt.legend((passed, failed), ('passed', 'failed'))

    # Features: drop the label column; label: the 'Pass' column.
    X = data.drop(['Pass'], axis=1)
    Y = data.loc[:, 'Pass']

    x1 = data.loc[:, 'Exam1']
    x2 = data.loc[:, 'Exam2']

    # First-order (linear-boundary) model.
    LR = LogisticRegression()
    LR.fit(X, Y)

    # Training accuracy — the closer to 1 the better.
    y_predict = LR.predict(X)
    accuracy = accuracy_score(Y, y_predict)
    print("accuracy", accuracy)

    # Linear boundary: theta0 + theta1*x1 + theta2*x2 = 0.
    theta0 = LR.intercept_[0]  # intercept_ is a 1-element array; take the scalar
    theta1, theta2 = LR.coef_[0][0], LR.coef_[0][1]
    print(theta0, theta1, theta2)
    x2_new = -(theta0 + theta1 * x1) / theta2
    plt.plot(x1, x2_new)  # draw the linear decision boundary

    # ---------------- second-order decision boundary ----------------
    # Build quadratic features: x1^2, x2^2 and the cross term x1*x2.
    x_new = pd.DataFrame({
        'x1': x1,
        'x2': x2,
        'x1_2': x1 * x1,
        'x2_2': x2 * x2,
        'x1_x2': x1 * x2,
    })
    # Train a second model on the quadratic feature set.
    LR2 = LogisticRegression()
    LR2.fit(x_new, Y)

    # Evaluate the quadratic model on the training data.
    y2_predict = LR2.predict(x_new)
    accuracy2 = accuracy_score(Y, y2_predict)
    print("accuracy2", accuracy2)
    x1_new = x1.sort_values()  # sort x1 so the boundary curve plots left-to-right

    # Boundary is a quadratic in x2 for each x1:
    # theta4*x2^2 + (theta5*x1 + theta2)*x2 + (theta0 + theta1*x1 + theta3*x1^2) = 0
    theta0 = LR2.intercept_[0]
    theta1, theta2, theta3, theta4, theta5 = LR2.coef_[0][:5]
    a = theta4
    b = theta5 * x1_new + theta2
    c = theta0 + theta1 * x1_new + theta3 * x1_new * x1_new
    # NOTE(review): only the '+' root of the quadratic is plotted, matching
    # the original tutorial; the '-' root is the boundary's other branch.
    x2_new_boundary = (-b + np.sqrt(b * b - 4 * a * c)) / (2 * a)
    plt.plot(x1_new, x2_new_boundary)  # draw the quadratic decision boundary

    # Predict one new sample with the linear model. Wrap it in a DataFrame
    # with matching column names so sklearn does not emit the
    # "X does not have valid feature names" warning.
    sample = pd.DataFrame([[89, 34]], columns=['Exam1', 'Exam2'])
    y_predict2 = LR.predict(sample)
    print("y_predict2", y_predict2)

    plt.show()



# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    run()