#!/usr/bin/python3.9
# -*- coding: utf-8 -*-
# @Time    : 2021/10/9 18:54
# @Author  : YHSimon

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as opt
from sklearn.metrics import classification_report


def sigmoid(z):
    """Elementwise logistic sigmoid: 1 / (1 + e^(-z)).

    Accepts a scalar or a numpy array; returns the same shape.
    """
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)


def cost(theta, X, y):
    """Mean cross-entropy cost for logistic regression.

    theta: parameter vector (n,); X: design matrix (m, n); y: 0/1 labels (m,).
    Returns the scalar J(theta) = mean(-y*log(h) - (1-y)*log(1-h)).
    """
    # Hypothesis h = sigmoid(X @ theta), computed inline.
    h = 1.0 / (1.0 + np.exp(-(X @ theta)))
    pos_term = -y * np.log(h)
    neg_term = (1.0 - y) * np.log(1.0 - h)
    return np.mean(pos_term - neg_term)


def gradient(theta, X, y):
    """Gradient of the logistic-regression cost w.r.t. theta.

    Returns a vector of shape (n,): X^T (h - y) / m, where h is the
    sigmoid hypothesis and m the number of training rows.
    """
    m = len(X)
    h = 1.0 / (1.0 + np.exp(-(X @ theta)))
    residual = h - y
    return (X.T @ residual) / m


def predit(theta, X):
    """Classify each row of X: 1 when sigmoid(x @ theta) >= 0.5, else 0.

    Returns a plain Python list of 0/1 ints, one entry per row of X.
    (Note: function name keeps the original spelling used by callers.)
    """
    scores = 1.0 / (1.0 + np.exp(-(X @ theta)))
    return [int(s >= 0.5) for s in scores]


# --- 1. Load data and visualize the two classes ---
# Columns: two exam scores and a 0/1 admission flag.
data = pd.read_csv('ex2data1.txt', names=['exam1', 'exam2', 'admitted'])
# print(data.head())
# print(data.describe())

positive = data[data.admitted.isin([1])]  # admitted == 1
negative = data[data.admitted.isin([0])]  # admitted == 0

fig, ax = plt.subplots(figsize=(6, 5))
ax.scatter(positive['exam1'], positive['exam2'], c='b', label='Admitted')
ax.scatter(negative['exam1'], negative['exam2'], c='r', label='NoAdmitted')
# ax.legend(loc=2)
# Place the legend above the plot area.
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height * 0.8])
ax.legend(loc='center left', bbox_to_anchor=(0.2, 1.12), ncol=3)

ax.set_xlabel('Exam1 Score')
ax.set_ylabel('Exam2 Score')
# plt.show()

# Quick sanity plot of the sigmoid function.
x1 = np.arange(-10, 10, 0.1)
plt.plot(x1, sigmoid(x1))
# plt.show()

# add a ones column - this makes the matrix multiplication work out easier
if 'Ones' not in data.columns:
    data.insert(0, 'Ones', 1)

# set X (training data) and y (target variable)
X = data.iloc[:, :-1]  # all columns except the label
y = data.iloc[:, -1]  # the 0/1 admitted column
theta = np.zeros(X.shape[1])

# Check matrix dimensions before running the optimizer.
print(X.shape, theta.shape, y.shape)

# Cost at theta = 0 (should equal log(2)).
print(cost(theta, X, y))  # 0.6931471805599453

# Batch gradient at theta = 0.
print(gradient(theta, X, y))

# --- 2. Fit theta ---
# Advanced optimizer: Octave's "fminunc" corresponds to SciPy's optimize.
result = opt.fmin_tnc(func=cost, x0=theta, fprime=gradient, args=(X, y))
print(result)

# res = opt.minimize(fun=cost, x0=theta, args=(X, y), method='TNC', jac=gradient)
print(result[0])

# Cost at the fitted parameters.
print(cost(result[0], X, y))  # 0.20349770158947528

# --- 3. Evaluate the logistic regression model ---
final_theta = result[0]
predictions = predit(final_theta, X)
correct = [1 if a == b else 0 for (a, b) in zip(predictions, y)]
accuracy = sum(correct) / len(X)
print(accuracy)  # 0.89
# Cross-check with sklearn. NOTE: the signature is
# classification_report(y_true, y_pred) — ground truth must come first,
# otherwise precision and recall are computed against the wrong reference.
print(classification_report(y, predictions))

# --- 4. Decision boundary: theta_0 + x1*theta_1 + x2*theta_2 = 0 ---
x1 = np.arange(150, step=0.1)
x2 = -(final_theta[0] + x1 * final_theta[1]) / final_theta[2]

fig, ax = plt.subplots(figsize=(8, 5))
ax.scatter(positive['exam1'], positive['exam2'], c='b', label='Admitted')
ax.scatter(negative['exam1'], negative['exam2'], c='r', marker='x', label='Not Admitted')
ax.plot(x1, x2)
ax.set_xlim(0, 150)
ax.set_ylim(0, 150)
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_title('Decision Boundary')
plt.show()
