#!/usr/bin/python3.9
# -*- coding: utf-8 -*-
# @Time    : 2021/10/10 15:46
# @Author  : YHSimon

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as opt
from sklearn import linear_model  # 调用sklearn的线性回归包
from sklearn.metrics import classification_report

"""
    Suppose you are the production manager of a factory and you have the results of
    two tests for a batch of microchips. Based on the two test scores you must decide
    whether each chip should be accepted or rejected. To help make that decision you
    have a dataset of past chips' test results, from which you can build a logistic
    regression model.
"""


def plot_data(df=None):
    """Scatter-plot the two test scores, colored by the accept/reject label.

    Args:
        df: DataFrame with 'Test 1', 'Test 2' and 'Accepted' (0/1) columns.
            Defaults to the module-level ``data2`` for backward compatibility.
    """
    if df is None:
        df = data2  # original behavior: plot the globally loaded dataset
    positive = df[df['Accepted'].isin([1])]
    negative = df[df['Accepted'].isin([0])]
    fig, ax = plt.subplots(figsize=(8, 5))
    ax.scatter(positive['Test 1'], positive['Test 2'], c='b', marker='o', label="Accepted")
    ax.scatter(negative['Test 1'], negative['Test 2'], c='r', marker='x', label="Rejected")
    ax.legend()
    ax.set_xlabel('Test 1 Score')
    ax.set_ylabel('Test 2 Score')


def feature_mapping(x1, x2, power):
    """Expand two feature vectors into all polynomial terms x1^a * x2^b with a+b <= power.

    Column ``f{a}{b}`` holds x1**a * x2**b; columns are emitted in order of
    increasing total degree, starting with the all-ones column ``f00``.
    """
    terms = {
        "f{}{}".format(degree - q, q): np.power(x1, degree - q) * np.power(x2, q)
        for degree in range(power + 1)
        for q in range(degree + 1)
    }
    return pd.DataFrame(terms)


def sigmoid(z):
    """Logistic function 1 / (1 + e^{-z}); squashes a scalar or ndarray into (0, 1)."""
    denom = 1.0 + np.exp(-z)
    return 1.0 / denom


def cost(theta, X, y):
    """Unregularized logistic-regression cost (mean cross-entropy).

    Args:
        theta: parameter vector, shape (n,).
        X: design matrix, shape (m, n).
        y: binary labels (0/1), shape (m,).

    Returns:
        Scalar mean cross-entropy loss.

    Uses the identity
        -y*log(sigmoid(z)) - (1-y)*log(1-sigmoid(z)) = logaddexp(0, -z) + (1-y)*z
    so the loss stays finite for large |z|, where the naive
    ``np.log(sigmoid(z))`` form underflows to log(0) = -inf.
    """
    z = X @ theta
    return np.mean(np.logaddexp(0, -z) + (1 - y) * z)


def costReg(theta, X, y, l=1):
    """Logistic cost plus an L2 penalty on every parameter except theta[0].

    ``l`` is the regularization strength lambda; the penalty added is
    l / (2m) * sum(theta[1:]**2), i.e. the intercept is not penalized.
    """
    m = len(X)
    tail = theta[1:]  # skip theta_0: the intercept is never regularized
    penalty = (l / (2 * m)) * tail.dot(tail)
    return cost(theta, X, y) + penalty


def gradient(theta, X, y):
    """Gradient of the unregularized logistic cost: X^T (h - y) / m."""
    residual = sigmoid(X @ theta) - y
    return X.T @ residual / len(X)


def gradientReg(theta, X, y, l=1):
    """Gradient of the L2-regularized logistic cost.

    Bug fix: the penalty term must be scaled by the regularization
    strength ``l`` — the original computed ``(1 / len(X)) * theta``,
    silently ignoring ``l``. The correct term is (l/m) * theta, with
    theta[0] (the intercept) left unpenalized.

    Args:
        theta: parameter vector, shape (n,).
        X: design matrix, shape (m, n).
        y: binary labels (0/1), shape (m,).
        l: regularization strength lambda (default 1).

    Returns:
        Gradient vector, shape (n,).
    """
    m = len(X)
    h = 1 / (1 + np.exp(-(X @ theta)))  # sigmoid(X @ theta), written out explicitly
    grad = X.T @ (h - y) / m            # unregularized gradient
    reg = (l / m) * theta               # fix: was (1 / m) * theta
    reg[0] = 0                          # theta_0 is not regularized
    return grad + reg


def predict(theta, X):
    """Return class predictions as a list of 0/1 ints: 1 where sigmoid(X @ theta) >= 0.5."""
    probabilities = sigmoid(X @ theta)
    return [int(p >= 0.5) for p in probabilities]


# Load the chip dataset: two test scores plus an accept/reject (1/0) label.
data2 = pd.read_csv('ex2data2.txt', names=['Test 1', 'Test 2', 'Accepted'])
print(data2.head())
plot_data()

# Feature mapping: expand the two raw scores into all polynomial terms up to degree 6.
x1 = data2['Test 1']
x2 = data2['Test 2']
_data2 = feature_mapping(x1, x2, power=6)
print(_data2.head())

# Regularized cost function.
# theta_0 is not penalized. Build features, labels and theta with matching dimensions.
X = _data2
y = data2['Accepted']
theta = np.zeros(X.shape[1])
# print(costReg(theta, X, y, l=1))  # 0.6931471805599454
# print(gradientReg(theta, X, y, 1))

# Learn the parameters with truncated-Newton optimization (lambda = 2).
result2 = opt.fmin_tnc(func=costReg, x0=theta, fprime=gradientReg, args=(X, y, 2))
print(result2)
# Alternatively, use scikit-learn:
# model = linear_model.LogisticRegression(penalty='l2', C=1.0)
# model.fit(X, y.ravel())
# print(model.score(X, y))  # 0.8305084745762712

# Evaluate the fitted model: training-set accuracy.
final_theta = result2[0]
predictions = predict(final_theta, X)
correct = [1 if a == b else 0 for (a, b) in zip(predictions, y)]
accuracy = sum(correct) / len(correct)
print(accuracy)
# Or use sklearn's classification report (precision / recall / F1).
print(classification_report(y, predictions))

# Decision boundary: evaluate theta^T * mapped(x) on a grid and draw its zero level.
x = np.linspace(-1, 1.5, 250)
xx, yy = np.meshgrid(x, x)  # 250x250 grid over the feature space

z = feature_mapping(xx.ravel(), yy.ravel(), 6).values
z = z @ final_theta
z = z.reshape(xx.shape)
plot_data()
plt.contour(xx, yy, z, levels=[0])  # the boundary is exactly where z == 0
plt.ylim(-0.8, 1.2)  # fix: was ylim(-.8, 1, 2), which passed 2 into the `emit` slot
plt.show()
