import pandas as pd
import numpy as np  # 矩阵运算
import matplotlib.pyplot as plt
import seaborn as sns  # 图形显示

plt.style.use('fivethirtyeight')  # 样式美化
from sklearn.metrics import classification_report  # 这个包式评价报告

# Build a logistic regression model that predicts whether a student is admitted
# to university based on the scores of two exams.
# Python implementation of logistic regression. Goal: build a classifier by
# solving for three parameters θ0, θ1, θ2 (θ1 for exam1, θ2 for exam2), i.e.
# find the decision boundary.
# A threshold turns the predicted probability into a class label:
# conventionally >= 0.5 means admitted, < 0.5 means not admitted.

# sigmoid: maps a linear score to a probability
# model: returns the predicted values
# cost: computes the loss for given parameters
# gradient: computes the gradient direction for each parameter
# descent: performs the parameter update
# accuracy: computes the precision of the classifier

# Load the first dataset: two exam scores plus the 0/1 admission label.
data = pd.read_csv("ex2data1.txt", names=['exam1', 'exam2', 'admitted'])
print(data.head())
#  Sample output:
#        exam1      exam2  admitted
# 0  34.623660  78.024693          0
# 1  30.286711  43.894998          0
# 2  35.847409  72.902198          0
# 3  60.182599  86.308552          1
# 4  79.032736  75.344376          1

print(data.describe())
#             exam1       exam2   admitted
# count  100.000000  100.000000  100.000000
# mean    65.644274   66.221998    0.600000
# std     19.458222   18.582783    0.492366
# min     30.058822   30.603263    0.000000
# 25%     50.919511   48.179205    0.000000
# 50%     67.032988   67.682381    1.000000
# 75%     80.212529   79.360605    1.000000
# max     99.827858   98.869436    1.000000

sns.set(context="notebook", style="darkgrid", palette=sns.color_palette("RdBu", 2))  # seaborn style parameters
sns.lmplot(x='exam1', y='exam2', hue='admitted', data=data, height=6, fit_reg=False, scatter_kws={"s": 50})

plt.show()


def get_X(df):
    """Build the design matrix from a DataFrame whose last column is the label.

    Prepends a column of ones (the intercept term) and drops the label column.
    Returns an ndarray of shape (n_samples, n_features + 1).
    """
    intercept = pd.DataFrame({"ones": np.ones(len(df))})
    # axis=1 concat aligns on rows and appends columns, i.e. sticks the
    # all-ones column in front of the original features.
    combined = pd.concat([intercept, df], axis=1)
    # .values returns an ndarray (DataFrame.as_matrix was removed in GH18458);
    # [:, :-1] strips the trailing label column.
    return combined.iloc[:, :-1].values


def get_Y(df):
    """Return the label column (the last column of df) as a 1-D ndarray."""
    labels = df.iloc[:, -1]
    return labels.to_numpy()


# Feature normalization (z-score standardization; useful for logistic
# regression just as it is for linear regression).
def normalize_feature(df):
    """Standardize every column to zero mean and unit sample std (ddof=1)."""
    def zscore(col):
        return col.sub(col.mean()).div(col.std())

    return df.apply(zscore)


X = get_X(data)  # design matrix: intercept column + the two exam scores
print(X.shape)
# (100, 3)

y = get_Y(data)  # 0/1 admission labels
print(y.shape)
# (100,)


# Sigmoid function
# g is the standard logistic (S-shaped) function g(z) = 1 / (1 + e^-z).
# Combined with the linear score it yields the hypothesis hθ(x) = 1 / (1 + e^-θᵀx).


def sigmoid(z):
    """Map any real score z (scalar or ndarray) to a probability in (0, 1)."""
    exp_term = np.exp(-z)
    return 1 / (1 + exp_term)


# Plot the sigmoid curve over [-10, 10).
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(np.arange(-10, 10, step=0.01), sigmoid(np.arange(-10, 10, step=0.01)))
ax.set_ylim((-0.1, 1.1))  # visible axis range
ax.set_xlabel('z', fontsize=18)
ax.set_ylabel('g(z)', fontsize=18)
ax.set_title('sigmoid function', fontsize=18)
plt.show()


# Cost function
theta = np.zeros(3)  # initial parameters θ0, θ1, θ2 (all zeros)
print(theta)
# [0. 0. 0.]


def cost(theta, X, y):
    """Unregularized cross-entropy loss for logistic regression.

    J(θ) = mean(-y·log(h) - (1-y)·log(1-h)), where h = sigmoid(X @ theta).

    Fix: the original evaluated sigmoid(X @ theta) twice per call; the
    hypothesis is now computed once and reused (same floating-point result).
    """
    h = sigmoid(X @ theta)
    return np.mean(-y * np.log(h) - (1 - y) * np.log(1 - h))


print(cost(theta, X, y))  # cost at θ = 0 is ln(2) since every h = 0.5
# 0.6931471805599453


# Gradient for gradient descent
def gradient(theta, X, y):
    """Analytic gradient of the cross-entropy cost: (1/m) · Xᵀ(sigmoid(Xθ) - y)."""
    m = len(X)
    residual = sigmoid(X @ theta) - y
    return ((1 / m) * X.T) @ residual


print(gradient(theta, X, y))  # gradient at the all-zeros starting point
# [ -0.1        -12.00921659 -11.26284221]


# Fit the parameters with scipy's Newton-CG solver, supplying our analytic gradient.
import scipy.optimize as opt
res = opt.minimize(fun=cost, x0=theta, args=(X, y), method='Newton-CG', jac=gradient)
print(res)
#      fun: 0.20349770451259855
#      jac: array([1.62947970e-05, 1.11339134e-03, 1.07609314e-03])
#  message: 'Optimization terminated successfully.'
#     nfev: 71
#     nhev: 0
#      nit: 28
#     njev: 187
#   status: 0
#  success: True
#        x: array([-25.16576744,   0.20626712,   0.20150754])
#
# Process finished with exit code 0


def predict(x, theta):
    """Hard 0/1 class predictions: 1 where sigmoid(x @ theta) >= 0.5, else 0."""
    probability = sigmoid(x @ theta)
    admitted = probability >= 0.5
    return admitted.astype(int)  # bool -> int conversion


# Evaluate the fitted model on the training set.
final_theta = res.x
y_pred = predict(X, final_theta)
print(classification_report(y, y_pred))
#               precision    recall  f1-score   support
#
#            0       0.87      0.85      0.86        40
#            1       0.90      0.92      0.91        60
#
#     accuracy                           0.89       100
#    macro avg       0.89      0.88      0.88       100
# weighted avg       0.89      0.89      0.89       100


# Find the decision boundary: sigmoid(θᵀx) = 0.5  <=>  θ0 + θ1·exam1 + θ2·exam2 = 0
print(res.x)
# [-25.16576744   0.20626712   0.20150754]

coef = -(res.x / res.x[2])  # solve for exam2: exam2 = coef[0] + coef[1]·exam1
print(coef)
# [124.88747248  -1.02361985  -1.        ]

# NOTE(review): x and y here shadow the earlier label array y; harmless because
# y is rebuilt from the second dataset further down, but worth renaming.
x = np.arange(130, step=0.1)
y = coef[0] + coef[1]*x


# Draw the boundary line over the scatter plot.
sns.set(context="notebook", style="ticks", font_scale=1.5)

sns.lmplot(x='exam1', y='exam2', hue='admitted', data=data, height=6, fit_reg=False, scatter_kws={"s": 25})
plt.plot(x, y, 'grey')
plt.xlim(0, 130)
plt.ylim(0, 130)
plt.title('Decision Boundary')
plt.show()

# ===================================================================================================================
# ===================================================================================================================
# ===================================================================================================================
# ===================================================================================================================


# Second dataset: two chip test scores plus a 0/1 acceptance label
# (not linearly separable — motivates polynomial feature mapping below).
df = pd.read_csv('ex2data2.txt', names=['test1', 'test2', 'accepted'])
print(df.head())
#       test1    test2  accepted
# 0  0.051267  0.69956          1
# 1 -0.092742  0.68494          1
# 2 -0.213710  0.69225          1
# 3 -0.375000  0.50219          1
# 4 -0.513250  0.46564          1


sns.set(context="notebook", style="ticks", font_scale=1.5)
sns.lmplot(x="test1", y="test2", hue='accepted', data=df,
           height=6,
           fit_reg=False,
           scatter_kws={"s": 50})
plt.title('Regularized Logistic Regression')
plt.show()


# Feature mapping: lift the two raw features into a higher-dimensional
# polynomial space so a linear boundary in that space can be non-linear here.
def feature_mapping(x, y, power, as_ndarray=False):
    """Return every polynomial term x^(i-p) * y^p for 0 <= p <= i <= power.

    Columns are named "f{a}{b}" where a is the power of x and b the power of y
    (e.g. power=6 on two features yields 28 columns f00 .. f06).
    Returns a DataFrame by default, or an ndarray when as_ndarray is True.
    """
    terms = {
        "f{}{}".format(i - p, p): np.power(x, i - p) * np.power(y, p)
        for i in range(power + 1)
        for p in range(i + 1)
    }
    mapped = pd.DataFrame(terms)
    return mapped.values if as_ndarray else mapped


x1 = np.array(df.test1)
x2 = np.array(df.test2)

data = feature_mapping(x1, x2, power=6)  # maps 2 raw features to 28 polynomial features
print(data.shape)
# (118, 28)

print(data.head())
#    f00       f10      f01       f20  ...       f33       f24       f15       f06
# 0  1.0  0.051267  0.69956  0.002628  ...  0.000046  0.000629  0.008589  0.117206
# 1  1.0 -0.092742  0.68494  0.008601  ... -0.000256  0.001893 -0.013981  0.103256
# 2  1.0 -0.213710  0.69225  0.045672  ... -0.003238  0.010488 -0.033973  0.110047
# 3  1.0 -0.375000  0.50219  0.140625  ... -0.006679  0.008944 -0.011978  0.016040
# 4  1.0 -0.513250  0.46564  0.263426  ... -0.013650  0.012384 -0.011235  0.010193
# [5 rows x 28 columns]


# Setup for the regularized cost function
theta = np.zeros(data.shape[1])  # one parameter per mapped feature (28)
X = feature_mapping(x1, x2, power=6, as_ndarray=True)
print(X.shape)
# (118, 28)

y = get_Y(df)
print(y.shape)
# (118,)


def regularized_cost(theta, X, y, l=1):
    """Cross-entropy cost plus the L2 penalty (l / 2m) · Σ θⱼ² for j >= 1.

    The intercept θ0 is excluded from the penalty, per convention.
    """
    penalized_params = theta[1:]  # skip θ0
    regularized_term = (l / (2 * len(X))) * np.power(penalized_params, 2).sum()
    return cost(theta, X, y) + regularized_term


print(regularized_cost(theta, X, y, l=1))
# 0.6931471805599454


# Regularized gradient
def regularized_gradient(theta, X, y, l=1):
    """Gradient of the regularized cost: gradient(θ) + (l/m)·θⱼ for j >= 1.

    The intercept θ0 is not penalized, so a leading zero is prepended to the
    penalty vector.

    Bug fix: the original computed the penalty as (1/m)·θⱼ, silently ignoring
    the regularization strength `l`; it was only coincidentally correct for
    the default l=1.
    """
    theta_j1_to_n = theta[1:]
    regularized_theta = (l / len(X)) * theta_j1_to_n
    # Leading zero keeps θ0 penalty-free.
    regularized_term = np.concatenate([np.array([0]), regularized_theta])
    return gradient(theta, X, y) + regularized_term


# Fit the parameters
import scipy.optimize as opt  # NOTE(review): duplicate import — scipy.optimize is already imported above
print('init cost = {}'.format(regularized_cost(theta, X, y)))
# init cost = 0.6931471805599454


# Newton-CG again, now with the regularized cost and gradient.
res = opt.minimize(fun=regularized_cost, x0=theta, args=(X, y), method='Newton-CG', jac=regularized_gradient)
print(res)
#      fun: 0.529002729712739
#      jac: array([ 7.26089191e-08,  4.22913232e-09,  8.15815876e-09,  6.15699190e-08,
#         7.74567232e-09, -3.09360466e-08,  2.12821347e-08,  1.22156735e-08,
#         1.96058084e-08, -3.19108791e-08, -4.39405717e-09, -2.76847096e-09,
#        -2.77934021e-08,  1.23592858e-08, -7.14474161e-08,  8.98276579e-09,
#         1.45962365e-08, -1.00120216e-08, -7.32796823e-09,  1.43317535e-08,
#        -4.38679455e-08, -4.85023121e-09, -3.40732357e-10, -1.11668147e-08,
#        -5.01047274e-09, -1.44326742e-08,  8.78794915e-09, -5.71951122e-08])
#  message: 'Optimization terminated successfully.'
#     nfev: 7
#     nhev: 0
#      nit: 6
#     njev: 57
#   status: 0
#  success: True
#        x: array([ 1.27273909,  0.62527214,  1.18108783, -2.01995993, -0.91742426,
#        -1.43166279,  0.12400726, -0.36553444, -0.35723901, -0.17513021,
#        -1.45815774, -0.05098947, -0.61555653, -0.27470644, -1.19281683,
#        -0.24218793, -0.20600565, -0.04473137, -0.27778488, -0.2953778 ,
#        -0.45635711, -1.04320321,  0.02777158, -0.29243198,  0.01556636,
#        -0.32738013, -0.14388704, -0.92465213])


# Predict on the training set and report precision/recall.
final_theta = res.x
y_pred = predict(X, final_theta)

print(classification_report(y, y_pred))
#               precision    recall  f1-score   support
#
#            0       0.90      0.75      0.82        60
#            1       0.78      0.91      0.84        58
#
#     accuracy                           0.83       118
#    macro avg       0.84      0.83      0.83       118
# weighted avg       0.84      0.83      0.83       118


