import numpy as np, pandas as pd, matplotlib.pyplot as plt, sklearn.preprocessing as pp
import machine_learning.logisticRegression.method as lrm

# Load the exam-score dataset and rename its columns.
path = "machine_learning/logisticRegression/dataSet.csv"
pdData = pd.read_csv(path, header=0, names=['exam1', 'exam2', 'admitted'])
# BUG FIX: a bare `pdData.head()` discards its return value when run as a
# script (it only displays implicitly in a REPL/notebook) — print it so the
# data preview is actually visible.
print(pdData.head())

print(pdData.shape)  # (64, 3)

# A 1 Under-sampling ==================================================
# Balance the classes: randomly sample the majority class (admitted == 1)
# down to the size of the minority class (admitted == 0), without replacement.
y_0_row = pdData[pdData["admitted"] == 0].shape[0]
y_0_index = pdData[pdData["admitted"] == 0].index
y_1_index = pdData[pdData["admitted"] == 1].index
y_1_index_underSampling = np.random.choice(y_1_index, size=y_0_row, replace=False)
pdData_1_underSampling_index = np.concatenate([y_0_index, y_1_index_underSampling], axis=0)
# BUG FIX: the collected values are index *labels* (taken from .index), so
# label-based .loc is the correct indexer; position-based .iloc only happens
# to coincide while the index is the default RangeIndex.
pdData_1_underSampling = pdData.loc[pdData_1_underSampling_index, :]
# pdData = pdData_1_underSampling     # tip: uncomment to enable under-sampling

# A 2 Main preprocessing ==============================================
# Prepend an intercept column X0 of ones; X1 is exam1 and X2 is exam2.
pdData.insert(0, 'X0', 1)

# Initial parameter row vector (one weight per column X0/exam1/exam2).
theta = np.zeros((1, 3))

# A 3 Standardization: transform features to zero mean, unit variance.
scaled_pdData = pdData.copy()
scaled_pdData.iloc[:, 1:3] = pp.scale(pdData.iloc[:, 1:3])  # * 100  # standardized * 100

# After one run, changing the iteration count and re-running this line
# (without restarting the console) continues from the previous parameters.
size = len(pdData)
# batch_size == sample count: full-batch gradient descent; == 1: stochastic;
# anything in between: mini-batch.
# tip: scaled_pdData is the standardized data, pdData is the raw data.
(theta, theta_1) = lrm.runExp(pdData, theta, batch_size=size, stop_type=0, thresh=2000, alpha=0.000001, alpha_1=0.000004)
# (theta, theta_1) = lrm.runExp(pdData, theta, batch_size=size, stop_type=1, thresh=0.00001, alpha=0.00001, alpha_1=0.0001)
# (theta, theta_1) = lrm.runExp(pdData, theta, batch_size=size, stop_type=2, thresh=0.06, alpha=0.00001, alpha_1=0.0001)

# A Scatter plot ======================================================
scatter_data = scaled_pdData.copy()
# Split rows into the two classes by label.
positive = scatter_data[scatter_data['admitted'] == 1]
negative = scatter_data[scatter_data['admitted'] == 0]

# Scatter plot of both classes
fig, ax = plt.subplots(figsize=(5, 5))
ax.scatter(positive['exam1'], positive['exam2'], s=30, c='b', marker='o', label='admitted')
ax.scatter(negative['exam1'], negative['exam2'], s=30, c='r', marker='x', label='not admitted')

# Legend and axis labels
ax.legend()
ax.set_xlabel('exam 1 score')
# BUG FIX: the second label was set with set_xlabel again, overwriting the
# x-axis label and leaving the y axis unlabeled.
ax.set_ylabel('exam 2 score')

# Decision boundary: theta0 + theta1*exam1 + theta2*exam2 = 0
#   => exam2 = -(theta1 * exam1 + theta0) / theta2
# (column order X0, exam1, exam2 is fixed by the insert above)
x = np.arange(-3, 3)
# BUG FIX: theta_1[0][1] (exam1 coefficient) and theta_1[0][2] (exam2
# coefficient) were swapped, so the wrong line was drawn.
ax.plot(x, -(theta_1[0][1] * x / theta_1[0][2]) - (theta_1[0][0] / theta_1[0][2]))

plt.rcParams['font.sans-serif'] = ['MicroSoft Yahei']  # font with CJK glyph support
fig.show()

"""
不收敛的处理办法:
    1.学习率调整       (太大会不收敛,较大会降低精度[调整步长大],应该在时间允许内尽可能小)
    2.数据归一化处理   (若收敛,归一化可能导致收敛速度下降)
"""

print("theta: ", theta)
print("theta_1: ", theta_1)

# A 精度 ========================================================
# imp 1.
scaled_X = scaled_pdData.iloc[:, :3]
y = scaled_pdData.iloc[:, 3]

accuracy = lrm.accuracy(scaled_X, y, theta, cor_sta=0.5)
print("- Likelihood_accuracy = {}%".format(accuracy * 100))

accuracy_1 = lrm.accuracy(scaled_X, y, theta_1, cor_sta=0.5)
print("- error_accuracy = {}%".format(accuracy * 100))

# A Prediction; once the code above has run, these lines can be executed on
# their own.
test_path = "machine_learning/logisticRegression/testData.csv"
testData = pd.read_csv(test_path, header=0)
lrm.custom_predict(testData, pdData, theta, cor_sta=0.5)



