import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# imported so that Chinese characters render correctly in matplotlib figures
from pylab import *

mpl.rcParams['font.sans-serif'] = ['SimHei']  # allow CJK glyphs in plot labels

# Training / test set file names (relative to the current directory).
ftrain = 'data/ex2data1.txt'
ftest = 'data/ex2data2.txt'

# Load comma-separated data: one sample per row, features first, label last.
traindata = np.loadtxt(ftrain, delimiter=',')
testdata = np.loadtxt(ftest, delimiter=',')

# Number of samples = number of rows.
trainNum = traindata.shape[0]
testNum = testdata.shape[0]

# Slice out the two feature columns.
# .copy() matters: basic slicing returns a VIEW, so without it the
# in-place scaling below would silently mutate traindata/testdata too.
trainX = traindata[:, :2].copy()
testX = testdata[:, :2].copy()

# Feature scaling: standardize with the training mean and the sample
# standard deviation (ddof=1 -> unbiased estimate) so that gradient
# descent converges quickly.
trainMu = np.mean(trainX, 0)
trainSigma = np.std(trainX, 0, ddof=1)
# NumPy broadcasting aligns the (2,) statistics against every row,
# so the original np.tile replication is unnecessary.
trainX -= trainMu
trainX /= trainSigma

# Labels: the 1-D column slice is reshaped back into an (m, 1) column vector.
trainY = traindata[:, -1].reshape(trainNum, 1)
testY = testdata[:, -1].reshape(testNum, 1)
print('traindata after scaling')
print(trainX)

# Normalize the test set with the TRAINING statistics (never its own).
testX -= trainMu
testX /= trainSigma

print('testdata after scaling')
print(testX)

# Prepend the bias column of ones (intercept term).
trainX = np.hstack((np.ones((trainNum, 1)), trainX))
print(trainX)

testX = np.hstack((np.ones((testNum, 1)), testX))


# 定义 sigmoid function
def sigmoid(X):
    """Numerically stable element-wise logistic function 1 / (1 + e^-x).

    Both branches use exp(-|x|), which is always <= 1, so large-magnitude
    inputs cannot overflow np.exp (the naive 1/(1+exp(-X)) overflows and
    warns for large negative X).
    """
    z = np.exp(-np.abs(X))
    return np.where(np.asarray(X) >= 0, 1.0 / (1.0 + z), z / (1.0 + z))


# 定义代价函数
def costFunction(X, y, theata):
    """Cross-entropy cost J = -(1/m) * sum(y*log(h) + (1-y)*log(1-h)).

    Parameters
    ----------
    X : (m, f) design matrix, bias column included.
    y : (m, 1) labels in {0, 1}.
    theata : (f, 1) parameter vector.

    Returns a plain Python float. (The original returned a (1, 1) array,
    which modern NumPy refuses to assign into a scalar J_history slot.)
    """
    m = X.shape[0]  # number of samples
    h = sigmoid(X.dot(theata))
    # Clip predictions away from exactly 0/1 so np.log never sees 0.
    h = np.clip(h, 1e-15, 1.0 - 1e-15)
    J = (-1.0 / m) * (np.log(h).T.dot(y) + np.log(1.0 - h).T.dot(1.0 - y))
    return np.asarray(J).item()


# 定义逻辑回归，梯度下降
def logisticRegression(X, y, alpha, num_iters):
    """Batch gradient descent for logistic regression.

    Parameters
    ----------
    X : (m, f) design matrix with bias column.
    y : (m, 1) labels in {0, 1}.
    alpha : learning rate.
    num_iters : number of gradient steps.

    Returns
    -------
    (theta, J_history) : the learned (f, 1) parameters and the cost
    recorded at every iteration (for convergence plots).
    """
    m, f = np.shape(X)  # m samples, f features (bias included)
    J_history = np.zeros(num_iters)  # cost per iteration
    theta = np.ones((f, 1))  # initial parameters
    for i in range(num_iters):
        # .item() guards against costFunction returning a (1, 1) array,
        # which NumPy no longer accepts when assigning into a scalar slot.
        J_history[i] = np.asarray(costFunction(X, y, theta)).item()
        h = sigmoid(np.dot(X, theta))
        # Gradient of the cross-entropy cost: (1/m) * X^T (h - y).
        deltaTheta = (1.0 / m) * (X.T.dot(h - y))
        theta = theta - alpha * deltaTheta

    return theta, J_history


def testLogRegres(theta, X, y):
    """Evaluate trained parameters on a labelled set.

    Parameters
    ----------
    theta : (f, 1) parameter vector.
    X : (n, f) design matrix with bias column.
    y : (n, 1) labels in {0, 1}.

    Returns
    -------
    (matchCount, accuracy, predictY) : number of correct predictions,
    their fraction, and the raw sigmoid outputs (one per sample).
    """
    n, f = np.shape(X)  # n test samples, f features
    matchCount = 0
    predictY = []
    for i in range(n):
        predict = sigmoid(np.dot(X[i, :], theta))  # predicted probability
        predictY.append(predict)
        # Threshold at 0.5 and compare with the 0/1 label directly;
        # the original routed this through np.where + bool() needlessly.
        if (predict >= 0.5) == bool(y[i, 0]):
            matchCount += 1
    accuracy = matchCount / n
    return matchCount, accuracy, predictY


# 画样本数据和分类线
def showLogRegres(theta, X, y):
    """Scatter-plot the two-feature samples and draw the decision boundary."""
    m, f = np.shape(X)  # m samples, f columns (bias + 2 features expected)

    if f != 3:
        print("Sorry! I can not draw because the dimension of your data is not 2!")
        return 1

    # Samples: class 0 as red circles, class 1 as blue circles
    # (the two features live in columns 1 and 2; column 0 is the bias).
    for idx in range(m):
        label = int(y[idx])
        if label == 0:
            plt.plot(X[idx, 1], X[idx, 2], 'or')
        elif label == 1:
            plt.plot(X[idx, 1], X[idx, 2], 'ob')

    # Decision boundary: theta0 + theta1*x1 + theta2*x2 = 0 (sigmoid input
    # equals zero); solve for x2 at the two extremes of x1 and join them.
    min_x = min(X[:, 1])
    print('min_x=', min_x)
    max_x = max(X[:, 1])
    print(max_x)
    y_min_x = (-theta[0] - theta[1] * min_x) / theta[2]
    y_max_x = (-theta[0] - theta[1] * max_x) / theta[2]
    plt.figure('分类线')
    plt.title('样本数据和分类线')
    plt.plot([min_x, max_x], [y_min_x, y_max_x], '-g')
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()


# 计算逻辑回归运算
# Train logistic regression by gradient descent (alpha=0.01, 15000 iterations).
theta, J_history = logisticRegression(trainX, trainY, 0.01, 15000)
print(theta)

# Plot the per-iteration cost to check convergence.
plt.figure('收敛曲线')  # figure window (named "convergence curve")
plt.plot(J_history, label='代价函数')  # curve label ("cost function")
plt.ylabel('代价');  # y-axis label ("cost")
plt.xlabel('迭代次数')  # x-axis label ("iterations")
plt.title('代价函数')  # title ("cost function")
plt.legend(loc='upper right')  # legend placement
plt.show()  # render the figure

# Plot the training samples and the fitted decision boundary.
fig1 = plt.figure('分类线')  # figure named "decision boundary"
showLogRegres(theta, trainX, trainY)

# Evaluate the learned theta on the held-out test set.
print("step 3: testing...")
mNum, accuracy, predY = testLogRegres(theta, testX, testY)

print("step 4: show the result...")
print('The classify positive sample accuracy is: %.3f%%' % (accuracy * 100))

print('The predicted positive matchNum is', mNum)


# 利用逻辑回归库函数求解
def LoR_model_main(X, y, predict_value):
    """Fit sklearn's LogisticRegression as a cross-check of the manual solver.

    Parameters
    ----------
    X : (m, f) training matrix.
    y : (m, 1) or (m,) labels in {0, 1}.
    predict_value : samples to predict, same column layout as X.

    Returns a dict with the intercept, coefficients, predictions for
    predict_value, and the mean accuracy on (X, y).
    """
    # Create the linear logistic regression object.
    regr = linear_model.LogisticRegression()
    # sklearn expects a 1-D label vector; ravel() avoids the
    # DataConversionWarning raised for (m, 1) column vectors.
    regr.fit(X, np.ravel(y))
    predictions = {}
    predictions['intercept'] = regr.intercept_
    predictions['coefficient'] = regr.coef_
    predictions['predicted_value'] = regr.predict(predict_value)
    # score() returns the mean accuracy on the given data and labels.
    predictions['score'] = regr.score(X, np.ravel(y))
    return predictions


# Cross-check: fit sklearn's solver on the same (scaled, bias-augmented)
# training data and predict back on the training set itself.
predictions = LoR_model_main(trainX, trainY, trainX)
print('intercept=', predictions['intercept'])
print('coefficient=', predictions['coefficient'])
print('predicted_value=', predictions['predicted_value'])
print('score=', predictions['score'])
# showLogRegres(predictions['coefficient'], testX, testY)

