from numpy import *
import os

# Directory holding the sample data files.
# NOTE(review): machine-specific absolute path — will not exist on other machines; confirm before running.
path = '/Users/centling/WORKSPACE/CODE/Edwin/BackEnd/Python/math-py/src/files'
training_sample = 'trainingSample.txt'  # whitespace-delimited "x1 x2 y" rows (see loadDataSet)
testing_sample = 'testingSample.txt'  # presumably held-out samples; not referenced in this file chunk

# Read the training samples from a file (same format as the example data mentioned above).
# The 1.0 prepended to each row in loadDataSet is the constant bias feature x0 = 1.
def loadDataSet(p, file_n):
    """Load a whitespace-delimited sample file into features and labels.

    Each non-empty line must hold "x1 x2 y". A constant bias feature
    x0 = 1.0 is prepended to every feature row.

    Args:
        p: directory containing the sample file.
        file_n: file name inside that directory.

    Returns:
        (dataMat, labelMat): list of [1.0, x1, x2] float feature rows and
        the matching list of integer class labels y.
    """
    dataMat = []; labelMat = []
    with open(os.path.join(p, file_n)) as fr:
        # Iterate the file lazily instead of slurping it with readlines().
        for line in fr:
            lineArr = line.strip().split()
            if not lineArr:
                continue  # tolerate blank lines (e.g. a trailing newline)
            dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # features x0, x1, x2
            labelMat.append(int(lineArr[2]))  # ground-truth label y
    return dataMat, labelMat

def sigmoid(inX):
    """Logistic function: map a real scalar or array into the open interval (0, 1)."""
    decay = exp(-inX)
    return 1.0 / (1.0 + decay)

# Gradient descent for the regression coefficients a.
# NOTE(review): the original note claimed the iteration count was raised to 1000,
# but the code actually runs maxCycles = 10 iterations.
def gradAscent(dataMatIn, classLabels, alpha=0.001, maxCycles=10):
    """Fit logistic-regression weights by full-batch gradient descent.

    NOTE(review): despite the name, this performs gradient *descent* on the
    cross-entropy loss (weights move against the gradient). The name is kept
    for backward compatibility with existing callers.

    Args:
        dataMatIn: m x n list of feature rows (bias feature x0 = 1 included).
        classLabels: length-m list of 0/1 class labels.
        alpha: learning rate; default preserves the original behavior.
        maxCycles: number of full-batch iterations; default preserves the
            original behavior (10 — the old comment's claim of 1000 never
            matched the code).

    Returns:
        n x 1 numpy matrix of fitted weights.
    """
    dataMatrix = mat(dataMatIn)              # m x n feature matrix
    labelMat = mat(classLabels).transpose()  # m x 1 label column
    m, n = shape(dataMatrix)
    weights = ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)        # model predictions, m x 1
        error = h - labelMat                     # prediction error, m x 1
        grad = dataMatrix.transpose() * error    # dLoss/dweights, n x 1
        weights = weights - alpha * grad         # step against the gradient
    return weights

# Hand-written smoke test for the routines above.
def test_logistic_regression():
    """Train on the sample file and print the data, weights and predictions."""
    features, labels = loadDataSet(path, training_sample)  # raw training data
    coefficients = gradAscent(features, labels)            # fitted regression coefficients a
    predictions = sigmoid(mat(features) * coefficients)    # predicted h(a) for every sample
    print(features, labels)
    print(coefficients)
    print(predictions)
    # plotBestFit(coefficients.getA())

test_logistic_regression()

