import numpy as np
from sklearn.model_selection import train_test_split


def sigmoid(inX):
    """Numerically stable logistic sigmoid, 1 / (1 + exp(-inX)), elementwise.

    The naive form overflows (``RuntimeWarning``, ``inf``) when ``-inX`` is
    large; here ``exp`` is only ever evaluated on non-positive values, so no
    overflow can occur.  Works on scalars, ndarrays, and np.matrix inputs.

    Args:
        inX: scalar or array-like of real values.

    Returns:
        Sigmoid of the input with the same shape (scalars come back as
        0-d NumPy values).
    """
    z = np.exp(-np.abs(inX))
    # x >= 0:  1 / (1 + e^-x);   x < 0:  e^x / (1 + e^x)  — same value,
    # but each branch keeps the exponent non-positive.
    return np.where(np.asarray(inX) >= 0, 1.0 / (1.0 + z), z / (1.0 + z))


def gradAscent(dataMatIn, classLabels):
    """Fit logistic-regression weights with full-batch gradient ascent.

    Args:
        dataMatIn: 2-D sequence of samples (rows), each with a leading
            bias column of 1.0.
        classLabels: sequence of 0/1 labels, one per sample.

    Returns:
        np.matrix of shape (n_features, 1) holding the learned weights.
    """
    features = np.mat(dataMatIn)               # (m, n) design matrix
    targets = np.mat(classLabels).transpose()  # (m, 1) label column

    _, num_features = np.shape(features)
    learning_rate = 0.001
    num_iterations = 1500
    coeffs = np.ones((num_features, 1))

    # Full-batch updates: climb the gradient of the log-likelihood.
    for _ in range(num_iterations):
        predictions = sigmoid(features * coeffs)  # (m, 1) probabilities
        residual = targets - predictions          # prediction error
        coeffs = coeffs + learning_rate * features.transpose() * residual

    return coeffs


def LoadData():
    """Read the sample file and return a 70/30 train/test split.

    Each non-empty line of the file holds two feature values followed by
    a label; a bias term of 1.0 is prepended to every feature vector.

    Returns:
        X_train, X_test, y_train, y_test as produced by
        sklearn.model_selection.train_test_split (random_state=42 for
        reproducibility).
    """
    dataMatIn = []
    classLabels = []
    # 'with' guarantees the file handle is closed even if parsing raises.
    with open('C:/code/test.txt') as fr:
        for line in fr:
            lineArr = line.strip().split()
            if not lineArr:  # tolerate blank lines
                continue
            dataMatIn.append([1.0, float(lineArr[0]), float(lineArr[1])])
            classLabels.append(float(lineArr[-1]))
    X_train, X_test, y_train, y_test = train_test_split(
        dataMatIn, classLabels, test_size=0.3, random_state=42)
    return X_train, X_test, y_train, y_test


def poltImage(weights):
    """Scatter-plot the two classes and draw the fitted decision boundary.

    Reloads the sample file, splits points by label, then plots the line
    where w0 + w1*x1 + w2*x2 = 0.  The original version computed the
    boundary but never plotted or showed it; that is fixed here.

    Args:
        weights: (3, 1) weight vector (bias, w1, w2), e.g. from gradAscent.
    """
    import matplotlib.pyplot as plt
    dataMatIn = []
    classLabels = []
    with open('C:/code/test.txt') as fr:  # close the handle deterministically
        for line in fr:
            lineArr = line.strip().split()
            dataMatIn.append([1.0, float(lineArr[0]), float(lineArr[1])])
            classLabels.append(float(lineArr[-1]))
    dataArr = np.array(dataMatIn)
    n = np.shape(dataArr)[0]
    xcord1, ycord1 = [], []  # class-1 points
    xcord2, ycord2 = [], []  # class-0 points
    for i in range(n):
        if int(classLabels[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = np.arange(-3.0, 3.0, 0.1)
    # Decision boundary: 0 = w0 + w1*x + w2*y  =>  y = (-w0 - w1*x) / w2.
    # float() collapses 1-element matrix/ndarray rows to Python floats.
    w0, w1, w2 = (float(weights[i]) for i in range(3))
    y = (-w0 - w1 * x) / w2
    ax.plot(x, y)  # was computed but never drawn in the original
    plt.show()


def main():
    """Train on the 70% split and report per-sample predictions and accuracy."""
    X_train, X_test, y_train, y_test = LoadData()
    weights = gradAscent(X_train, y_train)
    # weights is an np.matrix, so '*' here is matrix multiplication:
    # (m, 3) x (3, 1) -> one raw score per test sample.
    predict = X_test * weights
    count = 0  # number of misclassified samples
    for i in range(len(y_test)):
        if sigmoid(predict[i]) > 0.5:
            print("预测值为%d实际值为%d" % (1, y_test[i]))
            if y_test[i] == 0:
                count += 1
        else:
            print("预测值为%d实际值为%d" % (0, y_test[i]))
            if y_test[i] == 1:
                count += 1
    # count holds errors, so 1 - count/len is the accuracy.
    print("正确为：%f" % (1 - count / len(y_test)))


# Guard so importing this module no longer trains/prints as a side effect.
if __name__ == "__main__":
    main()
