import numpy as np


def read1():  # load the training set
    """Return the hard-coded training samples and their {-1, 1} labels.

    Returns:
        (X, y): X is an (12, 2) ndarray of samples, y an (12,) ndarray
        of labels in {-1, 1}.
    """
    samples = [
        [1, 0], [1, 1], [1, 1], [1, 0], [1, 0],
        [2, 0], [2, 1], [2, 1], [2, 2], [2, 2],
        [3, 2], [3, 1],
    ]
    labels = [-1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1]
    return np.array(samples), np.array(labels)

def logistic(w, X):  # binary classification
    """Classify sample X with weight vector w.

    Returns 1 when sigmoid(w . X) > 0.5 (i.e. w . X > 0), else -1.
    """
    score = np.dot(w, X)
    prob = np.exp(score) / (np.exp(score) + 1)
    return 1 if prob > 0.5 else -1

def loss(X, y, w):  # maximum-likelihood objective
    """Return the log-likelihood of the logistic model (higher is better).

    Parameters:
        X: (n, d) ndarray of samples.
        y: (n,) ndarray of labels in {-1, 1}.
        w: (d,) weight vector.

    Fixes vs. original: the log term used np.exp(X[i]) instead of
    np.exp(wx), and the accumulated value was never returned.
    """
    L = 0.0
    for i in range(X.shape[0]):
        wx = np.dot(w, X[i])
        # The {0,1}-target log-likelihood t*wx - log(1+exp(wx)) requires
        # mapping this file's {-1, 1} labels to {0, 1}.
        t = (y[i] + 1) / 2
        L += t * wx - np.log(1 + np.exp(wx))
    return L



def train(X, y, iter=200):
    """Fit logistic-regression weights by per-sample gradient ascent.

    Parameters:
        X: (n, d) ndarray of training samples.
        y: (n,) ndarray of labels in {-1, 1}.
        iter: number of full passes over the training set (default 200).

    Returns:
        (d,) ndarray of learned weights (no bias term is added).

    Fixes vs. original:
      * The loop doing np.hstack((X[i], 1)) discarded its result —
        hstack returns a new array, so no bias column was ever added.
        The dead code is removed; behavior of the data is unchanged.
      * The update used y[j] in {-1, 1} directly in the {0, 1}-target
        log-likelihood gradient, giving a wrong gradient for y[j] == -1.
        The label is now mapped to t = (y+1)/2 in {0, 1}.
    """
    w = np.zeros(X.shape[1])
    h = 0.001  # learning rate
    for _ in range(iter):
        for j in range(X.shape[0]):
            wx = np.dot(w, X[j])
            p = np.exp(wx) / (1 + np.exp(wx))  # sigmoid(wx)
            t = (y[j] + 1) / 2  # map {-1, 1} label to {0, 1} target
            # Gradient of the log-likelihood for one sample: (t - p) * x.
            w += h * (t - p) * X[j]
    return w


def test(X, y, w):  # evaluation
    """Print the classification accuracy of weights w on the set (X, y).

    Parameters:
        X: (n, d) ndarray of samples.
        y: (n,) ndarray of labels in {-1, 1}.
        w: (d,) weight vector.

    Fixes vs. original:
      * A loop doing np.hstack((X[i], [1])) discarded its result —
        dead code, removed.
      * The accuracy was printed with "%d", which truncates any
        fractional value to 0; changed to "%f".
    """
    error = 0
    for i in range(X.shape[0]):
        if y[i] != logistic(w, X[i]):
            error += 1
    tr = 1 - error / X.shape[0]
    print("正确率:%f" % tr)


if __name__ == "__main__":
    # Train on the built-in data set, then evaluate on a small hold-out set.
    train_X, train_y = read1()
    weights = train(train_X, train_y)

    eval_X = np.array([[3, 1], [3, 2], [3, 2]])
    eval_y = np.array([1, 1, -1])
    test(eval_X, eval_y, weights)
