import numpy as np
import matplotlib.pyplot as plt

def model(x, theta):
    """Linear hypothesis: return the matrix-vector product ``x @ theta``."""
    return np.dot(x, theta)

def sigmoid(z):
    """Numerically stable logistic function 1 / (1 + exp(-z)).

    The naive form ``1/(1+np.exp(-z))`` overflows (RuntimeWarning) when z is a
    large negative number because ``exp(-z)`` explodes.  Evaluating via
    ``exp(-|z|)`` keeps the exponent non-positive, so the argument to ``exp``
    never overflows; the two branches of ``np.where`` are algebraically equal
    to the naive form.
    """
    e = np.exp(-np.abs(z))
    # z >= 0:  1 / (1 + exp(-z));   z < 0:  exp(z) / (1 + exp(z))
    return np.where(z >= 0, 1.0 / (1.0 + e), e / (1.0 + e))

def cost(h, y, lamda, theta):
    """Regularized cross-entropy cost for logistic regression.

    Parameters
    ----------
    h : ndarray
        Predicted probabilities, sigmoid(x @ theta), same length as ``y``.
    y : ndarray
        Binary labels (0/1).
    lamda : float
        L2 regularization strength.
    theta : ndarray
        Current parameter vector.

    Bug fix: the regularization term previously divided by ``len(x)``, where
    ``x`` was a *global* (the full dataset), not a parameter — so the
    regularization was mis-scaled when called on the training subset (and the
    function raised NameError when no global ``x`` existed).  Use ``len(y)``,
    the actual number of samples being scored.
    """
    m = len(y)
    reg = lamda / (2 * m) * np.sum(theta ** 2)
    return -np.sum(y * np.log(h) + (1 - y) * np.log(1 - h)) / m + reg

def grad(x, y, lamda, alpha=0.01, iter0=2000):
    """Batch gradient descent for L2-regularized logistic regression.

    Parameters
    ----------
    x : ndarray, shape (m, n)
        Design matrix (expected to include a bias column of ones).
    y : ndarray, shape (m,)
        Binary labels (0/1).
    lamda : float
        L2 regularization strength.
    alpha : float, optional
        Learning rate.
    iter0 : int, optional
        Number of gradient-descent iterations.

    Returns
    -------
    h : ndarray
        Predicted probabilities from the last iteration (computed before the
        final parameter update, matching the recorded ``J[-1]``).
    J : ndarray, shape (iter0,)
        Cost history, one entry per iteration.
    theta : ndarray, shape (n,)
        Fitted parameter vector.

    Bug fix: the regularization gradient was ``lamda/2 * theta``; the
    derivative of the cost's penalty ``lamda/(2m) * sum(theta**2)`` is
    ``(lamda/m) * theta``, so the term must be divided by m, not by 2.
    """
    m, n = x.shape
    theta = np.zeros(n)
    J = np.zeros(iter0)
    for i in range(iter0):
        h = sigmoid(model(x, theta))
        J[i] = cost(h, y, lamda, theta)
        # Gradient of the regularized cross-entropy w.r.t. theta.
        dt = (x.T.dot(h - y) + lamda * theta) / m
        theta -= alpha * dt
    return h, J, theta

def score(h, y):
    """Return classification accuracy: fraction of samples where the 0.5-
    thresholded prediction ``h > 0.5`` equals the label ``y``.

    Bug fix: the original compared ``y == [h > 0.5]``; wrapping the boolean
    array in a list produced a (1, n) array that only gave the right answer
    through accidental broadcasting.  Compare the arrays directly.
    """
    return np.mean((h > 0.5) == y)

if __name__ == '__main__':
    # Load the comma-separated dataset: features in every column but the
    # last, labels in the last column.
    data = np.loadtxt('data.txt', delimiter=',')
    x, y = data[:, :-1], data[:, -1]

    # Shuffle the samples reproducibly before splitting.
    np.random.seed(123)
    order = np.random.permutation(len(x))
    x, y = x[order], y[order]

    # Standardize each feature to zero mean and unit variance.
    mean = np.mean(x, axis=0)
    std = np.std(x, axis=0)
    x = (x - mean) / std

    # Prepend the intercept column of ones.
    X = np.c_[np.ones(len(x)), x]

    # 70/30 train/test split.
    split = int(0.7 * len(x))
    train_x, test_x = np.split(X, [split])
    train_y, test_y = np.split(y, [split])

    # Fit without (lamda=0) and with (lamda=3) L2 regularization and
    # compare the two learning curves.
    h0, J0, theta0 = grad(train_x, train_y, 0)
    h3, J3, theta3 = grad(train_x, train_y, 3)
    plt.plot(J0)
    plt.plot(J3)
    plt.show()

    # Accuracy of the unregularized model on the held-out set.
    test_h = sigmoid(model(test_x, theta0))
    print(score(test_h, test_y))

    # Scatter of the first two standardized features, colored by label.
    plt.scatter(x[:, 0], x[:, 1], c=y)
    plt.show()
