import numpy as np
import matplotlib.pyplot as plt
# Load the training (d1) and test (d2) data sets; last column is the label.
data1=np.loadtxt('d1.txt',delimiter=',')
data2=np.loadtxt('d2.txt',delimiter=',')
x1=data1[:,:-1]   # training features
x2=data2[:,:-1]   # test features
y1=data1[:,-1]    # training labels
y2=data2[:,-1]    # test labels
# Standardize features with the TRAINING set's mean/std.
# Fix: the test set was previously scaled with its own statistics, which
# mis-scales test features relative to the model learned on the training scale.
miu=np.mean(x1,axis=0)
sigma=np.std(x1,axis=0)
x1=(x1-miu)/sigma
x2=(x2-miu)/sigma
# Prepend a column of ones so theta[0] acts as the intercept.
X1=np.c_[np.ones(len(x1)),x1]
X2=np.c_[np.ones(len(x2)),x2]

def model(X, theta):
    """Linear predictor: the matrix product of X and theta."""
    return np.dot(X, theta)
# Implement the sigmoid function (plotted in __main__ below).
def sigmoid(z):
    """Logistic function: maps any real z (scalar or array) into (0, 1)."""
    ez = np.exp(-z)
    return 1.0 / (1.0 + ez)
# Cost function for logistic regression, with L2 regularization.
def costfunc(h,y,lamda,theta):
    """Regularized cross-entropy cost for logistic regression.

    h: predicted probabilities in (0, 1); y: 0/1 labels; lamda: L2 strength;
    theta: parameter vector whose first entry is the intercept.
    Fix: the L2 penalty now excludes the intercept theta[0], the standard
    convention for regularized logistic regression.
    """
    m = len(y)
    r1 = lamda/(2*m)*np.sum(theta[1:]**2)
    return -1/m*np.sum(y*np.log(h)+(1-y)*np.log(1-h))+r1
def grad(X,y,lamda,alpha=0.01,iter0=5000):
    """Fit regularized logistic regression by batch gradient descent.

    X: (m, n) design matrix with a leading ones column; y: 0/1 labels;
    lamda: L2 strength; alpha: learning rate; iter0: iteration count.
    Returns (h, J, theta): last predicted probabilities, the per-iteration
    cost history (fix: J was previously overwritten with a scalar each
    iteration instead of recorded at index i), and the learned parameters.
    The intercept theta[0] is excluded from the penalty (fix), matching
    the convention used in costfunc.
    """
    m,n=X.shape
    theta=np.zeros(n)
    J=np.zeros(iter0)
    for i in range(iter0):
        z=model(X,theta)
        h=sigmoid(z)
        J[i]=costfunc(h,y,lamda,theta)  # record cost history
        r2=lamda/m*theta
        r2[0]=0.0  # do not regularize the intercept
        theta-=alpha*(1/m*X.T.dot(h-y)+r2)
    return h,J,theta
def score(h, y):
    """Classification accuracy: fraction of labels y matched by thresholding
    the probabilities h at 0.5. (Fix: drop the accidental list-wrap
    `[h>0.5]`, which created a (1, m) array and relied on broadcasting.)"""
    return np.mean(y == (h > 0.5))
if __name__ == '__main__':
    # Plot the sigmoid curve over [-10, 10].
    grid = np.linspace(-10, 10, 100)
    plt.plot(grid, sigmoid(grid))
    plt.show()
    # Run gradient descent (lambda = 3) and print the cost values and parameters.
    h, J, theta = grad(X1, y1, 3)
    print(theta)
    print(J)
    # Predict on the test set with the learned model and print the accuracy.
    h2 = sigmoid(model(X2, theta))
    print(score(h2, y2))
    # Scatter plot of the 2nd and 3rd (standardized) features, colored by label.
    # NOTE(review): assumes x2 has at least 3 feature columns — confirm.
    plt.scatter(x2[:, 1], x2[:, 2], c=y2)
    plt.show()