import numpy
from matplotlib import pyplot
import pandas
# --- Data preparation ---
c = 3  # number of classes
data = pandas.read_table("./datingTestSet.txt", sep="\t", header=None, na_filter=False)
# encode the text labels as class indices 0..c-1
encoding = {'largeDoses': 0, 'smallDoses': 1, 'didntLike': 2}
data.iloc[:, -1] = data.iloc[:, -1].map(encoding)
data = pandas.DataFrame(data, dtype=float)
# keep the first two feature columns plus the encoded label (column 2 is dropped)
data = data.values[:, [0, 1, 3]]
feature = data[:, 0:-1]
# min-max scale every feature column into [0, 1]
col_min = feature.min(axis=0)
col_max = feature.max(axis=0)
feature = (feature - col_min) / (col_max - col_min)
m, n = feature.shape
# visualize the two scaled features colored by class
pyplot.scatter(feature[:, 0], feature[:, 1], c=data[:, -1])
pyplot.show()
# one-hot encode the labels
class_idx = numpy.array(data[:, -1], dtype=int)
label = numpy.zeros((m, c))
label[list(range(m)), class_idx] = 1
# initial example distribution D, and the (m, c) mislabel-weight table:
# the true-class entry is pinned to exactly 1 as a marker, every
# wrong-class entry starts at 1/(2m)
D = numpy.ones(m) / m
weight = numpy.full((m, c), D[0] / 2)
weight[list(range(m)), class_idx] = 1
def sigmoid(x):
    """Numerically stable logistic function 1/(1+exp(-x)), elementwise.

    The naive form overflows numpy.exp for large negative x (RuntimeWarning);
    writing it in terms of exp(-|x|), whose argument is always <= 0, yields the
    same values everywhere without overflow.
    """
    z = numpy.exp(-numpy.abs(x))
    # x >= 0:  1/(1+exp(-x));   x < 0:  exp(x)/(1+exp(x))  -- algebraically equal
    return numpy.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
def get_weaklearn(feature, label, p, D, w, b, eta, epoch):
    """Train one weak learner: a single-layer sigmoid network fitted by
    full-batch gradient descent on a weighted squared-error loss.

    Parameters
    ----------
    feature : (m, n) input matrix.
    label   : (m, c) one-hot targets.
    p       : (m, c) per-example/per-class loss weights.
    D       : (m,) per-example weights (the rescaled boosting distribution).
    w, b    : initial parameters of shapes (c, n) and (c,); `w - eta*dw`
              rebinds new arrays, so the caller's arrays are never mutated.
    eta     : learning rate.
    epoch   : number of gradient steps.

    Returns
    -------
    h : (m, c) sigmoid outputs of the trained network on `feature`.
    weaklearner : dict holding the trained "w" and "b".
    """
    for _ in range(epoch):
        # Forward pass, computed once per step (the original evaluated the
        # identical sigmoid expression three times per iteration).
        s = sigmoid(numpy.dot(feature, w.T) + b)
        # d/dz of sum(p * D * (s - label)^2), with s'(z) = s*(1-s).
        temp = 2 * p * D.reshape(-1, 1) * (s - label) * s * (1 - s)
        dw = temp.T @ feature
        db = numpy.sum(temp, axis=0)
        w = w - eta * dw
        b = b - eta * db
    h = sigmoid(numpy.dot(feature, w.T) + b)
    return h, {"w": w, "b": b}
epoch=1000# gradient steps per weak learner: each weak network must be trained thoroughly
weaklearners_num=20# number of boosting rounds / weak learners
eta=0.1# learning rate -- this setting matters a great deal
# per-round outputs h_t(x, y) for every example and class
aggregate_predict=numpy.empty((weaklearners_num,m,c))
# per-round hypothesis weights log(1/beta_t)
aggregate_beta=numpy.empty(weaklearners_num)
weaklearners=[]
# shared initial parameters; each round trains from fresh copies of these
w = numpy.ones((c, n))
b = numpy.ones(c)
# Boosting loop in the style of AdaBoost.M2 (Freund & Schapire): `weight`
# holds the mislabel weights, with each row's true-class entry pinned at
# exactly 1 and used as a mask marker throughout.
for i in range(weaklearners_num):
    # D: distribution over examples = normalized per-row sum of wrong-class weights
    D=weight.sum(axis=1,where=(weight!=1))/numpy.sum(weight.sum(axis=1,where=(weight!=1)))
    # p: per-row label weights q(i, y); the true-class marker entry is first set
    # to the row sum s, so after dividing by s it is exactly 1 again
    p=weight.copy()
    s=numpy.sum(p,axis=1,where=(p!=1))
    p[p == 1] = s
    p=p/s.reshape(-1,1)
    # train this round's weak learner (D rescaled by m to keep gradients sized)
    h,weaklearner=get_weaklearn(feature, label, p, m*D, w.copy(),b.copy(), eta, epoch)
    aggregate_predict[i]=h  # stores a copy; the in-place edits of h below do not affect it
    # Compute the pseudo-loss: negating the true-class prediction makes
    # sum(p*h, axis=1) = sum_{y!=y_i} q(i,y) h(x_i,y) - h(x_i,y_i), so
    # Error = 1/2 sum_i D_i (1 - h(x_i,y_i) + sum_{y!=y_i} q(i,y) h(x_i,y))
    h[p==1]=-h[p==1]
    Error=1/2*numpy.dot(D,(1+numpy.sum(p*h,axis=1)))
    beta=Error/(1-Error)
    # Weight update: weight(i,y) *= beta^{(1/2)(1 + h(x_i,y_i) - h(x_i,y))};
    # h[p==1] currently holds -h(x_i,y_i), hence subtracting it adds h(x_i,y_i) back
    tmp=1 - h
    tmp=tmp-h[p==1].reshape(-1,1)
    tmp[weight==1]=0  # exponent 0 keeps the true-class marker at exactly 1
    tmp=0.5*tmp
    weight=weight*beta**tmp
    aggregate_beta[i]=numpy.log(1/beta)
    weaklearner["confidence"]=aggregate_beta[i]
    # Combined prediction so far: argmax_y sum_t log(1/beta_t) h_t(x, y)
    f_res = numpy.empty((c, m))
    for k in range(c):
        f_res[k, :] = numpy.sum((aggregate_predict[0:i + 1, :,k]) * aggregate_beta[0:i + 1].reshape(-1, 1), axis=0)
    accuracy = numpy.sum(numpy.argmax(f_res, axis=0) == data[:,-1]) / m
    print(accuracy,Error)
    weaklearners.append(weaklearner)

