#1.每个弱神经网络要得到充分训练   2.增加网路复杂度  3.权重缩放因子
import numpy
from matplotlib import pyplot
def load_data(seed=2020264):
    """Generate a two-class 2-D spiral toy dataset.

    Each class contributes 100 points along one spiral arm whose radius grows
    linearly and whose angle is jittered with Gaussian noise.

    Parameters
    ----------
    seed : int -- seed for numpy's global RNG, so output is reproducible.

    Returns
    -------
    x : ndarray, shape (200, 2) -- point coordinates.
    t : ndarray, shape (200, 2), int -- one-hot class labels.
    """
    numpy.random.seed(seed)
    samples_per_class = 100
    num_features = 2
    num_classes = 2
    total = samples_per_class * num_classes

    points = numpy.zeros((total, num_features))
    onehot = numpy.zeros((total, num_classes), dtype=int)

    for cls in range(num_classes):
        for k in range(samples_per_class):
            progress = k / samples_per_class
            r = 1.0 * progress  # radius grows from 0 to just under 1
            # Arm offset (4.0 * cls) plus sweep plus Gaussian jitter.
            angle = cls * 4.0 + 4.0 * progress + numpy.random.randn() * 0.2

            idx = samples_per_class * cls + k
            points[idx, 0] = r * numpy.sin(angle)
            points[idx, 1] = r * numpy.cos(angle)
            onehot[idx, cls] = 1

    return points, onehot
feature, label = load_data()
# Collapse one-hot labels into integer class ids (0 or 1).
label=numpy.argmax(label,axis=1)
# Pack features and label into one array; the last column is the class id.
data=numpy.hstack((feature,label.reshape((-1,1))))
# c=3
# feature=(feature-feature.min(axis=0))/(feature.max(axis=0)-feature.min(axis=0))
# m,n=feature.shape
# label=numpy.zeros((m,c))
# Visualize the two spiral arms, colored by class (blocks until closed).
pyplot.scatter(feature[:,0],feature[:,1],c=data[:,-1])
pyplot.show()
# label[list(range(m)),numpy.array(data[:,-1],dtype=int)]=1
# D=numpy.ones(m)/m
# weight=numpy.zeros((m,c))
# weight[list(range(m)),numpy.array(data[:,-1],dtype=int)]=1
# weight[weight!=1]=D[0]/2
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + exp(-x)).

    The naive form overflows inside exp(-x) for large negative x
    (RuntimeWarning for x below roughly -709); the tanh identity
    sigmoid(x) = 0.5 * (1 + tanh(x / 2)) is mathematically the same
    but never overflows.
    """
    return 0.5 * (1.0 + numpy.tanh(0.5 * x))
# def get_weaklearn(feature,label,p,D,w,b,eta,epoch):
#     weaklearner={}
#     for i in range(epoch):
#         temp=2*p*D.reshape(-1,1)*(sigmoid(numpy.dot(feature,w.T)+b)-label)*sigmoid(numpy.dot(feature,w.T)+b)*(1-sigmoid(numpy.dot(feature,w.T)+b))
#         dw=temp.T@feature
#         db=numpy.sum(temp,axis=0)
#         w=w-eta*dw
#         b=b-eta*db
#     h=sigmoid(numpy.dot(feature,w.T)+b)
#     weaklearner["w"]=w
#     weaklearner["b"]=b
#     return h,weaklearner
# epoch=1000#每个弱分类器的迭代次数,每个弱神经网络要得到充分训练
# weaklearners_num=2000#弱分类器的个数
# eta=0.1#学习率  学习率的设置非常重要
# aggregate_predict=numpy.empty((weaklearners_num,m,c))
# aggregate_beta=numpy.empty(weaklearners_num)
# weaklearners=[]
# w = numpy.ones((c, n))
# b = numpy.ones(c)
# for i in range(weaklearners_num):
#     D=weight.sum(axis=1,where=(weight!=1))/numpy.sum(weight.sum(axis=1,where=(weight!=1)))
#     p=weight.copy()
#     s=numpy.sum(p,axis=1,where=(p!=1))
#     p[p == 1] = s
#     p=p/s.reshape(-1,1)
#     h,weaklearner=get_weaklearn(feature, label, p, 1000*D, w.copy(),b.copy(), eta, epoch)
#     aggregate_predict[i]=h
#     #计算错误率
#     h[p==1]=-h[p==1]
#     Error=1/2*numpy.dot(D,(1+numpy.sum(p*h,axis=1)))
#     beta=Error/(1-Error)
#     #更新权重
#     tmp=1 - h
#     tmp=tmp-h[p==1].reshape(-1,1)
#     tmp[weight==1]=0
#     tmp=0.5*tmp
#     weight=weight*beta**tmp
#     aggregate_beta[i]=numpy.log(1/beta)
#     weaklearner["confidence"]=aggregate_beta[i]
#     #计算综合结果
#     f_res = numpy.empty((c, m))
#     for k in range(c):
#         f_res[k, :] = numpy.sum((aggregate_predict[0:i + 1, :,k]) * aggregate_beta[0:i + 1].reshape(-1, 1), axis=0)
#     accuracy = numpy.sum(numpy.argmax(f_res, axis=0) == data[:,-1]) / m
#     print(accuracy,Error)
#     weaklearners.append(weaklearner)
n=len(data)  # total sample count; NOTE: re-bound later by m,n=data[:,0:-1].shape
def regression(w,b,epoch,eta,p):
    """Train one weak learner (a single logistic unit) by gradient descent.

    Minimizes the p-weighted squared error between sigmoid(X @ w + b) and the
    binary labels stored in the last column of the module-level ``data`` array.

    Parameters
    ----------
    w : ndarray, shape (n_features,) -- initial weights (not modified in place).
    b : ndarray or float -- initial bias.
    epoch : int -- number of gradient-descent iterations.
    eta : float -- learning rate.
    p : ndarray, shape (n_samples,) -- per-sample weights (AdaBoost distribution,
        possibly rescaled by the caller).

    Returns
    -------
    h : ndarray, shape (n_samples,) -- sigmoid outputs of the trained unit.
    weaklearner : dict -- trained parameters {"w": ..., "b": ...}.
    """
    X = data[:, 0:-1]
    y = data[:, -1]
    weaklearner = {}
    for _ in range(epoch):
        # Hoisted: the original recomputed sigmoid(X @ w + b) three times per
        # iteration; computing it once is numerically identical and ~3x cheaper.
        s = sigmoid(numpy.dot(X, w) + b)
        # Gradient of the weighted squared error through the sigmoid:
        # d/dz [ p * (s - y)^2 ] with s = sigmoid(z), s' = s * (1 - s).
        temp = 2 * p * (s - y) * s * (1 - s)
        dw = numpy.dot(temp, X)
        db = numpy.sum(temp)
        w = w - eta * dw
        b = b - eta * db
    weaklearner["w"] = w
    weaklearner["b"] = b
    # Final predictions of the trained unit on the whole dataset.
    h = sigmoid(numpy.dot(X, w) + b)
    return h, weaklearner
# AdaBoost over weak logistic-unit learners (confidence-rated, real-valued
# predictions in [0, 1]); see the weight-update comment below.
w=numpy.random.randn(2)  # shared initial weights for every weak learner
b=numpy.random.randn(1)  # shared initial bias
weaklearners_num=2000# number of weak learners in the ensemble
epoch=2000# gradient-descent iterations per weak learner (train each one well)
eta=0.1
#w,b=regression(w,b,1000,0.0001)
m,n=data[:,0:-1].shape
weight=numpy.ones((m))/m# initial sample weights: uniform 1/m
aggregate_predict=numpy.empty((weaklearners_num,m))
aggregate_beta=numpy.empty(weaklearners_num)
weak_learners=[]
for i in range(weaklearners_num):
    p=weight/numpy.sum(weight)# normalize weights into a probability distribution
    # p is scaled by 1000 before training — a hand-tuned factor that amplifies
    # the gradient so each weak learner fits its weighted samples hard enough.
    h,weaklearner=regression(w, b,epoch,eta,p*1000)
    # Weighted absolute error of this learner's real-valued predictions.
    Error=numpy.dot(p,numpy.abs(h-data[:,-1]))
    aggregate_predict[i,:]=h# record this weak learner's predictions
    beta=Error/(1-Error)
    aggregate_beta[i]=numpy.log(1/beta)# learner confidence: low error => large vote
    # Weight update: correct samples (|h - y| near 0) are multiplied by ~beta < 1
    # and shrink; misclassified samples keep larger weight for the next round.
    weight=weight*beta**(1-numpy.abs(h-data[:,-1]))# note the update direction
    # Ensemble decision: weighted vote of all learners so far vs. half the
    # total confidence mass (threshold for predicting class 1).
    res=numpy.where(numpy.sum(aggregate_predict[0:i+1,:]*aggregate_beta[0:i+1].reshape(-1,1),axis=0)>=(0.5*numpy.sum(aggregate_beta[0:i+1])),1,0)
    weak_learners.append(weaklearner)
    print(numpy.sum(res==data[:,-1])/m,Error)