from sklearn.neighbors import KDTree
import numpy
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons

# Build the two-moons toy dataset: 500 points, mild noise, fixed seed
# so the run is reproducible.
data, y = make_moons(n_samples=500, noise=0.05, random_state=42)

# Quick visual sanity check of the raw points before clustering.
plt.scatter(*data.T)
plt.title("Moon-shaped Dataset")
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.show()
def DBSCAN(data, min_samples, r):
    """Cluster `data` with DBSCAN.

    Parameters
    ----------
    data : ndarray of shape (n_samples, n_features)
        Points to cluster.
    min_samples : int
        A point is a core point when its r-neighbourhood (itself included,
        as returned by KDTree.query_radius) holds at least this many points.
    r : float
        Neighbourhood radius (epsilon).

    Returns
    -------
    ndarray of shape (n_samples,), dtype int
        Cluster id per point; points never reached from a core point keep
        the value -1 and are noise/outliers.
    """
    tree = KDTree(data)
    # Radius query for every point at once; entry i is the array of
    # indices within distance r of point i (including i itself).
    neighborhoods = tree.query_radius(data, r)
    is_core = numpy.array([len(nb) >= min_samples for nb in neighborhoods])
    labels = -1 * numpy.ones(data.shape[0], dtype=int)  # -1 = not visited / noise
    cluster_id = 0
    stack = []       # depth-first expansion stack of the current cluster
    queued = set()   # mirrors `stack`; O(1) membership instead of the O(n) list scan
    for i in range(labels.shape[0]):
        # Seed a new cluster only from an unvisited core point.
        if labels[i] != -1 or not is_core[i]:
            continue
        idx = i
        while True:
            if labels[idx] == -1:
                labels[idx] = cluster_id
            # Core points push every still-unvisited, not-yet-queued
            # density-reachable neighbour; border points expand nothing.
            if is_core[idx]:
                for v in neighborhoods[idx]:
                    if labels[v] == -1 and v not in queued:
                        stack.append(v)
                        queued.add(v)
            if not stack:
                break
            idx = stack.pop()
            # Popped point gets labelled on the next loop turn, so it can
            # never be re-queued; keep the mirror set in sync.
            queued.discard(idx)
        cluster_id += 1
    return labels
# Run our DBSCAN (min_samples=4, radius 0.2) and colour the points by the
# cluster id it assigned.
old_label = DBSCAN(data, 4, 0.2)
plt.scatter(*data.T, c=old_label)
plt.ylabel("Feature 2")
plt.xlabel("Feature 1")
plt.title("Moon-shaped Dataset")
plt.show()
c = 2  # number of classes
m, n = data.shape
rows = numpy.arange(m)

# One-hot encode the DBSCAN labels: row i gets a 1 in column old_label[i].
label = numpy.zeros((m, c))
label[rows, old_label] = 1

# Uniform sample distribution to start boosting from.
D = numpy.ones(m) / m

# Per-(sample, class) weights: exactly 1 on the assigned class,
# (1/m)/(c-1) on every other class.
weight = numpy.zeros((m, c))
weight[rows, numpy.array(old_label, dtype=int)] = 1
weight[weight != 1] = D[0] / (c - 1)
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e**-x).

    The naive form overflows inside numpy.exp (RuntimeWarning, exp -> inf)
    for large negative x; splitting by sign ensures exp only ever sees a
    non-positive argument, while producing identical values.
    """
    x = numpy.asarray(x, dtype=float)
    out = numpy.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + numpy.exp(-x[pos]))
    neg = ~pos
    ex = numpy.exp(x[neg])
    out[neg] = ex / (1.0 + ex)
    # Preserve scalar-in / scalar-out behaviour of the original.
    if out.ndim == 0:
        return out[()]
    return out
def get_weaklearn(feature, label, p, D, w, b, eta, epoch):
    """Train one logistic-regression weak learner by full-batch gradient descent.

    Minimises the weighted squared error p * D * (sigmoid(feature @ w.T + b)
    - label)**2 (the update below is exactly its gradient).

    Parameters
    ----------
    feature : ndarray (m, n) — training inputs.
    label   : ndarray (m, c) — one-hot targets.
    p, D    : per-(sample, class) and per-sample weights entering the loss.
    w, b    : initial parameters, shapes (c, n) and (c,); not mutated in
              place (reassigned each step), so callers passing copies keep
              their originals.
    eta     : learning rate.
    epoch   : number of gradient steps.

    Returns
    -------
    (h, weaklearner) where h = sigmoid(feature @ w.T + b) for the final
    parameters, and weaklearner is {"w": w, "b": b}.
    """
    for _ in range(epoch):
        # Forward pass computed once per step; the original evaluated this
        # identical expression three times inside the gradient formula.
        s = sigmoid(numpy.dot(feature, w.T) + b)
        # d(loss)/d(pre-activation): 2 * p * D * (s - y) * s * (1 - s)
        grad_out = 2 * p * D.reshape(-1, 1) * (s - label) * s * (1 - s)
        dw = grad_out.T @ feature
        db = numpy.sum(grad_out, axis=0)
        w = w - eta * dw
        b = b - eta * db
    h = sigmoid(numpy.dot(feature, w.T) + b)
    weaklearner = {"w": w, "b": b}
    return h, weaklearner
# --- Boosting loop --------------------------------------------------------
# NOTE(review): this looks like AdaBoost.M2-style pseudo-loss boosting
# (Freund & Schapire) with the DBSCAN output used as ground truth —
# confirm against the intended derivation before trusting these comments.
epoch=1000# gradient-descent iterations per weak learner
weaklearners_num=200# number of boosting rounds / weak learners
eta=0.01# learning rate; its value strongly affects convergence
aggregate_predict=numpy.empty((weaklearners_num,m,c))# stored h_t outputs, one (m, c) slab per round
aggregate_beta=numpy.empty(weaklearners_num)# log(1/beta_t), the confidence of each round
weaklearners=[]# trained learners: dicts with "w", "b", "confidence"
w = numpy.random.rand(c, n)# shared random init; each round receives a copy
b = numpy.random.rand(c)
for i in range(weaklearners_num):
    # Sample distribution D: normalised sum of each row's wrong-class
    # weights (entries equal to 1 are the correct-class weights, which the
    # `where` mask excludes — they stay exactly 1 across rounds, see below).
    D=weight.sum(axis=1,where=(weight!=1))/numpy.sum(weight.sum(axis=1,where=(weight!=1)))
    # Per-sample label distribution p: wrong-class weights normalised by
    # the row sum s; the correct-class slot is set to s so it becomes 1
    # after the division.
    p=weight.copy()
    s=numpy.sum(p,axis=1,where=(p!=1))
    p[label == 1] = s
    p=p/s.reshape(-1,1)
    h,weaklearner=get_weaklearn(data, label, p, m*D, w.copy(),b.copy(), eta, epoch)
    aggregate_predict[i]=h# copies h's values; the in-place edit below does not touch the stored slab
    # Pseudo-loss: negate the correct-class score so that
    # Error = 1/2 * sum_i D_i * (1 - h(x_i, y_i) + sum_{y != y_i} p_iy * h(x_i, y)).
    h[label==1]=-h[label==1]
    Error=1/2*numpy.dot(D,(1+numpy.sum(p*h,axis=1)))
    beta=Error/(1-Error)
    # Weight update: exponent 0.5 * (1 + h(x_i, y_i) - h(x_i, y)) for wrong
    # classes; forced to 0 on the correct class so its weight stays exactly
    # 1 and the weight==1 masks above keep identifying it in later rounds.
    tmp=1 - h
    tmp=tmp-h[label==1].reshape(-1,1)# h[label==1] is already negated, so this adds the correct-class score
    tmp[weight==1]=0
    tmp=0.5*tmp
    weight=weight*beta**tmp
    aggregate_beta[i]=numpy.log(1/beta)
    weaklearner["confidence"]=aggregate_beta[i]
    # Ensemble prediction of rounds 0..i: per-class score is the
    # log(1/beta)-weighted sum of the stored weak-learner outputs.
    f_res = numpy.empty((c, m))
    for k in range(c):
        f_res[k, :] = numpy.sum((aggregate_predict[0:i + 1, :,k]) * aggregate_beta[0:i + 1].reshape(-1, 1), axis=0)
    accuracy = numpy.sum(numpy.argmax(f_res, axis=0) == old_label) / m
    print(accuracy,Error)
    weaklearners.append(weaklearner)

