import math
import numpy as np

# The four input points of the XOR problem and their target labels.
x = np.array([[0,0], [0,1], [1,0], [1,1]])
y = np.array([[0], [1], [1], [0]])

hidden_num = 10 # number of hidden-layer neurons
p = np.random.rand(hidden_num, 1) # Gaussian radial-basis activations, recomputed per sample
beta = np.random.rand(hidden_num, 1) # learnable RBF width parameters (beta in formula 5.19)
c = np.random.rand(hidden_num, 2) # RBF centers; NOTE(review): never updated during training — confirm random-center RBF is intended
output = np.random.rand(1, 1) # output-layer value, overwritten on every forward pass
weight = np.random.rand(hidden_num, 1) # hidden-to-output weights

old_loss = 0.0 # loss of the previous iteration
step = 0 # iteration (epoch) counter
stable_time = 0 # consecutive iterations with (nearly) unchanged loss
learning_rate = 0.5
# Batch gradient-descent training of the RBF network (formulas 5.18/5.19).
# NOTE(review): only beta and the output weights are learned; the centers `c`
# stay at their random initialization — confirm that is intended.
while True:
    step += 1
    cur_loss = 0.0 # accumulated squared error over the current epoch

    # Batch accumulators for the parameter updates.
    dw = np.zeros([hidden_num, 1]) # gradient w.r.t. hidden-to-output weights
    db = np.zeros([hidden_num, 1]) # negative gradient direction for beta

    for i in range(len(x)):
        for j in range(hidden_num):
            # Hidden layer: Gaussian RBF activation of neuron j,
            # rho(x, c_j) = exp(-beta_j * ||x - c_j||^2)  -- formula (5.19)
            dist2 = sum((x[i, :] - c[j, :]) * (x[i, :] - c[j, :]))
            p[j, 0] = math.exp(-beta[j, 0] * dist2)

        # Output layer: linear combination of the activations -- formula (5.18)
        output[0, 0] = np.dot(p.T, weight)

        # Prediction error for this sample, computed once instead of per weight.
        err = output[0, 0] - y[i, 0]

        for j in range(hidden_num):
            dist2 = sum((x[i, :] - c[j, :]) * (x[i, :] - c[j, :]))
            dw[j, 0] += err * p[j, 0]
            db[j, 0] -= err * weight[j, 0] * dist2 * p[j, 0]

        # BUG FIX: use the scalar y[i, 0] (via err); the original added
        # (output - y[i])**2 with the array y[i], which silently turned
        # cur_loss (and the convergence test below) into a 1-element ndarray.
        cur_loss += err * err

    cur_loss = cur_loss / len(x) # mean squared error over the epoch

    # Gradient-descent parameter updates.
    beta -= learning_rate * db
    weight -= learning_rate * dw

    # Stop once the loss has stayed (approximately) unchanged for 10 epochs.
    if abs(cur_loss - old_loss) < 0.0001:
        stable_time += 1
        if stable_time == 10:
            break
    else:
        old_loss = cur_loss
        stable_time = 0

# Evaluate the trained network on the training set.
predict = []
for sample in x:
    # Hidden-layer activations for this sample (formula 5.19).
    for j in range(hidden_num):
        diff = sample - c[j, :]
        p[j, 0] = math.exp(-beta[j, 0] * sum(diff * diff))

    # Network output (formula 5.18); stored as a 1x1 array, as before.
    predict.append(np.dot(p.T, weight))

# Print each prediction next to its ground-truth label and input point.
print('predict\t\t\tlabels')
for i, pred in enumerate(predict):
    print(pred[0], '\t\t', y[i, 0], '\t', x[i])
