import numpy as np


# 定义神经网络的激活函数（这里使用Sigmoid函数）
def sigmoid(x):
    """Logistic activation: squashes any real input into the open interval (0, 1).

    Works element-wise on NumPy arrays as well as on scalars.
    """
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)

    
# Training inputs: all four rows of the 2-bit truth table.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

# Target outputs for the AND gate: only the [1, 1] row maps to 1.
y = np.array([[0], [0], [0], [1]])

# Initialize weights and bias; the fixed seed makes training reproducible.
# NOTE: the two random() calls must stay in this order after seed(0),
# otherwise the initial parameters (and the learned result) change.
np.random.seed(0)
weights = 2 * np.random.random((2, 1)) - 1  # uniform in [-1, 1), shape (2, 1)
bias = np.random.random()  # uniform in [0, 1)

# Training hyperparameters: number of full-batch passes and step size.
epochs = 10000
learning_rate = 0.1

# 训练神经网络
# Train the single-layer perceptron with full-batch gradient descent.
for _ in range(epochs):
    # Forward pass: predictions for the whole batch at once.
    activations = sigmoid(X @ weights + bias)

    # Error term scaled by the sigmoid derivative a * (1 - a).
    delta = (y - activations) * (activations * (1 - activations))

    # Gradient step on the weights and the (scalar) bias.
    weights += learning_rate * (X.T @ delta)
    bias += learning_rate * delta.sum()

# Run the trained model on an input sample and report its output.
new_data = np.array([[1, 1]])
logit = new_data @ weights + bias
prediction = sigmoid(logit)
print(f"预测结果: {prediction}")
