﻿import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np

# 1. Input data and target labels (the XOR truth table)

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])


# 2. Activation function and its derivative
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + e^-x).

    The argument is clipped to [-500, 500] before exponentiation so that
    np.exp never overflows for large-magnitude negative inputs; within
    float64 precision the returned values are unchanged (sigmoid already
    saturates to 0/1 far before |x| = 500).
    """
    return 1 / (1 + np.exp(-np.clip(x, -500, 500)))


def sigmoid_derivative(fx):
    """Sigmoid derivative expressed via its output: given fx = sigmoid(x),
    returns sigmoid'(x) = fx * (1 - fx)."""
    complement = 1 - fx
    return fx * complement


# 3. Initialize parameters

# Fix the random seed so the weight initialization (and hence the whole
# training run) is reproducible
np.random.seed(42)

# Network architecture: 2 inputs -> 2 hidden units -> 1 output
input_layer_neurons = 2
hidden_layer_neurons = 2
output_neurons = 1

# Weights and biases drawn uniformly from [0, 1); these module-level
# arrays are updated in place by backward_propagation
W_hidden = np.random.uniform(size=(input_layer_neurons, hidden_layer_neurons))
b_hidden = np.random.uniform(size=(1, hidden_layer_neurons))
W_output = np.random.uniform(size=(hidden_layer_neurons, output_neurons))
b_output = np.random.uniform(size=(1, output_neurons))


# 4. Forward pass
def forward_propagation(X):
    """Run one forward pass through the 2-2-1 network.

    Reads the module-level parameters W_hidden, b_hidden, W_output,
    b_output. Returns (hidden_output, predicted_output), both produced
    by an affine transform followed by the sigmoid activation.
    """
    # Hidden layer: X @ W + b, squashed through the sigmoid.
    hidden_output = sigmoid(X @ W_hidden + b_hidden)

    # Output layer: same affine+sigmoid pattern on the hidden activations.
    predicted_output = sigmoid(hidden_output @ W_output + b_output)

    return hidden_output, predicted_output

def backward_propagation(X, y, hidden_output, predicted_output, learning_rate):
    """Backpropagate the prediction error and update the parameters in place.

    Mutates the module-level arrays W_hidden, b_hidden, W_output and
    b_output using full-batch gradient steps scaled by learning_rate.
    """
    global W_hidden, b_hidden, W_output, b_output

    # Output-layer delta: raw error scaled by the sigmoid slope at the output.
    delta_out = (y - predicted_output) * sigmoid_derivative(predicted_output)

    # Hidden-layer delta: propagate delta_out back through W_output
    # (computed before W_output is modified below).
    delta_hidden = delta_out.dot(W_output.T) * sigmoid_derivative(hidden_output)

    # Output-layer parameter updates, accumulated over the whole batch.
    W_output += learning_rate * hidden_output.T.dot(delta_out)
    b_output += learning_rate * delta_out.sum(axis=0, keepdims=True)

    # Hidden-layer parameter updates.
    W_hidden += learning_rate * X.T.dot(delta_hidden)
    b_hidden += learning_rate * delta_hidden.sum(axis=0, keepdims=True)



# 6. Training process

# Learning rate and number of full-batch epochs
learning_rate = 0.1
epochs = 10000

# Train the model: forward pass, then in-place parameter update
for epoch in range(epochs):
    # Forward pass over all four XOR samples
    hidden_output, predicted_output = forward_propagation(X)

    # Backward pass: updates the module-level weights/biases in place
    backward_propagation(X, y, hidden_output, predicted_output, learning_rate)

    # Report the mean-squared-error loss every 1000 epochs
    if epoch % 1000 == 0:
        loss = np.mean((y - predicted_output) ** 2)
        print(f"Epoch {epoch}, Loss: {loss:.4f}")

# Evaluate: round the final sigmoid outputs to hard 0/1 predictions
print("Predicted outputs after training:")
print(predicted_output.round())

# # Plotting (disabled draft): animated decision-boundary visualization for a torch model
# plt.ion()
# num_epochs = 4000
# for epoch in range(num_epochs):

#     # model.train()

#     # 前向传播
#     outputs = model(X_train_tensor)
#     loss = criterion(outputs, y_train_tensor)

#     # 反向传播和优化
#     optimizer.zero_grad()
#     loss.backward()
#     optimizer.step()

#     if epoch % 10 == 0:
#         plt.cla()
#         plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="red", label="Class 0")
#         plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="blue", label="Class 1")
#         plt.title("Logical Classification Dataset")
#         plt.xlabel("Feature 1")
#         plt.ylabel("Feature 2")
#         plt.legend()

#         # 获取模型的权重和偏置
#         [w, b] = model.linear.parameters()
#         w1, w2 = w[0][0].item(), w[0][1].item()
#         b = b.item()
#         # 计算决策边界
#         x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
#         x2_min, x2_max = -(w1 * x1_min + b) / w2, -(w1 * x1_max + b) / w2
#         # 绘制拟合直线
#         plt.plot([x1_min, x1_max], [x2_min, x2_max], "green", label="Decision Boundary")
#         # mod_x = X_train_tensor.squeeze().data.numpy()
#         # mod_y = -(w1 * mod_x + b) / w2
#         # plt.plot(mod_x, mod_y, "green", label="Decision Boundary")
#         plt.text(
#             1,
#             -2,
#             "epoch=%d,loss=%.4f, k=%.2f, b=%.2f"
#             % (epoch, loss.item(), -(w1 / w2), -(b / w2)),
#         )
#         plt.pause(0.01)

# plt.ioff()
# plt.show()
