import numpy as np
import matplotlib.pyplot as plt

# 3x3 input feature map (values 1..9, row-major).
X = np.arange(1, 10).reshape(3, 3)

# 2x2 target output: every element is 10.
T = np.full((2, 2), 10)

# Initial 2x2 convolution kernel (float dtype so in-place updates work).
W = np.array([
    [1.0, -1.0],
    [0.0, 2.0],
])

# Gradient-descent learning rate.
eta = 0.001

# 2D convolution (valid mode, stride 1)
def conv2d(input, kernel):
    """Valid-mode, stride-1 2D cross-correlation of `input` with `kernel`.

    NOTE(review): the parameter name `input` shadows the builtin; it is kept
    unchanged so existing keyword callers are not broken.

    Parameters
    ----------
    input : 2D array of shape (h, w).
    kernel : 2D array of shape (kh, kw); must fit inside `input`.

    Returns
    -------
    2D float array of shape (h - kh + 1, w - kw + 1) where
    out[i, j] = sum(input[i:i+kh, j:j+kw] * kernel).

    Raises
    ------
    ValueError
        If the kernel is larger than the input in either dimension
        (previously this surfaced as a cryptic negative-dimension error
        from np.zeros).
    """
    h, w = input.shape
    kh, kw = kernel.shape
    if kh > h or kw > w:
        raise ValueError(
            f"kernel shape {kernel.shape} does not fit inside input shape {input.shape}"
        )
    out = np.zeros((h - kh + 1, w - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            # Element-wise product of the sliding window with the kernel.
            window = input[i:i + kh, j:j + kw]
            out[i, j] = np.sum(window * kernel)
    return out

# Mean-squared-error loss
def mse(y, t):
    """Return the mean squared error between prediction `y` and target `t`."""
    err = y - t
    return np.mean(err * err)

# Gradient of the MSE loss with respect to the prediction
def mse_grad(y, t):
    """Return dL/dy for L = mean((y - t)**2), i.e. 2*(y - t)/N."""
    return 2 * (y - t) / y.size

# Per-epoch loss values, collected for the plot at the end of the script.
loss_history = []

# Gradient-descent training loop: 999 epochs (range(1, 1000)); the learning
# rate is decayed by a factor of 0.8 every 100 epochs.
# (The original comment claimed "7 rounds", which did not match the code.)
for epoch in range(1, 1000):
    if epoch % 100 == 0:
        eta *= 0.8
    print(f"\n=== 第{epoch}轮 ===")

    # Forward pass: convolve the input with the current kernel.
    Y = conv2d(X, W)
    print("前向输出 Y:\n", Y)

    # Scalar loss for this epoch.
    L = mse(Y, T)
    print("损失 L:", L)
    loss_history.append(L)

    # Raw prediction error, printed for inspection only.
    prediction_error = Y - T
    print("预测误差 (Y - T):\n", prediction_error)

    # Gradient of the loss with respect to the conv output.
    G = mse_grad(Y, T)
    print("输出梯度 G:\n", G)

    # Gradient w.r.t. the kernel: grad_W[m, n] = sum_{i,j} G[i, j] * X[i+m, j+n]
    # (accumulated in the same row-major order as the original loops so the
    # floating-point results are bit-identical).
    grad_W = np.zeros_like(W)
    for m, n in np.ndindex(*W.shape):
        acc = 0.0
        for i, j in np.ndindex(*G.shape):
            acc += G[i, j] * X[i + m, j + n]
        grad_W[m, n] = acc
    print("卷积核梯度 grad_W:\n", grad_W)

    # In-place gradient-descent step on the kernel.
    W -= eta * grad_W
    print("更新后的卷积核 W:\n", W)

# Plot the loss curve. The x-axis is derived from the recorded history
# instead of repeating the hard-coded epoch count, so the plot cannot fall
# out of sync with the training loop if the number of epochs changes.
plt.plot(range(1, len(loss_history) + 1), loss_history, marker='o')
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.title("Loss Curve over Epochs")
plt.grid()
plt.show()
