import numpy as np
import matplotlib.pyplot as plt

# --- Problem setup ---------------------------------------------------
# Input feature map: the 3x3 matrix [[1,2,3],[4,5,6],[7,8,9]].
X = np.arange(1, 10).reshape(3, 3)

# Target: every element of the 2x2 convolution output should reach 10.
T = np.full((2, 2), 10)

# Learnable parameters: a 2x2 kernel (float dtype so the in-place SGD
# update `W -= ...` works) plus a scalar bias.
W = np.array([[1, -1],
              [0, 2]], dtype=float)
b = 0.0

# Learning rate for gradient descent.
eta = 0.001

# 2D "valid" convolution (cross-correlation, as used in deep learning)
def conv2d(input, kernel, bias):
    """Valid 2D cross-correlation ("convolution" in the deep-learning sense).

    Slides `kernel` over `input` with stride 1 and no padding, summing the
    elementwise products of each window with the kernel, plus `bias`.

    Parameters
    ----------
    input : (H, W) array
    kernel : (kh, kw) array, with kh <= H and kw <= W
    bias : scalar added to every output element

    Returns
    -------
    (H - kh + 1, W - kw + 1) float64 array
    """
    kh, kw = kernel.shape
    # All (kh, kw) patches as a zero-copy view of shape (oh, ow, kh, kw);
    # einsum contracts each patch with the kernel in one C-level pass,
    # replacing the O(oh*ow*kh*kw) nested Python loops.
    windows = np.lib.stride_tricks.sliding_window_view(input, (kh, kw))
    out = np.einsum("ijkl,kl->ij", windows, kernel)
    # Cast to float64 to match the original np.zeros-based accumulator,
    # which returned float even for all-integer inputs.
    return out.astype(float) + bias

def mse(y, t):
    """Mean squared error between prediction `y` and target `t`."""
    err = y - t
    return np.mean(err * err)

def mse_grad(y, t):
    """Gradient of `mse` w.r.t. `y`: d/dy mean((y-t)^2) = 2*(y-t)/N."""
    return (y - t) * (2.0 / y.size)

loss_history = []

for epoch in range(1, 501):
    # Forward pass: convolve input with current kernel + bias.
    Y = conv2d(X, W, b)

    # Loss against the target map.
    L = mse(Y, T)
    loss_history.append(L)

    if epoch % 50 == 0 or epoch <= 5:
        print(f"Epoch {epoch} Loss {L:.4f}")

    # Backward pass: upstream gradient dL/dY.
    G = mse_grad(Y, T)

    # dL/dW[m, n] = sum_{i,j} G[i, j] * X[i + m, j + n] — which is exactly
    # a valid cross-correlation of X with G, so reuse conv2d instead of
    # four nested Python loops.
    grad_W = conv2d(X, G, 0.0)

    # dL/db: the bias is added to every output element, so its gradient
    # is the sum of the upstream gradient.
    grad_b = np.sum(G)

    # Plain gradient-descent update.
    W -= eta * grad_W
    b -= eta * grad_b

# Visualize training: the loss curve should decay toward zero.
plt.plot(loss_history)
plt.title("Loss with Bias + SGD")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.grid(True)
plt.show()
