import numpy as np
import matplotlib.pyplot as plt


def conv2d(input, kernel, bias):
    """Valid (no padding, stride-1) 2-D cross-correlation plus a scalar bias.

    NOTE(review): the parameter name ``input`` shadows the builtin; it is
    kept unchanged so existing keyword callers keep working.

    Parameters
    ----------
    input : np.ndarray of shape (h, w)
        2-D input map.
    kernel : np.ndarray of shape (kh, kw)
        Filter; must fit inside ``input`` in both dimensions.
    bias : scalar
        Added to every output element.

    Returns
    -------
    np.ndarray of shape (h - kh + 1, w - kw + 1)

    Raises
    ------
    ValueError
        If the kernel is larger than the input in either dimension
        (previously this surfaced as an obscure negative-dimension error
        from ``np.zeros``).
    """
    h, w = input.shape
    kh, kw = kernel.shape
    if kh > h or kw > w:
        raise ValueError(
            f"kernel shape {kernel.shape} does not fit in input shape {input.shape}"
        )
    out = np.zeros((h - kh + 1, w - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            # Elementwise product of the current window with the kernel, summed.
            out[i, j] = np.sum(input[i:i + kh, j:j + kw] * kernel) + bias
    return out


def mse(y, t):
    """Mean squared error between prediction ``y`` and target ``t``."""
    diff = y - t
    return np.mean(diff * diff)


def mse_grad(y, t):
    """Gradient of :func:`mse` w.r.t. ``y``: dL/dy = 2 (y - t) / N."""
    return 2 * (y - t) / y.size


# ======================
# Data
# ======================
# 3x3 input grid holding the integers 1..9.
X = np.arange(1, 10).reshape(3, 3)

# 2x2 target map: every entry is 10.
T = np.full((2, 2), 10)

# ======================
# Weights + biases
# ======================
np.random.seed(42)  # reproducible initialisation
W1 = np.random.randn(2, 2) * 0.01  # layer-1 kernel: 2x2 conv
b1 = 0.0
W2 = np.random.randn(1, 1) * 0.01  # layer-2 kernel: 1x1 conv
b2 = 0.0

# Learning rate for plain SGD.
eta = 0.002

# ======================
# Training
# ======================
loss_history = []

# Fixed off-by-one: range(1, 500) ran only 499 epochs; run the full 500.
for epoch in range(1, 501):
    # ---- Forward pass ----
    Y1 = conv2d(X, W1, b1)   # 3x3 -> 2x2
    Y2 = conv2d(Y1, W2, b2)  # 2x2 -> 2x2 (1x1 kernel)

    # ---- Loss ----
    L = mse(Y2, T)
    loss_history.append(L)

    if epoch % 50 == 0 or epoch <= 5:
        print(f"Epoch {epoch} Loss {L:.6f}")
        print("前向输出 Y2:\n", Y2)

    # ---- Backward pass ----
    # dL/dY2 from the MSE loss.
    G2 = mse_grad(Y2, T)  # 2x2

    # dL/dW2: correlate the layer-2 input Y1 with the upstream gradient G2.
    grad_W2 = np.zeros_like(W2)
    grad_b2 = np.sum(G2)  # b2 feeds every output element, so sum the gradient
    for m in range(W2.shape[0]):
        for n in range(W2.shape[1]):
            s = 0
            for i in range(G2.shape[0]):
                for j in range(G2.shape[1]):
                    if i + m < Y1.shape[0] and j + n < Y1.shape[1]:
                        s += G2[i, j] * Y1[i + m, j + n]
            grad_W2[m, n] = s

    # dL/dY1: scatter each upstream gradient G2[i, j] back over the window
    # of Y1 it was produced from, weighted by W2 ("full" correlation).
    dY1 = np.zeros_like(Y1)
    for i in range(G2.shape[0]):
        for j in range(G2.shape[1]):
            for m in range(W2.shape[0]):
                for n in range(W2.shape[1]):
                    if i + m < dY1.shape[0] and j + n < dY1.shape[1]:
                        dY1[i + m, j + n] += G2[i, j] * W2[m, n]

    # dL/dW1: correlate the network input X with dY1.
    grad_W1 = np.zeros_like(W1)
    grad_b1 = np.sum(dY1)  # b1 feeds every element of Y1
    for m in range(W1.shape[0]):
        for n in range(W1.shape[1]):
            s = 0
            for i in range(dY1.shape[0]):
                for j in range(dY1.shape[1]):
                    if i + m < X.shape[0] and j + n < X.shape[1]:
                        s += dY1[i, j] * X[i + m, j + n]
            grad_W1[m, n] = s

    # ---- Vanilla SGD update ----
    W1 -= eta * grad_W1
    b1 -= eta * grad_b1
    W2 -= eta * grad_W2
    b2 -= eta * grad_b2

# ======================
# Loss curve
# ======================
plt.plot(loss_history)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Multi-layer CNN (2x2 output) + Bias + SGD")
plt.grid()
plt.show()

# ======================
# Final results
# ======================
final_Y1 = conv2d(X, W1, b1)
final_Y2 = conv2d(final_Y1, W2, b2)
# Fixed labels: these are the parameters AFTER training finished — the
# original text said "前" (before), which contradicted what is printed.
print("✅ 训练完成后的输出 W1:\n", W1)
print("✅ 训练完成后的输出 B1:\n", b1)
print("✅ 训练完成后的输出 W2:\n", W2)
print("✅ 训练完成后的输出 B2:\n", b2)
print("✅ 最终训练完成后的输出 Y1:\n", final_Y1)
print("✅ 最终训练完成后的输出 Y2:\n", final_Y2)

# Parameter visualisation: lay all trained parameters along one x-axis
# in the order W1 (4 values), b1, W2 (1 value), b2.
w1_flat = W1.flatten()
w2_flat = W2.flatten()

plt.figure()
plt.plot(range(len(w1_flat)), w1_flat, marker='o', label="W1 weights")
plt.plot(range(len(w1_flat), len(w1_flat)+1), [b1], marker='x', markersize=10, label="b1")

plt.plot(range(len(w1_flat)+1, len(w1_flat)+1+len(w2_flat)), w2_flat, marker='s', label="W2 weights")
plt.plot(range(len(w1_flat)+1+len(w2_flat), len(w1_flat)+2+len(w2_flat)), [b2], marker='x', markersize=10, label="b2")

plt.xlabel("Parameter Index")
plt.ylabel("Parameter Value")
plt.title("Trained Parameter Linear Plot")
plt.legend()
plt.grid()
plt.show()
