import numpy as np
# 1. 基础函数实现
def linear_layer(x, w, b):
    """Affine transform: return x·w + b (a fully-connected layer with no activation)."""
    return x @ w + b
def relu(x):
    """Element-wise rectified linear unit: negative entries are clamped to 0."""
    # np.maximum broadcasts the scalar 0 against x of any shape.
    return np.maximum(x, 0)
def flatten(x):
    """Collapse all dimensions after the first, keeping the batch axis intact."""
    return x.reshape(len(x), -1)
# 2. Prepare experiment data: five scalar inputs and fixed 1x1 weights for two layers.
x = np.array([[-2], [-1], [0], [1], [2]])
w1, b1 = np.array([[2]]), np.array([-1])       # layer 1 computes y = 2x - 1
w2, b2 = np.array([[-1]]), np.array([0.5])     # layer 2 computes y = -x + 0.5
print("输入数据x:")
print(x)
print("\n网络参数:")
print(f"w1: {w1},b1: {b1}")
print(f"w2: {w2},b2: {b2}")

# 3. Network A: two linear layers composed directly — the result is still linear.
print("\n=== 纯线性网络A ===")
hidden_a = linear_layer(x, w1, b1)
output_a = linear_layer(hidden_a, w2, b2)
print("第一层输出:", hidden_a.flatten())
print("最终输出A:", output_a.flatten())

# 4. Network B: same layers, but with a ReLU activation between them.
print("\n=== 引入非线性的网络B ===")
hidden_b = linear_layer(x, w1, b1)
print("ReLU前:", hidden_b.flatten())
activated = relu(hidden_b)
print("ReLU后:", activated.flatten())
output_b = linear_layer(activated, w2, b2)
print("最终输出B:", output_b.flatten())

# Side-by-side comparison: A stays a straight line, B is piecewise linear.
print("\n=== 结果对比 ===")
print("网络A输出:", output_a.flatten())
print("网络B输出:", output_b.flatten())
