import numpy as np
import matplotlib.pyplot as plt


# Activation functions
def sigmoid(x):
    """Logistic sigmoid, 1 / (1 + e^-x), applied element-wise."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)


def relu(x):
    """Rectified linear unit: clamp negative values to zero, element-wise."""
    return np.maximum(x, 0)


def linear(x):
    """Identity activation: pass the input through unchanged."""
    out = x
    return out


# Derivatives of the activation functions
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of its OUTPUT.

    NOTE: `x` here is sigmoid(z), not the pre-activation z — the
    backward pass calls this on the stored layer activations.
    """
    one_minus = 1 - x
    return x * one_minus


def relu_derivative(x):
    """Derivative of ReLU: 1 where the input is positive, else 0."""
    positive = x > 0
    return np.where(positive, 1, 0)


def linear_derivative(x):
    """Derivative of the identity activation: an array of ones shaped like x."""
    ones = np.ones_like(x)
    return ones


# Lookup table mapping an activation name to its (function, derivative) pair
ACTIVATION_FUNCTIONS = dict(
    sigmoid=(sigmoid, sigmoid_derivative),
    relu=(relu, relu_derivative),
    linear=(linear, linear_derivative),
)


class LinearNetwork:
    def __init__(self, input_size, hidden_size, output_size, activation='relu'):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.weights1 = np.random.randn(input_size, hidden_size)
        self.bias1 = np.zeros(hidden_size)
        self.weights2 = np.random.randn(hidden_size, output_size)
        self.bias2 = np.zeros(output_size)

        # 根据指定的激活函数类型获取对应的激活函数和其导数
        activation_func, activation_derivative_func = ACTIVATION_FUNCTIONS[activation]
        self.activation = activation_func
        self.activation_derivative = activation_derivative_func

    def forward(self, X):
        self.layer1_output = self.activation(np.dot(X, self.weights1) + self.bias1)
        self.output = self.activation(np.dot(self.layer1_output, self.weights2) + self.bias2)
        return self.output

    def backward(self, X, y, learning_rate=0.01):
        m = len(y)
        delta2 = (self.output - y) / m
        dw2 = np.dot(self.layer1_output.T, delta2)
        db2 = np.sum(delta2, axis=0)
        delta1 = np.dot(delta2, self.weights2.T) * self.activation_derivative(self.layer1_output)
        dw1 = np.dot(X.T, delta1)
        db1 = np.sum(delta1, axis=0)

        self.weights2 -= learning_rate * dw2
        self.bias2 -= learning_rate * db2
        self.weights1 -= learning_rate * dw1
        self.bias1 -= learning_rate * db1

    def fit(self, X, y, epochs=100, batch_size=32):
        for _ in range(epochs):
            for i in range(0, len(X), batch_size):
                X_batch = X[i:i + batch_size]
                y_batch = y[i:i + batch_size]
                self.forward(X_batch)
                self.backward(X_batch, y_batch)

    def loss(self, y_true, y_pred):
        return np.mean((y_true - y_pred) ** 2)

    def predict(self, X):
        return self.forward(X)

# Build the dataset: a noisy quadratic y = x^2 + noise
def generate_data(n_samples, noise_std=1, x_min=-5.0, x_max=5.0):
    """Generate a noisy quadratic regression dataset.

    Args:
        n_samples: number of evenly spaced sample points.
        noise_std: standard deviation of the Gaussian noise added to y.
        x_min, x_max: range of the inputs (defaults preserve the previous
            hard-coded [-5, 5] interval).

    Returns:
        Tuple (x, y) of shape (n_samples, 1) each, with y = x**2 + noise.
    """
    x = np.linspace(x_min, x_max, n_samples).reshape(-1, 1)
    y = x ** 2 + np.random.normal(scale=noise_std, size=(n_samples, 1))
    return x, y

# Sample 100 points from the noisy quadratic.
X, y = generate_data(100)

# Train a small ReLU network on the dataset.
model = LinearNetwork(input_size=1, hidden_size=10, output_size=1, activation='relu')
model.fit(X, y, epochs=1000, batch_size=32)

# Evaluate: final MSE on the training data (forward pass is deterministic,
# so predicting once and reusing the result matches predicting twice).
predictions = model.predict(X)
loss = model.loss(y, predictions)

# Plot data vs. fitted curve, with the loss in the title.
plt.scatter(X, y, label='Data')
plt.plot(X, predictions, label='Predicted', color='r')
plt.title(f'Loss: {loss:.3f}')
plt.legend()
plt.show()