import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

# ---- Data loading ----
# The CSV has a header row; each row is: feature1, feature2, label.
data = np.loadtxt('../data/xor_dataset.csv', delimiter=',', skiprows=1)

# Split columns into features (N, 2) and labels (N, 1).
x_all, y_all = data[:, :2], data[:, 2:]

# Shuffle once with a fixed seed, then take the first 80% as the train set.
ratio = 0.8
split = int(len(x_all) * ratio)

np.random.seed(0)
idx = np.random.permutation(len(x_all))
train_idx, test_idx = idx[:split], idx[split:]
x_train, y_train = x_all[train_idx], y_all[train_idx]
x_test, y_test = x_all[test_idx], y_all[test_idx]


# 基类
class Layer:
    """Abstract base class for every layer in the network."""

    def forward(self, x):
        """Compute this layer's output for input ``x``. Must be overridden."""
        raise NotImplementedError

    def backward(self, grad):
        """Propagate the upstream gradient ``grad`` through this layer.

        Must be overridden; returns the gradient w.r.t. this layer's input.
        """
        raise NotImplementedError

    def update(self, learning_rate):
        """Apply one gradient step; no-op for parameter-free layers."""
        pass


# 线性层
# Fully connected (linear) layer
class Linear(Layer):
    """Fully connected layer computing y = x @ W (+ b).

    Shapes: W is (num_in, num_out), b is (1, num_out),
    input x is (batch_size, num_in), output y is (batch_size, num_out).
    """

    def __init__(self, num_in, num_out, use_bias=True):
        self.num_in = num_in
        self.num_out = num_out
        self.use_bias = use_bias

        # Small random weights keep early activations away from saturation;
        # biases start at zero.
        self.W = np.random.normal(loc=0, scale=0.1, size=(num_in, num_out))
        if use_bias:
            self.b = np.zeros((1, num_out))

    def forward(self, x):
        """Forward pass y = x @ W + b; caches x for the backward pass."""
        self.x = x
        self.y = x @ self.W
        if self.use_bias:
            self.y += self.b
        return self.y

    def backward(self, grad):
        """Accumulate parameter gradients and return the input gradient.

        ``grad`` is dJ/dy with shape (batch_size, num_out). Parameter
        gradients are SUMMED over the batch here; the single 1/batch_size
        normalization is applied where the loss gradient is formed.
        (Previously this method divided by batch_size as well, so the
        gradient was silently shrunk by an extra factor of batch_size.)
        """
        self.grad_W = self.x.T @ grad                          # (num_in, num_out)
        if self.use_bias:
            self.grad_b = np.sum(grad, axis=0, keepdims=True)  # (1, num_out)
        # Gradient w.r.t. the input, handed to the previous layer.
        return grad @ self.W.T

    def update(self, learning_rate):
        """Plain SGD step on W (and b when present)."""
        self.W -= learning_rate * self.grad_W
        if self.use_bias:
            self.b -= learning_rate * self.grad_b


class Identity(Layer):
    """Pass-through activation: output equals input, gradient is unchanged."""

    def forward(self, x):
        return x

    def backward(self, grad):
        return grad


class Sigmoid(Layer):
    """Logistic sigmoid activation: y = 1 / (1 + exp(-x))."""

    def forward(self, x):
        self.x = x
        self.y = 1.0 / (1.0 + np.exp(-x))
        return self.y

    def backward(self, grad):
        # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)); reuse cached output.
        return grad * self.y * (1.0 - self.y)


class Tanh(Layer):
    """Hyperbolic tangent activation."""

    def forward(self, x):
        self.x = x
        self.y = np.tanh(x)
        return self.y

    def backward(self, grad):
        # d/dx tanh(x) = 1 - tanh(x)^2; reuse the cached output.
        return grad * (1.0 - self.y ** 2)


class ReLU(Layer):
    """Rectified linear unit: y = max(0, x)."""

    def forward(self, x):
        self.x = x
        self.y = np.maximum(0, x)
        return self.y

    def backward(self, grad):
        # Gradient passes through where the input was non-negative
        # (the subgradient at exactly 0 is taken to be 1 here).
        return np.where(self.x >= 0, grad, 0.0)


# Maps a configuration string to its activation-layer class; MLP looks up
# and instantiates these by name.
activation_dict = {
    "identity": Identity,
    "sigmoid": Sigmoid,
    "tanh": Tanh,
    "relu": ReLU,
}


class MLP(Layer):
    """Multi-layer perceptron built from Linear + activation layers.

    ``layer_sizes`` lists every layer width, input first. Hidden layers use
    ``activation``; the final layer uses ``out_activation``.
    """

    def __init__(
            self,
            layer_sizes,
            use_bias=True,
            activation='relu',
            out_activation='identity'
    ):
        self.layers = []
        hidden_act = activation_dict[activation]
        # Hidden stack: a Linear layer followed by the hidden activation
        # for each consecutive (width_in, width_out) pair except the last.
        for n_in, n_out in zip(layer_sizes[:-2], layer_sizes[1:-1]):
            self.layers.append(Linear(n_in, n_out, use_bias=use_bias))
            self.layers.append(hidden_act())
        # Output layer gets its own activation.
        self.layers.append(Linear(layer_sizes[-2], layer_sizes[-1], use_bias=use_bias))
        self.layers.append(activation_dict[out_activation]())

    def forward(self, x):
        """Run ``x`` through every layer in order."""
        for layer in self.layers:
            x = layer.forward(x)
        return x

    def backward(self, grad):
        """Back-propagate ``grad`` through the layers in reverse order."""
        for layer in reversed(self.layers):
            grad = layer.backward(grad)
        return grad

    def update(self, learning_rate):
        """Let every layer take its own SGD step."""
        for layer in self.layers:
            layer.update(learning_rate)


# ---- Training hyper-parameters ----
num_epochs = 2000
learning_rate = 0.1
batch_size = 8
eps = 1e-7  # numerical floor inside log() and the gradient denominator

mlp = MLP(layer_sizes=(2, 4, 1), use_bias=True, out_activation="sigmoid")

losses = []       # average train loss per epoch
test_losses = []  # test loss per epoch
test_accs = []    # test accuracy per epoch
for epoch in tqdm(range(num_epochs)):
    total_loss = 0.0
    # Walk the training set in consecutive mini-batches
    # (numpy slicing clips the final, possibly short, batch).
    for st in range(0, len(x_train), batch_size):
        x = x_train[st:st + batch_size]
        y = y_train[st:st + batch_size]

        # Forward pass.
        y_pred = mlp.forward(x)

        # Binary cross-entropy, summed over the batch:
        # L = -[ y log(ŷ) + (1 - y) log(1 - ŷ) ]
        total_loss += -np.sum(y * np.log(y_pred + eps)
                              + (1 - y) * np.log(1 - y_pred + eps))

        # dL/dŷ = (ŷ - y) / (ŷ (1 - ŷ)), averaged over the batch.
        grad = (y_pred - y) / (y_pred * (1 - y_pred) + eps) / len(y)

        # Backward pass and SGD step.
        mlp.backward(grad)
        mlp.update(learning_rate)

    # Average per-sample training loss for this epoch.
    losses.append(total_loss / len(x_train))

    # Evaluate loss and accuracy on the held-out test set.
    y_test_pred = mlp.forward(x_test)
    test_losses.append(-np.mean(y_test * np.log(y_test_pred + eps)
                                + (1 - y_test) * np.log(1 - y_test_pred + eps)))
    test_accs.append(np.mean((y_test_pred > 0.5) == y_test))

# ---- Training curves ----
fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(12, 4))

# Left panel: train/test loss per epoch.
ax_loss.plot(losses, label='Train Loss')
ax_loss.plot(test_losses, label='Test Loss')
ax_loss.set_xlabel('Epoch')
ax_loss.set_ylabel('Loss')
ax_loss.legend()
ax_loss.set_title('Loss Curve')

# Right panel: test accuracy per epoch.
ax_acc.plot(test_accs, label='Test Accuracy', color='green')
ax_acc.set_xlabel('Epoch')
ax_acc.set_ylabel('Accuracy')
ax_acc.legend()
ax_acc.set_title('Accuracy Curve')

fig.tight_layout()
plt.show()


# Visualize the learned decision boundary together with the data points.
def plot_decision_boundary(model, X, y):
    """Plot ``model``'s output over a dense grid spanning ``X``.

    ``X`` is an (N, 2) feature matrix; ``y`` is an (N, 1) label array used
    only to color the scattered sample points.
    """
    # Grid covering the data with a 0.5 margin on every side.
    margin, step = 0.5, 0.01
    x_min, x_max = X[:, 0].min() - margin, X[:, 0].max() + margin
    y_min, y_max = X[:, 1].min() - margin, X[:, 1].max() + margin
    xx, yy = np.meshgrid(np.arange(x_min, x_max, step),
                         np.arange(y_min, y_max, step))

    # Evaluate the model on every grid point, then restore the grid shape.
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    Z = model.forward(grid_points).reshape(xx.shape)

    # Filled contour of the model output plus the labelled samples.
    plt.figure(figsize=(8, 6))
    plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.RdBu)
    plt.scatter(X[:, 0], X[:, 1], c=y.flatten(), edgecolors='k', marker='o', cmap=plt.cm.RdBu)
    plt.colorbar()
    plt.title('Decision Boundary')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.show()


# Show the trained network's decision surface over the full dataset.
plot_decision_boundary(mlp, x_all, y_all)
