import numpy as np


class Neural_Network:
    """A fully-connected feed-forward network with sigmoid activations,
    trained by per-sample (stochastic) gradient descent.

    The bias term is folded into the weight matrices: a constant-1
    column is appended to the input, and every hidden layer carries one
    extra bias unit (hence the ``+ 1`` in the weight shapes below).
    """

    def __init__(self, *, layers: list, alpha: float = 0.01):
        """Initialize random weights.

        Parameters
        ----------
        layers : list
            Units per layer, e.g. ``[2, 2, 1]``. Must have at least two
            entries (input size and output size).
        alpha : float
            Learning rate for the gradient-descent update.
        """
        self.layers = layers
        self.alpha = alpha
        self.W = []
        # Hidden-layer weights: +1 on both dimensions for the bias unit.
        # Dividing by sqrt(fan_in) keeps initial activations well-scaled.
        for i in range(len(layers) - 2):
            w = np.random.randn(layers[i] + 1, layers[i + 1] + 1)
            self.W.append(w / np.sqrt(layers[i]))
        # Last two layers: the output layer gets no extra bias unit.
        w = np.random.randn(layers[-2] + 1, layers[-1])
        self.W.append(w / np.sqrt(layers[-2]))

    def __repr__(self):
        return f"神经网络：{'-'.join(str(i) for i in self.layers)}"

    def sigmoid(self, *, x: np.ndarray) -> np.ndarray:
        """Logistic sigmoid activation, applied element-wise."""
        return 1.0 / (1 + np.exp(-x))

    def sigmoid_deriv(self, *, x: np.ndarray) -> np.ndarray:
        """Sigmoid derivative expressed in terms of the sigmoid OUTPUT.

        Note: *x* must already be ``sigmoid(net)``, not the raw
        pre-activation, since s'(net) = s(net) * (1 - s(net)).
        """
        return x * (1 - x)

    def fit(self, *, x: np.ndarray, y: np.ndarray, epochs: int = 1000,
            display_update: int = 100):
        """Train on (x, y), logging the loss every *display_update* epochs.

        Parameters
        ----------
        x : np.ndarray
            Training samples, shape (n_samples, n_features).
        y : np.ndarray
            Targets, one row per sample.
        epochs : int
            Number of full passes over the data.
        display_update : int
            Loss-printing interval (epoch 0 is always printed).
        """
        # Append the constant-1 bias column once, up front.
        x = np.c_[x, np.ones((x.shape[0]))]
        for epoch in range(epochs):
            # One SGD step per sample.
            for data, target in zip(x, y):
                self.fit_partial(x=data, y=target)
            if epoch == 0 or (epoch + 1) % display_update == 0:
                loss = self.calculate_loss(x=x, targets=y)
                print(f"[info]:{epoch=} {loss=:.7f}")

    def fit_partial(self, *, x: np.ndarray, y: np.ndarray):
        """Run one backpropagation step on a single (x, y) sample.

        *x* must already include the bias column (``fit`` adds it).
        """
        A = [np.atleast_2d(x)]  # stores the output activation of every layer
        # Forward pass: activate layer by layer.
        for layer in range(len(self.W)):
            net = A[layer].dot(self.W[layer])
            out = self.sigmoid(x=net)
            A.append(out)
        # Backward pass: output-layer delta first, then propagate back.
        error = A[-1] - y
        D = [(error * self.sigmoid_deriv(x=A[-1]))]
        for layer in range(len(A) - 2, 0, -1):
            delta = D[-1].dot(self.W[layer].T)
            delta = delta * self.sigmoid_deriv(x=A[layer])
            D.append(delta)
        D = D[::-1]  # reverse so D[i] lines up with W[i]
        # Gradient-descent weight update.
        for layer in range(len(self.W)):
            self.W[layer] += -self.alpha * A[layer].T.dot(D[layer])

    def predict(self, *, x: np.ndarray, addBias: bool = True) -> np.ndarray:
        """Forward-propagate *x* and return the output activations.

        Set ``addBias=False`` when *x* already carries the bias column.
        """
        p = np.atleast_2d(x)
        if addBias:
            p = np.c_[p, np.ones((p.shape[0]))]
        for layer in range(len(self.W)):
            p = self.sigmoid(x=np.dot(p, self.W[layer]))
        return p

    def calculate_loss(self, *, x: np.ndarray, targets: np.ndarray) -> float:
        """Return the sum-of-squared-errors loss (½·Σ(pred − target)²).

        *x* must already include the bias column.
        """
        targets = np.atleast_2d(targets)
        predictions = self.predict(x=x, addBias=False)
        loss = 0.5 * np.sum((predictions - targets) ** 2)
        return loss


def mnist_for_NN():
    """Train the network on scikit-learn's small digits dataset and
    print a per-class classification report.

    Requires scikit-learn; imports are kept local so the XOR demo can
    run without it.
    """
    from sklearn.metrics import classification_report
    from sklearn.preprocessing import LabelBinarizer
    from sklearn.model_selection import train_test_split
    from sklearn import datasets

    print("[INFO] 加载 MNIST (sample) 数据集...")
    digits = datasets.load_digits()
    data = digits.data.astype("float")
    # Min-max scale pixel values into [0, 1].
    data = (data - data.min()) / (data.max() - data.min())
    print(f"[INFO] samples: {data.shape[0]}, dim: {data.shape[1]}")
    train_x, test_x, train_y, test_y = train_test_split(data, digits.target, test_size=0.25)
    # Fit the binarizer on the training labels only, then reuse the same
    # class->column mapping for the test labels. Fitting two separate
    # binarizers could yield inconsistent one-hot encodings.
    lb = LabelBinarizer()
    train_y = lb.fit_transform(train_y)
    test_y = lb.transform(test_y)

    nn = Neural_Network(layers=[train_x.shape[1], 32, 16, 10])
    nn.fit(x=train_x, y=train_y)
    print("[INFO] 评估网络中...")
    preds = nn.predict(x=test_x)
    # argmax over the 10 output units gives the predicted digit.
    predictions = preds.argmax(axis=1)
    print(classification_report(test_y.argmax(axis=1), predictions))


if __name__ == '__main__':
    # Sanity check: learn the XOR function with a tiny 2-2-1 network.
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([[0], [1], [1], [0]])
    nn = Neural_Network(layers=[2, 2, 1], alpha=0.5)
    nn.fit(x=X, y=y, epochs=20000)
    for x, target in zip(X, y):
        pred = nn.predict(x=x)[0][0]
        # Threshold the sigmoid output at 0.5 to get a hard 0/1 label.
        step = int(pred > 0.5)
        print(f"[INFO] data={x}, ground-truth={target[0]}, {pred=:.4f}, {step=}")
    # mnist_for_NN()
