import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin


class CustomMLP(BaseEstimator, ClassifierMixin):
    """Minimal multi-layer perceptron for binary classification.

    Trains with mini-batch SGD using ReLU hidden layers, a single sigmoid
    output unit, L2 weight regularization, and per-layer gradient-norm
    clipping.

    Parameters
    ----------
    input_shape : int
        Number of input features.
    hidden_units : sequence of int
        Width of each hidden layer, in order.
    learning_rate : float, default 0.01
        SGD step size.
    epochs : int, default 100
        Maximum number of training epochs.
    batch_size : int, default 32
        Mini-batch size.
    alpha : float, default 0.0001
        L2 regularization strength.
    verbose : bool, default True
        Stored for sklearn parameter compatibility; per-fit printing is
        controlled by the ``verbose`` argument of :meth:`fit`.
    """

    def __init__(self, input_shape, hidden_units, learning_rate=0.01,
                 epochs=100, batch_size=32, alpha=0.0001, verbose=True):
        self.input_shape = input_shape
        self.hidden_units = hidden_units
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.batch_size = batch_size
        self.alpha = alpha
        self.verbose = verbose
        self.weights = []
        self.biases = []

    def initialize_weights(self):
        """(Re)initialize all layers with He initialization (suited to ReLU)."""
        self.weights = []
        self.biases = []

        # Input layer -> first hidden layer.
        scale = np.sqrt(2 / self.input_shape)
        self.weights.append(np.random.randn(self.input_shape, self.hidden_units[0]) * scale)
        self.biases.append(np.zeros(self.hidden_units[0]))

        # Hidden layer -> hidden layer.
        for i in range(1, len(self.hidden_units)):
            scale = np.sqrt(2 / self.hidden_units[i - 1])
            self.weights.append(np.random.randn(self.hidden_units[i - 1], self.hidden_units[i]) * scale)
            self.biases.append(np.zeros(self.hidden_units[i]))

        # Last hidden layer -> single sigmoid output unit.
        scale = np.sqrt(2 / self.hidden_units[-1])
        self.weights.append(np.random.randn(self.hidden_units[-1], 1) * scale)
        self.biases.append(np.zeros(1))

    def relu(self, z):
        """Element-wise ReLU."""
        return np.maximum(z, 0)

    def relu_derivative(self, z):
        """Element-wise ReLU derivative: 1 where z > 0, else 0."""
        return (z > 0).astype(np.float32)

    def sigmoid(self, z):
        """Numerically stable element-wise sigmoid.

        Splits on the sign of ``z`` so the exponential is only applied to a
        non-positive argument on the branch that is selected.
        """
        return np.where(z >= 0,
                        1 / (1 + np.exp(-z)),
                        np.exp(z) / (1 + np.exp(z)))

    def forward(self, X, store_activations=False):
        """Forward pass; returns output probabilities of shape (m, 1).

        When ``store_activations`` is True, caches per-layer pre-activations
        (``self.z_values``) and activations (``self.activations``) on the
        instance.
        """
        a = X
        if store_activations:
            self.activations = [a]
            self.z_values = []

        # Hidden layers.
        for i in range(len(self.hidden_units)):
            z = np.dot(a, self.weights[i]) + self.biases[i]
            a = self.relu(z)
            if store_activations:
                self.z_values.append(z)
                self.activations.append(a)

        # Output layer.
        z_out = np.dot(a, self.weights[-1]) + self.biases[-1]
        a_out = self.sigmoid(z_out)
        if store_activations:
            self.z_values.append(z_out)
            self.activations.append(a_out)

        return a_out

    def backward(self, X, y, y_pred):
        """Compute clipped gradients of the regularized cross-entropy loss.

        Returns
        -------
        tuple of lists
            ``(grads_w, grads_b)`` aligned with ``self.weights`` and
            ``self.biases``.
        """
        m = X.shape[0]
        grads_w = [None] * len(self.weights)
        grads_b = [None] * len(self.biases)

        # Recompute hidden activations for this batch (forward() may not
        # have been called with store_activations=True).
        activations = [X]
        z_values = []
        a_prev = X
        for i in range(len(self.hidden_units)):
            z = np.dot(a_prev, self.weights[i]) + self.biases[i]
            a_prev = self.relu(z)
            activations.append(a_prev)
            z_values.append(z)

        # For sigmoid output + cross-entropy, the output delta is (p - y).
        delta = y_pred - np.asarray(y).reshape(-1, 1)

        # Backpropagate from the output layer down to the first layer.
        for i in range(len(self.weights) - 1, -1, -1):
            a_in = activations[i]

            # Average data gradient plus L2 penalty term.
            grads_w[i] = (1 / m) * np.dot(a_in.T, delta) + (self.alpha / m) * self.weights[i]
            grads_b[i] = (1 / m) * np.sum(delta, axis=0)

            # Propagate the error to the previous layer (skip for layer 0).
            if i > 0:
                delta = np.dot(delta, self.weights[i].T) * self.relu_derivative(z_values[i - 1])

        # Per-layer gradient-norm clipping keeps SGD updates bounded.
        max_grad_norm = 1.0
        for j in range(len(grads_w)):
            grad_norm = np.linalg.norm(grads_w[j])
            if grad_norm > max_grad_norm:
                grads_w[j] = grads_w[j] * (max_grad_norm / grad_norm)

        return grads_w, grads_b

    def fit(self, X, y, X_val=None, y_val=None, verbose=True):
        """Train with mini-batch SGD and return ``self``.

        BUG FIX: the loop previously ran ``range(1)`` with an unconditional
        ``return`` inside it, so exactly one epoch ever executed and the
        early-stopping bookkeeping was dead code. It now runs up to
        ``self.epochs`` epochs and stops early after ``patience`` epochs
        without loss improvement (validation loss when ``X_val``/``y_val``
        are supplied, training loss otherwise).
        """
        self.initialize_weights()
        self.classes_ = np.unique(y)  # sklearn fitted-estimator convention

        m = X.shape[0]
        best_loss = float('inf')
        no_improvement_count = 0
        patience = 10  # epochs tolerated without improvement

        for epoch in range(self.epochs):
            # Reshuffle the training data once per epoch.
            indices = np.random.permutation(m)
            X_shuffled = X[indices]
            y_shuffled = y[indices]

            epoch_loss = 0.0
            batch_count = 0

            # Mini-batch updates.
            for start in range(0, m, self.batch_size):
                end = min(start + self.batch_size, m)
                X_batch = X_shuffled[start:end]
                y_batch = y_shuffled[start:end]

                y_pred = self.forward(X_batch)
                grads_w, grads_b = self.backward(X_batch, y_batch, y_pred)

                for j in range(len(self.weights)):
                    self.weights[j] -= self.learning_rate * grads_w[j]
                    self.biases[j] -= self.learning_rate * grads_b[j]

                epoch_loss += self.compute_loss(y_batch, y_pred)
                batch_count += 1

            epoch_loss /= batch_count

            # Early-stop on validation loss when a validation set is given.
            if X_val is not None and y_val is not None:
                monitored_loss = self.compute_loss(y_val, self.forward(X_val))
            else:
                monitored_loss = epoch_loss

            if verbose:
                print(f"Epoch {epoch + 1}/{self.epochs} - Loss: {epoch_loss:.4f}")

            if monitored_loss < best_loss - 1e-8:
                best_loss = monitored_loss
                no_improvement_count = 0
            else:
                no_improvement_count += 1
                if no_improvement_count >= patience:
                    break

        return self

    def compute_loss(self, y, y_pred):
        """Mean binary cross-entropy between labels ``y`` and probabilities ``y_pred``.

        BUG FIX: ``y`` is reshaped to (m, 1) before combining with the
        (m, 1) predictions. Previously a 1-D label vector of shape (m,)
        broadcast against (m, 1) predictions into an (m, m) matrix, so the
        returned "loss" averaged m*m mismatched terms.
        """
        y = np.asarray(y).reshape(-1, 1)
        eps = 1e-8  # guards against log(0)
        return -np.mean(y * np.log(y_pred + eps) + (1 - y) * np.log(1 - y_pred + eps))

    def compute_accuracy(self, y, y_pred):
        """Fraction of correct predictions at a 0.5 decision threshold."""
        predictions = (y_pred > 0.5).astype(int).flatten()
        # Flatten y too, so a (m, 1) label column cannot broadcast to (m, m).
        return np.mean(predictions == np.asarray(y).flatten())

    def predict(self, X):
        """Predict hard 0/1 labels, batching to bound peak memory."""
        predictions = []
        batch_size = 1000
        for i in range(0, X.shape[0], batch_size):
            y_pred_batch = self.forward(X[i:i + batch_size])
            predictions.extend((y_pred_batch > 0.5).astype(int).flatten())
        return np.array(predictions)

    def predict_proba(self, X):
        """Predict P(class 1), batching to bound peak memory.

        NOTE: returns a 1-D probability-of-positive array rather than the
        sklearn-conventional (m, 2) matrix; kept for backward compatibility
        with existing callers.
        """
        probas = []
        batch_size = 1000
        for i in range(0, X.shape[0], batch_size):
            probas.extend(self.forward(X[i:i + batch_size]).flatten())
        return np.array(probas)

    def set_params(self, **params):
        """Set hyperparameters by name and return ``self`` (sklearn-style)."""
        for param, value in params.items():
            setattr(self, param, value)
        return self
