import numpy as np

class MLP:
    """Minimal multi-layer perceptron: tanh hidden layers, linear output,
    trained with mini-batch SGD on a 0.5 * mean-squared-error loss.

    Layers are stored as dicts {'W': (in, out) weight matrix, 'b': (1, out) bias}.
    """

    def __init__(self, input_dim, hidden_dims=None, output_dim=1, lr=0.01):
        """Build the layer stack.

        Args:
            input_dim: number of input features.
            hidden_dims: sizes of the hidden layers; defaults to [32, 16].
                (A None sentinel is used instead of a mutable list default.)
            output_dim: number of outputs.
            lr: SGD learning rate.
        """
        if hidden_dims is None:
            hidden_dims = [32, 16]
        self.layers = []
        dims = [input_dim] + list(hidden_dims) + [output_dim]
        for i in range(len(dims) - 1):
            self.layers.append({
                # Small random init keeps tanh in its near-linear regime.
                'W': np.random.randn(dims[i], dims[i + 1]) * 0.1,
                'b': np.zeros((1, dims[i + 1])),
            })
        self.lr = lr

    def _activation(self, x):
        """Hidden-layer nonlinearity (applied element-wise)."""
        return np.tanh(x)

    def _activation_deriv(self, x):
        """Derivative of tanh w.r.t. its *pre-activation* input x."""
        return 1 - np.tanh(x) ** 2

    def forward(self, X):
        """Forward pass.

        Returns:
            (out, cache) where out is the linear output of the last layer and
            cache is [X, a_1, ..., a_{L-1}, out] — the *activated* output of
            each hidden layer (cache[0] is the input itself).
        """
        out = X
        cache = [X]
        for layer in self.layers[:-1]:
            out = self._activation(out @ layer['W'] + layer['b'])
            cache.append(out)
        # Output layer is linear (regression head).
        out = out @ self.layers[-1]['W'] + self.layers[-1]['b']
        cache.append(out)
        return out, cache

    def backward(self, X, y, out, cache):
        """Backpropagate the 0.5 * MSE loss; return [(dW, db), ...] per layer.

        Note: cache[i] holds the activation a = tanh(z) of layer i-1, so the
        tanh derivative is computed directly from the stored activation as
        1 - a**2 (since tanh'(z) = 1 - tanh(z)**2 and a = tanh(z)).  Applying
        _activation_deriv to the activation would tanh it a second time and
        yield wrong gradients.
        """
        grads = []
        delta = out - y  # dL/d(out) for L = sum((out-y)^2) / (2*N), scaled below
        for i in reversed(range(len(self.layers))):
            a_prev = cache[i]
            dW = a_prev.T @ delta / X.shape[0]
            db = np.mean(delta, axis=0, keepdims=True)
            grads.append((dW, db))  # appended back-to-front; reversed below
            if i > 0:
                # Propagate through the linear map, then through tanh.
                delta = (delta @ self.layers[i]['W'].T) * (1.0 - cache[i] ** 2)
        grads.reverse()
        return grads

    def fit(self, X, y, epochs=200, batch_size=32):
        """Train with mini-batch SGD, reshuffling the data each epoch."""
        for epoch in range(epochs):
            idx = np.random.permutation(len(X))
            X, y = X[idx], y[idx]  # rebinding; caller's arrays are untouched
            for i in range(0, len(X), batch_size):
                Xb, yb = X[i:i + batch_size], y[i:i + batch_size]
                out, cache = self.forward(Xb)
                grads = self.backward(Xb, yb, out, cache)
                for layer, (dW, db) in zip(self.layers, grads):
                    layer['W'] -= self.lr * dW
                    layer['b'] -= self.lr * db

    def predict(self, X):
        """Return the network output for X (no gradient bookkeeping kept)."""
        out, _ = self.forward(X)
        return out