#pragma once

#include <functional>
#include <iostream>
#include <vector>

#include "activation.h"
#include "linearlayer.h"

template <typename T>
class MLP
{
private:
	LinearLayer<T, ReLU<T>> linear1;
	LinearLayer<T, ReLU<T>> linear2;

public:
	MLP() : linear1(2, 4), linear2(4, 1) {}

	Tensor<T> forward(const Tensor<T>& x) {
		Tensor<T> hidden = linear1.forward(x);  // 第一层输出：[batch,4]
		Tensor<T> output = linear2.forward(hidden);  // 第二层输出：[batch,1]
		return output;
	}

	std::vector<LinearLayer<T, ReLU<T>>&> get_layers() {
		return { linear1, linear2 };
	}

	void print_layer_info() const {
		std::println("Layer 1 (ReLU):");
		std::print("  weight shape: ");
		for (int dim : linear1.get_weight().get_shape()) {
			std::print("{} ", dim);
		}
		std::print("\n bias shape: ");
		for (int dim : linear1.get_bias().get_shape()) {
			std::print("{} ", dim);
		}
		std::println("\n Layer 2 (Sigmod): ");
		std::print("  weight shape: ");
		for (int dim : linear2.get_weight().get_shape()) {
			std::print("{} ", dim);
		}
		std::print("\n bias shape: ");
		for (int dim : linear2.get_bias().get_shape()) {
			std::println("{}", dim);
		}
	}
};

