#pragma once

#include <cmath>
#include <iostream>

#include "tensor.h"

template<typename T, typename Activation>
class LinearLayer {
private:
	Tensor<T> weight;
	Tensor<T> bias;
	int in_dim;
	int out_dim;

public:
	LinearLayer(int in_dim, int out_dim) : in_dim(in_dim), out_dim(out_dim) {
		weight = Tensor<T>({ in_dim, out_dim });
		weight.random(-1. / sqrt(in_dim), 1. / sqrt(in_dim));
		bias = Tensor<T>::zeros({ out_dim });
	}

	Tensor<T> forward(const Tensor<T>& X) {
		Tensor<T> pre_activation = X.matmul(weight) + bias;  // 先计算线性部分
		// 关键修改：用张量的apply方法，对每个元素应用激活函数
		return pre_activation.apply([](T x) { return Activation::apply(x); });
	}

	Tensor<T>& get_weight() {
		return weight;
	}

	Tensor<T>& get_bias() {
		return bias;
	}

	const Tensor<T>& get_weight() const {
		return weight; 
	}
	const Tensor<T>& get_bias() const { 
		return bias; 
	} 
};