#ifndef __FUNCTIONAL__
#define __FUNCTIONAL__

// "functional" is a C++ standard library header, so this file is named
// "_functional" to avoid colliding with it.


#include <memory>
#include <vector>
#include <optional>
#include <string>
#include <cmath>  // For exp and log functions
#include "../Tensor.hpp"
#include "_reduction.hpp"



#if 0
// Assuming Tensor is a class that has been defined elsewhere
template <typename T = float>
class Tensor {
public:
	// Tensor implementation details
	Tensor softmax(int dim) const;  // Assuming a softmax method is available
	Tensor log() const;  // Assuming a log method is available
	static Tensor exp(const Tensor& t);  // Assuming a static exp method
};
#endif

namespace DimN {

#if 0
	input data
		tensor([[-0.2853, -0.5133, -1.7535],
			[1.0327, -1.7639, -1.8965],
			[0.0518, -0.5768, 1.7733]] )

		target
		tensor([1, 2, 0])

		crossentropyloss_output:
	tensor(1.9786)
#endif

		// Helper function to compute cross entropy loss
		template <typename T = float>
	static Tensor<T>
		cross_entropy_loss(Tensor<T>& input, Tensor<T>& target,
			Tensor<T>& weight, std::string reduction = "mean",
			int ignore_index = -1, float label_smoothing = 0.0) {
		// Cross-entropy loss over raw logits, following torch.nn.functional.cross_entropy.
		//
		// input:  logits, indexed as input(i, c) with i in [0, batch) and c in [0, classes)
		//         (assumes a 2-D layout — dim 0 batch, dim 1 classes; TODO confirm for N-D input)
		// target: class indices per sample, read via target[i]
		// weight: optional per-class rescaling weights; skipped when weight.is_empty()
		// reduction: "mean" (weighted mean over non-ignored samples), "sum", or anything
		//            else to return the per-sample loss vector
		// ignore_index: samples whose target equals this contribute zero loss and are
		//               excluded from the "mean" denominator
		// label_smoothing: epsilon in [0, 1); blends the NLL of the target class with the
		//                  mean NLL over all classes, as in PyTorch's label smoothing
		Tensor<T> log_probs = input.log_softmax(1); // numerically stable log(softmax(input))

		const int n_samples = target.size(0);
		const int n_classes = input.size(1);
		const bool has_weight = !weight.is_empty();

		Tensor<T> loss = Tensor<T>::zeros(input.size(0));
		T total_weight = static_cast<T>(0); // sum of weights of non-ignored samples

		for (int i = 0; i < n_samples; ++i) {
			const int cls = static_cast<int>(target[i]);
			if (cls == ignore_index) {
				continue; // masked sample: zero loss, excluded from the mean denominator
			}
			T nll = -log_probs(i, cls);
			if (label_smoothing > 0.0f) {
				// Smoothed loss: (1 - eps) * NLL(target) + eps * mean_c NLL(c).
				// (The original code scaled by (1 - eps) but dropped the second term.)
				T smooth = static_cast<T>(0);
				for (int c = 0; c < n_classes; ++c) {
					smooth -= log_probs(i, c);
				}
				smooth /= static_cast<T>(n_classes);
				nll = (1 - label_smoothing) * nll + label_smoothing * smooth;
			}
			const T w = has_weight ? weight[cls] : static_cast<T>(1);
			loss[i] = w * nll;
			total_weight += w;
		}

		if (reduction == "mean") {
			// Weighted mean over non-ignored samples (PyTorch semantics); guard against
			// a zero denominator when every sample is ignored.
			const T denom = total_weight > static_cast<T>(0) ? total_weight
				: static_cast<T>(1);
			loss = loss.sum() / denom;
		}
		else if (reduction == "sum") {
			loss = loss.sum();
		}

		return loss;
	}

	template <typename T = float>
	static Tensor<T>
		cross_entropy(Tensor<T>& input, Tensor<T>& target,
			Tensor<T> weight = Tensor<T>(),
			const bool size_average = false, int ignore_index = -100,
			const bool reduce = false, std::string reduction = "mean",
			float label_smoothing = 0.0f) {
		// Public entry point mirroring torch.nn.functional.cross_entropy.
		// The deprecated size_average/reduce flags, when either is set, take
		// precedence and are translated to the modern reduction string.
		const std::string resolved_reduction = (size_average || reduce)
			? DimN::_Reduction::legacy_get_string(size_average, reduce)
			: reduction;
		// Delegate the actual computation to the helper.
		return cross_entropy_loss(input, target, weight, resolved_reduction,
			ignore_index, label_smoothing);
	}

}; // namespace DimN
#endif