#ifndef __LOSS__
#define __LOSS__

#include "Module.hpp"
#include "../_reduction.hpp"
#include "../_functional.hpp"

namespace DimN {


#if 0
	static std::vector<std::string> __all__ = {
		"L1Loss", "NLLLoss", "NLLLoss2d", "PoissonNLLLoss", "GaussianNLLLoss", "KLDivLoss",
		"MSELoss", "BCELoss", "BCEWithLogitsLoss", "HingeEmbeddingLoss", "MultiLabelMarginLoss",
		"SmoothL1Loss", "HuberLoss", "SoftMarginLoss", "CrossEntropyLoss", "MultiLabelSoftMarginLoss",
		"CosineEmbeddingLoss", "MarginRankingLoss", "MultiMarginLoss", "TripletMarginLoss",
		"TripletMarginWithDistanceLoss", "CTCLoss"
	};
#endif


	template<typename _T = float>
	class _Loss : public Module<_T> {
	protected:
		// Reduction mode applied to the computed loss (e.g. "mean", "sum", "none").
		std::string reduction;

	public:
		/// Base class for all loss modules.
		///
		/// If either legacy flag (`size_average` / `reduce`) is supplied, the
		/// stored reduction string is derived from them via
		/// `_Reduction::legacy_get_string`; otherwise the `reduction`
		/// argument is stored as given.
		_Loss(const std::optional<bool>& size_average = std::nullopt,
			const std::optional<bool>& reduce = std::nullopt,
			const std::string& reduction = "mean")
		{
			const bool use_legacy_flags =
				size_average.has_value() || reduce.has_value();
			this->reduction = use_legacy_flags
				? DimN::_Reduction::legacy_get_string(size_average, reduce)
				: reduction;
		}

		// Virtual destructor: _Loss is deleted through base pointers.
		virtual ~_Loss() = default;

	}; //end class _Loss



	template <typename _T = float>
	class _WeightedLoss : public _Loss<_T> {
	protected:
		// Optional per-class rescaling weight tensor; empty when unweighted.
		std::optional<std::shared_ptr<Tensor<_T>>> weight;

	public:
		/// Loss base class carrying an optional per-class `weight` tensor.
		/// All reduction-related arguments are forwarded to `_Loss`.
		_WeightedLoss(const std::optional<std::shared_ptr<Tensor<_T>>>& weight = std::nullopt,
			const std::optional<bool>& size_average = std::nullopt,
			const std::optional<bool>& reduce = std::nullopt,
			const std::string& reduction = "mean")
			// BUGFIX: the base class is a dependent type inside this template,
			// so the mem-initializer must spell it `_Loss<_T>`; the previous
			// unqualified `_Loss(...)` is not found by name lookup on
			// conforming compilers (GCC/Clang reject it).
			: _Loss<_T>(size_average, reduce, reduction), weight(weight) {
			// Optionally register buffer if needed
		}

	};

	template <typename T = float>
	class CrossEntropyLoss : public _WeightedLoss<T> {
	private:
		// Target value that should be ignored (not contribute to the loss).
		int ignore_index;
		// Label-smoothing factor; 0.0f disables smoothing.
		float label_smoothing;

	public:
		/// Cross-entropy loss module (interface modeled after
		/// torch.nn.CrossEntropyLoss).
		///
		/// @param weight          optional per-class rescaling weights
		/// @param size_average    legacy reduction flag (deprecated path)
		/// @param ignore_index    target value excluded from the loss
		/// @param reduce          legacy reduction flag (deprecated path)
		/// @param reduction       "mean" (default), or another mode accepted by _Loss
		/// @param label_smoothing smoothing factor, expected in [0, 1]
		explicit CrossEntropyLoss(const std::optional<std::shared_ptr<Tensor<T>>>& weight = std::nullopt,
			const std::optional<bool>& size_average = std::nullopt,
			int ignore_index = -100,
			const std::optional<bool>& reduce = std::nullopt,
			const std::string& reduction = "mean",
			// 0.0f: float literal avoids an implicit double->float conversion
			float label_smoothing = 0.0f)
			: _WeightedLoss<T>(weight, size_average, reduce, reduction),
			ignore_index(ignore_index),
			label_smoothing(label_smoothing) {}

		/// Computes the cross-entropy between `input` (presumably logits —
		/// confirm against Module<T>'s calling convention) and `target`.
		///
		/// TODO: the actual loss computation is NOT implemented yet; this
		/// currently returns `input` unchanged as a placeholder.
		/// NOTE(review): cannot tell from here whether Module<T> declares a
		/// virtual `forward` — add `override` once that is confirmed.
		Tensor<T> forward(const Tensor<T>& input, const Tensor<T>& target) {
			(void)target; // unused until the real implementation lands
			return input; // Placeholder return
		}
	};


};

#endif