#ifndef __TENSOR__
#define __TENSOR__

#include <algorithm> // std::fill, std::max_element, std::min
#include <cmath>     // std::exp, std::log
#include <functional>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <random>
#include <sstream>
#include <stdexcept>
#include <string>    // std::string, std::to_string
#include <vector>



#define DEBUG

// ASSERT_THROW(cond, msg): throw std::runtime_error carrying the failed
// condition text and source location when `cond` is false.
// Wrapped in do { } while (0) so the macro expands to a single statement:
// safe inside unbraced if/else chains and requires the usual trailing ';'.
#ifndef ASSERT_THROW
#define ASSERT_THROW(cond, msg)                                                \
  do {                                                                         \
    if (!(cond)) {                                                             \
      std::ostringstream oss;                                                  \
      oss << "Dim Assertion failed: (" << #cond << "), function "              \
          << __FUNCTION__ << ", file " << __FILE__ << ", line " << __LINE__    \
          << ". " << msg;                                                      \
      throw std::runtime_error(oss.str());                                     \
    }                                                                          \
  } while (0)
#endif

namespace DimN {

// ASSERT_THROW is defined earlier in this header; this guarded fallback only
// exists so the class also compiles when extracted in isolation.
#ifndef ASSERT_THROW
#define ASSERT_THROW(cond, msg)                                                \
  do {                                                                         \
    if (!(cond)) {                                                             \
      std::ostringstream oss;                                                  \
      oss << "Dim Assertion failed: (" << #cond << "), function "              \
          << __FUNCTION__ << ", file " << __FILE__ << ", line " << __LINE__    \
          << ". " << msg;                                                      \
      throw std::runtime_error(oss.str());                                     \
    }                                                                          \
  } while (0)
#endif

// 2-D row-major tensor with a minimal reverse-mode autodiff graph.
// The overloaded +, - and * operators heap-allocate their results and park
// them in a static registry (getTensorBuff) so backward() can still reach
// intermediate nodes after the expression statement ends.
template <typename _T = float> class Tensor {
private:
  std::string ErrorInfo; // reserved for error reporting (currently unused)
  size_t rows = 0, cols = 0;

  // One edge of the backward graph: a parent tensor plus the function that
  // maps this node's incoming gradient to that parent's gradient.
  class _node {
  public:
    Tensor* Data;
    std::function<Tensor<_T>(const Tensor<_T>&)> Func;
    _node(Tensor* data, std::function<Tensor<_T>(const Tensor<_T>&)> func)
        : Data(data), Func(func) {}
    _node(const Tensor* data, std::function<Tensor<_T>(const Tensor<_T>&)> func)
        : Data(const_cast<Tensor*>(data)), Func(func) {}
  };
  std::vector<_node> ParentNodes;

public:
  // Scalar gradient accumulator; backward() sums incoming gradient elements
  // into it. Starts at 0.01 per the original design.
  _T gradient = (_T)0.01;
  std::vector<_T> data; // row-major storage: element (i, j) at data[i*cols+j]

  Tensor() : rows(0), cols(0) {}
  // r x c tensor initialised with uniform random values in [0, 1).
  Tensor(size_t r, size_t c) : rows(r), cols(c), data(r * c) { randomize(); }
  // r x c tensor filled with fill_value.
  Tensor(size_t r, size_t c, _T fill_value) : rows(r), cols(c) {
    data.resize(rows * cols, fill_value);
  }
  Tensor(const Tensor& other)
      : rows(other.rows), cols(other.cols), data(other.data) {}

  // Turn this tensor into a 1x1 scalar holding `value`.
  // BUG FIX: resize(1, value) only writes `value` when the vector grows, so a
  // previously populated tensor silently kept its old first element;
  // assign() always overwrites.
  Tensor& operator=(const _T value) {
    rows = cols = 1;
    data.assign(1, value);
    return *this;
  }

  // rows x cols tensor filled with fill_value.
  static Tensor<_T> full(size_t rows, size_t cols, _T fill_value) {
    return Tensor<_T>(rows, cols, fill_value);
  }

  // rows x cols tensor of zeros (defaults to a single-column vector).
  static Tensor<_T> zeros(size_t rows, size_t cols = 1) {
    return Tensor<_T>(rows, cols, _T(0));
  }

  // Overwrite every element with `value`.
  // BUG FIX: the original iterated the scalar elements and called
  // .begin()/.end() on each of them, which cannot compile once instantiated.
  void fill(_T value) { std::fill(data.begin(), data.end(), value); }

  // Row-wise softmax; only dim == 1 (across columns) is supported.
  Tensor softmax(int dim) const {
    if (dim != 1) {
      throw std::invalid_argument(
          "Softmax currently only supports dimension 1.");
    }
    Tensor result(rows, cols);
    if (data.empty()) return result; // avoid dereferencing max_element of {}
    for (size_t i = 0; i < rows; ++i) {
      _T max_elem = *std::max_element(data.begin() + i * cols,
                                      data.begin() + (i + 1) * cols);
      _T sum = 0;
      for (size_t j = 0; j < cols; ++j) {
        // Subtract the row max for numerical stability.
        result(i, j) = std::exp(data[i * cols + j] - max_elem);
        sum += result(i, j);
      }
      for (size_t j = 0; j < cols; ++j) {
        result(i, j) /= sum;
      }
    }
    return result;
  }

  // Elementwise log of softmax (not the numerically fused log-softmax form).
  Tensor log_softmax(int dim) const {
    Tensor softmax_result = softmax(dim);
    for (size_t i = 0; i < softmax_result.data.size(); ++i) {
      softmax_result.data[i] = std::log(softmax_result.data[i]);
    }
    return softmax_result;
  }

  // Sum of all elements, returned as a 1x1 tensor.
  Tensor<_T> sum() const {
    _T total = std::accumulate(data.begin(), data.end(), _T(0));
    return Tensor<_T>(1, 1, total);
  }

  bool is_column_vector() const { return cols == 1; }
  bool is_empty() const { return data.empty(); }
  size_t dim(int dimension) const { return size(dimension); }
  // size(-1) = element count, size(0) = rows, size(1) = cols.
  size_t size(int dimension = -1) const {
    switch (dimension) {
    case -1: return rows * cols;
    case 0: return rows;
    case 1: return cols;
    default:
      throw std::out_of_range("Dimension out of range");
    }
  }

  _T& operator()(size_t i, size_t j) { return data[i * cols + j]; }
  const _T& operator()(size_t i, size_t j) const { return data[i * cols + j]; }
  _T& operator[](size_t index) { return data[index]; }
  const _T& operator[](size_t index) const { return data[index]; }

  // Matrix transpose (added so matmul gradients can be formed correctly).
  Tensor transpose() const {
    Tensor result(cols, rows);
    for (size_t i = 0; i < rows; ++i)
      for (size_t j = 0; j < cols; ++j)
        result(j, i) = (*this)(i, j);
    return result;
  }

  // ---- autodiff-tracked operators ----------------------------------------

  // Elementwise add; both operands receive the identity gradient.
  // BUG FIX: capture-less lambdas instead of [&] — the originals captured the
  // operator's stack frame by reference, dangling once it returned.
  Tensor& operator+(const Tensor& other) {
    ASSERT_THROW(rows == other.rows && cols == other.cols,
                 "Tensor dimensions must agree.");
    auto result = MakeIntermediateTensor(new Tensor(rows, cols));
    for (size_t i = 0; i < data.size(); ++i)
      result->data[i] = data[i] + other.data[i];
    result->ParentNodes.push_back(
        _node(this, [](const Tensor<_T>& g) { return g; }));
    result->ParentNodes.push_back(
        _node(&other, [](const Tensor<_T>& g) { return g; }));
    return *result;
  }

  // Elementwise subtract; the right operand's gradient is negated.
  Tensor& operator-(const Tensor& other) {
    ASSERT_THROW(rows == other.rows && cols == other.cols,
                 "Tensor dimensions must agree.");
    auto result = MakeIntermediateTensor(new Tensor(rows, cols));
    for (size_t i = 0; i < data.size(); ++i)
      result->data[i] = data[i] - other.data[i];
    result->ParentNodes.push_back(
        _node(this, [](const Tensor<_T>& g) { return g; }));
    result->ParentNodes.push_back(
        _node(&other, [](const Tensor<_T>& g) { return g * _T(-1); }));
    return *result;
  }

  // Plain matrix product with no gradient tracking (used by operator*).
  Tensor mult(const Tensor& other) const {
    ASSERT_THROW(cols == other.rows,
                 "Matrix multiply dimensions must agree. rows " +
                     std::to_string(other.rows) + " cols " +
                     std::to_string(cols));
    Tensor result(rows, other.cols);
    for (size_t i = 0; i < rows; ++i) {
      for (size_t j = 0; j < other.cols; ++j) {
        _T sum = 0;
        for (size_t k = 0; k < cols; ++k) {
          sum += (*this)(i, k) * other(k, j);
        }
        result(i, j) = sum;
      }
    }
    return result;
  }

  // Matrix product with gradient tracking.
  // BUG FIX: for C = A*B the gradients are dA = g * B^T and dB = A^T * g.
  // The original multiplied without transposing, which throws a dimension
  // error for any non-square operands; pointers are now captured by value so
  // the lambdas do not reference the dead stack frame.
  Tensor& operator*(const Tensor& other) {
    ASSERT_THROW(cols == other.rows,
                 "Matrix multiply dimensions must agree. rows " +
                     std::to_string(other.rows) + " cols " +
                     std::to_string(cols));
    auto result = MakeIntermediateTensor(new Tensor(mult(other)));
    const Tensor* rhs = &other;
    result->ParentNodes.push_back(_node(this, [rhs](const Tensor<_T>& g) {
      return g.mult(rhs->transpose());
    }));
    result->ParentNodes.push_back(_node(&other, [this](const Tensor<_T>& g) {
      return transpose().mult(g);
    }));
    return *result;
  }

#if 0
  // TODO(port): division gradients from the Python prototype:
  //   result.parent_node_gradient_functions.append(lambda g: g / other.value)
  //   result.parent_node_gradient_functions.append(lambda g: -self.value * g / (other.value * other.value))
#endif

// not right yet, needs a rewrite
#if 0
  Tensor operator/(const Tensor &other) const {
    ASSERT_THROW(((data.size() > 0) && data.at(0) != 0.0), "");
    Tensor result(this->value / other.value);
    return result;
  }
#endif

  // Reverse-mode backprop: fold the incoming gradient into `gradient`, then
  // push the transformed gradient to each parent node.
  // BUG FIX: the original indexed g.data by this tensor's element count,
  // reading out of bounds whenever shapes differed (e.g. the default 1x1
  // seed on a larger tensor).
  void backward(const Tensor<_T>& g = Tensor<_T>(1, 1, (_T)1.0)) {
    const size_t n = std::min(data.size(), g.data.size());
    for (size_t i = 0; i < n; ++i) {
      gradient += g.data[i];
    }
    for (size_t i = 0; i < ParentNodes.size(); ++i) {
      auto grad_tensor = ParentNodes[i].Func(g);
      ParentNodes[i].Data->backward(grad_tensor);
    }
  }

  // Elementwise divide by a scalar (no gradient tracking).
  Tensor<_T> operator/(const _T& divisor) const {
    Tensor<_T> result(rows, cols);
    for (size_t i = 0; i < data.size(); ++i) {
      result.data[i] = data[i] / divisor;
    }
    return result;
  }

  // Elementwise multiply by a scalar (no gradient tracking).
  // BUG FIX: the original push_back'ed into a default-constructed tensor and
  // never set rows/cols, so (i, j) indexing on the result was broken.
  Tensor<_T> operator*(const _T& factor) const {
    Tensor<_T> result(rows, cols);
    for (size_t i = 0; i < data.size(); ++i) {
      result.data[i] = data[i] * factor;
    }
    return result;
  }

  // Static registry keeping intermediate tensors reachable for backward().
  // NOTE: entries are never freed — an intentional leak in this design.
  static std::vector<Tensor*>& getTensorBuff() {
    static std::vector<Tensor*> Data;
    return Data;
  }

  // Park a heap-allocated intermediate tensor in the registry.
  static Tensor* MakeIntermediateTensor(Tensor* data) {
    getTensorBuff().push_back(data);
    return data;
  }
  // bp end

  // Print all elements in fixed-point, 4-digit precision, 10-wide columns.
  void print() const {
    std::cout << "tensor data:\n";
    for (size_t i = 0; i < rows; ++i) {
      for (size_t j = 0; j < cols; ++j) {
        std::cout << std::setw(10) << std::setprecision(4) << std::fixed
                  << (*this)(i, j) << " ";
      }
      std::cout << std::endl;
    }
    std::cout << std::endl;
  }

  // Fill with uniform random values in [0, 1).
  // NOTE: uniform_real_distribution requires a floating-point _T.
  void randomize() {
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<_T> dist(0.0, 1.0);
    for (auto& element : data) {
      element = dist(gen);
    }
  }
};


// Convenience aliases for the common element types.
// Modernized from `typedef` to the C++11 `using` form (file already relies on
// C++11 features such as lambdas and in-class member initializers).
using TensorF = DimN::Tensor<float>;
using TensorD = DimN::Tensor<double>;

}; // namespace DimN

#endif
