#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <cmath>
#include <cassert>
#include <cstring>
#include <iostream>
#include <memory>

namespace py = pybind11;

// Lightweight row-major float matrix.
//
// A Matrix either *views* an external buffer (when `data` is passed to the
// constructor) or *owns* its storage via `holder_`.  Owned storage is
// zero-initialized.  The rvalue-qualified operations (exp, normalize, -, *)
// deliberately consume `*this` so that expression chains reuse one buffer
// instead of allocating per step.
class Matrix {
public:
	// row/col: dimensions.  data: optional non-owning view over the
	// caller's row-major buffer; when null, the Matrix allocates its own.
	Matrix(size_t row, size_t col, float *data = nullptr) : row_(row), col_(col), data_(data) {
		if (data_ == nullptr) {
			// make_unique<float[]>(n) takes an ELEMENT count, not a byte
			// count (the previous `sizeof(float) * row * col` over-allocated
			// 4x).  It also value-initializes the array to all zeros, which
			// row_permutation() relies on.
			holder_ = std::make_unique<float[]>(row * col);
			data_ = holder_.get();
		}
	}
	// Mutable element access; no bounds checking.
	float &value(size_t r, size_t c) {
		return data_[r * col_ + c];
	}
	// Const element access, forwarding to the mutable overload.  Safe:
	// data_ is a non-const pointer, and the result is re-constified.
	const float &value(size_t r, size_t c) const {
		return const_cast<Matrix *>(this)->value(r, c);
	}

private:
	// Assigning traversal: value(r, c) = fn(r, c) for every element.
	template <typename Fn>
	Matrix &operator|(Fn fn) {
		for (size_t r = 0; r < row_; r++) {
			for (size_t c = 0; c < col_; c++) {
				value(r, c) = fn(r, c);
			}
		}
		return *this;
	}
	// Read-only traversal: fn(r, c) for every element, result discarded.
	template <typename Fn>
	void operator|(Fn fn) const {
		for (size_t r = 0; r < row_; r++) {
			for (size_t c = 0; c < col_; c++) {
				fn(r, c);
			}
		}
	}
	// Sugar over the operators above; extra __VA_ARGS__ become lambda
	// captures.  NOTE: `, ##__VA_ARGS__` is a GNU extension (accepted by
	// gcc/clang/msvc).  The macros are #undef'd after the class.
	#define for_each(mat, ...) (mat) | [this, ##__VA_ARGS__](size_t r, size_t c)
	#define for_each_pure(mat, ...) (static_cast<const Matrix &>(mat)) | [this, ##__VA_ARGS__](size_t r, size_t c)

public:
    //
    // Operations that consume the lhs Matrix.
    //
	// Element-wise e^x.
	Matrix exp() && {
		for_each(*this) { return std::exp(this->value(r, c)); };
		return std::move(*this);
	}
	// Divide each element by its row sum (e.g. the softmax denominator).
	Matrix normalize() && {
		auto sum = std::make_unique<float[]>(row_);
		for_each_pure(*this, &sum) {
			if (c == 0) {
				sum[r] = 0;
			}
			sum[r] += this->value(r, c);
		};
		for_each(*this, &sum) { return this->value(r, c) / sum[r]; };
		return std::move(*this);
	}
	// Element-wise scale by a scalar.
	Matrix operator*(float f32) && {
		for_each(*this, f32) { return this->value(r, c) * f32; };
		return std::move(*this);
	}
	// Element-wise difference; rhs must have the same shape.
	Matrix operator-(const Matrix &rhs) && {
		for_each(*this, &rhs) { return this->value(r, c) - rhs.value(r, c); };
		return std::move(*this);
	}

    //
    // Operations that work inplace.
    //
	// In-place element-wise subtraction; rhs must have the same shape.
	Matrix &operator-=(const Matrix &rhs) {
        for_each(*this, &rhs) { return this->value(r, c) - rhs.value(r, c); };
        return *this;
	}

    //
    // Operations that create new Matrix.
    //
	Matrix transpose() const {
		Matrix mat(col_, row_);
		for_each(mat) { return this->value(c, r); };
		return mat;
	}
	// Standard O(row * col * inner) matrix product; requires col_ == rhs.row_.
	Matrix operator*(const Matrix &rhs) const {
		Matrix mat(row_, rhs.col_);
		for_each(mat, &rhs) {
			float dot = 0;
			for (size_t k = 0; k < this->col_; k++) {
				dot += this->value(r, k) * rhs.value(k, c);
			}
			return dot;
		};
		return mat;
	}
	// One-hot encoding: row r has a 1 in column labels[r], 0 elsewhere
	// (relies on owned storage being zero-initialized).  Each labels[r]
	// must be < col.
	static Matrix row_permutation(size_t row, size_t col, const uint8_t *labels) {
		Matrix mat(row, col);
		for (size_t r = 0; r < row; r++) {
			mat.value(r, labels[r]) = 1;
		}
		return mat;
	}
private:
	const size_t row_;
	const size_t col_;
	float *data_;                      // points at holder_.get() or an external buffer
	std::unique_ptr<float[]> holder_;  // non-null only when this Matrix owns its storage
};
// Keep the traversal macros local to the class above.
#undef for_each
#undef for_each_pure

void softmax_regression_epoch_cpp(const float *X, const unsigned char *y,
								  float *theta, size_t m, size_t n, size_t k,
								  float lr, size_t batch)
{
    /**
     * A C++ version of the softmax regression epoch code.  This should run a
     * single epoch over the data defined by X and y (and sizes m,n,k), and
     * modify theta in place.  Your function will probably want to allocate
     * (and then delete) some helper arrays to store the logits and gradients.
     *
     * Args:
     *     X (const float *): pointer to X data, of size m*n, stored in row
     *          major (C) format
     *     y (const unsigned char *): pointer to y data, of size m
     *     theta (float *): pointer to theta data, of size n*k, stored in row
     *          major (C) format
     *     m (size_t): number of examples
     *     n (size_t): input dimension
     *     k (size_t): number of classes
     *     lr (float): learning rate / SGD step size
     *     batch (int): SGD minibatch size
     *
     * Returns:
     *     (None)
     */

    /// BEGIN YOUR CODE
	// Non-owning view over theta, so operator-= updates it in place.
	auto weights = Matrix(n, k, theta);
	for (size_t start = 0; start < m; start += batch) {
		// The final minibatch may hold fewer than `batch` examples.
		const size_t rows = std::min(batch, m - start);
		// Matrix's ctor takes float*, but nothing below writes through
		// this view, so the const_cast is safe.
		const auto batch_x = Matrix(rows, n, const_cast<float *>(X + start * n));
		const auto onehot = Matrix::row_permutation(rows, k, y + start);

		// Softmax probabilities for the minibatch: normalize(exp(X @ theta)).
		auto probs = (batch_x * weights).exp().normalize();
		// SGD step: theta -= (lr / rows) * X^T @ (probs - one_hot(y)).
		weights -= batch_x.transpose() * (std::move(probs) - onehot) * (lr / rows);
	}
    /// END YOUR CODE
}


/**
 * This is the pybind11 code that wraps the function above.  Its only role is
 * to wrap the function above in a Python module, and you do not need to make
 * any edits to the code.
 */
// Python binding: exposes softmax_regression_epoch_cpp(X, y, theta, lr, batch).
// c_style arrays guarantee row-major, contiguous buffers, matching the raw
// pointer arithmetic in the C++ implementation above.
PYBIND11_MODULE(simple_ml_ext, m) {
    m.def("softmax_regression_epoch_cpp",
    	[](py::array_t<float, py::array::c_style> X,
           py::array_t<unsigned char, py::array::c_style> y,
           py::array_t<float, py::array::c_style> theta,
           float lr,
           int batch) {
        // Each .request() constructs a fresh py::buffer_info; request each
        // buffer once instead of re-requesting it per argument (the original
        // called X.request() three times and theta.request() twice).
        const py::buffer_info X_buf = X.request();
        const py::buffer_info y_buf = y.request();
        const py::buffer_info theta_buf = theta.request();
        softmax_regression_epoch_cpp(
        	static_cast<const float*>(X_buf.ptr),
            static_cast<const unsigned char*>(y_buf.ptr),
            static_cast<float*>(theta_buf.ptr),
            X_buf.shape[0],        // m: number of examples
            X_buf.shape[1],        // n: input dimension
            theta_buf.shape[1],    // k: number of classes
            lr,
            batch
           );
    },
    py::arg("X"), py::arg("y"), py::arg("theta"),
    py::arg("lr"), py::arg("batch"));
}
