#include "core/main.h"

#include <numeric>

// TODO: Stick this stuff into a namespace

// Dot (inner) product of two equal-length vectors.
// Asserts (debug builds only) that the two vectors have the same size.
inline double dot(const std::vector<double> &v1, const std::vector<double> &v2)
{
    assert(v1.size() == v2.size());
    // std::inner_product accumulates left-to-right starting from 0.0,
    // matching the original index loop exactly.
    return std::inner_product(v1.begin(), v1.end(), v2.begin(), 0.0);
}

// Euclidean (L2) length of v: sqrt of the sum of squared components.
inline double norm(const std::vector<double> &v)
{
    double sumSquares = 0.0;
    for (double component : v) {
        sumSquares += component * component;
    }
    return sqrt(sumSquares);
}

#ifdef USE_EIGEN
// Least-squares fit: finds theta minimising |X*theta - Y|^2, where row i of
// X is xs[i] and Y(i) = ys[i]. Uses Eigen's Jacobi SVD with thin U and V,
// which also handles rank-deficient design matrices. Assumes xs is non-empty
// and every xs[i] has length xs[0].size() -- neither is checked here.
// Only compiled when USE_EIGEN is defined (see surrounding #ifdef).
// NOTE(review): vector<const vector<double> > is not a standard-conforming
// element type (allocators require a non-const value type) -- consider
// vector<vector<double> > file-wide.
inline vector<double> linearRegressionEigen(const vector<const vector<double> > &xs, const vector<double> &ys)
{
    UINT m = xs.size();    // number of samples (rows)
    UINT n = xs[0].size(); // number of features (columns)

    // Copy the samples into Eigen's dense matrix/vector types
    Eigen::MatrixXd X(m, n);
    Eigen::VectorXd Y(m);
    for (UINT i = 0; i < m; i++) {
        Y(i) = ys[i];
        for (UINT j = 0; j < n; j++) {
            X(i, j) = xs[i][j];
        }
    }

    // SVD-based least-squares solve, then copy theta back out as a std vector
    Eigen::VectorXd theta = X.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Y);
    vector<double> ret;
    for (UINT i = 0; i < n; i++) {
        ret.push_back(theta(i));
    }
    return ret;
}
#endif

// Linear regression delegated to DenseMatrix<double>::LinearRegression.
// xs: m feature vectors, each of length n; ys: the m corresponding targets.
// Returns the fitted parameter vector (length n).
// Unlike the original, this validates its inputs the same way
// linearRegressionBatch does: non-empty xs, uniform row lengths, and one
// target per sample (all debug-build asserts).
inline vector<double> linearRegressionMatrix(const vector<const vector<double> > &xs, const vector<double> &ys)
{
    assert(!xs.empty());        // xs[0] below would be UB on an empty input
    UINT m = xs.size();
    UINT n = xs[0].size();
    assert(ys.size() == m);     // one target value per sample

    // Pack the samples into an m x n design matrix
    DenseMatrix<double> X(m, n);
    for (UINT i = 0; i < m; i++) {
        assert(xs[i].size() == n);  // every sample must have the same dimensionality
        for (UINT j = 0; j < n; j++) {
            X(i, j) = xs[i][j];
        }
    }

    return DenseMatrix<double>::LinearRegression(X, ys);
}

// Performs linear regression using batch gradient descent (LMS rule).
// xs:      m feature vectors, each of length n (all must be the same length)
// ys:      the m corresponding target values
// alpha:   learning rate; too large a value makes the iteration diverge
// tol:     convergence threshold -- stop once |dtheta| drops below it
//          (defaults to 0.1, the value previously hard-coded)
// maxIter: safety cap so a diverging run terminates instead of spinning
//          forever in the original's while(1)
// Returns the fitted parameter vector theta (length n).
// NOTE(review): vector<const vector<double> > is not a standard-conforming
// element type; consider vector<vector<double> > file-wide.
inline vector<double> linearRegressionBatch(const vector<const vector<double> > &xs, const vector<double> &ys, double alpha, double tol = 0.1, UINT maxIter = 1000000)
{
    cout << "Entering linear regression..." << endl;
    assert(!xs.empty());        // xs[0] below would be UB on an empty input
    UINT m = xs.size();
    UINT n = xs[0].size();

    cout << "n = " << n << endl;
    cout << "m = " << m << endl;

    // Check that all x vectors are the same size and that there is one
    // target per sample
    for (UINT i = 0; i < m; i++) {
        assert(xs[i].size() == n);
    }
    assert(ys.size() == m);

    vector<double> theta(n, 0);

    for (UINT iter = 0; iter < maxIter; iter++) {
        // Gradient of the squared-error cost, summed over all m samples
        vector<double> dtheta(n, 0);
        for (UINT i = 0; i < m; i++) {
            double ye = ys[i] - dot(theta, xs[i]);  // residual for sample i
            for (UINT j = 0; j < n; j++) {
                dtheta[j] += ye * xs[i][j];
            }
        }
        for (UINT j = 0; j < n; j++) {
            theta[j] += alpha * dtheta[j];
        }
        double ndt = norm(dtheta);
        cout << "|dtheta| = " << ndt << endl;
        if (ndt < tol) return theta;
    }

    // Did not converge -- return the current estimate rather than hanging
    cout << "Warning: linearRegressionBatch did not converge within " << maxIter << " iterations" << endl;
    return theta;
}

/*
Disabled: this test is broken as written -- ys gets a value pushed before the
first xs entry (leaving ys one element longer than xs), and x[1] below indexes
past the end of the one-element vector x. Fix both before re-enabling.
inline void testLinearRegression()
{
    vector<const vector<double> > xs;
    vector<double> ys;

    vector<double> x;
    x.push_back(1.0);
    ys.push_back(0.0);

    xs.push_back(x);
    ys.push_back(0.0);

    x[1] = 1.0;
    xs.push_back(x);
    ys.push_back(1.0);

    xs.push_back(x);
    ys.push_back(2.0);

    vector<double> theta = linearRegressionBatch(xs, ys, 0.05);
}
*/

inline void testLinearRegressionMatrix()
{
    const int n = 100;
    const int dimensions = 2;
    const double noise = 1.0;

    const double theta0 = 10.0;
    const double theta1 = -3.0;

    DenseMatrix<double> X(n, dimensions);
    vector<double> Y(n);
    
    for(UINT i = 0; i < n; i++)
    {
        double x0 = pmrnd() * 10.0;
        double x1 = pmrnd() * 10.0;

        X[i][0] = x0;
        X[i][1] = x1;

        Y[i] = x0 * theta0 + x1 * theta1 + pmrnd() * noise;
    }

    vector<double> thetas = DenseMatrix<double>::LinearRegression(X, Y);
    cout << "theta0 expected:  " << theta0 << endl;
    cout << "theta0 estimated: " << thetas[0] << endl;
    cout << "theta1 expected:  " << theta1 << endl;
    cout << "theta1 estimated: " << thetas[1] << endl;
}