#include <cmath>   // pow
#include <cstdlib> // rand, srand
#include <ctime>   // time
#include <iostream>
#include <numeric> // accumulate
#include <vector>

#include "../include/tensor.h"

using namespace std;

// Toy demo: build a tiny hand-wired scalar expression graph out of Value
// nodes and run gradient descent on it. The "loss" is deliberately
// meaningless — the point is to exercise forward/backward/update.
// NOTE(review): assumes Value (from tensor.h) records the expression graph
// so backward() can fill each node's .grad — confirm against tensor.h.
void train()
{
    // config
    int n_steps = 200; // number of gradient-descent iterations

    // one-dimen network: a fixed chain of scalar ops, each node labeled
    // for debugging/inspection.
    Value a(0.11, "a");
    Value b(0.35, "b");
    Value c = a + b;
    c.label = "c";
    Value d = a * c;
    d.label = "d";
    Value e = d - b;
    e.label = "e";
    Value f = 2 + e; // mixes a plain double into the graph
    f.label = "f";
    Value g = f.tanh();
    g.label = "g";
    Value h = 1.1 * g;
    h.label = "h";
    // NOTE(review): intermediate nodes (c..h) are treated as trainable
    // parameters alongside the leaves a, b — their .data is updated
    // directly, so node values become inconsistent with the recorded
    // graph. Acceptable here since the example is explicitly nonsensical.
    vector<Value *> params{&a, &b, &c, &d, &e, &f, &g, &h};

    // train
    for (int step = 0; step < n_steps; step++)
    {
        // forward
        Value loss = h * d; // A no-sense example of a loss function
        // backward: zero stale gradients before accumulating new ones
        for (auto &p : params)
            p->grad = 0.0;
        loss.backward();
        // update: SGD with exponentially decaying learning rate
        double lr = 3e-4 * pow(0.99, step);
        for (auto &p : params)
            p->data += -lr * p->grad;
        if (step % 20 == 0)
            cout << step << "/" << n_steps << ": loss=" << loss.data << endl;
    }
}

// Trains a small 3-16-16-1 MLP (Layer from tensor.h) on a hard-coded
// 6-sample dataset with an L1-style loss, using per-step mini-batches of 2.
// Prints the mean batch loss every 10 steps.
void train_model()
{
    // TODO tanh layer
    // model: three fully-connected layers, 3 -> 16 -> 16 -> 1
    Layer layer1(3, 16, true, "layer1-");
    Layer layer2(16, 16, true, "layer2-");
    Layer layer3(16, 1, true, "layer3-");
    vector<Layer> model{layer1, layer2, layer3};
    // Collect every trainable parameter so we can zero grads / update them.
    vector<Value *> params;
    for (auto &layer : model)
        for (auto &p : layer.parameters())
            params.push_back(p);
    int n_params = params.size();
    cout << "n_params=" << n_params << endl;

    // data
    // TODO(mx) add validate data and loss
    // add data_loader
    // NOTE(review): these heap-allocated Values are never freed; fine for a
    // demo, but a real data loader should own them.
    vector<vector<Value *>> X_train = {
        {new Value(2.0), new Value(3.0), new Value(-1)},
        {new Value(2.0), new Value(1.0), new Value(-0.3)},
        {new Value(4.2), new Value(3.0), new Value(7)},
        {new Value(7.0), new Value(1.0), new Value(1)},
        {new Value(1.0), new Value(2.0), new Value(1)},
        {new Value(3.0), new Value(4.0), new Value(0.2)}};
    vector<vector<Value>> y_train = {{1}, {-1}, {1}, {1}, {-1}, {-1}};

    int batch_size = 2;
    vector<Value *> y_pred = {};
    Value loss;
    // train
    int n_steps = 1000;
    srand(time(NULL));
    for (int step = 0; step < n_steps; step++)
    {
        // Shuffle X_train/y_train in lockstep (Fisher-Yates).
        // BUGFIX: the previous code drew j = rand() % batch_size, which only
        // ever swapped rows into the first two slots — most of the dataset
        // never reached a batch uniformly. Drawing j in [0, i] and walking
        // i from the back yields an unbiased permutation.
        for (int i = (int)X_train.size() - 1; i > 0; i--)
        {
            int j = rand() % (i + 1);
            swap(X_train[i], X_train[j]);
            swap(y_train[i], y_train[j]);
        }
        vector<double> batch_loss;
        for (int i = 0; i < batch_size; i++)
        {
            vector<Value *> xs = X_train[i];
            vector<Value> ys = y_train[i];
            // forward: feed the sample through each layer in turn
            for (auto &layer : model)
                xs = layer(xs);
            y_pred = xs;
            // one-click loss: |y_pred - y| built via a branch so the graph
            // only ever contains a plain subtraction (one output dim only)
            double loss_data = y_pred[0]->data - ys[0].data;
            if (loss_data < 0)
                loss = Value(ys[0] - *y_pred[0]); // one dim
            else
                loss = Value(*y_pred[0] - ys[0]);
            batch_loss.push_back(loss.data);

            // backward: zero stale gradients before accumulating new ones
            for (auto &p : params)
            {
                p->grad = 0.0;
            }
            loss.backward();

            // update: SGD with exponentially decaying learning rate
            double lr = 0.0006 * pow(0.99, step);
            for (auto &p : params)
            {
                // cout << p->label << " grad: " << p->grad << endl;
                p->data += -lr * p->grad;
            }
        }
        double mean_loss = accumulate(batch_loss.begin(), batch_loss.end(), 0.0) / batch_size;
        if (step % 10 == 0)
            cout << step << "/" << n_steps << ": loss=" << mean_loss << endl;
    }
}

int main()
{
    // Entry point: run the scalar-graph demo. The MLP demo is left
    // disabled below; flip the comments to switch.
    train();
    // train_model();
    return 0;
}