#include "utility.hpp"
#include <chrono>
#include <iostream>
using namespace std::chrono;

/// Split `s` into substrings separated by `delim`.
///
/// Always returns at least one element: splitting a string with no
/// occurrence of `delim` yields {s}, and adjacent delimiters produce
/// empty strings ("a,,b" -> {"a", "", "b"}).
///
/// @param s      the string to split (not modified)
/// @param delim  the separator; if empty, {s} is returned unchanged
/// @return       the pieces of `s`, in order
vector<string> split (const string &s, const string & delim) {
    vector<string> ret;

    // Guard: with an empty delimiter, find("") returns the search start
    // position forever and the original loop never terminated.
    if (delim.empty()) {
        ret.push_back(s);
        return ret;
    }

    // Use string::size_type, not unsigned int: `0U` would truncate
    // positions on 64-bit platforms for strings larger than 4 GiB.
    string::size_type start = 0;
    auto end = s.find(delim);
    while (end != std::string::npos)
    {
        ret.push_back(s.substr(start, end - start));
        start = end + delim.length();
        end = s.find(delim, start);
    }

    // end == npos here, so substr takes everything from `start` to the end.
    ret.push_back(s.substr(start, end - start));
    return ret;
}

/// Benchmark driver: boots Lace + Sylvan, builds a Model from `network`,
/// feeds it the initial rules, and prints per-phase timings to stdout.
/// Tears down Sylvan and Lace before returning.
void test(Network &network) {
    // Start the Lace work-stealing scheduler; 0 workers = one per core.
    const int workerCount = 0; // auto-detect
    lace_init(workerCount, 1000000);
    lace_startup(0, NULL, NULL);

    // use at most 512 MB, nodes:cache ratio 2:1, initial size 1/32 of maximum
    LACE_ME;

    // Node-table / operation-cache bounds (min nodes, max nodes, min cache, max cache).
    sylvan_set_sizes(1LL<<20, 1LL<<24, 1LL<<18, 1LL<<22);
    // sylvan_set_limits(512*1024*1024, 1, 5);
    sylvan_init_package();
    sylvan_set_granularity(3); // granularity 3 is decent value for this small problem - 1 means "use cache for every operation"
    sylvan_init_mtbdd();

    /* ... do stuff ... */
    // Rule count as double so per-update averages below divide cleanly.
    const double ruleCount = network.initialRules.size();
    double elapsedUs;

    // Phase 1: model construction.
    const auto tStart = high_resolution_clock::now();
    Model model(network);
    const auto tBuilt = high_resolution_clock::now();
    elapsedUs = duration_cast<microseconds>(tBuilt - tStart).count();
    cout << "Initialization " << elapsedUs << " us in total" << endl;

    // Phase 2: turn the initial rules into pending changes.
    model.insertMiniBatch(network.initialRules);
    const auto tInserted = high_resolution_clock::now();
    elapsedUs = duration_cast<microseconds>(tInserted - tBuilt).count();
    cout << "Rules => Changes " << elapsedUs / ruleCount << " us per-update" << endl;
    cout << "#Changes " << model.changeSize() << endl;

    // Phase 3: apply the queued changes.
    model.consumeChanges();
    const auto tConsumed = high_resolution_clock::now();
    elapsedUs = duration_cast<microseconds>(tConsumed - tInserted).count();
    cout << "Changes => ECs " << elapsedUs / ruleCount << " us per-update" << endl;
    cout << "#EC " << model.predicateSize() << endl;

    // Dump Sylvan's internal counters, then shut everything down.
    sylvan_stats_report(stdout);
    sylvan_quit();
    lace_exit();
}