#include <fstream>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

#include <mpi.h>

#include <boost/dynamic_bitset.hpp>

#include "s-clique_free.cpp"
#include "IO.cpp"

using namespace std;
using namespace boost;

int main(int argc, char** argv)
{

    int id, nproc;
    int root = 0;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    if(argc != 5) {
        cout<<"Usage: ./filename 'graph_name' 'checkpoint_name s - int k - int"<<endl;
        return 0;
    }

    int s = atoi(argv[3]);
    int k = atoi(argv[4]);
    if(s < 3) {
        cout<<"s must be a positive integer!"<<endl;
        return 0;
    }

    string filename( argv[1] );
    string checkpointname( argv[2] );

    vector<dynamic_bitset<> > graph;
    int nodes, edges;
    readgraph_bitset(filename, graph, nodes, edges);
    vector<pair<int, int> > edges_to_delete;
    vector<pair<int, int> > deleted_edges;
    vector<dynamic_bitset<> > quasi_colors;
    string checkpoint_file = loadcheckpoint(quasi_colors, edges_to_delete, deleted_edges, checkpointname, s, nodes, k);
    ofstream fout;
    fout.open(checkpoint_file.c_str(), std::ofstream::app);
    fout.seekp(0, fout.end);
    ofstream result;
    checkpointname += ".log";
    checkpointname.insert(0, "parallel_logs/");
    result.open(checkpointname.c_str(), std::ofstream::app);
    MPI_Barrier(MPI_COMM_WORLD);
    /**
     * A root várakozik a kérésekre, majd ha kapott egyet, akkor
     * kiad egy feladatot.
     * Addig vár a kérésekre, amíg van feladat, vagy valamelyik slave azt nem mondta,
     * hogy talált k-2 klikket.
     * Ha nem találtak k-2 klikket és elfogytak a feladatok, akkor minden slave még egyszer
     * kérni fog a mastertől, ekkor a master elküldi a leállító feltételt: feladat index helyett -1et.
     * Ezzel a slave-ek kilépnek a ciklusukból és leállnak.
     * Ha valamelyik slave nem az id-jával küldött kérést, az jelzi a masternek, hogy a slave
     * talált egy k-2 klikket, ilyenkor MPI_Abort()-al le lehet állítani az összes szolgát.
     */
    if(id == root) {
        double start_root, end_root, start_tasks, end_tasks;
        start_root = MPI_Wtime();
        int proc_id;
        int index = 0;
        result<<endl<<endl<<"------------------"<<checkpointname<<"-----------------"<<endl<<endl;
        if(deleted_edges.size()) {
            for(int i = 0; i < edges_to_delete.size(); ++i) {
                for(int j = 0; j < deleted_edges.size(); ++j) {
                    if(edges_to_delete[i] == deleted_edges[j]) {
                        edges_to_delete[i].first = -1;
                        edges_to_delete[i].second = -1;
                    }
                }
            }
        }

        int num_of_tasks = edges_to_delete.size();
        cout<<"Num of tasks: "<<num_of_tasks<<endl;
        start_tasks = MPI_Wtime();
        do {
            MPI_Recv(&proc_id, 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &status);
            if(proc_id > 0) {
                while(edges_to_delete[index].first < 0) {
                    cout<<index++<<endl;
                }
                MPI_Send(&index, 1, MPI_INT, proc_id, 2, MPI_COMM_WORLD);
                ++index;
            } else {
                end_root = MPI_Wtime();
                end_tasks = MPI_Wtime();
                result<<" root all time: "<<end_root-start_root<<"task time: "<<end_tasks-start_tasks<<endl;
                cout<<" root all time: "<<end_root-start_root<<"task time: "<<end_tasks-start_tasks<<endl;
                MPI_Abort(MPI_COMM_WORLD, proc_id);
            }

        } while(index < num_of_tasks);
        index = -1;

        for(int i = 1; i < nproc; ++i) {
            MPI_Recv(&proc_id, 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &status);
            if(proc_id < 0) {
                end_root = MPI_Wtime();
                end_tasks = MPI_Wtime();
                result<<" root all time: "<<end_root-start_root<<"task time: "<<end_tasks-start_tasks<<endl;
                cout<<" root all time: "<<end_root-start_root<<"task time: "<<end_tasks-start_tasks<<endl;
                MPI_Abort(MPI_COMM_WORLD, proc_id);
            }
            MPI_Send(&index, 1, MPI_INT, proc_id, 2, MPI_COMM_WORLD);
            cout<<i<<" : "<<nproc<<endl;
        }
        end_tasks = MPI_Wtime();
        end_root = MPI_Wtime();
        cout<<"There is no "<<k<<" clique."<<" root all time: "<<end_root-start_root<<"task time: "<<end_tasks-start_tasks<<endl;
        result<<"There is no "<<k<<" clique."<<" root all time: "<<end_root-start_root<<"task time: "<<end_tasks-start_tasks<<endl;
    } else {
        int task_index = 0;
        int i = 0;
        dynamic_bitset<>  edge(nodes);
        vector<dynamic_bitset<> > stretched_graph;
        while(true) {

            MPI_Send(&id, 1, MPI_INT, root, 1, MPI_COMM_WORLD);
            MPI_Recv(&task_index, 1, MPI_INT, root, 2, MPI_COMM_WORLD, &status);

            if(task_index < 0) {
                break;
            }

            for( ; i < task_index; ++i) {
                graph[edges_to_delete[i].first][edges_to_delete[i].second] = 0;
                graph[edges_to_delete[i].second][edges_to_delete[i].first] = 0;
            }

            edge = (graph[edges_to_delete[task_index].first]) & (graph[edges_to_delete[task_index].second]);
            stretched_graph = stretch_graph(graph, edge);
            double start = MPI_Wtime();
            if(k_clique(stretched_graph, k-2)) {
                id *= -1;
                cout<<"K-klikk: edge: "<<edges_to_delete[task_index].first<<" : "<<edges_to_delete[task_index].second<<" index: "<<task_index<<endl;
                result<<"K-klikk: edge: "<<edges_to_delete[task_index].first<<" : "<<edges_to_delete[task_index].second<<" index: "<<task_index<<endl;
            }
            double end = MPI_Wtime();
            fout<<"e "<<edges_to_delete[task_index].first + 1<<" "<<edges_to_delete[task_index].second + 1<<endl;
            result<<"edge: "<<edges_to_delete[task_index].first<<" : "<<edges_to_delete[task_index].second<<" index: "<<task_index<<" time: "<<end-start<<endl;
            cout<<"edge: "<<edges_to_delete[task_index].first<<" : "<<edges_to_delete[task_index].second<<" index: "<<task_index<<" time: "<<end-start<<endl;
        }
        for(int i = 0; i < stretched_graph.size(); ++i) {
            ~stretched_graph[i];
        }
        ~edge;
    }
    for(int i = 0; i < graph.size(); ++i) {
        ~graph[i];
    }
    MPI_Finalize();
    return 0;
}
