/*
* Copyright (c) 2010 Michael Collins
*
* This file is part of TerraFirma.
*
* TerraFirma is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* TerraFirma is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with TerraFirma.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "mpi.h"
#include "context.h"
#include "network_pipe.h"
#include <boost/thread/locks.hpp>
#include <map>

namespace engine
{
    mpi::~mpi()
    {
        //Nothing is released here; the mpi object does not free its
        //registered queues or the network pipe in this destructor.
    }

    //////*** Message Passing Interface ***/////
    bool mpi::get_task(task_id tid, message_queue<>** const ptr)
    {
        //Look up the queue registered under 'tid'; on success write it
        //through 'ptr' and return true, otherwise leave *ptr alone and
        //return false.
        std::map<task_id, message_queue<>*>::const_iterator entry = listeners.find(tid);
        if(entry != listeners.end()) {
            *ptr = entry->second;
            return true;
        }
        return false; //No listener registered under that id
    }

    message_queue<>* mpi::get_task(task_id tid)
    {
        //Pointer-returning variant of get_task: NULL signals "no listener".
        std::map<task_id, message_queue<>*>::const_iterator entry = listeners.find(tid);
        return (entry != listeners.end()) ? entry->second : NULL;
    }

    bool mpi::get_task_exists(const task_id tid)
    {
        //std::map holds at most one entry per key, so count() is 0 or 1;
        //a nonzero count means a queue is registered under this id.
        return listeners.count(tid) != 0;
    }


    bool mpi::add_listener(message_queue<>* queue, const task_id tid)
    {
        //Register 'queue' under 'tid'.  std::map::insert refuses to overwrite
        //an existing mapping and reports that outcome through .second, so one
        //lookup replaces the original exists-check-then-assign pair, which
        //walked the tree twice for every registration.
        //Returns false on an id collision (tid already taken), true otherwise.
        return listeners.insert(
            std::map<task_id, message_queue<>*>::value_type(tid, queue)).second;
    }

    bool mpi::remove_listener(const task_id tid)
    {
        //erase() reports how many entries were removed (0 or 1 for a map);
        //make the size-to-bool conversion explicit.
        return listeners.erase(tid) != 0;
    }

    /** Enqueue functions **/
    void mpi::enqueue_local(const message_header* msg)
    {
        message_queue<>* queue;
        if( get_task(msg->dst_task, &queue) )
            queue->enqueue(msg);
    }

    void mpi::enqueue_remote(hostid dst_host, const message_header* msg)
    {
        //A message addressed to this host never touches the network
        if(dst_host == context::Instance().hostid) {
            enqueue_local(msg);
            return;
        }
        //Otherwise hand it to the network pipe, when one is attached;
        //without a pipe the message is dropped.
        if(netpipe)
            netpipe->enqueue(dst_host, msg);
    }

    void mpi::enqueue_shared(const message_header* msg)
    {
        //Shared messages always reach the local listeners...
        enqueue_local(msg);
        //...and, unless this build IS the server (SERVING defined),
        //a copy is also sent upstream to the server host.
        #ifndef SERVING
        enqueue_server(msg);
        #endif
    }

    void mpi::enqueue_server(const message_header* msg)
    {
        enqueue_remote(0, msg);
    }

    void mpi::broadcast_local(const message_header* msg)
    {
        std::map<task_id, message_queue<>*>::iterator it;
        for(it = listeners.begin(); it != listeners.end(); it++)
            it->second->enqueue_header(msg);
    }

    void mpi::broadcast_close()
    {
        message_header msg(0, BCAST_CLOSE);
        //msg.type = message_header::BCAST_CLOSE;
        broadcast_local(&msg);
    }

    void mpi::broadcast_pause()
    {
        message_header msg(0, BCAST_PAUSE);
        //msg.type = message_header::BCAST_PAUSE;
        broadcast_local(&msg);
    }

    #ifdef SERVING
    void mpi::enqueue_all_remote_hosts(const message_header* msg)
    {
        static_heap<host>::const_iterator it = context::Instance().hosts.begin();
        for(; it < context::Instance().hosts.end(); it++) {
            enqueue_remote((*it).id, msg);
        }
    }

    void mpi::enqueue_all_other_hosts(hostid expt_host, const message_header* msg)
    {
        //Send 'msg' to every known host except 'expt_host' (typically the
        //sender, to avoid echoing a message back to its origin).
        //Fixes vs. original: compare iterators with != instead of < (valid
        //for any iterator category), pre-increment to avoid copying the
        //iterator each pass, and hoist the invariant end() out of the loop.
        static_heap<host>::const_iterator it = context::Instance().hosts.begin();
        const static_heap<host>::const_iterator stop = context::Instance().hosts.end();
        for(; it != stop; ++it) {
            if((*it).id != expt_host)
                enqueue_remote((*it).id, msg);
        }
    }
    #endif

    //Global singleton (totally not a singleton yet, nor does it really need to be)
    //NOTE(review): this is a namespace-scope global with static storage
    //duration, not an enforced singleton; its construction order relative to
    //globals in other translation units is unspecified.
    mpi MPI;
};

