/*
 * TODO
 * . create a new rta_fix_point
 * . use edf_interfering_workload
 * . let the code update task response times
 * . test faster variant for self_interf
 * . use response field in ts
 *
 */
#include <assert.h>

#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <vector>

#include "tasks.h"
#include "schedulability.h"
#include "task_io.h"

#include "edf/rta.h"

using namespace std;

static void rta_interfering_workload(const Task &t_i,
                                     unsigned long response_time,
                                     unsigned long slack_i,
                                     integral_t &inf)
{
    integral_t interval = response_time + t_i.get_deadline() - t_i.get_wcet() -
	    slack_i;

    inf = t_i.get_wcet() * ( interval / t_i.get_period() );

    interval %= t_i.get_period();
    if (interval > t_i.get_wcet())
        inf += t_i.get_wcet();
    else
        inf += interval;
}

// Eq. 8: bound on the workload of task t_i that can interfere in a window
// of length response_time, using t_i's stored response-time bound
// (Task::get_response()) as the carry-in term.
static void cool_rta_interfering_workload(const Task &t_i,
                                          unsigned long response_time,
                                          integral_t &inf)
{
    /* Window in which jobs of t_i can interfere. */
    integral_t interval = response_time - t_i.get_wcet() +
        t_i.get_response();

    /* Jobs that fit entirely in the window (implicit floor). */
    inf = t_i.get_wcet() * ( interval / t_i.get_period() );

    /* Partial contribution of the last, possibly truncated job.
     * (Removed the leftover debug print to stdout that fired on every
     * call from the fixpoint iteration.) */
    interval %= t_i.get_period();
    if (interval > t_i.get_wcet())
        inf += t_i.get_wcet();
    else
        inf += interval;
}

// Eq. 9, but to update
static void edf_interfering_workload(const Task &t_i,
                                     const Task &t_k,
                                     unsigned long slack_i,
                                     integral_t &inf)
{
    /* implicit floor in integer division */
	unsigned long njobs =
		(t_k.get_deadline() - t_i.get_deadline()) / t_i.get_period() +
		1;

    inf  = njobs;
    inf *= t_i.get_wcet();

    integral_t tmp = t_k.get_deadline() - njobs * t_i.get_period() -
	    slack_i;
    if (tmp < 0)
	    tmp = 0;

    if (tmp > t_i.get_wcet())
	    inf += t_i.get_wcet();
    else
	    inf += tmp;
}

// One iteration of the response-time recurrence for task k: given the
// current estimate 'response', compute the next estimate from the
// interference bounds of all other tasks.  Returns false if the result
// overflows an unsigned long (the response time then certainly exceeds
// the deadline).
bool RTAGedf::response_estimate(unsigned int k,
                                const TaskSet &ts,
                                unsigned long const *slack,
                                unsigned long response,
                                unsigned long &new_response)
{
    integral_t other_work = 0;
    integral_t inf_edf, inf_rta;
    /* No single task needs to contribute more than this for the
     * estimate to reach 'response'. */
    integral_t inf_bound = response - ts[k].get_wcet() + 1;

    for (unsigned int i = 0; i < ts.get_task_count(); i++)
    {
        if (i == k)
            continue;
        edf_interfering_workload(ts[i], ts[k], slack[i], inf_edf);
        rta_interfering_workload(ts[i], response, slack[i], inf_rta);
        /* Use the tightest of the three bounds. */
        other_work += min(min(inf_edf, inf_rta), inf_bound);
    }

    other_work /= m;                /* implicit floor */
    other_work += ts[k].get_wcet();

    if (!other_work.fits_ulong_p())
    {
        /* overflowed => response time > deadline */
        return false;
    }

    new_response = other_work.get_ui();
    return true;
}

// Eq. 6 modified for arbitrary deadlines (see Marko's paper notes).
// One iteration of the response-time recurrence for task k, using each
// task's stored response-time bound (Task::get_response()) instead of
// slack values.  Returns false on overflow.
bool RTAGedf::cool_response_estimate(unsigned int k,
                                const TaskSet &ts,
                                unsigned long response,
                                unsigned long &new_response)
{
    const unsigned long wcet   = ts[k].get_wcet();
    const unsigned long period = ts[k].get_period();

    integral_t other_work = 0;
    integral_t inf_rta;

    /* Number of jobs of task k itself that can execute in a window of
     * length 'response': ceil(response / period).  Computed with
     * integer arithmetic — the original std::ceil() was applied to an
     * already-floored integer division and therefore had no effect
     * (e.g. it yielded 0 whenever response < period, dropping the
     * task's own WCET from the estimate). */
    integral_t njobs = response / period;
    if (response % period != 0)
        njobs += 1;

    /* Self-interference of task k within the window, and the resulting
     * cap on any single task's contribution. */
    integral_t self_interf =
        (response / period) * wcet +
        min(wcet, response % period);
    integral_t inf_bound = response - self_interf + 1;

    /* (The disabled EDF-based bound was dropped: it no longer matched
     * edf_interfering_workload's signature and left inf_edf unused.) */
    for (unsigned int i = 0; i < ts.get_task_count(); i++)
        if (k != i)
        {
            cool_rta_interfering_workload(ts[i], response, inf_rta);
            other_work += min(inf_rta, inf_bound);
        }

    other_work /= m; /* implicit floor */
    other_work += njobs * wcet;

    if (other_work.fits_ulong_p())
    {
        new_response = other_work.get_ui();
        return true;
    }
    else
    {
        /* overflowed => response time > deadline */
        return false;
    }
}


// Iterate the response-time recurrence for task k until it converges,
// overflows, or provably exceeds the deadline.  On success, 'response'
// holds the fixpoint.  Convergence is accelerated by forcing steps of
// at least min_delta (capped at the deadline).
bool RTAGedf::rta_fixpoint(unsigned int k,
                           const TaskSet &ts,
                           unsigned long const *slack,
                           unsigned long &response)
{
    const unsigned long deadline = ts[k].get_deadline();

    unsigned long last = ts[k].get_wcet();
    bool ok = response_estimate(k, ts, slack, last, response);

    while (ok && last != response && response <= deadline)
    {
        bool tiny_step = last < response && response - last < min_delta;
        if (tiny_step)
            /* Skip ahead to avoid creeping toward the fixpoint. */
            last = min(last + min_delta, deadline);
        else
            last = response;
        ok = response_estimate(k, ts, slack, last, response);
    }

    return ok && response <= deadline;
}

// fixed-point iteration for k-th task
bool RTAGedf::cool_rta_fixpoint(unsigned int k,
                           const TaskSet &ts,
                           unsigned long &new_response)
{
    unsigned long last;
    bool ok;

    last = ts[k].get_wcet();
    cout<<"invoking first cool_response_estimate, k = "<<k<<endl;

    ok = cool_response_estimate(k, ts, last, new_response);
    cout<<"after first cool_response_estimate, k = "<<k<<endl;

    while (ok && last != new_response)
    {
	    if (last > new_response) {
		    cout<<"Error"<<endl ;
		    exit(1) ;
	    }
            last = new_response;
	    cout<<"invoking cool_response_estimate, k = "<<k<<endl;

	    ok = cool_response_estimate(k, ts, last, new_response);
    }
    cout<<"fixpoint finished"<<endl;


    return ok && new_response <= ts[k].get_deadline();
}

bool RTAGedf::is_schedulable(const TaskSet &ts, bool check_preconditions)
{
    if (check_preconditions)
	{
        if (!(ts.has_only_feasible_tasks()
              && ts.is_not_overutilized(m)
              && ts.has_only_constrained_deadlines()))
            return false;
        if (ts.get_task_count() == 0)
            return true;
    }

    unsigned long* slack = new unsigned long[ts.get_task_count()];

    for (unsigned int i = 0; i < ts.get_task_count(); i++)
        slack[i] = 0;

    unsigned long round = 0;
    bool schedulable = false;
    bool updated     = true;

    while (updated && !schedulable && (max_rounds == 0 || round < max_rounds))
    {
        round++;
        schedulable = true;
        updated     = false;
        for (unsigned int k = 0; k < ts.get_task_count(); k++)
        {
            unsigned long response, new_slack;
            if (rta_fixpoint(k, ts, slack, response))
            {
                new_slack = ts[k].get_deadline() - response;
                if (new_slack != slack[k])
                {
                    slack[k] = new_slack;
                    updated = true;
                }
            }
            else
            {
                schedulable = false;
            }
        }
    }

    return schedulable;
}

// Fixed-point iteration for the whole task set: repeatedly recompute
// each task's response-time bound (stored in the Task itself) until no
// bound changes anymore.  Returns true iff every final bound is within
// the corresponding deadline.
// NOTE(review): for an unschedulable set, termination of the outer loop
// relies on the per-task fixpoints eventually overflowing — confirm.
bool RTAGedf::cool_response_times(TaskSet &ts, bool check_preconditions)
{
   if (check_preconditions)
	{
        if (!(ts.has_only_feasible_tasks()
              && ts.is_not_overutilized(m)
              && ts.has_only_constrained_deadlines()))
            return false;
        if (ts.get_task_count() == 0)
            return true;
    }

    /* Seed each response-time bound with the task's WCET. */
    for (unsigned int i = 0; i < ts.get_task_count(); i++)
        ts[i].set_response(ts[i].get_wcet());

    bool schedulable = false;
    bool updated     = true;

    while (updated)
    {
        schedulable = true;
        updated     = false;
        for (unsigned int k = 0; k < ts.get_task_count(); k++)
        {
            /* Initialize with the current bound: if the fixpoint fails
             * before producing an estimate (overflow path), the
             * original code read new_response uninitialized below. */
            unsigned long new_response = ts[k].get_response();

            if (!cool_rta_fixpoint(k, ts, new_response))
                schedulable = false;

            if (new_response != ts[k].get_response())
            {
                ts[k].set_response(new_response);
                updated = true;
            }
        }
    }

    return schedulable;
}
