package skewreduce.framework.scheduler;

import java.util.List;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

import skewreduce.framework.EaggPlan;
import skewreduce.framework.MergeOpTree;
import skewreduce.framework.SchedulerAdaptor;
import skewreduce.framework.physical.PhysicalOp;
import skewreduce.lib.Partition;


/**
 * A baseline scheduler for SkewReduce execution plans.
 *
 * <p>Completed operators are simply marked complete on their plan. Failed
 * operators are handled by type: sample/partition/finalize operators are
 * retried up to {@link #SCHEDULE_MAX_RETRY} times; a failed WORK operator is
 * replaced by a sample + re-optimize pair over the same input; a failed MERGE
 * operator is re-split if its plan pipelined past intermediate merges,
 * otherwise the plan is failed.
 *
 * <p>Not thread-safe: assumes the framework invokes callbacks serially.
 */
public class SimpleScheduler extends SchedulerAdaptor implements Configurable {
    /** Plan-level logger; {@code final} so the reference cannot be reassigned. */
    private static final Log LOG = LogFactory.getLog("PLAN");

    /** Configuration key: maximum retries per operator before it is failed (default 0). */
    protected final static String SCHEDULE_MAX_RETRY = "skewreduce.schedule.max.retry";

    /** Configuration supplied via {@link #setConf}; retained for {@link #getConf}. */
    private Configuration conf;
    /** Maximum retry count read from {@link #SCHEDULE_MAX_RETRY}; 0 means no retry. */
    private int maxRetry;

    @Override
    public Configuration getConf() {
        // BUG FIX: previously always returned null, violating the Hadoop
        // Configurable contract. Now returns the configuration given to setConf().
        return conf;
    }

    @Override
    public void setConf(Configuration conf) {
        this.conf = conf;
        maxRetry = conf.getInt(SCHEDULE_MAX_RETRY, 0);
    }

    /**
     * Marks a finished operator as complete on its owning plan.
     *
     * @param op the operator that finished successfully
     */
    @Override
    public void handleCompletion(PhysicalOp op) {
        EaggPlan plan = op.getPlan();
        plan.complete(op);

        // FIXME: do something smart for merge.
    }

    /**
     * Logs the error and attempts to reschedule the operator.
     *
     * @param op the operator that raised an error
     * @param ex the cause, logged with full stack trace
     */
    @Override
    public void handleError(PhysicalOp op, Throwable ex) {
        LOG.error(op.getID(), ex);
        doRetry(op);
    }

    /**
     * Retries {@code op} if it has not exhausted {@link #maxRetry} attempts;
     * otherwise marks it failed on its plan.
     *
     * @param op the operator to reschedule
     * @return {@code true} if the operator was rescheduled, {@code false} if it
     *         was marked failed
     */
    private boolean doRetry(PhysicalOp op) {
        boolean rescheduled = false;

        EaggPlan plan = op.getPlan();
        if (op.getRetryCount() < maxRetry) {
            op.incrRetryCount();
            LOG.info(String.format("Retrying %s : %d times", op.getID(), op.getRetryCount()));
            plan.retry(op);
            rescheduled = true;
        } else {
            plan.fail(op);
        }
        return rescheduled;
    }

    /**
     * Recovers from a failed WORK operator by inserting a sample operator and a
     * re-optimize operator over the same input, wired between the failed
     * operator's upstream and (single) downstream dependencies, then marks the
     * failed operator as failed.
     *
     * @param op the failed WORK operator; must have exactly one downstream
     *           dependent
     * @throws IllegalStateException if {@code op} has more than one (or zero)
     *         downstream operators
     */
    private void splitWork(PhysicalOp op) {
        EaggPlan plan = op.getPlan();
        List<PhysicalOp> downOps = plan.getDownStreamOps(op);
        if (downOps.size() != 1)
            throw new IllegalStateException(
                    String.format("%d operators depend on %s", downOps.size(), op.getID()));

        Partition context = op.getBound();
        if (LOG.isDebugEnabled())
            LOG.debug(context.toLongString());

        PhysicalOp sampleOp = plan.createSampleOp(context);
        PhysicalOp reoptOp = plan.createReoptimizeOp(context);

        // the replacement pair consumes the same input the failed op did
        sampleOp.addInputPath(op.getInputPath());
        reoptOp.addInputPath(op.getInputPath());

        // replicating dependencies: upstream edges are already satisfied,
        // so mark them COMPLETE to avoid re-running the producers.
        for (PhysicalOp upOp : plan.getUpStreamOps(op)) {
            plan.addDependency(upOp, sampleOp,
                    EaggPlan.Dependency.Status.COMPLETE);
        }
        plan.addDependency(sampleOp, reoptOp);
        plan.addDependency(reoptOp, downOps.get(0));

        plan.fail(op); // finally mark it as failed
    }

    /**
     * Attempts to recover a failed MERGE operator. Recovery is only possible
     * when the plan pipelined (skipped) intermediate merges; in that case the
     * pipelined edges are reset to WAITING, which re-triggers the upstream
     * merges. Otherwise the operator — and effectively the plan — is failed.
     *
     * @param op the failed MERGE operator
     */
    private void splitMerge(PhysicalOp op) {
        EaggPlan plan = op.getPlan();
        // first whether we can recover from this failure.
        // we can only do this if we have skipped several merges.
        if (plan.isPipelined(op)) {
            // So, we have some hope. Simply, split into two.
            // simply reset the pipelined edges as WAITING
            // the intermediate merges are already setup, thus
            // changing flag would trigger the upstream merges.
            plan.reset(op);
        } else {
            // Gaaah! we are doomed! :-(
            plan.fail(op);
        }
    }

    /**
     * Dispatches a failed operator to the recovery strategy appropriate for
     * its type: retry for SAMPLE/PARTITION/FINALIZE, split-and-reoptimize for
     * WORK, and pipeline-reset for MERGE. Other types (if any) are ignored.
     *
     * @param op the operator that failed
     */
    @Override
    public void handleFailed(PhysicalOp op) {
        LOG.info("Failed: " + op.getID());

        switch (op.getType()) {
        case SAMPLE:
        case PARTITION:
        case FINALIZE:
            doRetry(op);
            break;
        case WORK:
            splitWork(op);
            break;
        case MERGE:
            splitMerge(op);
            break;
        }
    }

    /** No-op: this scheduler does not react to killed operators. */
    @Override
    public void handleKilled(PhysicalOp op) {
    }

    /** No-op: this scheduler does not react to slow-running operators. */
    @Override
    public void handleRunningSlow(PhysicalOp op) {
    }

    /* Design notes for merge re-optimization.
     *
     * needs:
     *  given node X, get parent node Y
     *  given node Y, traverse all descendent and do computation per node.
     *
     *  termination condition of traverse
     *    status of node (or edge) is COMPLETE (or FAIL).
     *
     *  while traversing, cache per node statistics
     *
     *  interesting statistics are following:
     *    - input size (in bytes, in tuples?)
     *        simply aggregate
     *    - slowest progress (estimated time to complete)
     *      (estimating runtime)
     *        estimate based on statistics?
     *        loop counter?
     *    - number of chunks to be completed
     *
     *  critical condition
     *    based on data size?
     *    based on time metric?
     *    based on estimated memory utilization?
     *
     *  merging at higher level vs. merging at lower levels
     *    high level = wait time (1) + high merge time (2)
     *    low level = low merge time (3) + wait time (synch) (4) + merge time (5)
     *
     * (1) : from estimation
     * (2) : assuming merge time is linear to data size, this is summation
     * of all incoming data
     * (3) : from estimation
     * (4) : synchronization time (may be negligible?)
     * (5) : merge time assuming selectivity X
     *
     * each merge task has following cost
     *   startup/cleanup cost + I/O + processing
     *   = \alpha + O(N) + O(?)
     *
     * processing -- O(N) or O(N log N) or whatever...
     */

    /**
     * Re-optimization hook. Currently a pass-through: the given set is
     * returned unchanged. The commented-out sketch below outlines candidate
     * strategies for re-planning merge tasks (see design notes above).
     *
     * @param ops the set of schedulable operators
     * @return the (currently unmodified) set of operators to schedule
     */
    @Override
    public Set<PhysicalOp> reoptimize(Set<PhysicalOp> ops) {
        /*
        HashSet<PhysicalOp> mergeTasks = new HashSet<PhysicalOp>();
        for ( PhysicalOp op : ops ) {
            if ( op.getType() == LogicalOp.Type.MERGE ) {
                mergeTasks.add(op);
            }
        }

        ops.removeAll(mergeTasks);

        if ( !mergeTasks.isEmpty() ) {
            // check merge decision

            // if this is operator is ready to be scheduled,
            // we assume that this should work -- i.e., both down
            // stream operators completed and this operation should
            // success. Thus, we only need to check upstream operators
            // and whether they could run now.

            // assuming some size constraint, the highest level of merge
            // can occur where the size constraint is still satisfied.
            // as climing up the merge hierachy, the size doubles.

            // strategy 1. bottom-up. greedy. launch as a merge is ready
            //
            // otherwise, if current violates size constraint, launch.
            //
            // strategy 2.
            //   wait until parent node is ready (i.e., all sibilings
            //   are ready). then check whether the size constraints meet.
            //   if so, wait one-level up parents ready, else launch
            //   lower-level. -- synchronized. wait until parent level
            //   merge is impossible, then go
            //
            // strategy 3.
            //   estimate the size. if it does not fit, go.
            //   whenever a merge is firing, eo and 
            //
            // strategy 4.
            //   top-down. greedy. try merge at top-level then fall back
            //
            // strategy 5.
            //   top-down. cost. based on rough selectivity.
            //
            //

            // come up with new set
        }

        ops.addAll(mergeTasks); // new merge tasks
        */

        return ops;
    }

    /**
     * Placeholder for per-node statistics collection during a merge-tree
     * traversal. Not yet implemented — all branches are intentionally empty.
     *
     * @param node the merge-tree node being visited
     */
    private void perNodeProcessing(MergeOpTree.Node node) {
        if (node.isLeaf()) {
            // should collect from Work node
        } else {
            if (node.getOp().isComplete()) {
                // should copy this statistics
            } else {
                // simply aggregate left and right children
            }
        }
    }

    /**
     * Placeholder for "strategy 2" (synchronized, parent-first merging; see
     * design notes above). Not yet implemented — the traversal body is
     * intentionally empty.
     *
     * @param tree the merge-operator tree to evaluate
     */
    private void strategy2(MergeOpTree tree) {
        for (MergeOpTree.Node node : tree) {
            // post-order traversal
            // collect per node statistics

            // check whether it is ready to start
            // if so, remove any descendent.
        }
        // merge nodes below.
    }
}
