package skewreduce.framework.physical;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import skewreduce.framework.EaggDriver;
import skewreduce.framework.EaggPlanCompiler;
import skewreduce.framework.SchedulerEvent;
import skewreduce.framework.logical.LogicalOp;


/**
 * Planning-only operator: despite living in the physical-operator hierarchy,
 * it performs no optimization or Map-Reduce work itself — when executed it
 * triggers (initial or incremental) plan generation via {@code EaggPlanCompiler}.
 * @author yongchul
 *
 */
public class PGenerateOp extends PhysicalOp {
	private static final Logger LOG = LoggerFactory.getLogger("PLAN");

	// Preceding partition operator; this operator forwards output lookups to it.
	private PhysicalOp pop_partition;

	/**
	 * Sets the partition operator that precedes this GENERATE operator.
	 * Must be called before {@link #getOutputByContext(PhysicalOp)} is used.
	 * @param op the preceding partition operator
	 */
	public void setPartitionOp(PhysicalOp op) {
		pop_partition = op;
	}

	/** No setup work is required for a GENERATE operator. */
	@Override
	public void setup() throws IOException {}

	/**
	 * GENERATE is a planning-only operator; it never launches a Map-Reduce job.
	 * @param conf the Hadoop configuration (unused)
	 * @return never returns normally
	 * @throws UnsupportedOperationException always
	 */
	@Override
	protected Job createJob(Configuration conf) throws IOException {
		throw new UnsupportedOperationException("GENERATE operator does not run a Map-Reduce job");
	}

	/**
	 * Delegates output resolution to the preceding partition operator.
	 * @param op the operator requesting the output path
	 * @return the output path resolved by the partition operator
	 * @throws IllegalStateException if no partition operator has been set
	 *         (previously this surfaced as a bare NullPointerException)
	 */
	@Override
	protected Path getOutputByContext(PhysicalOp op) {
		if (pop_partition == null) {
			throw new IllegalStateException(
					"partition operator has not been set for " + getID());
		}
		return pop_partition.getOutputByContext(op);
	}

	/**
	 * Executes plan generation: an initial plan when the bound level is 0,
	 * otherwise an incremental plan, then marks this operator complete.
	 * @return a completion event handed back to the scheduler
	 * @throws Exception if plan generation fails
	 */
	@Override
	public SchedulerEvent call() throws Exception {
		// FIXME check difference between estimated runtime on sample and estimated runtime on real data
		// if estimated runtime goes beyond some error threshold, insert partition -- generate operator
		// for that partition

		// Parameterized SLF4J call evaluates arguments lazily; no isInfoEnabled() guard needed.
		LOG.info("EXECUTE {}", getID());

		if (getBound().getLevel() == 0) {
			EaggPlanCompiler.generateInitialPlan(plan, this);
		} else {
			EaggPlanCompiler.generateIncrementalPlan(plan, this);
		}

		setStatus(Status.COMPLETE);
		return new SchedulerEvent.Completion(this);
	}

	/**
	 * Creates the logical GENERATE operator whose physical instance is a
	 * {@code PGenerateOp}.
	 * @param driver the driver that owns the logical plan
	 * @return a logical GENERATE operator bound to {@code driver}
	 */
	public static LogicalOp createLocalOp(EaggDriver driver) {
		return new LogicalOp(driver, LogicalOp.Type.GENERATE, 0) {
			@Override
			public PhysicalOp createInstance() {
				return new PGenerateOp();
			}
		};
	}
}
