package skewreduce.framework.physical;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import skewreduce.framework.EaggPlan;
import skewreduce.framework.SchedulerEvent;
import skewreduce.framework.logical.LogicalOp;


/**
 * Physical partition operator backed by array splits. The operator itself
 * performs no work at execution time: the per-worker array split carries out
 * the actual partitioning, so {@link #call()} completes immediately and
 * {@link #createJob(Configuration)} launches no MapReduce job.
 */
public class PArrayPartitionOp extends PPartitionOp {
    // Named "PLAN" logger — presumably shared across planner classes to group
    // plan-related output; NOTE(review): confirm this matches the project's
    // logging convention before renaming to the class-based logger.
    private static final Logger LOG = LoggerFactory.getLogger("PLAN");

    /**
     * Returns the path a downstream operator should read from.
     *
     * @param op the downstream (consuming) physical operator
     * @return this operator's input path — a single file; the array split
     *         performs the partitioning, so no separate output is produced
     * @throws IllegalArgumentException if {@code op} is a MERGE or FINALIZE
     *         operator, which cannot consume an array partition directly
     */
    @Override
    protected Path getOutputByContext(PhysicalOp op) {
        if (op.getType() == LogicalOp.Type.MERGE
                || op.getType() == LogicalOp.Type.FINALIZE) {
            // Fail fast with a descriptive message so misconfigured plans are
            // diagnosable (the original threw with no message at all).
            throw new IllegalArgumentException(
                    "array partition cannot feed operator of type " + op.getType());
        }

        // Should be a single file. The array split will do the partitioning.
        return getInputPath();
    }

    /**
     * Partition should leverage the distributed cache; only input/output
     * wiring is required here.
     *
     * @param plan the execution plan supplying input/output locations
     * @throws IOException if resolving the input/output paths fails
     */
    @Override
    public void setup(EaggPlan plan) throws IOException {
        setupInputOutput(plan);
    }

    /**
     * No-op execution: the array split for each worker does the real job.
     * Marks this operator complete and notifies the scheduler immediately.
     *
     * @return a completion event for this operator
     */
    @Override
    public SchedulerEvent call() throws Exception {
        setup();
        setStatus(Status.COMPLETE);
        return new SchedulerEvent.Completion(this);
    }

    /**
     * This operator never launches a MapReduce job.
     *
     * @param conf the Hadoop configuration (unused)
     * @return always {@code null} — NOTE(review): callers of createJob must
     *         tolerate null; confirm against {@code PPartitionOp}'s contract
     */
    @Override
    protected Job createJob(Configuration conf) throws IOException {
        return null;
    }
}
