package skewreduce.framework.planner;

import java.io.DataInput;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

import skewreduce.framework.planner.axis.Axis;
import skewreduce.lib.ITuple;
import skewreduce.lib.Partition;


/**
 * Abstract cost model for the SkewReduce planner.
 *
 * <p>A concrete subclass supplies per-record sizes, cost complexities, and the
 * actual work/merge cost functions; this base class reads the shared planner
 * tuning knobs from the Hadoop {@link Configuration} and exposes them through
 * accessor methods. Instances are created reflectively via
 * {@link #getInstance(Configuration)} using the class named by
 * {@link #MODEL_CLASS_PROP}.
 */
public abstract class EaggCostModel implements Configurable {
	/** Configuration property naming the concrete {@code EaggCostModel} class to instantiate. */
	public static final String MODEL_CLASS_PROP = "skewreduce.planner.costmodel.class";
	
	protected Configuration conf;
	/** Sample rate used to extrapolate sampled cost to full-data cost; NaN when unconfigured. */
	protected double sigma;
	/** Conversion factor from abstract "work" cost units to seconds. */
	protected double costWorkPerSec;
	/** Conversion factor from abstract "merge" cost units to seconds. */
	protected double costMergePerSec;
	/** Per-task scheduling delay, in seconds. */
	protected double costDelay;
	/** Partition boundary band width; 0 disables partitioning (see isPartitionable). */
	protected double bandWidth;
	/** Multiplier on bandWidth giving the minimum splittable partition length. */
	protected double minLengthFactor;
	
	/** Lower bound on memory (bytes) assumed available to a work task. */
	protected long workMemoryMinLimit;
	/** Upper bound on memory (bytes) assumed available to a work task. */
	protected long workMemoryMaxLimit;
	/** Memory budget (bytes) assumed available to a merge task. */
	protected long mergeMemoryLimit;
	
	@Override
	public final Configuration getConf() {
		return conf;
	}
	
	/**
	 * Stores the configuration and loads all planner tuning parameters from it,
	 * then invokes the subclass {@link #init(Configuration)} hook.
	 */
	@Override
	public final void setConf(Configuration arg0) {
		this.conf = arg0;
		// NaN default makes a missing sample rate detectable downstream.
		sigma = conf.getFloat("skewreduce.planner.sampleRate",Float.NaN);
		costWorkPerSec = conf.getFloat("skewreduce.planner.cost.work2sec",1.0f);
		costMergePerSec = conf.getFloat("skewreduce.planner.cost.merge2sec",1.0f);
		costDelay = conf.getFloat("skewreduce.planner.scheduler.delay", 10.0f);
		bandWidth = conf.getFloat("skewreduce.planner.partition.bandWidth",0.0f);
		minLengthFactor = conf.getFloat("skewreduce.planner.partition.minLengthFactor",4.0f);
		
		workMemoryMinLimit = conf.getLong("skewreduce.planner.cost.memory.work.min", 0);
		workMemoryMaxLimit = conf.getLong("skewreduce.planner.cost.memory.work.max", 128*1024*1024);
		mergeMemoryLimit = conf.getLong("skewreduce.planner.cost.memory.merge", 128*1024*1024);
		
		init(conf);
	}

	/** @return true if merge tasks may run in parallel; subclasses override to enable. */
	public boolean isMergeParallelizable() { return false; }
	public double getMinLengthFactor() { return minLengthFactor; }
	public double getBandWidth() { return bandWidth; }
	/** @return true if a partition of extent {@code v} is large enough to split into two halves of at least the minimum length. */
	public boolean isPartitionable(double v) { return v > bandWidth*minLengthFactor*2.0; }
	/** @return the minimum allowed partition extent. */
	public double getMinLength() { return bandWidth * minLengthFactor; }
	public double getSampleRate() { return sigma; }
	public double getWork2Sec() { return costWorkPerSec; }
	public double getMerge2Sec() { return costMergePerSec; }
	public double getScheduleDelay() { return costDelay; }
	
	// NOTE(review): these assume getWorkRecordSize()/getMergeRecordSize() return a
	// positive value; a zero-size implementation would cause ArithmeticException.
	public long getMinWorkDataItems() { return workMemoryMinLimit / getWorkRecordSize(); }
	public long getMaxWorkDataItems() { return workMemoryMaxLimit / getWorkRecordSize(); }
	public long getMaxMergeDataItems() { return mergeMemoryLimit / getMergeRecordSize(); }
	
	public void setMinWorkMemoryLimit(long l) { workMemoryMinLimit = l; }
	public void setMaxWorkMemoryLimit(long l) { workMemoryMaxLimit = l; }
	public void setMaxMergeMemoryLimit(long l) { mergeMemoryLimit = l; }
	
	/**
	 * Asymptotic complexity classes used to extrapolate a cost measured on a
	 * sample of rate {@code sigma} to the expected cost on the full data.
	 * @author yongchul
	 *
	 */
	public static enum Complexity {
		LOG,
		N,
		NLOGN,
		N2,
		N3;
		
		/**
		 * Extrapolates a sampled cost to the full-data cost under this complexity class.
		 *
		 * @param sigma sample rate in (0,1]
		 * @param n     number of sampled items (only used by NLOGN)
		 * @param cost  cost measured on the sample
		 * @return the expected cost on the complete data set
		 */
		public double expectedCost(double sigma,int n,double cost) {
			switch ( this ) {
			// LOG assumes a unit constant factor: c*log(sigma*n) -> c*log(n) with c=1.
			case LOG: return cost - Math.log(sigma);
			case N: return cost / sigma;
			case NLOGN: return  (cost - n * Math.log(sigma)) / sigma;
			case N2: return cost / (sigma*sigma);
			case N3: return cost / (sigma*sigma*sigma);
			default:
				throw new IllegalArgumentException();
			}
		}
	}
	
	/** Subclass hook invoked at the end of {@link #setConf(Configuration)}; default is a no-op. */
	protected void init(Configuration conf) {
	}
	
	public abstract Complexity getWorkComplexity();
	/** Cost of processing a node's partition over its full sample. */
	public final double costWork(PartitionNode node) {
		return costWork(node.getPartition(),node.getSample());
	}
	/** Cost of processing partition {@code p} over the entire sample list {@code s}. */
	public final double costWork(Partition p,List<? extends ITuple> s) {
		return costWork(p,s,0,s.size());
	}
	/** Cost of processing partition {@code p} over sample range [from,to). */
	public abstract double costWork(Partition p,List<? extends ITuple> s,int from,int to);
	
	public abstract Complexity getMergeComplexity();

	/** Merge cost where one sample list is split at index {@code split} into the two child ranges. */
	public final double costMerge(Partition p,Partition p1,Partition p2,List<? extends ITuple> s,int split) {
		return costMerge(p,p1,s,0,split,p2,s,split,s.size());
	}
	public final double costMerge(Partition p,Partition p1,List<? extends ITuple> s1,Partition p2,List<? extends ITuple> s2) {
		return costMerge(p,p1,s1,0,s1.size(),p2,s2,0,s2.size());
	}
	public final double costMerge(PartitionNode parent,PartitionNode l,PartitionNode r) {
		return costMerge(parent.getPartition(),l.getPartition(),l.getSample(),r.getPartition(),r.getSample());
	}
	/**
	 * Merge cost of the first two children of {@code parent}; any additional
	 * children are ignored.
	 *
	 * @throws IllegalArgumentException if fewer than two children are supplied
	 */
	public final double costMerge(PartitionNode parent,PartitionNode... children) {
		// Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException.
		if ( children == null || children.length < 2 ) {
			throw new IllegalArgumentException("costMerge requires at least two children but got "
					+ (children == null ? 0 : children.length));
		}
		return costMerge(parent,children[0],children[1]);
	}
	public abstract double costMerge(Partition p,Partition p1,List<? extends ITuple> s1,int src1,int to1,Partition p2,List<? extends ITuple> s2,int src2,int to2);

	
	/** @return the partition covering the entire data domain. */
	public abstract Partition getRootPartition();
	
	/** Deserializes one tuple from the given input stream. */
	public abstract ITuple createTuple(DataInput in) throws IOException;
	
	/** @return in-memory size in bytes of one record during the work phase (must be positive). */
	public abstract int getWorkRecordSize();
	/** @return in-memory size in bytes of one record during the merge phase (must be positive). */
	public abstract int getMergeRecordSize();
	
	/**
	 * Reflectively instantiates the cost model class named by {@link #MODEL_CLASS_PROP}.
	 *
	 * @throws IllegalArgumentException if the property is not set in {@code conf}
	 */
	public static final EaggCostModel getInstance(Configuration conf) {
		Class<? extends EaggCostModel> cls = conf.getClass(MODEL_CLASS_PROP, null, EaggCostModel.class);
		if ( cls == null ) {
			// Previously this fell through to ReflectionUtils.newInstance(null, conf),
			// which fails with an opaque NullPointerException.
			throw new IllegalArgumentException("cost model class is not configured: " + MODEL_CLASS_PROP);
		}
		return ReflectionUtils.newInstance(cls, conf);
	}
	
	public abstract CostEstimator createWorkCostEstimator();
	
	/** @return true to force splitting the given partition regardless of cost; default never forces. */
	public boolean forcePartition(Partition part,List<ITuple> sample) {
		return false;
	}

	/** Initializes axis metadata for the given partition. */
	public abstract void initAxis(Partition partition, Axis axis);
	
	/** Adjusts a proposed split position along {@code axis}; default returns 0.0f (no adjustment). */
	public float adjustSplit(Axis axis,float split) {
		return 0.0f;
	}
}
