package skewreduce.framework.physical;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import skewreduce.framework.Schedule;
import skewreduce.framework.planner.EaggCostModel;
import skewreduce.framework.planner.PartitionNode;
import skewreduce.framework.planner.PriorityScheduler;
import skewreduce.framework.planner.partition.Partitioner;
import skewreduce.framework.planner.search.GreedyGlobalSchedule;
import skewreduce.framework.planner.search.SearchStrategy;
import skewreduce.lib.ChunkedList;
import skewreduce.lib.ITuple;
import skewreduce.lib.Partition;
import skewreduce.lib.Point3D;


/**
 * Builds a skew-aware partition plan from a buffered sample of input tuples.
 *
 * <p>Tuples are fed in via {@link #add(Object, ITuple)}; a subsequent call to
 * {@link #evalTime()}, {@link #partitionPhase1()} or {@link #partition()} drains the
 * buffer and builds a tree of {@link PartitionNode}s by recursively splitting the
 * root {@link Partition}, guided by an {@link EaggCostModel} and a pluggable
 * {@link SearchStrategy}. Not thread-safe: callers serialize access externally.
 */
public class Repartitioner<KEY,VAL extends ITuple> implements Configurable {
	private static final Logger LOG = LoggerFactory.getLogger("PLAN.REPARTITION");
	private static final Logger SLOG = LoggerFactory.getLogger("PLAN.REPARTITION.SPECULATE");
	private Configuration conf;

	private EaggCostModel model;              // cost model used to estimate work/merge effort
	private Partition rootPartition;          // partition covering the whole input domain
	private Partitioner partitioner;          // splits one PartitionNode into two children
	private PartitionNode rootNode;           // root of the most recently built partition tree
	private Set<PartitionNode> newPartitions; // every node kept by partition() (internal and leaf)
	private ChunkedList<ITuple> sampleBuf;    // sample tuples accumulated via add()
	private double costWorkPerSec;            // conversion factor from abstract work cost to seconds
	private PriorityScheduler scheduler;      // single-slot scheduler used by the speculation path
	
	private SearchStrategy searchStrategy;    // pluggable policy deciding whether a split is kept
	
	private int lookAheadLimit;               // speculation depth; zero means no tolerance

    public Repartitioner() {}
    
    /** Buffers one sampled key/value pair until the next planning call drains the buffer. */
    public void add(KEY k,VAL v) {
    	sampleBuf.add(new Pair<KEY, VAL>(k,v));
    }
    
    // FIXME implement different strategies
    // SOCC
    // Look-ahead -- if we stuck, decrement counter and keep split
    // Balanced   -- force repartition those expected to run longer than 0 < \alpha < 1.0 of the work phase

    /**
     * Drains the sample buffer into a fresh list, clears the buffer so later
     * {@code add()} calls start a new sample, and logs the sample size.
     * Shared by {@link #evalTime()}, {@link #partitionPhase1()} and {@link #partition()}.
     */
    private List<ITuple> drainSamples() {
    	List<ITuple> sample = new ArrayList<ITuple>(sampleBuf);
    	sampleBuf.clear();
    	
    	if ( LOG.isInfoEnabled() ) {
    		LOG.info("Total {} input tuples have been loaded",sample.size());
    	}
    	return sample;
    }

    /**
     * Builds a single root partition over the buffered sample and estimates its work
     * cost. No splitting is performed.
     *
     * @return the root node with its work cost and time populated
     */
    public PartitionNode evalTime() {
    	List<ITuple> sample = drainSamples();
    	
    	rootNode = new PartitionNode(rootPartition,sample,false);
    	
		// estimate the cost of processing the entire sample as one partition
		double costwork = model.costWork(rootNode);
		rootNode.setWorkCost(costwork, costWorkPerSec);
		
		return rootNode;
    }
    
    /**
     * Two-phase planning. Phase one splits greedily until every leaf's expected item
     * count fits the per-task memory budget ({@code model.getMaxWorkDataItems()}).
     * Unless {@code sampler.planner.onephase} is set, phase two then runs the search
     * strategy over each leaf (assuming a single machine) and keeps any further split
     * that the strategy judges to reduce estimated completion time.
     *
     * @return the set of leaf partitions of the final plan
     */
    public Set<PartitionNode> partitionPhase1() {
    	List<ITuple> sample = drainSamples();
    	

		PriorityQueue<PartitionNode> queue = new PriorityQueue<PartitionNode>();
		Set<PartitionNode> leaves = new HashSet<PartitionNode>();
		double sampleRate = model.getSampleRate();
		
		rootNode = new PartitionNode(rootPartition,sample,false);
		rootNode.setWorkCost(model.costWork(rootNode.getPartition(), rootNode.getSample()),costWorkPerSec);
		leaves.add(rootNode);
	
		// FIXME add all leaves to leaves and queue
		queue.addAll(leaves);
		
		if ( LOG.isInfoEnabled() ) {
			LOG.info("BEGIN FIRST PHASE... SAMPLE RATE = {}", sampleRate);
		}
		
		// first, partition until we have all leaf nodes satisfy the memory requirements
		while ( ! queue.isEmpty() ) {
			PartitionNode node = queue.poll();
			if ( LOG.isDebugEnabled() ) {
				LOG.debug("Considering partitioning {} - cost {}",node.getPartition(),node.getCost());
			}
			
			// only split nodes whose (sample-rate-scaled) item count exceeds the memory budget
			boolean forcePartition = node.getExpectedItems(sampleRate) > model.getMaxWorkDataItems();
			if ( ! forcePartition ) {
				if ( LOG.isDebugEnabled() ) {
					LOG.debug("Skipping partition {} since it already satisfies memory requirement",node.getPartition());
				}
				continue;
			}
				
			PartitionNode[] children = partitioner.partition(node);
			if ( children == null ) {
				// partitioner could not split (e.g. minimum size reached); keep as leaf
				if ( LOG.isInfoEnabled() ) {
					LOG.info("Can't partition further {}",node.getPartition());
				}
				continue;
			}
			
			PartitionNode l = children[0];
			PartitionNode r = children[1];

			if ( LOG.isDebugEnabled() ) {
				LOG.debug("expected items = {} / max items = {}",node.getExpectedItems(sampleRate), model.getMaxWorkDataItems() );
			}
			
			// node is now internal: replace it by its two children in the leaf set
			leaves.add(l); leaves.add(r);
			queue.add(l); queue.add(r);
			leaves.remove(node);
		}
		
		if ( LOG.isInfoEnabled() ) {
			LOG.info("DONE FIRST PHASE. {} leaf partition have been created",leaves.size());
		}
		
		if ( ! conf.getBoolean("sampler.planner.onephase",false) ) {
			// second phase. for each leaf partition, run SOCC algorithm assuming the number of machines is 1
			// NOTE(review): singlePlan is populated and cleared per leaf but never read in this
			// method — presumably intended to drive the scheduler; verify before removing.
			SortedSet<PartitionNode> singlePlan = new TreeSet<PartitionNode>(); // for scheduler
			HashSet<PartitionNode> newLeaves = new HashSet<PartitionNode>();

			for ( PartitionNode leafPartition : leaves ) {
				singlePlan.add(leafPartition);
				queue.add(leafPartition);
				newLeaves.add(leafPartition);

				//bestCost = leafPartition.getWorkSecs();
				
				// fresh strategy state per leaf; each leaf is optimized in isolation
				searchStrategy.reset();
				searchStrategy.init(leafPartition);
				searchStrategy.setNumMachines(1);
				
				while ( ! queue.isEmpty() ) {
					PartitionNode node = queue.poll();
					if ( node.canPartition() ) {
						if ( LOG.isDebugEnabled() ) {
							LOG.debug("Considering partitioning {} - cost {}",node.getPartition(),node.getCost());
						}
						
						PartitionNode[] children = partitioner.partition(node);
						if ( children == null ) {
							// FIXME leave a log message
							continue;
						}
						PartitionNode l = children[0];
						PartitionNode r = children[1];
						
						if ( LOG.isDebugEnabled() ) {
							LOG.debug("expected items = {} / max items = {}",node.getExpectedItems(sampleRate), model.getMaxWorkDataItems() );
						}
	
						// keep the split only if the best achievable time after splitting
						// (including schedule delay) beats running the node unsplit
						if ( node.getTotalMinSecs(model.getScheduleDelay()) < node.getWorkSecs() ) {
							if ( searchStrategy.shouldPartition(node, l, r, false) ) {
								if ( LOG.isDebugEnabled() ) {
									LOG.debug("Keep partition {}",node.getPartition().toSpec());
								}
								
								// add two split nodes to leaves and insert to the queue
								queue.add(l); queue.add(r);
								newLeaves.add(l); newLeaves.add(r);
								newLeaves.remove(node);
								singlePlan.add(l);
								singlePlan.add(r);
							} else {
								if ( LOG.isDebugEnabled() ) {
									LOG.debug("Discard partition {}",node.getPartition().toSpec());
								}
								node.rollback(); // drop children
							}
						} else {
							if ( LOG.isDebugEnabled() ) {
								LOG.debug("Discard partition {}, min = {}, work = {}",new Object[] { node.getPartition().toSpec(), node.getTotalMinSecs(model.getScheduleDelay()), node.getWorkSecs() });
							}
							node.rollback();
						}
					} // just ignore
				}

				singlePlan.clear();
			}
			
			leaves = newLeaves;
		}
		
		return leaves;
    }
    
    /**
     * Single-phase planning driven entirely by the search strategy: starting from
     * the root, repeatedly split the most promising node and keep the split only if
     * the strategy accepts it; otherwise roll the node back to a leaf.
     *
     * @return all kept nodes (internal and leaf) of the plan; also retrievable later
     *         via {@link #getNewPartitions()}
     */
    public Set<PartitionNode> partition() {
    	List<ITuple> sample = drainSamples();
    	
    	rootNode = new PartitionNode(rootPartition,sample,false);
    	
		// do repartition; reuse a pre-existing cost on the node if present,
		// otherwise compute it from the model
		double costwork = rootNode.getWorkCost();
		if ( costwork == 0.0 || Double.isNaN(costwork) ) {
			costwork = model.costWork(rootNode);
			rootNode.setWorkCost(costwork, costWorkPerSec);
		}
		// NOTE(review): bestCost is never updated after this point, so the debug log
		// below always reports the root's initial work time — confirm intent.
		double bestCost = rootNode.getWorkSecs();
		//double partitionLimit = model.getScheduleDelay() * 2.0;

		PriorityQueue<PartitionNode> queue = new PriorityQueue<PartitionNode>();
		queue.add(rootNode);
		newPartitions.add(rootNode);
		
		searchStrategy.reset();
		searchStrategy.init(rootNode);
		
		//SpeculationContext context = new SpeculationContext();
		
		while ( ! queue.isEmpty() ) {
			PartitionNode node = queue.poll();
			
			PartitionNode[] children = partitioner.partition(node);
			if ( children == null ) {
				// unsplittable; remains a leaf
				continue;
			} else {
				PartitionNode l = children[0];
				PartitionNode r = children[1];
				
				if ( LOG.isDebugEnabled() ) {
					LOG.debug("Total min time = {}, Previous time = {}", node.getTotalMinSecs(model.getScheduleDelay()), node.getWorkSecs());
					LOG.debug("Merge cost = {}/time = {}", node.getCost(), node.getMergeSecs());
					LOG.debug("Left = {} -- cost = {}/time = {}", new Object[] { l.getPartition(), l.getCost(), l.getWorkSecs() });
					LOG.debug("Right = {} -- cost = {}/time = {}", new Object[] { r.getPartition(), r.getCost(), r.getWorkSecs() });	
				}
				
				// TODO backtrack if we run out of slack count but fail to improve runtime.
				if ( node.getTotalMinSecs(model.getScheduleDelay()) < node.getWorkSecs() ) {
					if ( searchStrategy.shouldPartition(node, l, r, false) ) {
						if ( LOG.isDebugEnabled() ) {
							LOG.debug("partition {}, total cost = {}",node.getPartition().toSpec(), bestCost);
						}
						
						// add two split nodes to leaves and insert to the queue
						newPartitions.add(l); newPartitions.add(r);
						queue.add(l); queue.add(r);
						node.trim(); // we know that we should keep this
						if ( LOG.isDebugEnabled() ) {
							LOG.debug("KEEP PARTITION");
							LOG.debug(String.format("SCHEDULE A,N,%s,%e,%e,%e,%e",node.getPartition().getPartitionID(),node.getWorkSecs(),node.getMergeSecs(),l.getWorkSecs(),r.getWorkSecs()));
						}
						l.setSlack(0); r.setSlack(0);
					} else {
						/*
						context.set(node, l, r);

						if ( partitionLimit < node.getWorkSecs() && lookAheadLimit > 0 && speculate(context,lookAheadLimit) ) {
							// we found better cost. patch previous.
							newPartitions.addAll(context.leaves);
							queue.addAll(context.queue);
							
							scheduler.reset(newPartitions);
							schedCost = scheduler.estimateSchedule();
							if ( LOG.isDebugEnabled() ) {
								LOG.debug("Previous cost = {}, After speculation = {}",bestCost,schedCost);
							}
							bestCost = schedCost;
						} else {
							if ( LOG.isDebugEnabled() ) {
								LOG.debug("DISCARD PARTITION - no schedule benefit");
								LOG.debug(String.format("SCHEDULE R,N,%s,%e,%e,%e,%e",node.getPartition().getPartitionID(),node.getWorkSecs(),node.getMergeSecs(),l.getWorkSecs(),r.getWorkSecs()));
							}
							node.rollback(); // drop children
						}
						*/
						if ( LOG.isDebugEnabled() ) {
							LOG.debug("DISCARD PARTITION - no schedule benefit");
							LOG.debug(String.format("SCHEDULE R,N,%s,%e,%e,%e,%e",node.getPartition().getPartitionID(),node.getWorkSecs(),node.getMergeSecs(),l.getWorkSecs(),r.getWorkSecs()));
						}
						node.rollback(); // drop children
					}
				} else {
					if ( LOG.isDebugEnabled() ) {
						LOG.debug("DISCARD PARTITION - no benefit");
						LOG.debug(String.format("SCHEDULE R,N,%s,%e,%e,%e,%e",node.getPartition().getPartitionID(),node.getWorkSecs(),node.getMergeSecs(),l.getWorkSecs(),r.getWorkSecs()));
					}
					node.rollback(); // drop children
				}
			}
		}
		
		return newPartitions;
	}
    
    /** @return the root partition (its spec reflects any splits applied so far) */
    public Partition getRootPartition() { return rootPartition; }
    /** @return all nodes kept by the last {@link #partition()} run */
    public Set<PartitionNode> getNewPartitions() { return newPartitions; }
    /** @return the cost model this planner was configured with */
    public EaggCostModel getModel() { return model; }
    /** @return one {@link Schedule} per kept partition node, in iteration order of the node set */
    public List<Schedule> getPartitionSchedule() {
    	List<Schedule> ret = new ArrayList<Schedule>(newPartitions.size());
    	for ( PartitionNode node : newPartitions ) {
    		ret.add(node.getSchedule());
    	}
    	return ret;
    }
    
    /** @return only the leaf nodes among the kept partitions */
    public Set<PartitionNode> getLeafPartitions() {
    	HashSet<PartitionNode> leaves = new HashSet<PartitionNode>();
    	for ( PartitionNode p : newPartitions ) {
    		if ( p.isLeaf() )
    			leaves.add(p);
    	}
    	return leaves;
    }

	@Override
	public Configuration getConf() {
		return conf;
	}

	/**
	 * Initializes the planner from the job configuration: cost model, root
	 * partition (optionally overridden by a serialized partition spec),
	 * partitioner, scheduler, and search strategy.
	 *
	 * @throws IllegalArgumentException if the partitioner cannot be instantiated
	 */
	@Override
	public void setConf(Configuration conf) {
		this.conf = conf;
		model = EaggCostModel.getInstance(conf);
		rootPartition = model.getRootPartition();
		String spec = conf.get(PhysicalOp.PARTITION_SPEC_ATTR);
		if ( spec != null ) {
			rootPartition.set(spec);
		}
		try {
			partitioner = Partitioner.getInstance(model,null);
		} catch (Exception e) {
			throw new IllegalArgumentException("Failed to instantiate repartitioner",e);
		}
		costWorkPerSec = conf.getFloat("skewreduce.planner.cost.work2sec", 1.0f);
		lookAheadLimit = conf.getInt("skewreduce.planner.lookahead",0); // zero means no tolerance
		
		newPartitions = new TreeSet<PartitionNode>();
		sampleBuf = new ChunkedList<ITuple>();
		scheduler = new PriorityScheduler(conf);
		scheduler.setNumSlots(1); // TODO other values?
		
		Class<? extends SearchStrategy> clsStrategy = conf.getClass("skewreduce.planner.search.strategy", GreedyGlobalSchedule.class, SearchStrategy.class);
		searchStrategy = ReflectionUtils.newInstance(clsStrategy,conf);
		searchStrategy.setNumMachines(1);
	}
	
	
	/**
	 * Mutable scratch state for one look-ahead speculation run: the frontier of
	 * nodes still to visit ({@code queue}) and the nodes tentatively kept
	 * ({@code leaves}), plus the best known costs before and during speculation.
	 */
	class SpeculationContext implements Iterable<PartitionNode> {
		double bestCost; // best cost before speculation
		double bestSpec; // current best speculative cost
		
		final PriorityQueue<PartitionNode> queue = new PriorityQueue<PartitionNode>(); // nodes should be visited
		final SortedSet<PartitionNode> leaves = new TreeSet<PartitionNode>(); // nodes to keep
		
		/** Seeds the context with a node and its freshly-split children. */
		public void set(PartitionNode node,PartitionNode l,PartitionNode r) {
			reset();
			
			bestCost = node.getWorkSecs();
			bestSpec = node.getMergeSecs() + l.getWorkSecs() + r.getWorkSecs() + 2*scheduler.getScheduleOverhead();

			queue.add(l);
			queue.add(r);
			leaves.add(node);
			leaves.add(l);
			leaves.add(r);
		}
		
		/**
		 * Keeps the given nodes; only those whose work exceeds twice the schedule
		 * overhead are worth visiting again and re-enter the frontier.
		 */
		public void add(PartitionNode... nodes) {
			for ( PartitionNode n : nodes ) {
				leaves.add(n);
				if ( n.getWorkSecs() > 2.0 * scheduler.getScheduleOverhead() )
					queue.add(n);
			}
		}
		
		public void reset() {
			queue.clear();
			leaves.clear();
			bestCost = Double.NaN;
			bestSpec = Double.NaN;
		}
		
		/** Estimates the schedule time of the kept nodes plus the two candidates. */
		public double estimateSchedule(PartitionNode l,PartitionNode r) {
			scheduler.reset(leaves);
			scheduler.add(l);
			scheduler.add(r);
			return scheduler.estimateSchedule();
		}
		
		public double getBestCost() { return bestCost; }
		public double getBestSpecCost() { return bestSpec; }
		public void setSpecCost(double c) { bestSpec = c; }
		
		/** Iterates by draining the frontier queue in priority order; remove() is unsupported. */
		@Override
		public Iterator<PartitionNode> iterator() {
			return new Iterator<PartitionNode>() {
				@Override
				public boolean hasNext() {
					return ! queue.isEmpty();
				}

				@Override
				public PartitionNode next() {
					return queue.poll();
				}

				@Override
				public void remove() {
					throw new UnsupportedOperationException();
				}
			};
		}
	}
	
	// assuming single node scheduler -- speculate up to depth k
	/**
	 * Explores up to {@code k} levels of extra splits below a rejected split,
	 * accepting the subtree if some deeper plan beats the pre-speculation cost.
	 *
	 * @param context seeded speculation state (see {@link SpeculationContext#set})
	 * @param k       slack budget: nodes whose slack reaches k stop being explored
	 * @return true if speculation found a cheaper plan than {@code context.getBestCost()}
	 */
	public boolean speculate(SpeculationContext context,int k) {
		if ( SLOG.isDebugEnabled() ) {
			SLOG.debug("Current best = {}, Starting cost = {}",context.getBestCost(),context.getBestSpecCost());
		}
		
		for ( PartitionNode node : context ) {
			PartitionNode[] children = partitioner.partition(node);

			if ( children == null ) {
				continue;
			} else {
				PartitionNode l = children[0];
				PartitionNode r = children[1];
				
				if ( SLOG.isDebugEnabled() ) {
					SLOG.debug("Total min time = {}, Previous time = {}", node.getTotalMinSecs(model.getScheduleDelay()), node.getWorkSecs());
					SLOG.debug("Merge cost = {}/time = {}", node.getCost(), node.getMergeSecs());
					SLOG.debug("Left = {} -- cost = {}/time = {}", new Object[] { l.getPartition(), l.getCost(), l.getWorkSecs() });
					SLOG.debug("Right = {} -- cost = {}/time = {}", new Object[] { r.getPartition(), r.getCost(), r.getWorkSecs() });	
				}
				
				if ( node.getTotalMinSecs(model.getScheduleDelay()) < node.getWorkSecs() ) {
					double schedCost = context.estimateSchedule(l, r);
					
					if ( SLOG.isDebugEnabled() ) {
						SLOG.debug("best cost = {}, current = {}",context.getBestCost(),schedCost);
					}
					
					if ( schedCost < context.getBestCost() ) {
						// end speculation
						context.add(children);
						context.setSpecCost(schedCost);
						l.setSlack(0); r.setSlack(0);
						if ( SLOG.isDebugEnabled() ) {
							SLOG.debug("KEEP PARTITION - improved best time at slack {}",node.getSlack());
							SLOG.debug(String.format("SCHEDULE A,S,%s,%e,%e,%e,%e",node.getPartition().getPartitionID(),node.getWorkSecs(),node.getMergeSecs(),l.getWorkSecs(),r.getWorkSecs()));
						}
						break;
					} else if ( schedCost < context.getBestSpecCost() || node.getSlack() < k ) {
						// continue
						if ( schedCost < context.getBestSpecCost() ) {
							context.setSpecCost(schedCost);
							if ( SLOG.isDebugEnabled() ) {
								SLOG.debug(String.format("SCHEDULE G,S,%s,%e,%e,%e,%e",node.getPartition().getPartitionID(),node.getWorkSecs(),node.getMergeSecs(),l.getWorkSecs(),r.getWorkSecs()));
							}
						} else {
							// increment slack counter
							int slack = node.getSlack();
							l.setSlack(slack+1); r.setSlack(slack+1);
							if ( SLOG.isDebugEnabled() ) {
								SLOG.debug("KEEP PARTITION - increasing slack {}",slack+1);
								SLOG.debug(String.format("SCHEDULE B,S,%s,%e,%e,%e,%e",node.getPartition().getPartitionID(),node.getWorkSecs(),node.getMergeSecs(),l.getWorkSecs(),r.getWorkSecs()));
							}
						}
						context.add(children);
					} else {
						// no hope. roll back
						if ( SLOG.isDebugEnabled() ) {
							SLOG.debug("DISCARD PARTITION - running out of slack");
							SLOG.debug(String.format("SCHEDULE R,S,%s,%e,%e,%e,%e",node.getPartition().getPartitionID(),node.getWorkSecs(),node.getMergeSecs(),l.getWorkSecs(),r.getWorkSecs()));
						}

						node.rollback();
					}
				} else {
					// no hope. roll back
					if ( SLOG.isDebugEnabled() ) {
						SLOG.debug("DISCARD PARTITION - no benefit");
						SLOG.debug(String.format("SCHEDULE R,S,%s,%e,%e,%e,%e",node.getPartition().getPartitionID(),node.getWorkSecs(),node.getMergeSecs(),l.getWorkSecs(),r.getWorkSecs()));
					}
					node.rollback();
				}
			}
		}
		
		if ( SLOG.isDebugEnabled() ) {
			SLOG.debug("Final: current best = {}, speculate cost = {}",context.getBestCost(),context.getBestSpecCost());
		}
		
		return context.getBestSpecCost() < context.getBestCost();
	}
	
	/**
	 * Standalone driver: reads sampled tuples from {@code args[0]}, runs
	 * {@link #partition()}, and writes the partition tree as text ({@code .txt}),
	 * binary ({@code .pf}) and per-partition schedule lines ({@code .schedule}).
	 */
	public static void main(String[] args) throws Exception {
        GenericOptionsParser options = new GenericOptionsParser(new Configuration(),args);
        Configuration conf = options.getConfiguration();
        String[] rest = options.getRemainingArgs();

		// guard against missing input path instead of failing with ArrayIndexOutOfBounds
		if ( rest.length < 1 ) {
			System.err.println("Usage: Repartitioner <sample file>");
			System.exit(1);
		}

		Repartitioner<LongWritable,Point3D> rp = new Repartitioner<LongWritable,Point3D>();
		rp.setConf(conf);
		
		DataInputStream in = new DataInputStream(new FileInputStream(rest[0]));
		ITuple t = null;
		try {
			while ( (t = rp.getModel().createTuple(in)) != null ) {
				rp.add(new LongWritable(0), (Point3D)t);
			}
		} catch ( EOFException ignore ) {
			// we simply ignore this exception and throw all other IOExceptions
		} finally {
			if ( in != null ) try { in.close(); } catch ( IOException ignore ) {}
			in = null;
		}
		
		//PartitionNode node = rp.evalTime();
		//System.out.printf("work cost = %e, work secs = %e\n",node.getWorkCost(),node.getWorkSecs());
		rp.partition();

		java.io.PrintStream out = new java.io.PrintStream(rest[0]+".txt");
		rp.getRootPartition().print(out);
		out.close();
		
		DataOutputStream dataOut = new DataOutputStream(new FileOutputStream(rest[0]+".pf"));
		rp.getRootPartition().write(dataOut);
		dataOut.close();
		
		out = new java.io.PrintStream(rest[0]+".schedule");
		for ( Schedule s : rp.getPartitionSchedule() ) {
			out.println(s);
		}
		out.close();
	}
}
