package skewreduce.framework.planner;

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import skewreduce.framework.Schedule;
import skewreduce.framework.planner.partition.Partitioner;
import skewreduce.framework.planner.search.GreedyGlobalSchedule;
import skewreduce.framework.planner.search.SearchStrategy;
import skewreduce.lib.ITuple;
import skewreduce.lib.Partition;


/**
 * Skew-aware partition planner.  Loads sample tuples from HDFS, builds a
 * tree of {@link PartitionNode}s rooted at the cost model's root partition
 * by repeatedly splitting expensive partitions, and writes the chosen
 * plan(s) and schedule(s) to output files.  Driven from the command line
 * via {@link #main(String[])}.
 */
public class Planner {
	private static final Logger LOG = LoggerFactory.getLogger(Planner.class);
	
	/** Selects which planning algorithm runs (see the switch at the end of doMain). */
	enum SearchType {
		GREEDY,   // greedy split search per machine count (doPlan)
		ONEPHASE, // split only until memory limits are satisfied (doPlan2, phase one)
		TWOPHASE  // ONEPHASE followed by per-leaf greedy refinement (doPlan2)
	}
	
	final Configuration conf; // Hadoop configuration; also carries planner options
	final FileSystem fs;      // file system used to read the sample files
	List<ITuple> sample = new ArrayList<ITuple>(); // all loaded sample tuples
	//Partitioner[] partitioner;
	Partitioner partitioner;  // splits a PartitionNode into two child partitions

	private EaggCostModel model;     // cost model: work/merge costs and memory limits
	private PartitionNode rootNode;  // root of the partition tree built by the search
	private Partition rootPartition; // region covering the whole input space
	private double sampleRate;       // fraction of the real input the sample represents
	private String outPrefix;        // output file name prefix; null = don't write files
	private int minMachines = 1;     // smallest machine count to plan for
	private int maxMachines;         // largest machine count; 0 = use cluster max map tasks
	private double bandWidth;        // partition bandwidth, forwarded into conf

	private double costWorkPerSec;   // converts model work-cost units to seconds
	private double costMergePerSec;  // converts model merge-cost units to seconds
	
	private boolean verbose;         // if true, print chosen plan(s) to stdout
	
	
	private SearchStrategy searchStrategy; // pluggable accept/reject test used by doPlan
	
	// NOTE(review): this field is never reassigned anywhere in this file, so the
	// switch in doMain always takes the GREEDY branch and doPlan2 is unreachable;
	// confirm whether the "-search" option was meant to set it.
	private SearchType searchType = SearchType.GREEDY;

	/**
	 * GREEDY planning.  For every target machine count m in
	 * [minMachines, maxMachines], greedily refines the partition tree, then
	 * schedules the plan on m slots and records the plan, its estimated
	 * completion time, and its schedule.  One CSV statistics line is printed
	 * per m; when {@code outPrefix} is set, each plan/schedule pair is
	 * written to "&lt;prefix&gt;.&lt;m&gt;.partition" / "&lt;prefix&gt;.&lt;m&gt;.schedule".
	 */
	private void doPlan() throws Exception {
		long beginOpt = System.currentTimeMillis();

		// One entry per machine count in [minMachines, maxMachines].
		List<Partition> bestPlans = new ArrayList<Partition>(maxMachines - minMachines + 1);
		List<Double> bestCosts = new ArrayList<Double>(maxMachines - minMachines + 1);
		List<List<Schedule>> bestSchedule = new ArrayList<List<Schedule>>(maxMachines - minMachines + 1);

		SortedSet<PartitionNode> currentPlan = new TreeSet<PartitionNode>(); // for scheduler
		PriorityQueue<PartitionNode> queue = new PriorityQueue<PartitionNode>(); // candidates to split
		Set<PartitionNode> leaves = new HashSet<PartitionNode>(); // current leaf partitions
		
		// FIXME get initial cost by scheduling
		PriorityScheduler scheduler = new PriorityScheduler(conf);
		
		rootNode = new PartitionNode(rootPartition,sample);
		double bestCost = model.costWork(rootNode.getPartition(), rootNode.getSample());
		rootNode.setWorkCost(bestCost,costWorkPerSec);
		
		currentPlan.add(rootNode);
		leaves.add(rootNode);
		searchStrategy.init(rootNode);
		
		// The tree refined for m machines is carried over as the starting point
		// for m+1 machines; only the surviving leaves are re-queued each round.
		for ( int m = minMachines; m <= maxMachines; ++m ) {
			// FIXME add all leaves to leaves and queue
			queue.addAll(leaves);
			searchStrategy.setNumMachines(m);

			while ( ! queue.isEmpty() ) {
				PartitionNode node = queue.poll();
				if ( node.canPartition() ) {
					if ( LOG.isDebugEnabled() ) {
						LOG.debug("Considering partitioning {} - cost {}",node.getPartition(),node.getCost());
					}
					
					// A split is mandatory when the projected (scaled-up) item
					// count exceeds the cost model's per-partition memory limit.
					long expectedItems = node.getExpectedItems(sampleRate);
					boolean forcePartition = expectedItems > model.getMaxWorkDataItems();
					if ( ! forcePartition ) {
						// NOTE(review): a configured minimum of 0 makes skip
						// unconditionally true here, i.e. only memory-forced
						// splits happen when no minimum is set -- confirm intent.
						boolean skip = model.getMinWorkDataItems() == 0 || expectedItems < model.getMinWorkDataItems();
						if ( skip ) {
							if ( LOG.isDebugEnabled() ) {
								LOG.debug("Skipping partition {} since it already satisfies memory requirement ({}/{})",new Object[] { node.getPartition(), expectedItems, model.getMinWorkDataItems() });
							}
							continue;
						}
					}
					
					//PartitionNode[] children = chooseBestPartitioner(node);
					PartitionNode[] children = partitioner.partition(node);
					if ( children == null ) {
						// FIXME leave a log message
						continue;
					}
					PartitionNode l = children[0];
					PartitionNode r = children[1];
					
					if ( LOG.isDebugEnabled() ) {
						LOG.debug("expected items = {} / max items = {}",node.getExpectedItems(sampleRate), model.getMaxWorkDataItems() );
					}

					// Keep the split only when the best possible post-split time
					// (including the scheduling delay) beats doing the work
					// unsplit, or when the memory limit forces it.
					if ( node.getTotalMinSecs(model.getScheduleDelay()) < node.getWorkSecs() || forcePartition ) {				
						if ( searchStrategy.shouldPartition(node, l, r, forcePartition) ) {
							// add two split nodes to leaves and insert to the queue
							leaves.add(l); leaves.add(r);
							queue.add(l); queue.add(r);
							currentPlan.add(l);
							currentPlan.add(r);
							// NOTE(review): node itself stays in currentPlan after
							// the split; presumably the scheduler treats interior
							// nodes as merge tasks -- confirm.
							leaves.remove(node);
						} else {
							node.rollback(); // drop children
						}
					} else {
						node.rollback();
					}
				} // just ignore
			}
			
			// FIXME do second path?
			
			// Snapshot the plan for this machine count and estimate its schedule.
			bestPlans.add(rootPartition.deepCopy()); // add plan
			scheduler.setNumSlots(m);
			scheduler.reset(currentPlan);
			double bestTime = scheduler.estimateSchedule();
			bestCosts.add(bestTime);
			bestSchedule.add(scheduler.getSchedule());
		}
		
		long endOpt = System.currentTimeMillis();

		// print statistics: machines, sampleRate, estimated time, planning secs, unsafe decisions
		for ( int i = 0; i < bestCosts.size(); ++i ) {
			System.out.printf("%d,%f,%f,%f,%d\n",minMachines+i,sampleRate,bestCosts.get(i),(endOpt-beginOpt)/1000.0,partitioner.getNumUnsafeDecisions());
			
			if ( verbose )
				bestPlans.get(i).print(System.out);
			
			// store as file
			
			if ( outPrefix != null ) {
				// serialized partition plan for this machine count
				FileOutputStream fos = null;
				DataOutputStream out = null;
				try {
					fos = new FileOutputStream(String.format("%s.%d.partition",outPrefix,minMachines+i));
					out = new DataOutputStream(new BufferedOutputStream(fos,4096*1024));
					bestPlans.get(i).write(out);
				} finally {
					if ( out != null ) try { out.close(); } catch ( IOException ignore ) {}
					out = null;
					if ( fos != null ) try { fos.close(); } catch ( IOException ignore ) {}
					fos = null;
				}
				
				// human-readable schedule, one entry per line
				PrintStream scheduleOut = null;
				try {
					scheduleOut = new PrintStream(String.format("%s.%d.schedule",outPrefix,minMachines+i));
					for ( Schedule s : bestSchedule.get(i) ) {
						scheduleOut.println(s);
					}
				} finally {
					if ( scheduleOut != null ) scheduleOut.close();
					scheduleOut = null;
				}
			}
		}
	}

	
	/**
	 * ONEPHASE / TWOPHASE planning.  Phase one unconditionally splits every
	 * partition whose projected item count exceeds the cost model's memory
	 * limit until all leaves fit.  When {@code searchType} is TWOPHASE, each
	 * resulting leaf is then refined by a greedy search that accepts a split
	 * only if the single-slot schedule estimate improves.  Prints one CSV
	 * statistics line and, when {@code outPrefix} is set, writes
	 * "&lt;prefix&gt;.partition" and "&lt;prefix&gt;.schedule".
	 */
	private void doPlan2() throws Exception {
		long beginOpt = System.currentTimeMillis();
		
		rootNode = new PartitionNode(rootPartition,sample);

		SortedSet<PartitionNode> currentPlan = new TreeSet<PartitionNode>(); // for scheduler
		PriorityQueue<PartitionNode> queue = new PriorityQueue<PartitionNode>(); // candidates to split
		Set<PartitionNode> leaves = new HashSet<PartitionNode>(); // current leaf partitions
		
		PriorityScheduler scheduler = new PriorityScheduler(conf);
		currentPlan.add(rootNode);
		leaves.add(rootNode);
		
		rootNode.setWorkCost(model.costWork(rootNode.getPartition(), rootNode.getSample()),costWorkPerSec);
		
		// FIXME add all leaves to leaves and queue
		queue.addAll(leaves);
		// every schedule estimate in this method assumes a single slot
		scheduler.setNumSlots(1);
		
		if ( LOG.isInfoEnabled() ) {
			LOG.info("BEGIN FIRST PHASE...");
		}
		
		// first, partition until we have all leaf nodes satisfy the memory requirements
		while ( ! queue.isEmpty() ) {
			PartitionNode node = queue.poll();
			if ( LOG.isDebugEnabled() ) {
				LOG.debug("Considering partitioning {} - cost {}",node.getPartition(),node.getCost());
			}
			
			long expectedItems = node.getExpectedItems(sampleRate);
			boolean forcePartition = expectedItems > model.getMaxWorkDataItems();
			if ( ! forcePartition ) {
				// NOTE(review): as in doPlan, a configured minimum of 0 makes
				// skip unconditionally true when the split is not forced --
				// confirm intent.
				boolean skip = model.getMinWorkDataItems() == 0 || expectedItems < model.getMinWorkDataItems();
				if ( skip ) {
					if ( LOG.isDebugEnabled() ) {
						LOG.debug("Skipping partition {} since it already satisfies memory requirement",node.getPartition());
					}
					continue;
				}
			}
				
			PartitionNode[] children = partitioner.partition(node);
			if ( children == null ) {
				if ( LOG.isInfoEnabled() ) {
					LOG.info("Can't partition further {}",node.getPartition());
				}
				continue;
			}
			
			PartitionNode l = children[0];
			PartitionNode r = children[1];

			if ( LOG.isDebugEnabled() ) {
				LOG.debug("expected items = {} / max items = {}",node.getExpectedItems(sampleRate), model.getMaxWorkDataItems() );
			}
			
			// unconditionally keep the split in phase one
			leaves.add(l); leaves.add(r);
			queue.add(l); queue.add(r);
			currentPlan.add(l);
			currentPlan.add(r);
			leaves.remove(node);
		}
		
		if ( LOG.isInfoEnabled() ) {
			LOG.info("DONE FIRST PHASE. {} leaf partition have been created",leaves.size());
		}

		// Root's unsplit work time; reported as-is when the second phase is skipped.
		double bestCost = rootNode.getWorkSecs();

		if ( searchType == SearchType.TWOPHASE ) {
			// second phase. for each leaf partition, run SOCC algorithm assuming the number of machines is 1
			SortedSet<PartitionNode> singlePlan = new TreeSet<PartitionNode>(); // for scheduler

			for ( PartitionNode leafPartition : leaves ) {
				singlePlan.add(leafPartition);
				queue.add(leafPartition);

				// NOTE(review): bestCost is reset for every leaf, so the value
				// printed below reflects only the last leaf refined -- confirm.
				bestCost = leafPartition.getWorkSecs();
				
				while ( ! queue.isEmpty() ) {
					PartitionNode node = queue.poll();
					if ( node.canPartition() ) {
						if ( LOG.isDebugEnabled() ) {
							LOG.debug("Considering partitioning {} - cost {}",node.getPartition(),node.getCost());
						}
						
						PartitionNode[] children = partitioner.partition(node);
						if ( children == null ) {
							// FIXME leave a log message
							continue;
						}
						PartitionNode l = children[0];
						PartitionNode r = children[1];
						
						if ( LOG.isDebugEnabled() ) {
							LOG.debug("expected items = {} / max items = {}",node.getExpectedItems(sampleRate), model.getMaxWorkDataItems() );
						}
	
						// Consider the split only if it could possibly pay off
						// given the per-task scheduling delay.
						if ( node.getTotalMinSecs(model.getScheduleDelay()) < node.getWorkSecs() ) {
							// re-estimate the single-slot schedule with the
							// tentative children included
							scheduler.reset(singlePlan);
							scheduler.add(l);
							scheduler.add(r);
							double schedCost = scheduler.estimateSchedule();
							
							if ( LOG.isDebugEnabled() ) {
								LOG.debug("best cost = {}, current = {}",bestCost,schedCost);
							}
							
							if ( schedCost < bestCost ) {
								if ( LOG.isDebugEnabled() ) {
									LOG.debug("Keep partition {}, current = {}, best = {}",new Object[] { node.partition.toSpec(), schedCost, bestCost });
								}
								
								bestCost = schedCost;
	
								// add two split nodes to leaves and insert to the queue
								queue.add(l); queue.add(r);
								singlePlan.add(l);
								singlePlan.add(r);
							} else {
								if ( LOG.isDebugEnabled() ) {
									LOG.debug("Discard partition {}, current = {}, best = {}",new Object[] { node.partition.toSpec(), schedCost, bestCost });
								}
								node.rollback(); // drop children
							}
						} else {
							if ( LOG.isDebugEnabled() ) {
								LOG.debug("Discard partition {}, min = {}, work = {}",new Object[] { node.partition.toSpec(), node.getTotalMinSecs(model.getScheduleDelay()), node.getWorkSecs() });
							}
							node.rollback();
						}
					} // just ignore
				}
				
				// merge this leaf's refinement into the overall plan only if
				// the leaf was actually split
				if ( singlePlan.size() > 1 ) {
					currentPlan.addAll(singlePlan);
				}
				singlePlan.clear();
			}
		}
		
		long endOpt = System.currentTimeMillis();
		
		// print statistics: sampleRate, best cost, planning secs, unsafe decisions
		System.out.printf("%f,%f,%f,%d\n",sampleRate,bestCost,(endOpt-beginOpt)/1000.0,partitioner.getNumUnsafeDecisions());
		
		if ( verbose )
			rootPartition.print(System.out);
		
		// store as file
		
		if ( outPrefix != null ) {
			// serialized partition plan
			FileOutputStream fos = null;
			DataOutputStream out = null;
			try {
				fos = new FileOutputStream(String.format("%s.partition",outPrefix));
				out = new DataOutputStream(new BufferedOutputStream(fos,4096*1024));
				rootPartition.write(out);
			} finally {
				if ( out != null ) try { out.close(); } catch ( IOException ignore ) {}
				out = null;
				if ( fos != null ) try { fos.close(); } catch ( IOException ignore ) {}
				fos = null;
			}

			// recompute the schedule over the final plan before writing it out
			scheduler.reset(currentPlan);
			scheduler.estimateSchedule();

			PrintStream scheduleOut = null;
			try {
				scheduleOut = new PrintStream(String.format("%s.schedule",outPrefix));
				for ( Schedule s : scheduler.getSchedule() ) {
					scheduleOut.println(s);
				}
			} finally {
				if ( scheduleOut != null ) scheduleOut.close();
				scheduleOut = null;
			}
		}
	}
	
	//////////////////////////////
	
	/**
	 * Creates a planner bound to the given Hadoop configuration.
	 * Resolves the file system for reading sample files and reads the
	 * default cost-to-seconds conversion factors from the configuration.
	 *
	 * @param configuration Hadoop configuration carrying planner options
	 * @throws IOException if the file system cannot be obtained
	 */
	Planner(Configuration configuration) throws IOException {
		this.conf = configuration;
		this.fs = FileSystem.get(configuration);
		// default conversion factors: one cost unit equals one second
		this.costWorkPerSec = this.conf.getFloat("skewreduce.planner.cost.work2sec", 1.0f);
		this.costMergePerSec = this.conf.getFloat("skewreduce.planner.cost.merge2sec", 1.0f);
	}
	
	/**
	 * Reads sample tuples from the given file via the cost model's tuple
	 * reader and appends them to {@code sample}.
	 *
	 * @param fn path of the sample file to read
	 * @return the number of tuples loaded from this file
	 * @throws IOException on any read failure other than end-of-file
	 */
	private int loadSample(Path fn) throws IOException {
		final int sizeBefore = sample.size();
		FSDataInputStream input = null;
		try {
			input = fs.open(fn);
			for ( ITuple tuple = model.createTuple(input); tuple != null; tuple = model.createTuple(input) ) {
				sample.add(tuple);
			}
		} catch ( EOFException endOfFile ) {
			// end-of-stream is the normal termination condition; every other
			// IOException propagates to the caller
		} finally {
			if ( input != null ) try { input.close(); } catch ( IOException ignore ) {}
		}
		return sample.size() - sizeBefore;
	}
	
	/**
	 * Prints command-line usage, the generic Hadoop options, and the list of
	 * planner-specific options to standard output.
	 */
	private void printHelp() {
		System.out.printf("%s [options] sample files\n",Planner.class);
		GenericOptionsParser.printGenericCommandUsage(System.out);
		System.out.println("Optimizer Options");
		// one usage line per planner option, printed in declaration order
		final String[] usage = {
			"-sampleRate rate\tSample rate of given sample files",
			"-prefix prefix\tPrefix of output file name",
			"-model className\tFully qualified class name of cost model",
			"-minMachines N\tMinimum number of target machines to optimize (default: 1)",
			"-maxMachines N\tMaximum number of target machines to optimize (default: Max Map tasks)",
			"-bandWidth bw\tPartition bandwidth",
			"-p2s costPerSec\tcost of PROCESS",
			"-m2s costPerSec\tcost per MERGE",
			"-scheduleDelay secs\tScheduling delay in seconds",
			"-axisSearch searchType\tPartitioning axes search method (default: BRUTEFORCE)",
			"-verbose\tprint out debugging information (default: not verbose)",
			"-search searchType\tPartitioning point search method (default: GREEDY)",
			"-minPartitionSize size\tMinimum partition size. Support postfixes",
			"-maxPartitionSize size\tMaximum partition size. Support postfixes"
		};
		for ( String line : usage ) {
			System.out.println(line);
		}
	}
	
	/**
	 * Parses a size argument such as "1024", "64k", "128M", or "2g" into a
	 * byte count.  The unit postfixes k/K, m/M, g/G multiply by 1024, 1024^2,
	 * and 1024^3 respectively; a bare number is taken as bytes.
	 *
	 * @param arg the size string to parse; must be non-empty
	 * @return the size in bytes
	 * @throws IllegalArgumentException if the string is empty or has an
	 *         unrecognized unit postfix
	 * @throws NumberFormatException if the numeric part is not a valid number
	 */
	private long parseSize(String arg) {
		if ( arg == null || arg.isEmpty() ) {
			throw new IllegalArgumentException("Empty size argument");
		}
		long sz = 0;
		char unit = arg.charAt(arg.length()-1);
		if ( '0' <= unit && unit <= '9' ) {
			// plain byte count; Long.parseLong (not Integer.parseInt) so that
			// values of 2GB and above do not overflow the parse
			sz = Long.parseLong(arg);
		} else {
			sz = Long.parseLong(arg.substring(0,arg.length()-1));
			switch ( unit ) {
				// intentional fall-through: each level multiplies by 1024
				case 'g': case 'G':
					sz <<= 10;
				case 'm': case 'M':
					sz <<= 10;
				case 'k': case 'K':
					sz <<= 10;
					break;
				default:
				{
					throw new IllegalArgumentException("Invalid size unit: "+arg);
				}
			}
		}
		return sz;
	}
	
	/**
	 * Parses planner options, loads every sample file matched by the trailing
	 * glob arguments, configures the cost model and partitioner, and runs the
	 * selected planning algorithm.  Exits the JVM with -1 after printing help
	 * when the arguments are invalid or no samples could be loaded.
	 *
	 * @param args option flags followed by one or more sample file globs
	 */
	private void doMain(String[] args) throws Exception {
		boolean bad = false;
		int i = 0;
		long maxPartitionSize = 0;
		long minPartitionSize = 0;
		String axisSearch = null;
		
		// Option loop: consumes "-flag [value]" pairs until the first
		// non-dash argument, which starts the sample file list.
		// NOTE(review): option values are read with args[++i] without a bounds
		// check; a trailing flag with no value throws
		// ArrayIndexOutOfBoundsException instead of printing help -- confirm.
		for ( ; i < args.length; ++i ) {
			if ( args[i].charAt(0) != '-' )
				break;
			
			if ( "-sampleRate".equals(args[i]) ) {
				sampleRate = Double.parseDouble(args[++i]);
				conf.setFloat("skewreduce.planner.sampleRate", (float)sampleRate);
			} else if ( "-prefix".equals(args[i]) ) {
				outPrefix = args[++i];
			} else if ( "-model".equals(args[i]) ) {
				conf.set(EaggCostModel.MODEL_CLASS_PROP, args[++i]);
			} else if ( "-minMachines".equals(args[i]) ) {
				minMachines = Integer.parseInt(args[++i]);
			} else if ( "-maxMachines".equals(args[i]) ) {
				maxMachines = Integer.parseInt(args[++i]);
			} else if ( "-bandWidth".equals(args[i]) ) {
				bandWidth = Double.parseDouble(args[++i]);
				conf.setFloat("skewreduce.planner.partition.bandWidth", (float)bandWidth);
			} else if ( "-p2s".equals(args[i]) ) {
				costWorkPerSec = Float.parseFloat(args[++i]);
				conf.setFloat("skewreduce.planner.cost.work2sec", (float)costWorkPerSec);
			} else if ( "-m2s".equals(args[i]) ) {
				costMergePerSec = Float.parseFloat(args[++i]);
				conf.setFloat("skewreduce.planner.cost.merge2sec", (float)costMergePerSec);
			} else if ( "-scheduleDelay".equals(args[i]) ) {
				float v = Float.parseFloat(args[++i]);
				conf.setFloat("skewreduce.planner.scheduler.delay",v);
			} else if ( "-axisSearch".equals(args[i]) ) {
				axisSearch = args[++i];
			} else if ( "-verbose".equals(args[i]) || "-v".equals(args[i]) ) {
				verbose = true;
			} else if ( "-search".equals(args[i]) ) {
				// NOTE(review): this sets only the conf key read below for the
				// SearchStrategy; the searchType field is never updated, so the
				// final switch always runs doPlan() -- confirm intent.
				conf.set("skewreduce.planner.search.strategy",args[++i]);				
			} else if ( "-maxPartitionSize".equals(args[i]) ) {
				maxPartitionSize = parseSize(args[++i]);
			} else if ( "-minPartitionSize".equals(args[i]) ) {
				minPartitionSize = parseSize(args[++i]);
			} else {
				System.err.println("Unknown option: "+args[i]);
				bad = true;
			}
		}
		
		model = EaggCostModel.getInstance(conf);
		// instantiate the pluggable search strategy (default: greedy global schedule)
		Class<? extends SearchStrategy> clsStrategy = conf.getClass("skewreduce.planner.search.strategy", GreedyGlobalSchedule.class, SearchStrategy.class);
		searchStrategy = ReflectionUtils.newInstance(clsStrategy,conf);
		if ( LOG.isInfoEnabled() ) {
			LOG.info("Using search strategy {}",clsStrategy);
		}
		
		bad = bad || model == null;

		// remaining arguments are sample file globs
		if ( ! bad ) {
			for ( ; i < args.length; ++i ) {
				FileStatus[] fns = fs.globStatus(new Path(args[i]));
				for ( FileStatus fn : fns ) {
					int nsample = loadSample(fn.getPath());
					LOG.info("{} samples have loaded from {}",nsample,fn.getPath());
	
				}
			}
		}
		
		if ( bad || sample.isEmpty() ) {
			printHelp();
			System.exit(-1);
		}
		
		// default the upper machine count to the cluster's map slot capacity
		if ( maxMachines == 0 ) {
			JobClient job = new JobClient(new JobConf(conf));
			ClusterStatus clusterStat = job.getClusterStatus();
			maxMachines = clusterStat.getMaxMapTasks();
			LOG.info("Use default number of machines from MapReduce configuration: {}",maxMachines);
		}
		
		// set partition size information
		if ( maxPartitionSize == 0 ) {
			maxPartitionSize = fs.getDefaultBlockSize();
		}
		
		System.err.println("MinPartitionSize = "+minPartitionSize);
		System.err.println("MaxPartitionSize = "+maxPartitionSize);
		
		model.setMinWorkMemoryLimit(minPartitionSize);
		model.setMaxWorkMemoryLimit(maxPartitionSize);
		model.setMaxMergeMemoryLimit(maxPartitionSize);
		
		rootPartition = model.getRootPartition();
		partitioner = Partitioner.getInstance(model,axisSearch);
		//partitioner = PartitionerCollection.getPartitioners(rootPartition,conf,model);
		
		LOG.info("Total {} samples have been loaded",sample.size());
		
		// NOTE(review): searchType is never reassigned from its GREEDY default
		// in this file, so doPlan2() is unreachable -- confirm.
		switch ( searchType ) {
			case ONEPHASE:
			case TWOPHASE:
				doPlan2();
				break;
			case GREEDY:
			default:
				doPlan();
		}
	}

	/**
	 * @param args
	 */
	public static void main(String[] args) throws Exception {
		// let Hadoop's generic parser strip -D/-conf/-fs style options first
		GenericOptionsParser options = new GenericOptionsParser(new Configuration(), args);
		Configuration conf = options.getConfiguration();
		String[] remaining = options.getRemainingArgs();

		//LOG.info("current working directory = {}",System.getProperty("user.dir"));

		// at least one sample file argument is required
		if ( remaining == null || remaining.length == 0 ) {
			System.err.println("No class argument is given!");
			System.exit(1);
		}

		new Planner(conf).doMain(remaining);
	}
}
