package org.apache.hadoop.mapred;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.TreeSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
import org.apache.hadoop.mapreduce.TaskType;

import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerFlowStatus;
import org.apache.hadoop.mapred.TaskTrackerStatus.*;

import org.apache.hadoop.http.HttpServer;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.ServletInputStream;

import javax.servlet.*;
import javax.servlet.http.*;
import java.io.*;
import java.net.*;


public class NetAwareScheduler extends TaskScheduler {
  public static final Log LOG = LogFactory.getLog(
      "org.apache.hadoop.mapred.NetAwareScheduler");


  // How often update thread runs (fair shares are re-calculated), in ms
  protected long updateInterval = 250;
 
  private JobInitializer jobInitializer; // inited in start()
  
  // per-job scheduling variables, one JobInfo per tracked JobInProgress
  protected Map<JobInProgress, JobInfo> infos = new HashMap<JobInProgress, JobInfo>();

 
  protected PoolManager poolMgr;

  protected boolean initialized;  // Are we initialized? set true in start()
  protected volatile boolean running; // Are we running? set true in start()
  private Clock clock;  // inited in constructor

  protected long lastUpdateTime;           // Time when we last updated infos

  private JobListener jobListener; // inited in constructor
 
  private boolean mockMode; // Used for unit tests; disables background updates
                            // and scheduler event log  




	// node -> (job -> # of reduce tasks pending on this node for this job)
	private Map<String, Map<JobInProgress, Integer> > pending_reduce_tasks = new HashMap<String, Map<JobInProgress, Integer> >();

	// tracker IP -> free reduce slots on that tracker
	private Map<String, Integer> free_reduce_slots = new HashMap<String, Integer>();

	// job -> # of pending reduce tasks for this job
	private Map<JobInProgress, Integer> pending_reduce_tasks_for_job = new HashMap<JobInProgress, Integer>();

	// task tracker host name -> tracker name; acts as a hardcoded DNS table
	// (populated in start())
	private Map<String, String> DNS = new HashMap<String, String>();

	// map task locations of each job: jobID -> location IPs;
	// static in order to use in public static class ReverseServlet extends HttpServlet
	private static Map<String, ArrayList<String> > jobMapLocations = new HashMap<String, ArrayList<String> >();
	
//ycb-add
	// map locations recorded at the time of assigning map tasks
	private static Map<String, ArrayList<String> > jobMapLocations2 = new HashMap<String, ArrayList<String> >();
	// jobID -> number of map tasks scheduled so far
	private static Map<String, Integer> jobNumMapScheduled = new HashMap<String, Integer>();

	// cluster flow graph (topology + per-link flow counts);
	// static in order to use in public static class ReverseServlet extends HttpServlet
	private static Graph flow_graph = new Graph();
	
	///////////////////////////////////////////////////////////////////////////////////////////
	//compute metric functions
	/**
	 * Placement metric for a candidate reduce node: hypothetically add one
	 * shuffle flow from each map location of {@code jobid} to {@code srcIP},
	 * then return the maximum NORMALIZED flow count seen on any link.
	 *
	 * @param srcIP candidate node to host the reduce task
	 * @param jobid job whose map locations are consulted (from jobMapLocations2)
	 * @return the largest per-link flow value after the hypothetical additions
	 * @throws IllegalArgumentException if no map locations are recorded for the job
	 */
	public double reduce_min_flows_per_link_metric(String srcIP, String jobid) {

		ArrayList<String> mapLocations = jobMapLocations2.get(jobid);
		if (mapLocations == null) {
			throw new IllegalArgumentException("reduce_min_flows_per_link_metric mapLocs == null!");
		}

		ArrayList<Double> perLinkValues = new ArrayList<Double>();

		// Initialize the dummy edge values to the current number of flows.
		flow_graph.traverse_edge("set_flow_dummy", perLinkValues);

		// Add a hypothetical flow from every remote map location to srcIP.
		for (String mapLocation : mapLocations) {
			if (!mapLocation.equals(srcIP)) {
				flow_graph.flow_change(srcIP, mapLocation, "add_flow_dummy");
			}
		}

		// Collect the NORMALIZED dummy values after adding the flows.
		flow_graph.traverse_edge("get_flow_dummy", perLinkValues);

		// The metric is the most loaded link in the cluster.
		return Collections.max(perLinkValues);
	}

	/**
	 * Pairs a TaskTracker IP with a computed placement metric so candidates
	 * can be sorted ascending by metric (best candidate first).
	 * NOTE: ordering is not consistent with equals(); instances are only
	 * sorted in temporary lists, never stored in sorted sets/maps.
	 */
	public static class TTmetric implements Comparable<TTmetric> {

		private String IP;
		private double metric;
		
		public TTmetric(String IP, double m) { this.IP = IP; this.metric = m;}

		public String getIP() { return this.IP; }

		/** Ascending by metric; negative if this comes before other. */
		public int compareTo(TTmetric other)
		{
			// Double.compare gives a total order (handles NaN and -0.0),
			// unlike the previous hand-rolled < / > comparison.
			return Double.compare(metric, other.metric);
		}
	}
	
	/**
	 * Evaluate the given placement metric on every node that currently has
	 * free reduce slots and return the node with the smallest metric value.
	 *
	 * @param metric metric name; only "MIN_FLOWS_PER_LINK" is supported
	 * @param jobid  job whose reduce task is being placed
	 * @return IP of the best node to start the reduce task on, or null if no
	 *         node has a free reduce slot
	 * @throws IllegalArgumentException if the metric name is not recognized
	 */
	public String compute_reduce_metric(String metric, String jobid) {

		ArrayList<TTmetric> pq_metric = new ArrayList<TTmetric>();

		boolean iffind = false;

		// Debug traces: free slots per node and metric value per node.
		StringBuilder log_frs = new StringBuilder();
		StringBuilder log_metric = new StringBuilder();

		for (String ip : free_reduce_slots.keySet()) {

			Integer numSlots = free_reduce_slots.get(ip);

			if (numSlots > 0) {

				log_frs.append(ip).append(" : ").append(numSlots).append(" # ");

				double metric_value = 0.0;

				// BUG FIX: compare strings with equals(), not ==. Reference
				// comparison only matched when callers passed the interned
				// literal itself.
				if ("MIN_FLOWS_PER_LINK".equals(metric)) {
					metric_value = reduce_min_flows_per_link_metric(ip, jobid);

					log_metric.append(ip).append(" : ").append(metric_value).append(" # ");
				}
				else {
					throw new IllegalArgumentException("Not recognized reduce metric!");
				}

				pq_metric.add(new TTmetric(ip, metric_value));

				iffind = true;//at least one ip has free reduce slots
			}

		}//for ip

		LOG.info("ycb-compute-reduce-metric: ip that have free slots "+log_frs);
		LOG.info("ycb-compute-reduce-metirc: each metric value "+log_metric);

		if(!iffind)
			return null;

		// Sort ascending by metric; the first entry is the best candidate.
		Collections.sort(pq_metric);

		return pq_metric.get(0).getIP();
	}


	///////////////////////////////////////////////////////////////////////////////////////////
	//auxiliary functions
	
	/**
	 * Extract the "&lt;jobtracker-start&gt;_&lt;job-number&gt;" portion from a reduce
	 * attempt ID, e.g. "attempt_201204101534_0001_r_000000_0" yields
	 * "201204101534_0001".
	 */
	public String reduceAttemptIDtoTaskID(String attemptID) {
			// Pieces are: [attempt, <start-time>, <job-number>, r, <task>, <attempt>]
			String[] pieces = attemptID.split("_");
			return pieces[1] + "_" + pieces[2];
	}	

	/**
	 * Resolve a TaskTracker host name via the hardcoded DNS table.
	 * Returns null for unknown hosts (Map.get semantics).
	 */
	public String trackerToIP(String trackerName) {
		return DNS.get(trackerName);
	}


	/**
	 * Parse the '#'-concatenated map-location string built by JobInProgress
	 * into a de-duplicated, sorted list of host identifiers. Each raw entry
	 * looks like "host:port/extra"; only the host part is kept.
	 *
	 * @param mapLoc '#'-separated location entries
	 * @param result output list; unique hosts are appended in sorted order
	 * @return the number of raw entries, i.e. the number of map tasks
	 */
	public static int parseMapLocation(String mapLoc, ArrayList<String> result) {

		String[] entries = mapLoc.split("#");

		// TreeSet both de-duplicates and sorts the host names.
		Set<String> uniqueHosts = new TreeSet<String>();

		for (String entry : entries) {
			// Keep the text before the first '/', then before the first ':'.
			String hostAndPort = entry.split("/")[0];
			uniqueHosts.add(hostAndPort.split(":")[0]);
		}

		result.addAll(uniqueHosts);

		return entries.length;
	}


/*
	//just use one reduce task to infer the map locations
	public ArrayList<String> lookupMapLocations (String reduceID, JobInProgress jip) {
	  	
	  	for(TaskInProgress tip: jip.maps) {
		
			if(reduceID.equals(tip.)) {
				String reduceLocation = "";
				for(String location: tip.getSplitLocations()) {
					reduceLocation += location;
				}
			
			}
		}
			
	
	}
*/

	////////////////////////////////////////////////////////////////////////////////////

  /** Default constructor: real clock, mock mode disabled. */
  public NetAwareScheduler() {
    this(new Clock(), false);
  }
  
  /**
   * Constructor used for tests, which can change the clock and disable updates.
   *
   * @param clock    time source; injectable so tests can control time
   * @param mockMode when true, background updates and the scheduler event log
   *                 are disabled (see field docs)
   */
  protected NetAwareScheduler(Clock clock, boolean mockMode) {
    this.clock = clock;
    this.mockMode = mockMode;
    this.jobListener = new JobListener();
  }



  /**
   * A class for holding per-job scheduler variables. These always contain the
   * values of the variables at the last update().
   */
  static class JobInfo {

	boolean runnable = false;//to be marked as runnable
	volatile boolean needsInitializing = true; // cleared once initJob has been submitted
	
    public NetAwareJobSchedulable mapSchedulable;
    public NetAwareJobSchedulable reduceSchedulable;
    // Variables used for delay scheduling
    LocalityLevel lastMapLocalityLevel; // Locality level of last map launched
    long timeWaitedForLocalMap; // Time waiting for local map since last map
    boolean skippedAtLastHeartbeat;  // Was job skipped at previous assignTasks?
                                     // (used to update timeWaitedForLocalMap)
    /** Creates the per-job info; locality starts at NODE level. */
    public JobInfo(NetAwareJobSchedulable mapSched, NetAwareJobSchedulable reduceSched) {
      this.mapSchedulable = mapSched;
      this.reduceSchedulable = reduceSched;
      this.lastMapLocalityLevel = LocalityLevel.NODE;
    }
  }




  /**
   * Counterpart of class EagerTaskInitializationListener in FIFO scheduler.
   * Initializes a job asynchronously after it has been registered with the
   * JobListener. The core call is TaskTrackerManager.initJob(job); the job
   * will be in PREP state.
   */
  private class JobInitializer {
    private final int DEFAULT_NUM_THREADS = 1;
    private ExecutorService threadPool;
    private TaskTrackerManager ttm;
    
    public JobInitializer(TaskTrackerManager ttm) {
      threadPool = Executors.newFixedThreadPool(DEFAULT_NUM_THREADS);
      this.ttm = ttm;
    }
    
    // Call stack: JobListener, UpdateThread -> update() -> updateRunnability -> initJob
    public void initJob(JobInfo jobInfo, JobInProgress job) {
        threadPool.execute(new InitJob(jobInfo, job));
    }
    
    /** Runnable performing the (potentially slow) job initialization. */
    class InitJob implements Runnable {
      private JobInfo jobInfo;
      private JobInProgress job;
      
      public InitJob(JobInfo jobInfo, JobInProgress job) {
        this.jobInfo = jobInfo;
        this.job = job;
      }
      
      public void run() {
        ttm.initJob(job);
      }
    }
    
    /** Stop the pool, allowing in-flight initializations up to a minute. */
    void terminate() {
      LOG.info("Shutting down thread pool");
      threadPool.shutdownNow();
      try {
        threadPool.awaitTermination(1, TimeUnit.MINUTES);
      } catch (InterruptedException e) {
        // BUG FIX: we are shutting down anyway, but restore the interrupt
        // flag so callers further up the stack can observe the interruption
        // instead of having it silently swallowed.
        Thread.currentThread().interrupt();
      }
    }
  }

/**
   * Used to listen for jobs added/removed by our {@link TaskTrackerManager}.
   */
  private class JobListener extends JobInProgressListener {
    /**
     * Register per-job scheduling state for a newly submitted job and
     * trigger a share recomputation. Synchronized on the scheduler to keep
     * {@code infos} consistent with concurrent update()/assignTasks calls.
     */
    @Override
    public void jobAdded(JobInProgress job) {
      synchronized (NetAwareScheduler.this) {
        LOG.info("JOB_ADDED: "+job.getJobID());
        JobInfo info = new JobInfo(new NetAwareJobSchedulable(NetAwareScheduler.this, job, TaskType.MAP),
            new NetAwareJobSchedulable(NetAwareScheduler.this, job, TaskType.REDUCE));
        infos.put(job, info);
        
        
        // start the pending-reduce counter for this job at zero
        pending_reduce_tasks_for_job.put(job, 0);
        


//        poolMgr.addJob(job); // Also adds job into the right PoolScheduable
        update();
      }
    }
    
    /** Currently only logs removal; bookkeeping cleanup is disabled below. */
    @Override
    public void jobRemoved(JobInProgress job) {
      synchronized (NetAwareScheduler.this) {
        LOG.info("JOB_REMOVED: "+job.getJobID());
//        jobNoLongerRunning(job);
      }
    }
  
    @Override
    //e.g. after JOB_SETUP task finish, change from PREP to running, at this time set jobInfo.runnable = true;
    public void jobUpdated(JobChangeEvent event) {
      LOG.info("JOB_UPDATED: "+event.getJobInProgress().getJobID());
    }
  }

  /**
   * A thread which calls update() every updateInterval milliseconds.
   */
  private class UpdateThread extends Thread {
    private UpdateThread() {
      super("NetAwareScheduler update thread");
    }

    /**
     * Loop until {@code running} is cleared. Any exception from update()
     * (or an interrupt during the sleep) is logged and the loop continues,
     * so one bad update cannot kill the background thread.
     */
    public void run() {
      while (running) {
        try {
          Thread.sleep(updateInterval);
          update();

        } catch (Exception e) {
          LOG.error("Exception in netaware scheduler UpdateThread", e);
        }
      }
    }
  }
  
    
  /**
   * Recompute the internal variables used by the scheduler - per-job weights,
   * fair shares, deficits, minimum slot allocations, and numbers of running
   * and needed tasks of each type. 
   * Currently only refreshes job runnability; synchronized on the scheduler
   * so it does not race with assignTasks/jobAdded.
   */
  protected void update() {
  
	synchronized (this) {  	
  		
		updateRunnability(); // Set job runnability
		
	}		
  
  }

 
  /**
   * Scheduler startup: registers the job listener, publishes this scheduler
   * plus the ReverseServlet on the JobTracker's Jetty server, and builds the
   * (hardcoded) cluster topology in the flow graph.
   * NOTE(review): the topology, DNS table, and default hostname below are
   * hardcoded for a specific test cluster; they must be edited for any other
   * deployment.
   */
  @Override
  public void start() {
    try {
    	super.start();
    //	String hostname = "localhost";
    	String hostname = "boromir.openflow.cs.wisc.edu";
        if (taskTrackerManager instanceof JobTracker) {
          hostname = ((JobTracker) taskTrackerManager).getJobTrackerMachine();
        }
        
        jobInitializer = new JobInitializer(taskTrackerManager);
        taskTrackerManager.addJobInProgressListener(jobListener);
        
		initialized = true;
      	running = true;//while running, keep update()
      	lastUpdateTime = clock.getTime();
      	
      	//new UpdateThread().start();??? -- NOTE(review): background update
      	// thread is intentionally (?) not started here; confirm before enabling

		LOG.info("YIzheng===================before ready for modify===============");
      
      // Register servlet with JobTracker's Jetty server
      if (taskTrackerManager instanceof JobTracker) {
        JobTracker jobTracker = (JobTracker) taskTrackerManager;
        HttpServer infoServer = jobTracker.infoServer;
        infoServer.setAttribute("scheduler", this);//set a value in the webapp context, value is available to the jsp pages
/*
        infoServer.addInternalServlet("scheduler1", "/scheduler",//diff servlet with diff name
            NetAwareSchedulerServlet.class);
*/

        // /res is the endpoint reduce tasks POST to when their shuffle is done
        infoServer.addInternalServlet("scheduler", "/res",
            ReverseServlet.class);  
            
        LOG.info("YIzheng===================after ready for modify=========info ======"+infoServer.toString());
      }
      
      //hardcoding task tracker name to IP
//      DNS.put("localhost","127.0.0.1");
      //DNS.put("boromir.openflow.cs.wisc.edu","10.10.101.33");
      //DNS.put("pippin.openflow.cs.wisc.edu","10.10.101.37");
      //DNS.put("aragorn.openflow.cs.wisc.edu","10.10.101.39");
      //DNS.put("elrond.openflow.cs.wisc.edu","10.10.101.40");
 
 
        DNS.put("magpie.openflow.cs.wisc.edu","tracker_magpie.openflow.cs.wisc.edu"); 
        DNS.put("raven.openflow.cs.wisc.edu","tracker_raven.openflow.cs.wisc.edu");
        DNS.put("vulture.openflow.cs.wisc.edu","tracker_vulture.openflow.cs.wisc.edu");         
        DNS.put("lyrebird.openflow.cs.wisc.edu","tracker_lyrebird.openflow.cs.wisc.edu");  
        DNS.put("thrush.openflow.cs.wisc.edu","tracker_thrush.openflow.cs.wisc.edu");                
        DNS.put("sparrow.openflow.cs.wisc.edu","tracker_sparrow.openflow.cs.wisc.edu");
        DNS.put("wren.openflow.cs.wisc.edu","tracker_wren.openflow.cs.wisc.edu");
/*
        DNS.put("elrond.openflow.cs.wisc.edu","tracker_elrond.openflow.cs.wisc.edu"); 
        DNS.put("faramir.openflow.cs.wisc.edu","tracker_faramir.openflow.cs.wisc.edu"); 
        DNS.put("meriadoc.openflow.cs.wisc.edu","tracker_meriadoc.openflow.cs.wisc.edu"); 
        DNS.put("saruman.openflow.cs.wisc.edu","tracker_saruman.openflow.cs.wisc.edu"); 
        DNS.put("sam.openflow.cs.wisc.edu","tracker_sam.openflow.cs.wisc.edu"); 
        DNS.put("gimli.openflow.cs.wisc.edu","tracker_gimli.openflow.cs.wisc.edu");  
*/       
      //specify topology: jobtracker node plus a small tree of switches
      //("0.0.0.x", type 0) connecting the tracker hosts (type 1)
      //flow_graph.jobtracker = "10.10.101.33";//boromir
      flow_graph.jobtracker = "tracker_magpie.openflow.cs.wisc.edu";//boromir
      
      
      //void graph_add_edge(String node1, int type1, String node2, int type2, double bw) 
      //flow_graph.graph_add_edge("0.0.0.0", 0, "10.10.101.33", 1, 1.0);
      //flow_graph.graph_add_edge("0.0.0.0", 0, "10.10.101.37", 1, 1.0);
      //flow_graph.graph_add_edge("0.0.0.0", 0, "10.10.101.39", 1, 1.0);
      //flow_graph.graph_add_edge("0.0.0.0", 0, "10.10.101.40", 1, 1.0);

      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_magpie.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "0.0.0.1", 0, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "0.0.0.2", 0, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "0.0.0.3", 0, 1.0);
      
/*
      flow_graph.graph_add_edge("0.0.0.1", 0, "0.0.0.3", 0, 1.0);      
      flow_graph.graph_add_edge("0.0.0.1", 0, "0.0.0.4", 0, 1.0);
      flow_graph.graph_add_edge("0.0.0.1", 0, "0.0.0.5", 0, 1.0);            

      flow_graph.graph_add_edge("0.0.0.2", 0, "0.0.0.6", 0, 1.0);      
      flow_graph.graph_add_edge("0.0.0.2", 0, "0.0.0.7", 0, 1.0);
      flow_graph.graph_add_edge("0.0.0.2", 0, "0.0.0.8", 0, 1.0);  
*/
      
      flow_graph.graph_add_edge("0.0.0.1", 0, "tracker_raven.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.1", 0, "tracker_vulture.openflow.cs.wisc.edu", 1, 1.0);
      
      flow_graph.graph_add_edge("0.0.0.2", 0, "tracker_lyrebird.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.2", 0, "tracker_thrush.openflow.cs.wisc.edu", 1, 1.0);      
            
      flow_graph.graph_add_edge("0.0.0.3", 0, "tracker_sparrow.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.3", 0, "tracker_wren.openflow.cs.wisc.edu", 1, 1.0);
      
/*
      flow_graph.graph_add_edge("0.0.0.6", 0, "tracker_elrond.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.6", 0, "tracker_faramir.openflow.cs.wisc.edu", 1, 1.0);
      
      flow_graph.graph_add_edge("0.0.0.7", 0, "tracker_meriadoc.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.7", 0, "tracker_saruman.openflow.cs.wisc.edu", 1, 1.0);
      
      flow_graph.graph_add_edge("0.0.0.8", 0, "tracker_sam.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.8", 0, "tracker_gimli.openflow.cs.wisc.edu", 1, 1.0);

*/      
/*      
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_boromir.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_frodo.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_gollum.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_bilbo.openflow.cs.wisc.edu", 1, 1.0);            
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_pippin.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_aragorn.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_elrond.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_faramir.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_meriadoc.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_saruman.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_sam.openflow.cs.wisc.edu", 1, 1.0);
      flow_graph.graph_add_edge("0.0.0.0", 0, "tracker_gimli.openflow.cs.wisc.edu", 1, 1.0);
*/      
      // precompute routing paths over the topology built above
      flow_graph.compute_path();
      	 
        
    } catch (Exception e) {
      throw new RuntimeException("Failed to start NetAwareScheduler", e);
    }
    LOG.info("Successfully configured NetAwareScheduler");
  }



 
  /** Shut down the job-initializer thread pool. */
  @Override
  public void terminate() throws IOException {
	jobInitializer.terminate();
	LOG.info("Successfully terminate NetAwareScheduler");
  }

//XXX: deprecated — superseded by ReverseServlet below; kept for reference
	public static class NetAwareSchedulerServlet extends HttpServlet {

	  private NetAwareScheduler scheduler;
	  private JobTracker jobTracker;
	  
	  @Override
	  public void init() throws ServletException {
		super.init();
		ServletContext servletContext = this.getServletContext();
		this.scheduler = (NetAwareScheduler) servletContext.getAttribute("scheduler");
		this.jobTracker = (JobTracker) scheduler.taskTrackerManager;
		
		LOG.info("Servlet test: ====YIZHENG==== init() suc!");
	  }

		  @Override
		  public void doGet(HttpServletRequest request, HttpServletResponse response)
			throws ServletException, IOException {
			
			String test = request.getParameter("reducer");
    
    		LOG.info("Servlet test: ====YIZHENG===="+test);
    
    	}
    	
    	
    	 @Override
    	public void doPost(HttpServletRequest req, HttpServletResponse resp) {
        
        	try{
            int len = req.getContentLength();
            byte[] input = new byte[len];
        
            ServletInputStream sin = req.getInputStream();
            int c, count = 0 ;
            while ((c = sin.read(input, count, input.length-count)) != -1) {
                count +=c;
            }
            sin.close();
        
            String inString = new String(input);
            int index = inString.indexOf("=");
            if (index == -1) {
                resp.setStatus(HttpServletResponse.SC_BAD_REQUEST);
                resp.getWriter().print("error during servlet processing");
                resp.getWriter().close();
                return;
            }
            String value = inString.substring(index + 1);
    	
			LOG.info("Servlet test: ====YIZHENG==POST=="+value);
			} catch (IOException e) {
			
			
			}
		}
	
	}



//==== This is the servlet used to communicate with Reduce tasks ====
// A reduce task POSTs "key=jobID#trackerName" when its shuffle completes;
// the scheduler echoes the reversed value back (legacy protocol) and then
// removes the job's shuffle flows from the global flow graph.
public static class ReverseServlet extends HttpServlet
{
    private static String message = "Error during Servlet processing";
    
    /**
     * Handle a shuffle-done notification.
     * Body format: "&lt;key&gt;=&lt;jobID&gt;#&lt;trackerName&gt;", e.g.
     * "...=job_201204111946_0001#tracker_boromir.openflow.cs.wisc.edu".
     */
    public void doPost(HttpServletRequest req, HttpServletResponse resp) {
        try {
            int len = req.getContentLength();
            // BUG FIX: getContentLength() is -1 for unknown/chunked length;
            // previously this caused a NegativeArraySizeException.
            if (len < 0) {
                resp.setStatus(HttpServletResponse.SC_BAD_REQUEST);
                resp.getWriter().print(message);
                resp.getWriter().close();
                return;
            }
            byte[] input = new byte[len];
        
            ServletInputStream sin = req.getInputStream();
            int c, count = 0;
            // BUG FIX: stop once the buffer is full. read(buf, off, 0)
            // returns 0, not -1, so the original loop never terminated after
            // reading exactly len bytes.
            while (count < len && (c = sin.read(input, count, len - count)) != -1) {
                count += c;
            }
            sin.close();
        
            // BUG FIX: decode with an explicit charset rather than the
            // platform default.
            String inString = new String(input, "UTF-8");
            int index = inString.indexOf("=");
            if (index == -1) {
                resp.setStatus(HttpServletResponse.SC_BAD_REQUEST);
                resp.getWriter().print(message);
                resp.getWriter().close();
                return;
            }
            String value = inString.substring(index + 1);
            
            //(old)value received is JobID#tasktracker ip:     job_201204111946_0001#127.0.0.1
            //(new)value received is JobID#tasktracker ip:     job_201204111946_0001#tracker_boromir.openflow.cs.wisc.edu
            LOG.info("====YIZHENG=shuffle done from reduce task===:"+value);
            
            //decode application/x-www-form-urlencoded string
            String decodedString = URLDecoder.decode(value, "UTF-8");
            
            //reverse the String (legacy echo the reduce side expects)
            String reverseStr = (new StringBuffer(decodedString)).reverse().toString();
            
            // Write the response before touching the flow graph so the
            // reduce task is not blocked on scheduler bookkeeping.
            resp.setStatus(HttpServletResponse.SC_OK);
            OutputStreamWriter writer = new OutputStreamWriter(resp.getOutputStream());
            
            writer.write(reverseStr);
            writer.flush();
            writer.close();
            
            //update global flow info------------------------------------------start
            String[] temp;
         	temp = value.split("#");
         	String jobid = temp[0];
         	String srcIP = temp[1];
         	
         	//ArrayList<String> mapLocs = jobMapLocations.get(jobid);
         	ArrayList<String> mapLocs = jobMapLocations2.get(jobid);
         	
			if(mapLocs == null)
				throw new IllegalArgumentException("mapLocs == null!");
				
			// Mark every map->reduce flow of this job as finished.
			for(String maploc : mapLocs) {
			
				if(!maploc.equals(srcIP))
					flow_graph.flow_change(srcIP, maploc, "finish");
			
			}	       	
         	//update global flow info------------------------------------------end
            
        } catch (IOException e) {
        
        	LOG.info("YIZHENG==========IOEXCEPTON HERE!");
            try{
                resp.setStatus(HttpServletResponse.SC_BAD_REQUEST);
                resp.getWriter().print(e.getMessage());
                resp.getWriter().close();
            } catch (IOException ioe) {
                // Response stream already broken; nothing more we can do.
            }
        }
        
    }  
        
}


//==============================end===================

	/**
	 * Non-preemptively assign each reduce-runnable job its fair share of the
	 * cluster's reduce slots, writing the result into
	 * reduceSchedulable.share. A job is reduce-runnable once all of its map
	 * tasks have been scheduled and it still has unfinished reduces. The
	 * fairness level R is found by binary search (algorithm adapted from the
	 * Hadoop fair scheduler); each job's share is clamped between its running
	 * reduces (non-preemptive: running tasks are never reclaimed) and its
	 * remaining reduces.
	 */
	public void non_preempt_assign_reduce_share(int numTotalReduceSlots) {

		ArrayList<JobInfo> reduce_runnable_jobs = new ArrayList<JobInfo>();	
		
		for(JobInProgress jip: infos.keySet()) {
			
			JobInfo jinfo = infos.get(jip);
			
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
			
			//only check jobs ready to launch reduce tasks
		//	if( jip.allMapCompleted() && reduceSchedulable.num_finished < reduceSchedulable.num_total )
		//		reduce_runnable_jobs.add(jinfo);

			Integer numMapSched = jobNumMapScheduled.get(jip.getJobID().toString());
			
			if(numMapSched != null) {
				
				int numMS = numMapSched;
				// runnable once all maps are PLACED (not necessarily finished)
				if( (jip.numMapTasks == numMS) && reduceSchedulable.num_finished < reduceSchedulable.num_total )
					reduce_runnable_jobs.add(jinfo);
					
			}


		
		}
		
		
		if(reduce_runnable_jobs.size() == 0) {
		
			//LOG.info("reduce_runnable_jobs.size() == 0");
			return;
		}
	
		int total_running = 0;
		int total_remain = 0;
		
		
		// Totals across all runnable jobs: currently running reduces and
		// remaining (unfinished) reduces.
		for(int i = 0; i < reduce_runnable_jobs.size(); i++)
		{
			JobInfo jinfo = reduce_runnable_jobs.get(i);
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
			
			total_running += reduceSchedulable.num_running;
			total_remain += ( reduceSchedulable.num_total - reduceSchedulable.num_finished );
		
		}
		
		// cluster has more slots than all jobs need. Just give every job all it needs.
		if (total_remain <= numTotalReduceSlots) {
		
			for(int i = 0; i < reduce_runnable_jobs.size(); i++)
			{
				JobInfo jinfo = reduce_runnable_jobs.get(i);
				NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
			
				reduceSchedulable.share = reduceSchedulable.num_total - reduceSchedulable.num_finished;

			}

			LOG.info("ycb-share: total_remain <= numTotalReduceSlots: "+total_remain+" < "+numTotalReduceSlots);

			return;		
		
		}
		
		// All slots are already occupied by running reduces; shares cannot
		// change (non-preemptive), so there is nothing to recompute.
		if (total_running == numTotalReduceSlots) {
			LOG.info("ycb-share: total_running == numTotalReduceSlots: "+total_running);
			return;
		}

//debug: dump per-job state before the share computation
		for(int i = 0; i < reduce_runnable_jobs.size(); i++) {	
			JobInfo jinfo = reduce_runnable_jobs.get(i);
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
					
					
			JobInProgress jip = reduceSchedulable.job;					
			
			LOG.info("ycb-share-before: job: total slots "+numTotalReduceSlots+" # "+jip.getJobID().toString()+" share: "+reduceSchedulable.share+" running: "+reduceSchedulable.num_running+" not-start: "+reduceSchedulable.num_not_start
			+" finished: "+reduceSchedulable.num_finished+" total: "+reduceSchedulable.num_total);
		
		}		





		// compute fair share algorithm adapted from FairShare scheduler in
		// Hadoop 0.20.0 or 0.21.0
		double cap = numTotalReduceSlots;
		double Rmax = 1.0;
		// Grow Rmax until the demand at Rmax exceeds capacity, so the binary
		// search below is bracketed.
		while (reduce_slots_at_R(Rmax, reduce_runnable_jobs) < cap) {	//invoke reduce_compute_share
			Rmax *= 2.0;
		}
		double left = 0;
		double right = Rmax;
		// 25 bisection rounds give ample precision for slot counts.
		for (int round = 0; round < 25; round ++) {
			if (left >= right) {
				break;
			}
			double middle = (left+right)/2;
			if (reduce_slots_at_R(middle, reduce_runnable_jobs) < cap) {	//invoke reduce_compute_share
				left = middle;
			} else {
				right = middle;
			}
			//print("left=%lf, right=%lf\n", left, right);
		}

		double R = right;
		for(int i = 0; i < reduce_runnable_jobs.size(); i++)
		{
			JobInfo jinfo = reduce_runnable_jobs.get(i);
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
		
			reduceSchedulable.share = reduce_compute_share(jinfo, R, reduce_runnable_jobs);	//set reduce share! 

		}

		int total_reduce_share = 0;
		
		// Sanity-check every share against the clamping invariant and log
		// the resulting allocation.
		for(int i = 0; i < reduce_runnable_jobs.size(); i++)
		{
			JobInfo jinfo = reduce_runnable_jobs.get(i);
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
		
			if(reduceSchedulable.share < reduceSchedulable.num_running || reduceSchedulable.share > (reduceSchedulable.num_total - reduceSchedulable.num_finished))
				throw new IllegalArgumentException("share < running || share > remaining");
				
			total_reduce_share += reduceSchedulable.share;

	//debug		
			JobInProgress jip = reduceSchedulable.job;					
			
			LOG.info("ycb-share-after: job: total slots "+numTotalReduceSlots+" # "+jip.getJobID().toString()+" share: "+reduceSchedulable.share+" running: "+reduceSchedulable.num_running+" not-start: "+reduceSchedulable.num_not_start
			+" finished: "+reduceSchedulable.num_finished+" total: "+reduceSchedulable.num_total);
		

		}
		
		String log_frs ="";
		for(String ip : free_reduce_slots.keySet()) {
	
			Integer numSlots = free_reduce_slots.get(ip);
		
			if(numSlots > 0) {
			
				log_frs+=ip;
				log_frs+=" : ";
				log_frs+=numSlots;
				log_frs+=" # ";		
			}
		}
		
		LOG.info("ycb-share free slots for each ip: "+log_frs);
		LOG.info("ycb-share total share: "+total_reduce_share+"#");		

		


	}


	/**
	 * Fair-share helper: the number of reduce slots {@code job} receives at
	 * fairness level R. All runnable jobs have equal weight, and the result
	 * is bounded by the job's running reduces on the low side (scheduling is
	 * non-preemptive) and its remaining reduces on the high side.
	 */
	public double reduce_compute_share(JobInfo job, double R, ArrayList<JobInfo> reduceRunnableJobs)
	{
		// Equal weight for every reduce-runnable job.
		double weight = 1.0 / reduceRunnableJobs.size();

		NetAwareJobSchedulable sched = job.reduceSchedulable;

		double rawShare = R * weight;
		int remaining = sched.num_total - sched.num_finished;

		// Preserve the original clamp order: remaining cap first, then the
		// running-tasks floor.
		if (rawShare > remaining)
			return (double) remaining;
		if (rawShare < sched.num_running)
			return (double) sched.num_running;
		return rawShare;
	}


	/**
	 * Total reduce slots consumed across all runnable jobs at fairness
	 * level R (sum of per-job clamped shares).
	 */
	public double reduce_slots_at_R(double R, ArrayList<JobInfo> reduceRunnableJobs)
	{
		double totalSlots = 0;

		for (JobInfo job : reduceRunnableJobs) {
			totalSlots += reduce_compute_share(job, R, reduceRunnableJobs);
		}

		return totalSlots;
	}


	/**
	 * Decide whether a new reduce task may be launched, under the policy that
	 * reduces launch only after ALL maps of a job have COMPLETED.
	 * Side effect: fills {@code reduce_runnable_jobs} with every job whose
	 * maps are done and which still has reduces that have not started.
	 *
	 * @return true iff at least one job is reduce-runnable and the cluster
	 *         has at least one free reduce slot
	 */
	public boolean if_launch_new_reduce(ArrayList<JobInfo> reduce_runnable_jobs) {

		// Collect jobs ready to launch reduce tasks.
		// NOTE: condition differs from non_preempt_assign_reduce_share —
		// there, every job with finished < total needs a share computed, but
		// a job with num_not_start == 0 has nothing left to schedule here.
		for (JobInProgress jip : infos.keySet()) {

			JobInfo jinfo = infos.get(jip);
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;

			if (jip.allMapCompleted() && reduceSchedulable.num_not_start > 0)
				reduce_runnable_jobs.add(jinfo);
		}

		// Sum free reduce slots over the whole cluster.
		int total_free_reduce_slots = 0;
		for (Integer freeSlots : free_reduce_slots.values()) {
			total_free_reduce_slots += freeSlots;
		}

		// Idiom fix: return the condition directly instead of
		// if (...) return true; else return false;
		return reduce_runnable_jobs.size() > 0 && total_free_reduce_slots > 0;
	}
	
	/**
	 * Decide whether a new reduce task may be launched, under the policy that
	 * reduces launch once all of a job's maps have been PLACED (scheduled),
	 * not necessarily completed.
	 * Side effect: fills {@code reduce_runnable_jobs} with every job whose
	 * maps are all scheduled and which still has reduces that have not started.
	 *
	 * @return true iff at least one job is reduce-runnable and the cluster
	 *         has at least one free reduce slot
	 */
	public boolean if_launch_new_reduce2(ArrayList<JobInfo> reduce_runnable_jobs) {

		// Collect jobs ready to launch reduce tasks.
		// NOTE: condition differs from non_preempt_assign_reduce_share —
		// there, every job with finished < total needs a share computed, but
		// a job with num_not_start == 0 has nothing left to schedule here.
		for (JobInProgress jip : infos.keySet()) {

			JobInfo jinfo = infos.get(jip);
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;

			Integer numMapSched = jobNumMapScheduled.get(jip.getJobID().toString());

			if (numMapSched != null) {

				int numMS = numMapSched;

//LOG.info("ycb-if_launch_new_reduce2: numMapScheduled:"+numMS+" numMapTotal:"+ jip.numMapTasks + " for job "+ jip.getJobID().toString() );

				if ((jip.numMapTasks == numMS) && reduceSchedulable.num_not_start > 0)
					reduce_runnable_jobs.add(jinfo);
			}
		}

		// Sum free reduce slots over the whole cluster.
		int total_free_reduce_slots = 0;
		for (Integer freeSlots : free_reduce_slots.values()) {
			total_free_reduce_slots += freeSlots;
		}

//		LOG.info("ycb-if_launch_new_reduce2 reduce-runnable:"+reduce_runnable_jobs.size()+" total free slots: "+total_free_reduce_slots);

		// Idiom fix: return the condition directly instead of
		// if (...) return true; else return false;
		return reduce_runnable_jobs.size() > 0 && total_free_reduce_slots > 0;
	}	
	


/*
	//tracker_localhost:localhost/127.0.0.1:50747 to 127.0.0.1
	public String parseTrackerNametoIP(String trackerName) {

		String str = trackerName; //"tracker_localhost:localhost/127.0.0.1:50747";
		String[] temp;
		String delimiter = "/";	
		temp = str.split(delimiter);
		//for(int i =0; i < temp.length ; i++)
		//	System.out.println(temp[i]);
		delimiter = ":";	
		String[] temp2 = temp[1].split(delimiter);
		//for(int i =0; i < temp2.length ; i++)
		//	System.out.println(temp2[i]);	
		return temp2[0];
	}
*/

	//tracker_aragorn.openflow.cs.wisc.edu:localhost/127.0.0.1:49220 to tracker_aragorn.openflow.cs.wisc.edu
	/**
	 * Extract the leading tracker-host token from a raw tracker name such as
	 * "tracker_aragorn.openflow.cs.wisc.edu:localhost/127.0.0.1:49220":
	 * everything before the first ':' of the part before the first '/'.
	 *
	 * NOTE(review): despite the method name, this returns the tracker host
	 * token (e.g. "tracker_aragorn.openflow.cs.wisc.edu"), not a numeric IP —
	 * see the example above.
	 *
	 * @param trackerName raw tracker name reported by the TaskTracker
	 * @return the substring before the first ':' (and before any '/')
	 */
	public String parseTrackerNametoIP(String trackerName) {
		// Equivalent to the previous split("/")[0].split(":")[0], but without
		// regex compilation and throwaway arrays on every heartbeat.
		int slash = trackerName.indexOf('/');
		String beforeSlash = (slash >= 0) ? trackerName.substring(0, slash) : trackerName;
		int colon = beforeSlash.indexOf(':');
		return (colon >= 0) ? beforeSlash.substring(0, colon) : beforeSlash;
	}
  
  /**
   * Heartbeat entry point: called for every TaskTracker heartbeat; returns the
   * list of tasks to launch on that tracker, or null when nothing should start.
   *
   * Overall flow:
   *  1. Until a heartbeat has been seen from every tracker, only initialize
   *     per-node bookkeeping (pending_reduce_tasks / free_reduce_slots) and
   *     return null.
   *  2. Recompute the actual free reduce slots per node, subtracting tasks
   *     already queued in the pending queues.
   *  3. Refresh per-job demand counters for both map and reduce schedulables.
   *  4. Assign map tasks up to this tracker's free map slots, using the
   *     non-preemptive fair-share computation.
   *  5. Compute reduce shares, pick target nodes for new reduce tasks (they go
   *     into per-node pending queues, possibly for OTHER trackers), then
   *     launch every reduce that is pending for THIS tracker.
   *
   * @param tracker the heartbeating task tracker
   * @return tasks to launch on this tracker, or null if none
   * @throws IOException propagated from task-obtaining calls on JobInProgress
   */
  @Override
  public synchronized List<Task> assignTasks(TaskTracker tracker)
      throws IOException {
    if (!initialized) // Don't try to assign tasks if we haven't yet started up
      return null;
    String trackerName = tracker.getTrackerName();
    
//    LOG.info("HEARTBEAT from: "+trackerName);
    long currentTime = clock.getTime();
    
//    LOG.info("HEARTBEAT time is: "+currentTime);
    
	ClusterStatus clusterStatus = taskTrackerManager.getClusterStatus();
	
    // Compute total map/reduce slots in the cluster
    int totalMapSlots = getTotalSlots(TaskType.MAP, clusterStatus);
    int totalReduceSlots = getTotalSlots(TaskType.REDUCE, clusterStatus);       
    
    final int numTaskTrackers = clusterStatus.getTaskTrackers();
       
//    LOG.info("totalMapSlots: "+totalMapSlots+" totalReduceSlots: "+totalReduceSlots);
       
	TaskTrackerStatus tts = tracker.getStatus(); 
	
       
       /*
	           
       getMaxMapSlots() - countOccupiedMapSlots();
       getMaxReduceSlots() - countOccupiedReduceSlots();
       countMapTasks() countReduceTasks()
    */
    
    /*
          LOG.info("getMaxMapSlots(): "+tts.getMaxMapSlots()+" getMaxReduceSlots(): "+tts.getMaxReduceSlots()
    +"countOccupiedMapSlots(): "+tts.countOccupiedMapSlots()+" countMapTasks(): "+tts.countMapTasks() 
    +"countOccupiedReduceSlots()"+tts.countOccupiedReduceSlots()+" countReduceTasks() "+tts.countReduceTasks()  ); 
    */    
       String trackerIP = parseTrackerNametoIP(trackerName);
      
       
       
       //private Map<String, Map<JobInProgress, Integer> > pending_reduce_tasks
       //private Map<String, Integer> free_reduce_slots = new Map<String, Integer>();
       
		//all tasktracker hb received and initialized?
		// pending_reduce_tasks gains one key per tracker (added below), so its
		// key count tells us how many distinct trackers have heartbeated.
		boolean all_nodes_received;
		int num_nodes_received = pending_reduce_tasks.keySet().size();
		if(num_nodes_received < numTaskTrackers)
			all_nodes_received = false;
		else {
		
			if(num_nodes_received != numTaskTrackers)
				throw new IllegalArgumentException("num_nodes_received != numTaskTrackers");
			all_nodes_received = true;
		}
       
       
       if(!all_nodes_received) {
       
			//initialize
			// Bootstrap phase: record this node's pending-queue and free-slot
			// entries, assign no tasks until every tracker has checked in.
			Map<JobInProgress, Integer> pendingReduce = pending_reduce_tasks.get(trackerIP);
			if(pendingReduce == null)
			{
				Map<JobInProgress, Integer> pendingReduceMap = new HashMap<JobInProgress, Integer>();
				pending_reduce_tasks.put(trackerIP, pendingReduceMap);//pending task queue on the node
				
				free_reduce_slots.put(trackerIP, tts.getMaxReduceSlots());//free reduce slots on the node
			}
			
			return null;
    
       }
       else {	//all received
       
       		int pendingReduceNumber;
       		int availabeReduceOnNode;
       	

       		int actual_total_free_reduce_slots = 0;
       		int total_reduce_slots_for_share = totalReduceSlots;
	
       		//compute actual free reduce slots on the node
       		// For the heartbeating node we trust tts.getAvailableReduceSlots()
       		// minus tasks still queued; for every other node we keep the last
       		// cached value (updated when reduces are enqueued for that node).
       		for(String nodeIP : pending_reduce_tasks.keySet()) {
       		
       			pendingReduceNumber = 0;
       		
       			Map<JobInProgress, Integer> pendingReduce = pending_reduce_tasks.get(nodeIP);
       			
       			for(JobInProgress jip : pendingReduce.keySet()) {
       			
       				int pendingReduceForJob = pendingReduce.get(jip);
       				pendingReduceNumber += pendingReduceForJob;
       			
       			}
       		
       			if(nodeIP.equals(trackerIP)) { //use tts.getAvailableReduceSlots() to update
       		
   					int last_free_reduce_on_node = free_reduce_slots.get(nodeIP);
   					availabeReduceOnNode = tts.getAvailableReduceSlots() - pendingReduceNumber;
   					
   					//some reduce done, more slots available
   					//OR job setup/cleanup task will steal a slot
   					if(availabeReduceOnNode < last_free_reduce_on_node) {
   						
   						// Only a one-slot drop is expected (a setup/cleanup task).
   						if( (last_free_reduce_on_node - availabeReduceOnNode) != 1)
   							throw new IllegalArgumentException("last_free_reduce_on_node > availabeReduceOnNode & (last_free_reduce_on_node - availabeReduceOnNode) != 1");
   							
						//if(availabeReduceOnNode >= 0) do nothing
						
						//(last_free_reduce_on_node - availabeReduceOnNode) == 1, no room for that pending task! simply return.
						if(last_free_reduce_on_node == 0) {
   						
   							free_reduce_slots.put(nodeIP, 0);
   							return null;
   						
   						}

   						// A slot was stolen; exclude it from share computation.
   						total_reduce_slots_for_share = totalReduceSlots - 1;

					}
       
       			}
       			else {
       			
       				availabeReduceOnNode = free_reduce_slots.get(nodeIP);//XXX: when schedule new pending for the node, update free_reduce_slots for the node
       
       			}
       			
       			free_reduce_slots.put(nodeIP, availabeReduceOnNode);

			actual_total_free_reduce_slots += availabeReduceOnNode;

   			
   			}//for(String nodeIP : pending_reduce_tasks.keySet())


			//update # to start, running, finished reduce tasks for the job
			for(JobInProgress jip: infos.keySet()) {

				JobInfo jinfo = infos.get(jip);
				NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
			
				//private Map<JobInProgress, Integer> pending_reduce_tasks_for_job
				// NOTE(review): NPE if the job has no entry here — assumes the
				// entry is created when the job is added; confirm against jobAdded.
				int numScheduledPending = pending_reduce_tasks_for_job.get(jip);
				reduceSchedulable.updateDemand(numScheduledPending);

			}

			//update # to start, running, finished map tasks for the job
			for(JobInProgress jip: infos.keySet()) {

				JobInfo jinfo = infos.get(jip);
				NetAwareJobSchedulable mapSchedulable = jinfo.mapSchedulable;
			
				//public void updateDemand(int numScheduledPending),  numScheduledPending == 0
				mapSchedulable.updateDemand(0);

			}


			List<JobInProgress> jobs = new ArrayList<JobInProgress>(infos.keySet()); 

			// Debug trace of each job's run state (RUNNING vs PREP).
			for (JobInProgress job: jobs) {
				if (job.getStatus().getRunState() == JobStatus.RUNNING) {
						LOG.info("jobStatus.RUNNING");
					  }
				if (job.getStatus().getRunState() == JobStatus.PREP) {
						LOG.info("JobStatus.PREP");
					  }          
			}

			int mapsAssigned = 0; // loop counter for map in the below while loop
			//int mapCapacity = maxTasksToAssign(TaskType.MAP, tts);
			//boolean mapRejected = false; // flag used for ending the loop
						    
			ArrayList<Task> tasks = new ArrayList<Task>();
		
			//first assign map tasks
			//compute and sort map share for jobs
			/*
			while (true) {
			 
				if (!mapRejected) {
					if (mapsAssigned == mapCapacity ||	//# of free slots all allocated
						runningMaps == runnableMaps) {
					  LOG.info("INFO Can't assign another MAP to " + trackerName);
					  mapRejected = true;
					}
				}
				if (mapRejected)     
					break; // This is the only exit of the while (true) loop
		
				TaskType taskType = TaskType.MAP;
					 
		  
			 }
			*/
			//////////////////////////////
		
			int availableMapSlots = maxTasksToAssign(TaskType.MAP, tts);

			// Recompute each job's fair map share before handing out slots.
			non_preempt_assign_map_share(totalMapSlots);
			
			ArrayList<JobInfo> map_runnable_jobs = new ArrayList<JobInfo>();
			
			// Hand out up to availableMapSlots map tasks, one per iteration;
			// stop early when no job is runnable or no task can be obtained.
			scheduleMaps:
			for (int i=0; i < availableMapSlots; ++i) {
				synchronized (infos) {
				
					if(if_launch_new_map(map_runnable_jobs)) {
				
						//JobInProgress
						//JobInProgress jip_to_launch = assign_map_task(map_runnable_jobs);
						
						Task t = null;
						//public Task assign_map_task(ArrayList<JobInfo> map_runnable_jobs, TaskTrackerStatus tts, int numTaskTrackers)
						t = assign_map_task(trackerIP, map_runnable_jobs, tts, numTaskTrackers);
						
						//t should not be null here, the case in which t may be null is dealt with in assign_map_task(...)
						if (t != null) {
							tasks.add(t);
							++mapsAssigned;	
						}		

						map_runnable_jobs.clear();
  				
  						if(t == null)
  							break scheduleMaps;	
					}
					else
						break scheduleMaps;
				
				}//synchronized (infos)
			
			}//for (int i=0; i < availableMapSlots; ++i)

			/*
			//then assign reduce tasks
			int availableReduceSlots = maxTasksToAssign(TaskType.REDUCE, tts);
			*/
			
			// Recompute reduce fair shares, then enqueue new reduce tasks into
			// per-node pending queues until no job remains below its share.
			non_preempt_assign_reduce_share(total_reduce_slots_for_share);  	

  			ArrayList<JobInfo> reduce_runnable_jobs = new ArrayList<JobInfo>();
  	
  			//job reduce num_not_start > 0 && free slots > 0
  			//while(if_launch_new_reduce(reduce_runnable_jobs)) {
			while(if_launch_new_reduce2(reduce_runnable_jobs)) {
  			
  				boolean scheduleNewTask = assign_reduce_task(reduce_runnable_jobs, trackerIP);
  				
  				reduce_runnable_jobs.clear();
  				
  				if(scheduleNewTask == false)
  					break;

  			}
  			
  			
  			// Launch every reduce task that is pending for THIS tracker.
  			Map<JobInProgress, Integer> pendingToLaunchOnNode = pending_reduce_tasks.get(trackerIP);
  			
  			for(JobInProgress jip: pendingToLaunchOnNode.keySet()) {
  				
  				Integer numToLaunch = pendingToLaunchOnNode.get(jip);
  			
int temp = free_reduce_slots.get(trackerIP);
LOG.info("ycb-hang: pendingToLaunchOnNode "+numToLaunch+" free-reduce-slots: "+temp);
//LOG.info();

	
  				for(int i = 0; i < numToLaunch; i++) {
  			
  					Task task = jip.simplyObtainNewReduceTask(tts, numTaskTrackers, taskTrackerManager.getNumberOfUniqueHosts());
  					
	  				if(task == null)
	  					throw new IllegalArgumentException("jip.obtainNewReduceTask == null"); 					
	  					
	  					
  					//test map task locations
  					/*
  					for(TaskInProgress tip: jip.maps) {
  						
  						String reduceLocation = "";
  						for(String location: tip.getSplitLocations()) {
  							reduceLocation += location;
						}
  						
  						//map split locations, map task id
  						LOG.info("reducelocation:"+trackerToIP(reduceLocation)+"TIP getTIPId:"+tip.getTIPId().toString());	//reducelocation:localhost
  					}
					*/
	
  					tasks.add(task);
  			
  				}
  				
  				/*
  				//test
  				//in JIP.java:  completedMapLocation.add(status.getTaskTracker());
  				//each item in form: tracker_localhost:localhost/127.0.0.1:47478
  				//tracker_localhost:localhost/127.0.0.1:47478$tracker_localhost:localhost/127.0.0.1:47478
  				LOG.info("map completed locs:"+jip.completedMapLoc());
  				
  				ArrayList<String> mapLocs = new ArrayList<String>();
  				
  				int numMapTask = parseMapLocation(jip.completedMapLoc(), mapLocs);
  				
  				if(numMapTask != jip.desiredMaps())
  					throw new IllegalArgumentException("numMapTask != jip.desiredMaps");
  				
  				for(String mapLoc:mapLocs) {
  					LOG.info("each map:"+mapLoc);
  				}
  				
  				//job_201204102254_0002
  				//LOG.info("JOBID TEST:"+jip.getJobID().toString());
  				*/

  				// Launched tasks are no longer pending for this job.
  				Integer total_pending_reduce_for_job = pending_reduce_tasks_for_job.get(jip);
  				
  				if(total_pending_reduce_for_job == null)
  					throw new IllegalArgumentException("total_pending_reduce_for_job == null");
  					
  				pending_reduce_tasks_for_job.put(jip, total_pending_reduce_for_job - numToLaunch);
  			
  			}//for(JobInProgress jip: pendingToLaunchOnNode.keySet())
  			
  			pendingToLaunchOnNode.clear();

			// If no tasks were found, return null
			return tasks.isEmpty() ? null : tasks;
    
		}//all nodes hb received and initialized
  }


	//return true: at least one job: running < share, and insert min running job into pending queue
	/**
	 * Try to enqueue ONE reduce task: among the runnable jobs whose running
	 * count is below their fair share, pick the one with the fewest running
	 * reduces, choose a target node via the flow-graph metric, and add the task
	 * to that node's pending queue (it is actually launched when that node next
	 * heartbeats, see assignTasks).
	 *
	 * Also updates free_reduce_slots, pending_reduce_tasks_for_job, the job's
	 * schedulable counters, and the global flow graph.
	 *
	 * @param reduce_runnable_jobs non-empty list of reduce-runnable jobs
	 * @param currentIP IP of the tracker currently heartbeating
	 * @return true if a task was enqueued; false when no job is below its share
	 *         or no suitable node was found
	 */
	public boolean assign_reduce_task(ArrayList<JobInfo> reduce_runnable_jobs, String currentIP) {

		if(reduce_runnable_jobs.size() <= 0)
			throw new IllegalArgumentException("reduce_runnable_jobs.size() <= 0");

		// Debug dump of every runnable job's reduce-side counters.
		for(int i = 0; i < reduce_runnable_jobs.size(); i++) {	
			JobInfo jinfo = reduce_runnable_jobs.get(i);
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
					
					
			JobInProgress jip = reduceSchedulable.job;					
			
			LOG.info("ycb-assign_reduce_task job: "+jip.getJobID().toString()+" share: "+reduceSchedulable.share+" running: "+reduceSchedulable.num_running+" not-start: "+reduceSchedulable.num_not_start
			+" finished: "+reduceSchedulable.num_finished+" total: "+reduceSchedulable.num_total);
		
		}
			
		//pick a job first
		// Find the first job that is still below its fair share.
		int first = -1;
		for(int i = 0; i < reduce_runnable_jobs.size(); i++) {
			JobInfo jinfo = reduce_runnable_jobs.get(i);
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
			
			if(reduceSchedulable.num_running < reduceSchedulable.share)
			{
				first = i;
				break;
			}
		}
		
		if(first == -1)
			return false;
			
		//at least one job: running < share
		JobInfo firstjinfo = reduce_runnable_jobs.get(first);
		NetAwareJobSchedulable firstreduceSchedulable = firstjinfo.reduceSchedulable;
		
		if( firstreduceSchedulable.num_running >= firstreduceSchedulable.share || firstreduceSchedulable.num_not_start <= 0 )
			throw new IllegalArgumentException("no job running < share!");
			
		// Gather every job (from 'first' on) that is below its share.
		List<NetAwareJobSchedulable> candidateJobs = new ArrayList<NetAwareJobSchedulable>();
		for(int i = first; i < reduce_runnable_jobs.size(); i++) {

			JobInfo jinfo = reduce_runnable_jobs.get(i);
			NetAwareJobSchedulable reduceSchedulable = jinfo.reduceSchedulable;
			
			if(reduceSchedulable.num_running < reduceSchedulable.share)
			{		
				candidateJobs.add(reduceSchedulable);
			}		
		
		}
		
		
		//find the min running job
		//alternative to Collections.sort(candidateJobs, new NetAwareSchedulingAlgorithms.FairShareComparator());
		int min_running = -1;
		NetAwareJobSchedulable min_running_reduce_schedulable = new NetAwareJobSchedulable();
		for(NetAwareJobSchedulable najs : candidateJobs) {
		
			if(min_running == -1) {
				min_running = najs.num_running;
				min_running_reduce_schedulable = najs;	
				
			}
			else {
	
				if(najs.num_running < min_running) {
					min_running = najs.num_running;
					min_running_reduce_schedulable = najs;	
				}
	
			}
		
		}
		
		JobInProgress jip = min_running_reduce_schedulable.job;
	
		//job_201204102254_0002
		LOG.info("ycb-assign redcue task SCHEDULE A REDUCE FOR JOB!: "+jip.getJobID().toString());		
/*		
//////////////////RECORD Completed MAP tasks LOCATIONS -- not split locations!


		
		//jip to map locations
		//in JIP.java:  completedMapLocation.add(status.getTaskTracker());
		//each item in form: tracker_localhost:localhost/127.0.0.1:47478
		//tracker_localhost:localhost/127.0.0.1:47478#tracker_localhost:localhost/127.0.0.1:47478		
		ArrayList<String> mapLocs = new ArrayList<String>();
		

		int numMapTask = parseMapLocation(jip.completedMapLoc(), mapLocs);
		
		if(numMapTask != jip.desiredMaps())
			throw new IllegalArgumentException("numMapTask != jip.desiredMaps");
		
		
		if(jobMapLocations.get(jip.getJobID().toString()) == null)
			jobMapLocations.put(jip.getJobID().toString(), mapLocs);
		
		LOG.info("update global flow info: map locations list: ");
		
		for(String mapLoc:mapLocs) {
			LOG.info("jobMapLocations:"+mapLoc);
		}
*/
	
//for: reduce start when map placed
//double check jip.numMapTasks == numMS
		Integer numMapSched = jobNumMapScheduled.get(jip.getJobID().toString());
		
		if(numMapSched == null)
			throw new IllegalArgumentException("numMapSched == null");
			
		int numMS = numMapSched;
		if(jip.numMapTasks != numMS)
			throw new IllegalArgumentException("jip.numMapTasks != numMS");

		ArrayList<String> mapLocs = jobMapLocations2.get(jip.getJobID().toString());
		if(mapLocs == null)
			throw new IllegalArgumentException("mapLocs == null");
					
		LOG.info("ycb-assign reduce task: Map scheduled info: map scheduled locations list for this job: ");
			LOG.info("-----------------------------------------");
		for(String mapLoc:mapLocs) {
			LOG.info("Map Locations:"+mapLoc);
		}
				
			LOG.info("-----------------------------------------");
		
///////////////////////////pick a reduce slot in the cluster
		//IP of tasktrcker to launch reduce task
		String TTtoLaunchIP = "";
		int FreeReduceSlotforTT = -1;
		
		//TEST: only schedule to current node
		// Dead branch kept for experiments: schedules only onto the current node.
		if(false) {

			TTtoLaunchIP = currentIP;
			FreeReduceSlotforTT = free_reduce_slots.get(TTtoLaunchIP);
			if(FreeReduceSlotforTT == 0)
				return false;
			
			if(FreeReduceSlotforTT < 0)
				throw new IllegalArgumentException("FreeReduceSlotforTT < 0");				
		}//if(true)
		
		
		//if (flow graph based scheduling)
		// Always-taken branch: pick the node via the flow-graph metric.
		if(true) {

			
			TTtoLaunchIP = compute_reduce_metric("MIN_FLOWS_PER_LINK", jip.getJobID().toString() );
			
			LOG.info("ycb-assign reduce task: NAPS metric schedule "+jip.getJobID().toString()+" at "+TTtoLaunchIP);
			
			if(TTtoLaunchIP == null)
				return false;
				
			//LOG.info("TTtoLaunchIP "+TTtoLaunchIP);
			
			FreeReduceSlotforTT = free_reduce_slots.get(TTtoLaunchIP);
			
		}//if (flow graph based scheduling)
			
			
//////////////////UPDATE related variable!

		//private Map<String, Map<JobInProgress, Integer> > pending_reduce_tasks = new HashMap<String, Map<JobInProgress, Integer> >();
		Map<JobInProgress, Integer> pendingMapForNode = pending_reduce_tasks.get(TTtoLaunchIP);
		
		if(pendingMapForNode == null)
			throw new IllegalArgumentException("pendingMapForNode == null");
			
		///////////////////UPDATE pending_reduce_tasks here!
		Integer pendingForJobOnNode = pendingMapForNode.get(jip);
		if(pendingForJobOnNode == null)
			pendingMapForNode.put(jip, 1);
		else
			pendingMapForNode.put(jip, pendingForJobOnNode + 1);
			
		//////////////////UPDATE private Map<String, Integer> free_reduce_slots
		free_reduce_slots.put(TTtoLaunchIP, FreeReduceSlotforTT - 1);
		
		//////////////////UPDATE private Map<JobInProgress, Integer> pending_reduce_tasks_for_job
		Integer pendingForJob = pending_reduce_tasks_for_job.get(jip);
		if(pendingForJob == null)
			throw new IllegalArgumentException("pendingForJob == null");
		pending_reduce_tasks_for_job.put(jip, pendingForJob + 1);
		
		////////////////////UPDATE job status:
		//XXX:NOTE job status will be reset and re-calculate by demand() at each hb
		min_running_reduce_schedulable.num_not_start--;
		min_running_reduce_schedulable.num_running++;
		

/////////////update global flow info///////////
		//XXX: IP-CHANGE		
		//UPDATE flow graph
		// Register a (reduce node -> map node) flow for every distinct map
		// location other than the chosen node itself.
 		for(String maploc : mapLocs) {
			if(!maploc.equals(TTtoLaunchIP))
				flow_graph.flow_change(TTtoLaunchIP, maploc, "start");	
		}

		
		return true;

	}



  /**
   * Jobs currently associated with the given queue, resolved through the
   * pool manager (pools are keyed by queue name).
   */
  @Override
  public synchronized Collection<JobInProgress> getJobs(String queueName) {
    return poolMgr.getPool(queueName).getJobs();
  }

  /** Scheduler-side bookkeeping record for the given job, or null if untracked. */
  public JobInfo getJobInfo(JobInProgress job) {
    return infos.get(job);
  }
  
  /** Cluster-wide slot capacity for the given task type. */
  private int getTotalSlots(TaskType type, ClusterStatus clusterStatus) {
    if (type == TaskType.MAP) {
      return clusterStatus.getMaxMapTasks();
    }
    return clusterStatus.getMaxReduceTasks();
  }  


  /**
   * Recompute each job's runnable flag: RUNNING jobs become runnable, PREP
   * jobs are handed to the job initializer exactly once, everything else
   * stays non-runnable.
   */
  private void updateRunnability() {
    // Reset: every job starts out non-runnable until proven otherwise.
    for (JobInfo info : infos.values()) {
      info.runnable = false;
    }

    // Snapshot the key set so infos can be consulted safely while iterating.
    List<JobInProgress> snapshot = new ArrayList<JobInProgress>(infos.keySet());

    for (JobInProgress job : snapshot) {
      if (job.getStatus().getRunState() != JobStatus.RUNNING &&
          job.getStatus().getRunState() != JobStatus.PREP) {
        continue;
      }

      JobInfo jobInfo = infos.get(job);
      if (job.getStatus().getRunState() == JobStatus.RUNNING) {
        jobInfo.runnable = true;
      } else if (jobInfo.needsInitializing) {
        // PREP state: hand the job to the initializer only once.
        jobInfo.needsInitializing = false;
        jobInitializer.initJob(jobInfo, job);
      }
    }
  }
  
  //max # = free slots
  /** Free slots of the given type on this tracker right now. */
  private int maxTasksToAssign(TaskType type, TaskTrackerStatus tts) {
    if (type == TaskType.MAP) {
      return tts.getAvailableMapSlots();
    }
    return tts.getAvailableReduceSlots();
  } 
  
  
  //lock infos before call this method
  /** Snapshot of every job's map-side schedulable, in infos iteration order. */
  public List<NetAwareJobSchedulable> mapJobSchedulable() {
    List<NetAwareJobSchedulable> result =
        new ArrayList<NetAwareJobSchedulable>(infos.size());
    for (JobInfo info : infos.values()) {
      result.add(info.mapSchedulable);
    }
    return result;
  }
  
   //lock infos before call this method
  /** Snapshot of every job's reduce-side schedulable, in infos iteration order. */
  public List<NetAwareJobSchedulable> reduceJobSchedulable() {
    List<NetAwareJobSchedulable> result =
        new ArrayList<NetAwareJobSchedulable>(infos.size());
    for (JobInfo info : infos.values()) {
      result.add(info.reduceSchedulable);
    }
    return result;
  }  
  
  
  
  /** @return the Clock instance this scheduler uses for timestamps. */
  public Clock getClock() {
    return clock;
  }  
 
 
 

	/**
	 * Compute and store each job's non-preemptive fair MAP share
	 * (mapSchedulable.share) given the cluster's total map slot count.
	 *
	 * Cases:
	 *  - No job has remaining maps: nothing to do.
	 *  - Total remaining demand fits in the cluster: every job gets all it needs.
	 *  - All slots are already running: shares stay as-is.
	 *  - Otherwise binary-search a fairness level R (25 iterations) such that
	 *    the clamped per-job shares at R sum to the slot capacity, then set
	 *    each job's share to map_compute_share(job, R, ...).
	 *
	 * Finally sanity-checks that every share lies in [num_running, remaining].
	 *
	 * @param numTotalMapSlots total map slot capacity of the cluster
	 */
	public void non_preempt_assign_map_share(int numTotalMapSlots) {

		ArrayList<JobInfo> map_runnable_jobs = new ArrayList<JobInfo>();	
		
		for(JobInProgress jip: infos.keySet()) {
			
			JobInfo jinfo = infos.get(jip);
			
			NetAwareJobSchedulable mapSchedulable = jinfo.mapSchedulable;
			
			//only check jobs ready to launch map tasks
			if( mapSchedulable.num_finished < mapSchedulable.num_total )
				map_runnable_jobs.add(jinfo);
		
		}
		
		if(map_runnable_jobs.size() == 0)
			return;
			
		// Aggregate running and remaining (total - finished) map counts.
		int total_running = 0;
		int total_remain = 0;
		
		
		for(int i = 0; i < map_runnable_jobs.size(); i++)
		{
			JobInfo jinfo = map_runnable_jobs.get(i);
			NetAwareJobSchedulable mapSchedulable = jinfo.mapSchedulable;
			
			total_running += mapSchedulable.num_running;
			total_remain += ( mapSchedulable.num_total - mapSchedulable.num_finished );
		
		}
		
		// cluster has more slots than all jobs need. Just give every job all it needs.
		if (total_remain <= numTotalMapSlots) {
		
			for(int i = 0; i < map_runnable_jobs.size(); i++)
			{
				JobInfo jinfo = map_runnable_jobs.get(i);
				NetAwareJobSchedulable mapSchedulable = jinfo.mapSchedulable;
			
				mapSchedulable.share = mapSchedulable.num_total - mapSchedulable.num_finished;

			}
			
			return;		
		
		}
		
		// Every slot is occupied; shares are left unchanged from the last pass.
		if (total_running == numTotalMapSlots)
			return;

		// compute fair share algorithm adapted from FairShare scheduler in
		// Hadoop 0.20.0 or 0.21.0
		// First grow Rmax geometrically until the shares at Rmax overshoot the
		// capacity; map_slots_at_R is nondecreasing in R, so a bound exists.
		double cap = numTotalMapSlots;
		double Rmax = 1.0;
		while (map_slots_at_R(Rmax, map_runnable_jobs) < cap) {	//invoke map_compute_share
			Rmax *= 2.0;
		}
		// Then bisect [0, Rmax] for 25 rounds to pin down R.
		double left = 0;
		double right = Rmax;
		for (int round = 0; round < 25; round ++) {
			if (left >= right) {
				break;
			}
			double middle = (left+right)/2;
			if (map_slots_at_R(middle, map_runnable_jobs) < cap) {	//invoke map_compute_share
				left = middle;
			} else {
				right = middle;
			}
			//print("left=%lf, right=%lf\n", left, right);
		}

		double R = right;
		for(int i = 0; i < map_runnable_jobs.size(); i++)
		{
			JobInfo jinfo = map_runnable_jobs.get(i);
			NetAwareJobSchedulable mapSchedulable = jinfo.mapSchedulable;
		
			mapSchedulable.share = map_compute_share(jinfo, R, map_runnable_jobs);	//set map share! 

		}

		// Sanity pass: each share must lie within [num_running, remaining].
		// (total_map_share is accumulated but not otherwise used.)
		int total_map_share = 0;
		
		for(int i = 0; i < map_runnable_jobs.size(); i++)
		{
			JobInfo jinfo = map_runnable_jobs.get(i);
			NetAwareJobSchedulable mapSchedulable = jinfo.mapSchedulable;
		
			if(mapSchedulable.share < mapSchedulable.num_running || mapSchedulable.share > (mapSchedulable.num_total - mapSchedulable.num_finished))
				throw new IllegalArgumentException("map share < running || map share > remaining");
				
			total_map_share += mapSchedulable.share;
			

		}
		
	}


	/**
	 * Map share for one job at fairness level R: R times the job's (equal)
	 * weight, capped above by its remaining maps and floored below by its
	 * already-running maps. Branch order matters: the remaining-maps cap is
	 * checked first, so it wins if running exceeds remaining.
	 */
	public double map_compute_share(JobInfo job, double R, ArrayList<JobInfo> mapRunnableJobs)
	{
		// Every runnable job carries the same weight.
		double weight = 1.0 / mapRunnableJobs.size();

		NetAwareJobSchedulable sched = job.mapSchedulable;
		int remainingMaps = sched.num_total - sched.num_finished;
		double desired = R * weight;

		if (desired > remainingMaps) {
			return (double) remainingMaps;
		}
		if (desired < sched.num_running) {
			return (double) sched.num_running;
		}
		return desired;
	}


	/**
	 * Total number of map slots handed out at fairness level R: the sum of
	 * map_compute_share over every runnable job. Nondecreasing in R, which is
	 * what the caller's binary search relies on.
	 */
	public double map_slots_at_R(double R, ArrayList<JobInfo> mapRunnableJobs)
	{
		double total = 0;

		for (JobInfo job : mapRunnableJobs) {
			total += map_compute_share(job, R, mapRunnableJobs);
		}

		return total;
	}  

	/**
	 * Fill map_runnable_jobs (out-parameter) with every RUNNING job that still
	 * has map tasks waiting to start; returns true when at least one exists.
	 *
	 * NOTE: condition differs from non_preempt_assign_map_share — there every
	 * job with finished < total needs a share computed, but a job with
	 * num_not_start == 0 has nothing left to schedule.
	 */
	public boolean if_launch_new_map(ArrayList<JobInfo> map_runnable_jobs) {

		for (Map.Entry<JobInProgress, JobInfo> entry : infos.entrySet()) {

			NetAwareJobSchedulable mapSchedulable = entry.getValue().mapSchedulable;

			if (entry.getKey().getStatus().getRunState() == JobStatus.RUNNING
					&& mapSchedulable.num_not_start > 0) {
				map_runnable_jobs.add(entry.getValue());
			}
		}

		return !map_runnable_jobs.isEmpty();
	}


	//return task: at least one job: running < share
	//map_runnable_jobs are jobs: in RUNNING state and number_to_start > 0
//ycb-add
	/**
	 * Obtain ONE map task for the heartbeating tracker: among the runnable jobs
	 * whose running count is below their fair share, pick the one with the
	 * fewest running maps, record this tracker in the job's scheduled-map
	 * location bookkeeping (jobMapLocations2 / jobNumMapScheduled), then obtain
	 * a task from the job — node/rack-local first, falling back to non-local.
	 *
	 * @param trackerIP host token of the heartbeating tracker
	 * @param map_runnable_jobs non-empty list of map-runnable jobs
	 * @param tts status of the heartbeating tracker
	 * @param numTaskTrackers number of trackers in the cluster
	 * @return the obtained task, or null when no job is below its share
	 * @throws IOException from JobInProgress task-obtaining calls
	 *         (IllegalArgumentException if no task can be obtained even though
	 *          the counters say one should exist)
	 */
	public Task assign_map_task(String trackerIP, ArrayList<JobInfo> map_runnable_jobs, TaskTrackerStatus tts, int numTaskTrackers) 
		throws IOException {

		if(map_runnable_jobs.size() <= 0)
			throw new IllegalArgumentException("map_runnable_jobs.size() <= 0");
			
		//pick a job first
		// Find the first job still below its fair share.
		int first = -1;
		for(int i = 0; i < map_runnable_jobs.size(); i++) {
			JobInfo jinfo = map_runnable_jobs.get(i);
			NetAwareJobSchedulable mapSchedulable = jinfo.mapSchedulable;
			
			if(mapSchedulable.num_running < mapSchedulable.share)
			{
				first = i;
				break;
			}
		}
		
		if(first == -1)
			return null;
			
		//at least one job: running < share
		JobInfo firstjinfo = map_runnable_jobs.get(first);
		NetAwareJobSchedulable firstMapSchedulable = firstjinfo.mapSchedulable;
		
		if( firstMapSchedulable.num_running >= firstMapSchedulable.share || firstMapSchedulable.num_not_start <= 0 )
			throw new IllegalArgumentException("no job map running < share or num_not_start > 0!");
			
		// Gather every job (from 'first' on) that is below its share.
		List<NetAwareJobSchedulable> candidateJobs = new ArrayList<NetAwareJobSchedulable>();
		
		for(int i = first; i < map_runnable_jobs.size(); i++) {

			JobInfo jinfo = map_runnable_jobs.get(i);
			NetAwareJobSchedulable mapSchedulable = jinfo.mapSchedulable;
			
			if(mapSchedulable.num_running < mapSchedulable.share)
			{		
				candidateJobs.add(mapSchedulable);
			}		
		
		}

		//find the min running job
		//alternative to Collections.sort(candidateJobs, new NetAwareSchedulingAlgorithms.FairShareComparator());
		int min_running = -1;
		NetAwareJobSchedulable min_running_map_schedulable = new NetAwareJobSchedulable();
		for(NetAwareJobSchedulable najs : candidateJobs) {
		
			if(min_running == -1) {
				min_running = najs.num_running;
				min_running_map_schedulable = najs;	
				
			}
			else {
	
				if(najs.num_running < min_running) {
					min_running = najs.num_running;
					min_running_map_schedulable = najs;	
				}
	
			}
		
		}
		
		JobInProgress jip = min_running_map_schedulable.job;

//////////////////UPDATE related variable!
		////////////////////UPDATE job status:
		//XXX:NOTE job status will be reset and re-calculate by demand() at each hb
		min_running_map_schedulable.num_not_start--;
		min_running_map_schedulable.num_running++;

//ycb-add
		// Record this tracker among the job's scheduled-map locations and bump
		// its scheduled-map count (counters are keyed by job-id string).
		ArrayList<String> mapLocs = jobMapLocations2.get(jip.getJobID().toString());
		
		//private static Map<String, ArrayList<String> > jobMapLocations2 = new HashMap<String, ArrayList<String> >();
		//the first map of the job
		if(mapLocs == null) {
		

			if(jobNumMapScheduled.get(jip.getJobID().toString()) != null)
				throw new IllegalArgumentException("jobNumMapScheduled.get(jip.getJobID().toString()) != null");

			mapLocs = new ArrayList<String>();
			mapLocs.add(trackerIP);
						
			jobMapLocations2.put(jip.getJobID().toString(), mapLocs);
			jobNumMapScheduled.put(jip.getJobID().toString(), 1);
		}
		else {

			Integer numMapSched = jobNumMapScheduled.get(jip.getJobID().toString());
			
			if( numMapSched == null)
				throw new IllegalArgumentException("jobNumMapScheduled.get(jip.getJobID().toString()) is null");
		
			jobNumMapScheduled.put(jip.getJobID().toString(), numMapSched+1);
			
			// Locations list is deduplicated: one entry per distinct tracker.
			if(!mapLocs.contains(trackerIP))
				mapLocs.add(trackerIP);			

		}
		
		

		Task t = null;

		// Try to schedule a node-local or rack-local Map task
		t = 
			jip.obtainNewNodeOrRackLocalMapTask(tts, 
				numTaskTrackers, taskTrackerManager.getNumberOfUniqueHosts());
		if (t != null)
			return t;

		// Try to schedule a node-local map task for this job
		  //get local task in Jobschedulable assignTask()
		t = 
			jip.obtainNewNonLocalMapTask(tts, numTaskTrackers,
				               taskTrackerManager.getNumberOfUniqueHosts());

		if (t != null)
			return t;
		else
			throw new IllegalArgumentException("CANNOT OBTAIN A NEW MAP TASK!");
	
		/*	
		//Map<JobInProgress, JobInfo> infos
		JobInfo ji = infos.get(jip);
		if(ji == null)
			throw new IllegalArgumentException("ji == null !");

		boolean remove_suc = map_runnable_jobs.remove(ji);
		if(remove_suc == false)
			throw new IllegalArgumentException("map_runnable_jobs.remove failed!");

		and repeat the job picking process!
		*/


	}
	  
}
