package skewreduce.framework;

import java.io.DataInputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

import javax.servlet.ServletException;
import javax.servlet.ServletInputStream;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.mortbay.jetty.Connector;
import org.mortbay.jetty.HttpConnection;
import org.mortbay.jetty.Request;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.handler.AbstractHandler;
import org.mortbay.jetty.nio.SelectChannelConnector;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import skewreduce.framework.physical.BlockingPhysicalOp;
import skewreduce.framework.physical.MonitoredPhysicalOp;
import skewreduce.framework.physical.PPartitionOp;
import skewreduce.framework.physical.PSampleOp;
import skewreduce.framework.physical.PhysicalOp;
import skewreduce.lib.Partition;


/**
 * design
 *
 * for simplicity, a thread will be dedicated to each MR job.
 * the thread will monitor progress of the MR job and report
 * completion or failure. the central scheduler thread waits
 * on a shared event queue for these reports.
 *
 */

// FIXME: implement graceful shutdown

class EaggExecutor extends Configured implements Callable<Boolean> {
    static final Logger LOG = LoggerFactory.getLogger(EaggExecutor.class);

    // collaborators: driver provides execution context, plan holds the
    // DAG of physical operators to run
    private final EaggDriver      driver;
    private final EaggPlan        plan;
    private boolean               failFast;    // abort the loop on first failure event
    private SchedulerEventHandler scheduler;   // pluggable scheduling policy (skewreduce.scheduler.class)
    private Server server;                     // embedded Jetty listener for HTTP callbacks from jobs

//    private SchedulerEventCompletionService svc;
    private PhysicalOpExecutor.CompletionService svc;   // feeds completed-op futures into eventQueue
    private ExecutorService executor;                   // single thread running call()
    private LinkedBlockingQueue<Future<SchedulerEvent>> eventQueue;
    
//    OperatorSchedule scheduleComp;
    
    // NOTE(review): the three periods below are never read in this file -- confirm external use
    long jobStatusPeriod;       // 5 seconds?
    long jobProgressPeriod;     // must be multiples of state period 
    long jobGracePeriod;        // initial monitoring period

    String notificationUri;     // http://host:port advertised to jobs for callbacks

    /**
     * Builds the executor: loads the pluggable scheduler, sizes the number
     * of concurrent jobs from the cluster's map-slot capacity, and starts
     * an embedded Jetty server through which running jobs send completion /
     * partition / schedule / timing notifications.
     *
     * @param conf   Hadoop configuration; also mutated to advertise the
     *               notification URI under {@code EaggPlan.EAGG_HTTP_RPC_URI_ATTR}
     * @param driver owning driver
     * @param plan   physical plan to execute
     * @throws IOException if the cluster status cannot be fetched or the
     *                     HTTP listener fails to start
     */
    EaggExecutor(Configuration conf,EaggDriver driver,EaggPlan plan)
    throws IOException {
        super(conf);
        this.driver = driver;
        this.plan = plan;
        this.failFast = getConf().getBoolean("skewreduce.scheduler.failfast",false);
        eventQueue = new LinkedBlockingQueue<Future<SchedulerEvent>>();
        scheduler = ReflectionUtils.newInstance(
                getConf().getClass(
                    "skewreduce.scheduler.class",
                    SchedulerAdaptor.class,
                    SchedulerEventHandler.class),
                getConf());
        if ( LOG.isInfoEnabled() )
            LOG.info("Using scheduler {}",scheduler.getClass() );

        MonitoredPhysicalOp.getWakeupMonitor().init(getConf());

        // configure number of concurrent jobs from the cluster's map capacity,
        // allowing a modest amount of oversubscription
        JobClient jobClient = new JobClient(new JobConf(getConf()));
        ClusterStatus stat = jobClient.getClusterStatus();
        int maxMap = stat.getMaxMapTasks();
        int numConcurrentJobs = (int)(maxMap*1.2);
        int listenPort = getConf().getInt("skewreduce.listener.port", 8080);

        LOG.info("Setting number of concurrent jobs to {}",numConcurrentJobs);

        PhysicalOpExecutor execSvc = new PhysicalOpExecutor(plan,numConcurrentJobs,eventQueue);
        svc = execSvc.getCompletionService();

        InetAddress localAddr = InetAddress.getLocalHost();

        // embedded HTTP listener receiving callbacks from running jobs
        JobCompletionHandler h = new JobCompletionHandler();

        server = new Server();
        SelectChannelConnector connector = new SelectChannelConnector();
        connector.setPort(listenPort);
        connector.setReuseAddress(true);
        server.setConnectors(new Connector[] { connector });
        server.setHandler(h);
        server.setStopAtShutdown(true);
        try {
            server.start();
        } catch (Exception e) {
            throw new IOException("Failed to start notification listener",e);
        }

        notificationUri = String.format("http://%s:%d",localAddr.getHostAddress(),listenPort);

        // advertise the callback URI to jobs through the configuration
        conf.set(EaggPlan.EAGG_HTTP_RPC_URI_ATTR, notificationUri);

        LOG.info(notificationUri);
    }

    //////////////////////////////////////////////////////////////////////////

    /**
     * Runs the scheduling loop ({@link #call()}) on a dedicated thread and
     * blocks until the plan finishes or fails.
     *
     * @return true iff the plan completed successfully
     * @throws IllegalStateException if the executor is already running
     */
    public boolean execute() {
        if ( executor != null )
            throw new IllegalStateException("EaggExecutor is already running");

        executor = Executors.newSingleThreadExecutor();
        Future<Boolean> future = executor.submit(this);
        try {
            return future.get();
        } catch ( InterruptedException ex ) {
            // preserve the caller's interrupt status instead of swallowing it
            Thread.currentThread().interrupt();
            LOG.warn("Exception caught by main thread",ex);
            return false;
        } catch ( Throwable ex ) {
            LOG.warn("Exception caught by main thread",ex);
            // should shutdown everything
            // shutdown();
            return false;
        }
    }

    /**
     * Stops everything: the scheduling thread, the HTTP listener, and the
     * physical-operator completion service. Safe to call more than once.
     */
    public synchronized void shutdown() {
        if ( executor != null ) {
            executor.shutdownNow();
            executor = null;
        }
        if ( server != null ) {
            try {
                server.stop();
            } catch ( Exception ignore ) {
                // best-effort stop; nothing useful to do on failure
            }
            server = null;
        }
        if ( svc != null ) {
            // force-kill outstanding operators only when the plan is incomplete
            svc.shutdown( ! (plan != null && plan.isCompleted()) );
            svc = null;

            // grace period for monitoring tasks to wind down
            try {
                Thread.sleep(3000);
            } catch ( InterruptedException ignore ) {
                Thread.currentThread().interrupt(); // preserve interrupt status
            }
        }
    }

    /**
     * Launches every task that has become ready, letting the scheduler
     * re-optimize the set first and submitting in schedule-sorted order.
     */
    private void scheduleNewTasks() {
        // schedule up-to cluster limit --
        Set<PhysicalOp> ready = scheduler.reoptimize(plan.getReadyTasks());
        if ( ready.isEmpty() )
            return;

        // FIXME sorting must reflect the partitioning result
        for ( PhysicalOp op : plan.getSchedule().sort(ready) ) {
            if ( LOG.isDebugEnabled() )
                LOG.debug("scheduling {}",op);

            svc.submit(op);
        }
        plan.scheduled( ready );
    }

    // FIXME: call complete and cleanup
    /**
     * Main scheduling loop, run on the single thread created by execute().
     * Repeats: wait for at least one scheduler event, optionally fail fast,
     * hand each event to the scheduler, then launch newly-ready tasks.
     *
     * @return true iff the whole plan completed
     */
    public Boolean call() throws Exception {
        boolean failed = false;

        LOG.info("Starting EAGG executor");
        try {
            List<Future<SchedulerEvent>> events = new ArrayList<Future<SchedulerEvent>>(64);

            // prime the pipeline with all initially-ready tasks
            scheduleNewTasks();

            while ( ! plan.isCompleted() && ! failed ) {
                // when is terminating?
                events.clear();
                // blocks for at least one event, then drains the rest
                int n = svc.takeAndDrainTo(events);
                if ( LOG.isDebugEnabled() ) {
                    LOG.debug("dispatching {} schedule events",n);
                }

                if ( failFast ) {
                    // check fail conditions
                    for ( Future<SchedulerEvent> future : events ) {
                        SchedulerEvent e = future.get();
                        if ( e.isFailure() ) {
                            LOG.info("Fail fast due to event {}",e);
                            if ( e instanceof SchedulerEvent.Error ) {
                            	LOG.error("Error cause",((SchedulerEvent.Error)e).getCause());
                            }
                            failed = true;
                            break;
                        }
                    }

                    if ( failed )
                        continue;   // we won't schedule any new jobs
                }

                for ( Future<SchedulerEvent> future : events ) {
                    SchedulerEvent e = future.get();
                    if ( LOG.isDebugEnabled() ) {
                        LOG.debug(e.toString());
                    }
                    scheduler.handle(e);
                }

                // FIXME: construct a new job
                scheduleNewTasks();
            }
        } catch ( InterruptedException x ) {
            // kill all running futures
            // NOTE(review): interrupt status is swallowed here; consider
            // Thread.currentThread().interrupt() to preserve it
            LOG.info("Interrupting executor");
        }

        if ( plan.isCompleted() ) {
        	svc.shutdown(false); // wait for all monitoring tasks terminate
        	// NOTE(review): this awaits 'executor' -- the pool running this very
        	// task -- which has not been shut down, so it likely just blocks the
        	// full 5 seconds; confirm whether the intended target was 'svc'
        	executor.awaitTermination(5, TimeUnit.SECONDS);
        }
        
        LOG.info("Shutting down EAGG executor");
        
        return plan.isCompleted();
    }

    /**
     * Task that kills a running physical operator and reports the kill back
     * to the scheduler as a {@code SchedulerEvent.Killed}.
     * NOTE(review): not referenced anywhere in this file -- confirm external use.
     */
    final class KillTask implements Callable<SchedulerEvent> {
        final PhysicalOp op;    // operator to kill
        public KillTask(PhysicalOp op) { this.op = op; }
        public SchedulerEvent call() throws Exception {
            op.kill();
            return new SchedulerEvent.Killed(op);
        }
    }

    /**
     * Completion service that exposes its backing queue so the scheduling
     * loop can drain many completed futures in one call.
     * NOTE(review): appears unused in this file (superseded by
     * PhysicalOpExecutor.CompletionService) -- confirm external use.
     */
    final class SchedulerEventCompletionService
    extends ExecutorCompletionService<SchedulerEvent> {
        private final BlockingQueue<Future<SchedulerEvent>> events;
        private final ExecutorService pool;

        public SchedulerEventCompletionService(ExecutorService exec,BlockingQueue<Future<SchedulerEvent>> queue) {
            super(exec,queue);
            this.pool = exec;
            this.events = queue;
        }

        /** Non-blocking drain of all currently completed futures into {@code c}. */
        public int drainTo(Collection<? super Future<SchedulerEvent>> c) {
            return events.drainTo(c);
        }

        /** Blocks until at least one future completes, then drains the rest. */
        public int takeAndDrainTo(Collection<? super Future<SchedulerEvent>> c)
        throws InterruptedException {
            int drained = events.drainTo(c);
            if ( drained > 0 )
                return drained;
            c.add(events.take());           // wait for the first completion
            return events.drainTo(c) + 1;   // plus whatever arrived meanwhile
        }

        /** Shuts down the backing pool; {@code force} interrupts running tasks. */
        public void shutdown(boolean force) {
            if ( force )
                pool.shutdownNow();
            else
                pool.shutdown();
        }
    }
    
    /**
     * Embedded Jetty handler through which running MR jobs report back to
     * the executor over HTTP (see URI scheme below).
     */
    final class JobCompletionHandler extends AbstractHandler {
    	private final Logger LOG = LoggerFactory.getLogger("skewreduce.RPC");
    	
    	// URI scheme
    	// /e/OPID?(SUCCEEDED|FAIL) -- job end notification
    	// /p/OPID -- put request for partition
    	// /s/OPID -- put request for scheduler information
    	// /t/OPID?estimatedTime -- get request for estimated running time
    	/**
    	 * Dispatches an incoming notification by URI, /&lt;op-char&gt;/&lt;opid&gt;.
    	 * Responds 200 on success, 400 for any malformed or unknown request.
    	 */
    	public void handle(String target,HttpServletRequest request,HttpServletResponse response,int dispatch) throws IOException, ServletException {
    		Request base_request = (request instanceof Request) ? (Request)request : HttpConnection.getCurrentConnection().getRequest();
    		base_request.setHandled(true);
    		
    		String[] targets = target.split("/");
    		boolean badRequest = false;
    		// expect exactly "" / op-char / opid; an empty op segment (e.g. "//x")
    		// previously crashed charAt(0) with StringIndexOutOfBoundsException
    		if ( targets.length != 3 || targets[0].length() != 0 || targets[1].length() == 0 ) {
    			badRequest = true;
    		} else {
    			if ( LOG.isDebugEnabled() ) {
    				LOG.debug("Request from {}: {}",request.getRemoteAddr(), target);
    			}
    			char ch = targets[1].charAt(0);
    			switch ( ch ) {
    			case 'e':
    				badRequest = ! handleJobCompletion(targets[2],request);
    				break;
    			case 'p':
    				badRequest = ! handleAddPartition(targets[2],request);
    				break;
    			case 's':
    				badRequest = ! handleAddSchedule(targets[2],request);
    				break;
    			case 't':
    				badRequest = ! handleAddTiming(targets[2],request);
    				break;
    			default:
    				badRequest = true;
    				break;
    			}
    		}
    		
            if ( badRequest ) {
                response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
                LOG.error("{}: Invalid job name? = '{}'",request.getRemoteAddr(),target);
            } else {
                response.setStatus(HttpServletResponse.SC_OK);
            }
    		// empty body; only the status code carries information
    		response.setContentType("text/html");
            response.setContentLength(0);
            response.flushBuffer();
        }

		/**
		 * Handles an end-of-job notification (/e/OPID?(SUCCEEDED|FAIL)).
		 * Marks the operator complete or failed, cleans it up, and enqueues a
		 * corresponding SchedulerEvent for the main scheduling loop.
		 *
		 * @return false if opid does not match a running operator
		 */
		private boolean handleJobCompletion(String opid,HttpServletRequest request) throws IOException, ServletException {
            PhysicalOp op = plan.unregisterRunningPhysicalOp(opid);
            
            if ( op == null ) return false;

            // the query string carries the final job status verbatim
            String status = request.getQueryString();

            if ( LOG.isDebugEnabled() ) {
                LOG.debug("End notification - {} ({})",op.getID(),status);
            }

            //PhysicalOp op = plan.unregisterRunningPhysicalOp(jobid);
            //assert op != null;

            boolean success = "SUCCEEDED".equals(status);
            Throwable error = null;

            try {
                if ( success ) {
                    op.setComplete();
                    op.onSuccess();
                    if ( op instanceof BlockingPhysicalOp || op instanceof PSampleOp) {
                        // timing record (id, job id, scheduled/completed times) for analysis
                        LOG.info(String.format("EAGG-MR\t%s\t%s\t%d\t%d",op.getID(),op.getJobID(),op.getScheduledAt(),op.getCompletedAt()));
                    }
                } else {
                    op.setFailed();
                    op.onFail();
                }
            } catch ( Exception ex ) {
                // a failing callback downgrades the event to Error below
                error = ex;
            } finally {
                if ( op != null )   // always true here; kept as a defensive check
                    op.cleanup();
            }
 
            // FIXME: post completion or fail event
            // Completion on clean success, Error when a callback threw,
            // Failed when the job itself reported failure.
            final SchedulerEvent result = ( success && error == null )
                    ? new SchedulerEvent.Completion(op)
                    : ( ( error == null ) 
                            ? new SchedulerEvent.Failed(op)
                            : new SchedulerEvent.Error(op,error) 
                    );

            // wrap the event in an already-completed Future so it can share the
            // queue with futures produced by the completion service
            eventQueue.add(new Future<SchedulerEvent> () {
                @Override
                public boolean cancel(boolean b) { return false; }
                @Override
                public SchedulerEvent get() { return result; }
                @Override
                public SchedulerEvent get(long to,TimeUnit u) { return result; }
                @Override
                public boolean isCancelled() { return false; }
                @Override
                public boolean isDone() { return true; }
            });
            
            return true;
    	}

		/**
		 * Handles a schedule upload (/s/OPID): reads a count-prefixed list
		 * of Schedule records from the request body and merges them into the
		 * plan's schedule.
		 *
		 * @return false if the payload cannot be parsed
		 */
		private boolean handleAddSchedule(String opid,
				HttpServletRequest request) throws IOException, ServletException {
			boolean ok = true;
			ServletInputStream sin = request.getInputStream();
			DataInputStream din = new DataInputStream(sin);
			try {
				int n = din.readInt();  // number of schedule records that follow
				HashMap<PhysicalOp.ID,Schedule> tmp = new HashMap<PhysicalOp.ID,Schedule>();
				for ( int i = 0; i < n; ++i ) {
					Schedule s = Schedule.createSchedule(din);
					tmp.put(s.getID(), s);
				}
				plan.getSchedule().addAll(tmp);
				if ( LOG.isDebugEnabled() ) {
					for ( Schedule s : tmp.values() ) {
						LOG.debug("schedule {}: {}",opid,s);
					}
				}
			} catch ( IOException ex ) {
				// message previously said "Invalid partition format" -- a
				// copy-paste from handleAddPartition
				LOG.error("Invalid schedule format: "+opid,ex);
				ok = false;
			} finally {
				din.close();
			}
			
			return ok;
		}

		/**
		 * Handles a partition upload (/p/OPID): deserializes a Partition
		 * from the request body and copies it into the plan's partition of
		 * the same ID.
		 *
		 * @return false for an unknown partition ID or an unparsable payload
		 */
		private boolean handleAddPartition(String opid,
				HttpServletRequest request) throws IOException, ServletException {
			DataInputStream in = new DataInputStream(request.getInputStream());
			try {
				// deserialize the incoming partition first, then locate the
				// plan-side partition it updates
				Partition received = plan.getPartitionInfo().read(in);
				Partition existing = plan.getPartitionInfo().lookup(new Partition.ID(opid));
				if ( existing == null ) {
					LOG.error("Unknown partition: {}",opid);
					return false;
				}
				existing.set(received);
				if ( LOG.isDebugEnabled() ) {
					LOG.debug("Received partition info:\n{}",received.toLongString());
				}
				return true;
			} catch ( IOException ex ) {
				LOG.error("Invalid partition format: "+opid,ex);
				return false;
			} finally {
				in.close();
			}
		}
		

    	/**
    	 * Handles a timing report (/t/OPID?time&amp;cost): records the estimated
    	 * running time and cost for the given partition in the plan's schedule.
    	 *
    	 * @return false when the query string is missing or malformed
    	 */
    	private boolean handleAddTiming(String opid,HttpServletRequest request) {
    		String q = request.getQueryString();
    		// getQueryString() returns null when the URI has no query part;
    		// the original code then threw NullPointerException on q.split()
    		if ( q == null ) return false;
    		String[] fields = q.split("&");
    		if ( fields.length != 2 ) return false;
			double t = 0.0;
			double c = 0.0;
			try {
				t = Double.parseDouble(fields[0]);
				c = Double.parseDouble(fields[1]);
			} catch ( NumberFormatException ex ) {
				return false;
			}
			if ( LOG.isDebugEnabled() ) {
				LOG.debug("estimated timing for {} = {} secs",opid,t);
			}
			// FIXME append the new schedule information to the manual schedule?
			plan.getSchedule().add(new Partition.ID(opid),t,c);
			return true;
		}
    }
}
