package com.flute.haflute.jobbox.master;

import java.rmi.Remote;
import java.rmi.RemoteException;
import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.flute.haflute.common.ClusterConstants;
import com.flute.haflute.common.ClusterContext;
import com.flute.haflute.jobbox.base.ClientJobListener;
import com.flute.haflute.jobbox.base.CloudJobCenterController;
import com.flute.haflute.jobbox.base.CloudJobConf;
import com.flute.haflute.jobbox.base.CloudResourceBase;
import com.flute.haflute.jobbox.base.InputResource;
import com.flute.haflute.jobbox.base.JobConf;
import com.flute.haflute.jobbox.base.RunProcessInterface;
import com.flute.haflute.jobbox.base.StatusReporter.JOB_STATUS;
import com.flute.haflute.jobbox.base.mapreduce.MRJobConf;
import com.flute.haflute.jobbox.common.CloudContext;
import com.flute.haflute.jobbox.common.CloudResouceProxy;
import com.flute.haflute.jobbox.communication.MasterCommunicationService;
import com.flute.haflute.jobbox.communication.NotifyService;
import com.flute.haflute.jobbox.communication.messages.CloudJobReqMessage;
import com.flute.haflute.jobbox.communication.messages.MapperReqMessage;
import com.flute.haflute.jobbox.communication.messages.RequestMessage;
import com.flute.haflute.monitor.JobMonitorInfo;
import com.flute.haflute.net.RMINetClient;
import com.flute.haflute.tools.ClusterUtils;

/**
 * 主节点进行任务调度的后台线程 进行任务划分、环境初始化、任务分发等
 * 
 */
public class JobTracker extends Thread implements RunProcessInterface {

	private static final Logger logger = LoggerFactory.getLogger(JobTracker.class);

	/**
	 * Timestamp pattern used for a job's finish time. DateTimeFormatter is
	 * thread-safe, so it can be cached (unlike SimpleDateFormat).
	 */
	private static final DateTimeFormatter FINISH_TIME_FORMAT = DateTimeFormatter
			.ofPattern("yyyy-MM-dd HH:mm:ss");

	private static JobTracker instance = null;
	/** Whether initialization has completed; flipped in {@link #callMain}. */
	private boolean initialized = false;

	/** Jobs waiting to be scheduled. */
	private List<JobConf> waitList;

	/** Tracks the hosts each running MapReduce job was dispatched to. */
	private Map<MRJob, List<String>> runningJobs;

	MasterCommunicationService communicationCenter;
	private NotifyService notifier;

	/** Graceful shutdown: drain all queued jobs before exiting. */
	private boolean gracefulShutdown = false;

	private boolean shutdown = false;

	/** Child nodes that received the most recent job notification. */
	private List<String> childsOfLastNotification;

	/**
	 * Cache of running scalable job requests; child nodes that join later
	 * receive these requests and participate in the execution.
	 */
	private List<CloudJob> runningCloudJobs;

	/** Client listeners keyed by job id, notified when their job finishes. */
	private Map<String, ClientJobListener> clientJobListeners;

	/** Dedicated lock guarding {@link #waitList}. */
	final private Object lockFlag = new Object();

	private JobStatusReporter clusterJobStatusReporter;

	public JobTracker() {
		super("JOBTRACKER");
		init();
	}

	/** Initializes communication services and internal bookkeeping structures. */
	private void init() {
		initialized = false;
		logger.info("JobTracker initializing...");

		try {
			communicationCenter = new MasterCommunicationService();
			notifier = (NotifyService) communicationCenter.getNotifyService();
		} catch (RemoteException e) {
			// FIX: report through the logger instead of printStackTrace()
			logger.error("JobTracker communication service init failed", e);
			ClusterUtils.exitGracefully();
		}

		waitList = new ArrayList<JobConf>(10);
		runningJobs = new HashMap<MRJob, List<String>>();
		runningCloudJobs = new ArrayList<CloudJob>();
		clientJobListeners = new ConcurrentHashMap<String, ClientJobListener>();
		clusterJobStatusReporter = new JobStatusReporter();

		instance = this;
		logger.info("JobTracker start OK");
	}

	/**
	 * Queues a new job for scheduling.
	 *
	 * @param jobConf  the job configuration; its id is overwritten with a
	 *                 temporary id until {@code runCloudJob} assigns the final one
	 * @param listener optional listener to notify when the job finishes
	 */
	public void executeNewJob(JobConf jobConf, ClientJobListener listener) {
		jobConf.setJobId("tmp" + System.currentTimeMillis());
		if (listener != null) {
			clientJobListeners.put(jobConf.getJobId(), listener);
		}
		synchronized (lockFlag) {
			waitList.add(jobConf);
		}
		logger.info("one Job request added to pool");
	}

	/**
	 * Returns the shared tracker, waiting (up to ~200s) for initialization to
	 * complete. If no instance exists after the timeout, a fallback instance is
	 * created and started so the current task can proceed.
	 */
	public static synchronized JobTracker getInstance() {
		int totalTime = 1000;
		int trys = 0;
		// If the JobTracker has not finished initializing yet, wait in
		// 200ms slices until it is ready or the retry budget is exhausted.
		while ((instance == null || !instance.initialized)
				&& (trys++ < totalTime)) {
			doWait();
		}

		// Still no instance after the timeout: something went wrong upstream;
		// start a fallback instance so the current task can proceed.
		if (instance == null) {
			instance = new JobTracker();
			instance.start();
		}
		return instance;
	}

	private static void doWait() {
		try {
			Thread.sleep(200L);
		} catch (InterruptedException e) {
			// FIX: restore the interrupt status instead of swallowing it
			Thread.currentThread().interrupt();
		}
	}

	private void doWait(long time) {
		try {
			Thread.sleep(time);
		} catch (InterruptedException e) {
			// FIX: restore the interrupt status instead of swallowing it
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * Main scheduling loop: refresh job state, then drain the wait list and
	 * schedule each queued job. Runs until shutdown (graceful shutdown keeps
	 * the loop alive while work remains queued).
	 */
	public void run() {
		while (!shutdown || (gracefulShutdown && waitList.size() > 0)) {
			checkRequests4Fresh();
			long waitTime = 200L;
			if (waitList.size() > 0) {
				logger.info("JOBTRACKER exec once wait size={}",
						waitList.size());

				// Swap out the whole wait list under the lock so scheduling
				// happens outside the critical section.
				List<JobConf> runList = null;
				synchronized (lockFlag) {
					runList = waitList;
					waitList = new ArrayList<JobConf>();
				}

				try {
					for (JobConf jobConf : runList) {
						scheduleJob(jobConf);
					}
				} catch (Throwable e) {
					logger.error("execute once error", e);
				}
				waitTime = 2000L;// jobs were scheduled; poll more slowly next round
			} // if waitList size > 0
			doWait(waitTime);
		}
	}

	/**
	 * 1. Check and update job status. 2. Follow up running jobs for newly
	 * joined child nodes.
	 */
	private void checkRequests4Fresh() {
		checkJobStatus();
		traceJobs4FreshChilds();
	}

	/**
	 * Sends the request of every running scalable job to child nodes that
	 * joined since the last notification, so they can participate.
	 */
	private void traceJobs4FreshChilds() {
		if (runningCloudJobs.size() == 0)
			return;
		if (CloudContext.getFreshChids().size() == 0)
			return;
		List<String> childs4Now = new ArrayList<String>();
		childs4Now.addAll(CloudContext.getFreshChids());
		CloudContext.getFreshChids().clear();
		// FIX: childsOfLastNotification is only assigned in runCloudJob;
		// guard against NPE if no notification has been sent yet.
		if (childsOfLastNotification != null)
			childs4Now.removeAll(childsOfLastNotification);

		synchronized (runningCloudJobs) {
			for (String child : childs4Now) {
				for (CloudJob job : runningCloudJobs) {
					try {
						if (job.isScalable() && job.isRunning()) {
							notifier.send(child, job.getJobRequestMessage());
						}
					} catch (RemoteException e) {
						logger.error("send scalable request to child: {}",
								child, e);
					}
				}
			}
		}

	}

	/**
	 * Removes finished jobs from the running cache, notifies listeners and
	 * releases their communication resources.
	 */
	private void checkJobStatus() {
		if (runningCloudJobs.size() == 0)
			return;
		synchronized (runningCloudJobs) {
			Iterator<CloudJob> it = runningCloudJobs.iterator();
			while (it.hasNext()) {
				CloudJob job = it.next();
				if (job.getStatus() == JOB_STATUS.FINISHED) {
					it.remove();
					// NOTE(review): this sleep holds the runningCloudJobs lock
					// for 11s per finished job; kept as-is because the crawler
					// reports periodically and late info must be collected.
					doWait(11000L);// wait for more info notified because
									// crawler will report periodly
					notifyJobFinished(job);
					removeResourceFromCommunicationPlatform(job
							.getJobRequestMessage().getMessagerId());
					removeResourceFromCommunicationPlatform(job
							.getJobRequestMessage().getSourceId());
					logger.info("Job <{}> FINISHED", job.getJobRequestMessage()
							.getJobConf().getJobId());
				}
			}
		}
	}

	/**
	 * Reports the finished job to the cluster monitor and the registered
	 * client listener (if any). May exit the JVM when idle — see note below.
	 */
	private void notifyJobFinished(CloudJob job) {
		clusterJobStatusReporter.jobFinished(job);
		ClientJobListener listener = clientJobListeners.remove(job
				.getJobRequestMessage().getJobConf().getJobId());
		if (listener == null)
			return;
		try {
			listener.jobFinished();
		} catch (Exception e) {
			logger.info("notify finishing state error for Job: " + job, e);
		}
		// temptarily shutdown my jvm for crawler special 2010-10-27, should
		// change when new strategy found
		if (!shutdown && runningCloudJobs.size() == 0 && waitList.size() == 0)
			ClusterUtils.exitGracefully();
	}

	/** Dispatches a job to the matching scheduler by its configuration type. */
	private void scheduleJob(JobConf jobConf) throws Exception {
		if (jobConf instanceof MRJobConf)
			runAndTraceMRJob((MRJobConf) jobConf);
		else if (jobConf instanceof CloudJobConf)
			runCloudJob((CloudJobConf) jobConf);
	}

	/** Schedules and traces a MapReduce (MR) job. */
	private void runAndTraceMRJob(MRJobConf jobConf) throws Exception {
		MRJob job = new MRJob(jobConf);
		List<String> hosts = new ArrayList<String>();

		runningJobs.put(job, hosts);
		addResource2CommunicationPlatform(job.getJobId() + "_INPUT",
				job.getInputSource());

		// TODO experimental partial implementation; flow not complete
		int numOfMaps = job.getNumMapTasks();
		MapperReqMessage mapReq = new MapperReqMessage(jobConf);

		// Dispatch the map tasks.
		scheduleMRJob(numOfMaps, mapReq);
		logger.debug("runAndTraceJob job map number={}", numOfMaps);

		// TODO subsequent Reduce phase
		/*
		 * int numOfReduces = job.getNumReduceTasks(); if (numOfReduces > 0) {
		 * // TODO }
		 */
	}

	/**
	 * Prepares the environment for a non-MR job and submits it for execution.
	 *
	 * @throws Exception when the input resource or job controller fails to start
	 */
	private void runCloudJob(CloudJobConf jobConf) throws Exception {
		CloudJobReqMessage req = new CloudJobReqMessage();
		req.setContent(jobConf);
		CloudJob runningJob = new CloudJob(req);
		runningJob.setScalable(jobConf.getScalable());

		// Replace the temporary id assigned in executeNewJob with the final
		// one and re-key the listener accordingly.
		String jobId = "JOB_" + CloudContext.getLocalIP() + "_"
				+ System.currentTimeMillis();
		ClientJobListener listener = clientJobListeners.remove(jobConf
				.getJobId());
		if (listener != null)
			clientJobListeners.put(jobId, listener);
		jobConf.setJobId(jobId);

		Class<? extends InputResource> clazz = jobConf.getInputResource();
		if (clazz != null) {
			initInputResource(clazz, runningJob);
		}

		Class<? extends CloudJobCenterController> controllerClazz = jobConf
				.getCenterJobController();
		if (controllerClazz != null) {
			startJobController(controllerClazz, runningJob);
		}

		childsOfLastNotification = sendCloudJobRequest2Childs(req);
		runningJob.setStatus(JOB_STATUS.RUNNING, "send to all childs ok");
		logger.info("-----------new Job is running, ID = {}", jobId);
		checkAndCacheJob(runningJob);
	}

	/**
	 * Instantiates and starts the center-side job controller, registering its
	 * agent on the communication platform when present. Marks the job FAILED
	 * and rethrows on any error.
	 */
	private void startJobController(
			Class<? extends CloudJobCenterController> controllerClazz,
			CloudJob runningJob) throws Exception {
		try {
			CloudJobCenterController controller = controllerClazz.newInstance();
			String id = controllerClazz.getSimpleName()
					+ System.currentTimeMillis();
			controller.initJob(runningJob.getJobRequestMessage().getJobConf()
					.getJobContext());
			if (controller.getJobAgent() != null) {
				addResource2CommunicationPlatform(id, controller.getJobAgent());
				runningJob.getJobRequestMessage().setMessagerId(id);
			}
			controller.start(runningJob);
		} catch (Throwable e) {
			logger.error("start job error", e);
			runningJob.setStatus(JOB_STATUS.FAILED,
					"start JobController error, for details, refer to logs");
			notifyJobFinished(runningJob);
			throw new Exception("Start Job Controller error", e);
		}
	}

	/**
	 * Instantiates the job's input resource (wrapping it in a proxy when it is
	 * not itself a CloudResourceBase) and publishes it on the communication
	 * platform. Marks the job FAILED and rethrows on any error.
	 */
	private void initInputResource(Class<? extends InputResource> clazz,
			CloudJob runningJob) throws Exception {
		try {
			CloudResourceBase realSource = null;
			InputResource inputResource = clazz.newInstance();
			if (inputResource instanceof CloudResourceBase)
				realSource = (CloudResourceBase) inputResource;
			else
				realSource = new CloudResouceProxy(inputResource);
			String resourceId = realSource.getId() + clazz.getSimpleName();
			addResource2CommunicationPlatform(resourceId, realSource);
			runningJob.getJobRequestMessage().setSourceId(resourceId);
		} catch (Exception e) {
			logger.error("start job error", e);
			runningJob.setStatus(JOB_STATUS.FAILED,
					"init InputResource error, for details, refer to logs");
			notifyJobFinished(runningJob);
			throw new Exception("initial Input Resource error", e);
		}
	}

	/**
	 * Dispatches a non-MR job request to all known child nodes.
	 *
	 * @return the child nodes that successfully received the request
	 */
	private List<String> sendCloudJobRequest2Childs(CloudJobReqMessage req) {
		List<String> hostList = new ArrayList<String>();
		List<String> failedHosts = new ArrayList<String>();
		hostList.addAll(CloudContext.getChids());

		// Send the execution request to each child node.
		for (String host : hostList) {
			logger.info("sending one request to {}", host);
			try {
				notifier.send(host, req);
				logger.info("sucessfully sent one request to {}", host);
			} catch (RemoteException e) {
				failedHosts.add(host);
				logger.warn("ERROR send request to " + host, e);
			}
		}
		hostList.removeAll(failedHosts);
		return hostList;
	}

	/** Caches a running job so fresh childs and status checks can see it. */
	private void checkAndCacheJob(CloudJob job) {
		synchronized (runningCloudJobs) {
			runningCloudJobs.add(job);
		}
	}

	/**
	 * Schedules a MapReduce job by distributing its splits round-robin over
	 * the known child nodes.
	 */
	public void scheduleMRJob(int numOfSplits, RequestMessage req)
			throws RemoteException {
		List<String> hostList = CloudContext.getChids();
		// FIX: guard against division by zero below when no child exists
		if (hostList.isEmpty()) {
			logger.warn("no child nodes available, MR job request not dispatched");
			return;
		}
		String[] allHosts = hostList.toArray(new String[hostList.size()]);

		// Send one request per split, round-robin across child nodes.
		for (int i = 0; i < numOfSplits; i++) {
			String oneHost = allHosts[i % allHosts.length];
			notifier.send(oneHost, req);
			logger.debug("send one request to {}", oneHost);
		}
	}

	/** Registers a resource (messenger, input, ...) on the communication platform. */
	private void addResource2CommunicationPlatform(String id, Remote resource)
			throws RemoteException {
		communicationCenter.putMessager(id, resource);
	}

	/** Removes a resource from the communication platform; best-effort. */
	private void removeResourceFromCommunicationPlatform(String id) {
		if (id == null)
			return;

		try {
			communicationCenter.removeMessager(id);
		} catch (RemoteException e) {
			logger.info(id, e);
		}
	}

	@Override
	public void callMain(String... params) {
		if (instance != null) {
			instance.start();
			// FIX: mark the instance that was actually started as initialized
			// (previously set on 'this', which may not be the singleton).
			instance.initialized = true;
		} else
			logger.error("JobTracker not initialized, some unexpected error happened, please check");
	}

	@Override
	public boolean isInitialized() {
		return initialized;
	}

	/** Aborts all running jobs immediately and stops the scheduling loop. */
	@Override
	public void shutdown() {
		shutdown = true;
		gracefulShutdown = false;
		// FIX: take the runningCloudJobs lock like every other access site to
		// avoid ConcurrentModificationException against the scheduler thread.
		synchronized (runningCloudJobs) {
			Iterator<CloudJob> it = runningCloudJobs.iterator();
			while (it.hasNext()) {
				CloudJob job = it.next();
				it.remove();
				logger.error("====shutdown one job: {}", job
						.getJobRequestMessage().getJobConf());
				job.setStatus(JOB_STATUS.USER_ABORTED,
						"Manually ABORTED the job");
				notifyJobFinished(job);
			}
		}
	}

	/**
	 * @return a snapshot of the running jobs, or null when none are running
	 *         (null-return kept for existing callers)
	 */
	public List<CloudJob> runningJobList() {
		// FIX: copy under the lock to avoid racing with concurrent mutation.
		synchronized (runningCloudJobs) {
			if (runningCloudJobs.size() == 0) {
				return null;
			}
			return new ArrayList<CloudJob>(runningCloudJobs);
		}
	}

	/** Reports job completion to the remote cluster status monitor (DM). */
	private static class JobStatusReporter {
		Object clusterStatusMonitor;

		private JobStatusReporter() {
			try {
				String refId = ClusterConstants.CLUSTER_MONITOR_REF;
				logger.info("lookup remote object: {}", refId);
				clusterStatusMonitor = RMINetClient.getServiceObject(refId);
				logger.info("remote object = {}", clusterStatusMonitor);
			} catch (RemoteException e) {
				logger.error("can NOT CONNECT to DM", e);
				clusterStatusMonitor = null;
			} catch (Exception e) {
				logger.error("error", e);
				clusterStatusMonitor = null;
			}
		}

		private synchronized void jobFinished(CloudJob job) {
			// FIX: java.time instead of legacy SimpleDateFormat/Date; the
			// cached formatter is thread-safe and produces the same pattern.
			job.finishTime = LocalDateTime.now().format(FINISH_TIME_FORMAT);
			logger.info("one job status to notify: {}", job);
			if (clusterStatusMonitor == null) {
				return;
			}
			try {
				List<String> childs = CloudContext.getChids();
				JobMonitorInfo jobinfo = new JobMonitorInfo(ClusterContext
						.getLocalAddress().getHostAddress());
				jobinfo.setJobid(job.getJobRequestMessage().getJobConf()
						.getJobId());
				jobinfo.setJobDescription(job.getJobRequestMessage()
						.getJobConf().getJobDescription());
				jobinfo.setJobstatus(job.getStatus());
				jobinfo.setReceiveDate(job.createTime);
				jobinfo.setFinishedDate(job.finishTime);
				jobinfo.setChilds(childs);

				RMINetClient.invoke(clusterStatusMonitor, "jobStateFinished",
						jobinfo);
			} catch (Throwable e) {
				logger.error("can not report to DM about job status, job:"
						+ job.getJobRequestMessage().getJobConf(), e);
			}

			logger.info("notify finished");
		}
	}

	@Override
	public void restart() {
		// Intentionally a no-op: restart semantics are not supported yet.
	}
}
