package cn.sinobest.pe;

import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import javax.sql.DataSource;

import org.pentaho.di.core.logging.KettleLogStore;
import org.pentaho.di.core.logging.LoggingObjectInterface;
import org.pentaho.di.core.logging.LoggingRegistry;
import org.pentaho.di.job.Job;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.support.lob.DefaultLobHandler;

import cn.com.hnisi.framework.util.Configuration;
import cn.sinobest.knob.core.meta.TaskStatusMeta;
import cn.sinobest.knob.core.rpc.TaskState;
import cn.sinobest.knob.worker.Worker;
import cn.sinobest.knob.worker.WorkerFactory;
import cn.sinobest.knob.worker.WorkerService;
import cn.sinobest.octopus.rpc.Monitor;
import cn.sinobest.pe.cons.PECons;
import cn.sinobest.pe.cons.RepositorySingleton;
import cn.sinobest.pe.cons.Task;
import cn.sinobest.pe.engine.excutor.TaskExecutor;
import cn.sinobest.pe.factory.DBFactory;
import cn.sinobest.pe.service.impl.JdbcServiceImpl;

public class Excutor extends Thread{
	
	private final Logger LOG = LoggerFactory.getLogger(getClass());
	
	/**
	 * pdi-engine environment configuration.
	 */
	public static Configuration peConfig;
	
	/**
	 * Tasks currently tracked by this worker, keyed by task key.
	 */
	public static Map<String, Task> taskMap;
	
	// Single-threaded scheduler driving the periodic task-status watcher.
	// NOTE(review): never shut down here — presumably lives for the JVM lifetime.
	ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
	
	/**
	 * Database service.
	 */
	public static JdbcServiceImpl dbService;
	
	/**
	 * Initializes the shared configuration, task map, database service and
	 * repository environment, then starts the periodic status watcher.
	 *
	 * @param config pdi-engine configuration; also stored in the static
	 *               {@link #peConfig} for use by other components
	 */
	public Excutor(Configuration config) {
		peConfig = config;
		taskMap = new ConcurrentHashMap<String, Task>();
		
	    DataSource dataSource = DBFactory.getDataSource(config);  
		dbService = new JdbcServiceImpl(dataSource, new DefaultLobHandler());
		
		// Initialize the repository singleton; on failure we log and continue,
		// matching the original best-effort behavior.
		if(!RepositorySingleton.init()){
			LOG.error("repository enviro inited failed!");
		}
		
		long period = Integer.parseInt(Excutor.peConfig.getValue("pe.watcher.period", PECons.watcherDefaultPeriod));
	    long delay = Integer.parseInt(Excutor.peConfig.getValue("pe.watcher.delay", PECons.watcherDefaultDelay));
	    
		scheduler.scheduleAtFixedRate(new Runnable() {
			public void run(){
				// Guard the whole tick: an uncaught exception thrown out of a
				// scheduleAtFixedRate task silently cancels all future runs.
				try {
					watchTasks();
				} catch (Throwable t) {
					LOG.error("task watcher tick failed:", t);
				}
				cleanFinishedJobLogs();
			}
		}, delay, period, TimeUnit.MILLISECONDS);
	}
	
	/**
	 * Pushes the latest status of every tracked task to its watcher and
	 * releases the resources of tasks that are no longer running.
	 */
	private void watchTasks() {
		if (Excutor.taskMap.isEmpty()) {
			return;
		}
		
		// ConcurrentHashMap iterators are weakly consistent, so removing
		// entries from taskMap while iterating is safe.
		for (Entry<String, Task> e : Excutor.taskMap.entrySet()) {
			Task task = e.getValue();
			TaskStatusMeta tsm = task.getTaskWatcher().getTaskStatusMeta();
			TaskState taskState = tsm.getState();
			if (taskState == TaskState.ABORTED) {
				LOG.info("update task status:{}---{}---{}", tsm.getJobIdentity(), task.getJob().getName(), taskState);
				task.getTaskWatcher().updateStatus(tsm);
				releaseTask(e.getKey(), task.getJob(), tsm);
			} else {
				Job job = task.getJob();
				taskState = getJobstate(job);
				LOG.info("update task status:{}---{}---{}", tsm.getJobIdentity(), job.getName(), taskState);
				// 20160615 wxx: use a new meta so the watcher sees the freshly
				// derived state rather than the stale one.
				TaskStatusMeta tsmNew = new TaskStatusMeta(tsm.getJobId(), tsm.getTaskId(), tsm.getJobIdentity(),
						taskState, tsm.getBegintime(), tsm.getDuration(), tsm.getProgress());
				task.getTaskWatcher().updateStatus(tsmNew);
				if (TaskState.RUNNING != taskState) {
					releaseTask(e.getKey(), job, tsm);
				}
			}
		}
	}
	
	/**
	 * Removes a finished/aborted task from {@link #taskMap} and frees its
	 * repository connection and Kettle log buffers.
	 *
	 * @param key  task-map key of the finished task
	 * @param job  the Kettle job backing the task
	 * @param tsm  status meta carrying the task id used for log cleanup
	 */
	private void releaseTask(String key, Job job, TaskStatusMeta tsm) {
		if (job.getRep().isConnected()) {
			job.getRep().disconnect();
		}
		Excutor.taskMap.remove(key);
		KettleLogStore.discardLines(tsm.getTaskId(), true);
		// wxx 20161019: the log cache must be cleaned when the job completes,
		// otherwise the logging registry leaks entries.
		LoggingRegistry.getInstance().removeIncludingChildren(tsm.getTaskId());
	}
	
	/**
	 * Clears logging-registry entries whose owning job is no longer tracked.
	 * (wxx 20161025: keys are resolved via the registry's fromMap and removed
	 * once the corresponding job has left {@link #taskMap}.)
	 */
	private void cleanFinishedJobLogs() {
		try {
			Map<String, String> map = LoggingRegistry.getInstance().getFromMap();
			Map<String, LoggingObjectInterface> logMap = LoggingRegistry.getInstance().getMap();
			// Remove via the iterator: the original removed from `map` while
			// looping over its keySet(), which throws
			// ConcurrentModificationException on ordinary Map implementations.
			Iterator<Entry<String, String>> it = map.entrySet().iterator();
			while (it.hasNext()) {
				Entry<String, String> entry = it.next();
				if (Excutor.taskMap.get(entry.getValue()) == null) {
					logMap.remove(entry.getKey());
					it.remove();
				}
			}
		} catch (Throwable e) {
			LOG.error("clear log registry failed:", e);
		}
	}
	
	/**
	 * Starts the knob worker service and, on a separate thread, the octopus
	 * monitor exposing it. Note this deliberately overrides
	 * {@link Thread#start()} without spawning this thread itself.
	 */
	@Override
	public void start(){
		try {
			final WorkerService ws = new WorkerService(peConfig, new WorkerFactory() {

				@Override
				public Worker create(Configuration conf) {
					try {
						return new Worker(peConfig, new TaskExecutor());
					} catch (Exception e) {
						LOG.error("worker instance exception:" , e);
						return null;
					}
				}
			});
			ws.start();
			
			new Thread(){
				public void run() {
					Monitor mon = new Monitor(ws.getWorker(), Integer.parseInt(peConfig.getValue("octopusPort", "6901")));
					try {
						mon.serve();
					} catch (Exception e) {
						LOG.error("monitor instance exception:" , e);
					}
				}
			}.start();
		} catch (Exception e) {
			LOG.error("start worker exception:" , e);
		}
	}
	
	/**
	 * Derives a {@link TaskState} from a Kettle job's lifecycle flags.
	 *
	 * @param task the job to inspect
	 * @return RUNNING while initializing or actively running, ABORTED if
	 *         stopped, FAILED if finished with errors, FINISHED otherwise
	 */
	private TaskState getJobstate(Job task) {
		if (!task.isInitialized()) {
			return TaskState.RUNNING;
		}
		if (task.isActive()) {
			return task.isStopped() ? TaskState.ABORTED : TaskState.RUNNING;
		}
		if (task.getResult() != null && task.getResult().getNrErrors() > 0) {
			return TaskState.FAILED;
		}
		return TaskState.FINISHED;
	}
}
