package cas.ihep.hmss;

import cas.ihep.hmss.queue.JobQueue;
import com.google.common.io.Closer;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Worker thread that drains {@link JobQueue} and submits jobs to the Hadoop
 * cluster. It polls cluster load, reserves capacity for jobs still in the
 * PREP state, and paces submission with short sleeps. Set {@link #stop} to
 * {@code true} (the field is volatile) to request a clean shutdown.
 */
public class SubmitThread extends Thread{
    /** Occupied-map-slot threshold above which submission pauses.
     *  NOTE(review): threshold (10) and capacity (56) differ — confirm intended. */
    private static final int SUBMIT_THRESHOLD = 10;
    /** Total map-slot capacity used to size each dequeued job. */
    private static final int CLUSTER_CAPACITY = 56;
    /** Sleep when the queue is empty (ms). */
    private static final long EMPTY_QUEUE_SLEEP_MS = 200;
    /** Sleep when the cluster is saturated (ms). */
    private static final long BUSY_SLEEP_MS = 5000;

    private final JobQueue queue;
    /** Shutdown flag; written by the owner thread, read by the loop. */
    volatile boolean stop;

    SubmitThread(JobQueue q){
        queue=q;
        stop=false;
    }

    /** JobClient that is {@link Closeable} so it can be registered with a Guava Closer. */
    static class MyJobClient extends JobClient implements Closeable{
        MyJobClient(JobConf conf)throws IOException{
            super(conf);
        }
    }

    /** Pairs a queued job with its running Hadoop counterpart for state tracking. */
    static class MyJob {
        Job myjob;
        RunningJob hjob;
        MyJob(Job j1,RunningJob j2){
            myjob=j1;
            hjob=j2;
        }
    }

    @Override
    public void run(){
        final Logger logger=Logger.getLogger(getClass().getName());
        LinkedList<MyJob> submittedJobs=new LinkedList<>();
        JobConf jconf=new JobConf();
        try {
            // Side effect: the parser mutates jconf with the -libjars setting.
            new GenericOptionsParser(jconf,new String[]{"-libjars","/workfs/cc/weizc/hmss-runtime/GridFTPJar.jar,/workfs/cc/weizc/hmss-runtime/guava-19.0.jar"});
        } catch (IOException e) {
            // Proceed with the bare JobConf, as before, but record the failure.
            logger.log(Level.WARNING, "failed to apply -libjars options", e);
        }
        try (Closer closer= Closer.create()){
            MyJobClient jclient=closer.register(new MyJobClient(jconf));
            while (!stop) {
                // Count map slots reserved by jobs still in PREP, and drop
                // jobs that are neither PREP nor RUNNING (finished/failed/killed).
                int preOccupied=0;
                Iterator<MyJob> iter=submittedJobs.iterator();
                while(iter.hasNext()){
                    MyJob job=iter.next();
                    int state=job.hjob.getJobState();
                    if(state== JobStatus.PREP){
                        preOccupied+=job.myjob.cores;
                    }else if(state!=JobStatus.RUNNING){
                        iter.remove();
                    }
                }
                int mapUsed=jclient.getClusterStatus().getMapTasks()+preOccupied;
                if (mapUsed<SUBMIT_THRESHOLD) {
                    int s = CLUSTER_CAPACITY - mapUsed;
                    Job subJob = queue.dequeue(s);
                    if (subJob == null) {
                        Thread.sleep(EMPTY_QUEUE_SLEEP_MS);
                        continue;
                    }
                    if (System.getProperty("hmss.debug") != null) {
                        // Debug mode: report the job size instead of submitting.
                        System.out.println(subJob.length);
                    } else {
                        // The file names to transfer are turned into a temporary
                        // list file; hms later reads that list line by line.
                        subJob.cores = s;
                        org.apache.hadoop.mapreduce.Job v2job=new Task(jclient,subJob).call();
                        long time=System.currentTimeMillis();
                        org.apache.hadoop.mapreduce.JobID v2id=v2job.getJobID();
                        // Log: queue id, Hadoop job id, queue latency in seconds.
                        StringBuilder builder=new StringBuilder(subJob.jid);
                        builder.append('\t').append(v2id.toString()).append('\t').append((time-subJob.enqueueTime)/1000.0);
                        logger.info(builder.toString());
                        submittedJobs.add(new MyJob(subJob,jclient.getJob(new org.apache.hadoop.mapred.JobID(v2id.getJtIdentifier(),v2id.getId()))));
                    }
                } else {
                    Thread.sleep(BUSY_SLEEP_MS);
                }
            }
        }catch (InterruptedException e){
            // Restore the interrupt status and exit the submission loop.
            Thread.currentThread().interrupt();
        }catch (Exception e){
            logger.log(Level.SEVERE, "submission loop aborted", e);
        }
    }
}
