/*
 *  Copyright 2013 National Institute of Advanced Industrial Science and Technology
 *  
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *  
 *      http://www.apache.org/licenses/LICENSE-2.0
 *  
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

package org.sss.client;

import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.rmi.RemoteException;

import org.slf4j.Logger;
import org.sss.common.SssServerRemote;
import org.sss.mapreduce.Configuration;
import org.sss.mapreduce.ConfigurationOwner;
import org.sss.mapreduce.GroupID;
import org.sss.mapreduce.SssException;
import org.sss.util.SortedProperties;
import org.sss.util.StopWatch;
import org.sss.util.UnaryCallable;

/**
 * <code>JobEngine</code> is the primary interface for the user-job to interact
 * with the cluster.
 *
 * <code>JobEngine</code> provides facilities to create a job tree,
 * execute its jobs, access execution information etc.
 */
public class JobEngine implements ConfigurationOwner, Closeable {
  private static final Logger logger = SssClient.getLogger(JobEngine.class);
  /** Execution modes: <tt>normal</tt> really runs jobs, <tt>dryrun</tt> only simulates them. */
  private enum RunMode { normal, dryrun }
  private RunMode runMode = RunMode.normal;
  private final List<Job> jobs = new ArrayList<Job>();
  private final Map<GroupID, TupleGroupStatus> gids = new HashMap<GroupID, TupleGroupStatus>();
  private long interval = 1000L; // period of continuous execution; default: 1 sec.
  private final List<byte[]> jarfiles = new ArrayList<byte[]>();
  Properties execInfo = new SortedProperties(); // package-private: access from Job class.
  private final ClusterManager clusterManager;
  private final SssClient client;
  private UUID taskSetID = null; // non-null while a task set is open on the servers
  private final Configuration conf; // job tree wide configuration
  private ScheduledExecutorService scheduler = new ScheduledThreadPoolExecutor(1);
  private ScheduledFuture<?> future = null; // handle of the periodic task started by contExec()

  /**
   * Build a <code>JobEngine</code> with the given {@link SssClient}.
   *
   * @param   client  the <code>SssClient</code>
   * @throws  SssException  if registering the client's jar files fails
   * @see     org.sss.client.SssClient
   */
  public JobEngine(SssClient client) throws SssException {
    this.clusterManager = client.getClusterManager();
    this.client = client;
    this.conf = client.getConfigurationCopy();

    client.registerJobEngine(this);
    try {
      registerJarFile(client.getJarFiles());
    } catch (IOException e) {
      throw new SssException(e);
    }
  }
  /**
   * Get the <code>ClusterManager</code>
   *
   * @return  the cluster manager.
   *
   * NOTE: This method is used by only this module.
   */
  ClusterManager getClusterManager() {
    return clusterManager;
  }
  /**
   * Get the <code>SssClient</code>
   *
   * @return  the client.
   */
  public SssClient getClient() {
    return client;
  }
  /**
   * Get the {@link Configuration}.
   */
  @Override
  public Configuration getConfiguration() {
    return conf;
  }
  /**
   * Get a new <code>Job.Builder</code> to create a new job for the job tree.
   *
   * @param   name    the name for a new <code>Job</code>
   * @param   proc    the class object to be executed by a new <code>Job</code>.
   *                  This is {@link org.sss.mapreduce.Mapper} or {@link org.sss.mapreduce.Reducer}.
   * @return  the job builder instance to create a new <code>Job</code>
   * @throws  SssException
   */
  public Job.Builder getJobBuilder(String name, Class<?> proc) throws SssException {
    return new Job.Builder(name, proc, this);
  }
  /**
   * Get a new <code>Job.Builder</code> to create a new job for the job tree.
   * The name of the job will be set with the name of <tt>proc.getName()</tt> method.
   *
   * @param   proc    the class object to be executed by a new <code>Job</code>.
   *                  This is {@link org.sss.mapreduce.Mapper} or {@link org.sss.mapreduce.Reducer}.
   * @return  the job builder instance to create a new <code>Job</code>
   * @throws  SssException
   */
  public Job.Builder getJobBuilder(Class<?> proc) throws SssException {
    return new Job.Builder(proc.getName(), proc, this);
  }
  /**
   * Put a <code>Job</code> instance under the control of this <code>JobEngine</code>.
   * <p>
   * NOTE: This method is called only by {@link Job}.
   * </p>
   *
   * @param  job  the job instance to be added.
   */
  void addJob(Job job) {
    jobs.add(job);
  }

  /**
   * Get the status object for the given group, creating and caching it on
   * first access.
   * <p>
   * NOTE: This method is called only by {@link Job}.
   * </p>
   *
   * @param   gid  the tuple-group identifier
   * @return  the cached (never null) status for <tt>gid</tt>
   */
  TupleGroupStatus getTupleGroupStatus(GroupID gid) {
    // Values are never null, so a single lookup suffices
    // (the original containsKey/get pair did two).
    TupleGroupStatus g = gids.get(gid);
    if (g == null) {
      g = new TupleGroupStatus(gid);
      gids.put(gid, g);
    }
    return g;
  }

  /**
   * Set run mode of this <code>JobEngine</code> "dry-run".
   * <p>
   * In dry-run mode, running <code>JobEngine</code> by calling its <tt>#execute</tt>
   * method won't invoke any job's procedure classes.  Just mimics running jobs
   * and produces logs.  Users could see if their job tree is constructed as
   * they expected without running heavy jobs on the cluster.
   * </p>
   */
  public void setDryRunMode() {
    runMode = RunMode.dryrun;
  }
  /**
   * Execute the job tree continuously.
   *
   * The tree is executed repeatedly, every {@link #setInterval interval}
   * milliseconds, until {@link #stopContExec} is called.  In dry-run mode the
   * tree is executed once, synchronously.
   *
   * @return information of the running jobs (updated asynchronously by the timer task).
   * @throws SssException
   */
  public Properties contExec() throws SssException {
    if (runMode == RunMode.dryrun) {
      exec_();
      return execInfo;
    }
    startTaskSet();
    for (Job job: jobs) {
      execInfo.setProperty("job." + job.getName() + ".id", job.getJobID().toString());
    }
    execInfo.setProperty("job.taskset.id", taskSetID.toString());
    this.future = scheduler.scheduleAtFixedRate(new Runnable(){
        private boolean upload = true; // upload jars only on the first round
        @Override
        public void run() {
          Throwable exp = null;
          try {
            StopWatch jobWatch = StopWatch.start();
            if (upload) {
              uploadJars(taskSetID);
              upload = false;
            }
            executeJobTree(false);
            jobWatch.stop();
            synchronized (execInfo) {
              execInfo.setProperty("job.time", jobWatch.toString());
            }
            writeLogFile(); // no-op unless "job.output_log" is enabled
            for (Job job: jobs) {
              job.reset();
            }
          }
          catch (IOException e) { exp = e; }
          catch (SssException e) { exp = e; }

          if (exp != null) {
            // FIXME: WHAT SHOULD WE DO HERE?
            logger.error("exception in timer thread.", exp);
          }
        }
      }, 0L, interval, TimeUnit.MILLISECONDS);
    return execInfo;
  }
  /**
   * Stop the continuous execution started by {@link #contExec} and reset this
   * engine so it can be reused.
   *
   * @param  mayInterruptIfRunning  whether an in-flight round may be interrupted
   * @return the execution information collected so far.
   * @throws SssException
   */
  public Properties stopContExec(boolean mayInterruptIfRunning) throws SssException {
    if (future != null) {
      future.cancel(mayInterruptIfRunning);
    }
    scheduler.shutdown();
    Properties info = execInfo; // keep a reference; reset() replaces execInfo
    reset();
    future = null;
    scheduler = new ScheduledThreadPoolExecutor(1); // fresh scheduler for the next contExec()
    return info;
  }
  /**
   * @return true if continuous execution has been started and its first round is due or running.
   */
  public boolean isRunning() {
    return future != null && future.getDelay(TimeUnit.MILLISECONDS) <= 0;
  }
  /**
   * Execute the job tree.
   *
   * @return information of the executed jobs.
   * @throws SssException
   */
  public Properties exec() throws SssException {
    if (runMode == RunMode.dryrun) {
      exec_();
      return execInfo;
    }
    try {
      StopWatch jobWatch = StopWatch.start();
      startTaskSet();
      uploadJars(taskSetID);
      executeJobTree(false);
      jobWatch.stop();
      execInfo.setProperty("job.time", jobWatch.toString());
      writeLogFile();
      return execInfo;
    }
    catch (IOException e) { throw new SssException(e); }
    finally { reset(); }
  }

  /**
   * Run every job of the tree, repeatedly launching the jobs that report
   * themselves ready until all jobs have been executed.
   *
   * @param  dryrun  if true, only simulate the jobs (call <tt>Job#go_</tt>)
   * @throws SssException  wrapping any remote or interruption failure, or if
   *                       the tree makes no progress (e.g. a dependency cycle)
   */
  private void executeJobTree(boolean dryrun) throws SssException {
    try {
      int doneCount = 0;
      while (doneCount < jobs.size()) {
        List<Job> readyJobs = new ArrayList<Job>();
        for (Job job: jobs) {
          if (job.isReadyToGo()) {
            readyJobs.add(job);
          }
        }
        // Nothing inside this single-threaded loop can make a job become
        // ready if none is now; the original code span forever here.
        if (readyJobs.isEmpty()) {
          throw new SssException(new IllegalStateException(
              "no runnable job although the job tree is not finished"
              + " - possible dependency cycle"));
        }
        for (Job job: readyJobs) {
          if (dryrun) {
            job.go_();
          }
          else {
            job.go(taskSetID);
          }
          doneCount++;
        }
        if (dryrun) {
          dump();
        }
      }
    }
    catch (RemoteException e) {
      throw new SssException(e);
    }
    catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve the interrupt status for callers
      throw new SssException(e);
    }
  }

  /**
   * Execute the job tree in dry-run mode.
   */
  private void exec_() throws SssException {
    executeJobTree(true);
    logger.info("done");
  }
  /**
   * Log jobs and TupleGroup traversed in dry-run.
   */
  private void dump() {
    for (Job job: jobs) {
      logger.info(job + " ");
    }
    for (TupleGroupStatus c: gids.values()) {
      logger.info(c + " ");
    }
  }
  /**
   * Register a jar file to this <code>JobEngine</code>
   *
   * @param   fileName  path to a jar file
   * @throws  IOException  if the file cannot be opened or read completely
   */
  public void registerJarFile(String fileName) throws IOException {
    File file = new File(fileName);
    InputStream is = new FileInputStream(file);
    try {
      byte[] fileBuf = new byte[(int) file.length()];
      // InputStream#read may return fewer bytes than requested; loop until
      // the whole file is buffered.  (A single read could silently truncate
      // large jar files.)
      int offset = 0;
      while (offset < fileBuf.length) {
        int read = is.read(fileBuf, offset, fileBuf.length - offset);
        if (read < 0) {
          break; // premature end of file
        }
        offset += read;
      }
      if (fileBuf.length == 0 || offset < fileBuf.length) {
        logger.error("failed in reading jarfile - {}.", file.getName());
        throw new IOException("failed in reading jarfile - " + file.getName());
      }
      this.jarfiles.add(fileBuf);
    }
    finally {
      is.close();
    }
  }
  /**
   * Register jar files to this <code>JobEngine</code>
   *
   * @param   jarFiles  collection of <code>String</code> which represent path to a jar file
   * @throws  IOException
   */
  public void registerJarFile(Collection<String> jarFiles) throws IOException {
    for (String jarfile: jarFiles) {
      registerJarFile(jarfile);
    }
  }

  /** Forget all jobs, group statuses, jar files and execution information. */
  private void reset() throws SssException {
    jobs.clear();
    gids.clear();
    jarfiles.clear();
    execInfo = new SortedProperties();
  }

  /**
   * Dispose this JobEngine.
   *
   * Release server side resources by ending the task set on every server.
   * Safe to call more than once; <tt>taskSetID</tt> is cleared even on failure.
   */
  public synchronized void dispose() throws SssException {
    try {
      if (taskSetID != null) {
        clusterManager.callEachServerInParallel(
            new UnaryCallable<Void, SssServerRemote>() {
              @Override
              public Void call(SssServerRemote server) throws Exception {
                server.endTaskSet(taskSetID);
                return null;
              }
            });
      }
    }
    finally {
      taskSetID = null;
    }
  }

  @Override
  public void close() throws IOException {
    try {
      dispose();
    }
    catch (SssException e) {
      throw new IOException(e);
    }
  }

  /**
   * Open a new task set on every server, if one is not already open.
   * The generated id is published to <tt>taskSetID</tt> only after every
   * server accepted it.
   */
  private synchronized void startTaskSet() throws SssException {
    if (taskSetID == null) {
      final UUID id = UUID.randomUUID();
      clusterManager.callEachServerInParallel(
          new UnaryCallable<Void, SssServerRemote>() {
            @Override
            public Void call(SssServerRemote server) throws Exception {
              server.startTaskSet(id);
              return null;
            }
          });
      taskSetID = id;
    }
  }
  /**
   * Send the registered jar file(s) to every server via the network.
   *
   * @param  taskSetID  the task set the jars belong to
   */
  private void uploadJars(final UUID taskSetID) throws SssException {
    if (!jarfiles.isEmpty()) {
      clusterManager.callEachServerInParallel(
          new UnaryCallable<Void, SssServerRemote>() {
            @Override
            public Void call(SssServerRemote server) throws Exception {
              for (byte[] j: jarfiles) {
                server.appendJarFile(taskSetID, j);
              }
              return null;
            }
          });
    }
  }
  /**
   * Log output using execution information.
   * Does nothing unless the "job.output_log" configuration flag is enabled
   * (default: enabled).
   */
  private void writeLogFile() throws IOException, SssException {
    if (conf.getBoolean("job.output_log", true)) {
      client.writeInfo(execInfo, "SSS JobEngine Execution information");
    }
  }

  /** Last-resort cleanup: release server side resources if the user forgot to. */
  @Override
  public void finalize() {
    try {
      dispose();
    }
    catch (SssException e) {
      logger.error("error in finalize()", e);
    }
  }
  /**
   * Set an interval time (in milliseconds) for JobTree execution.
   *
   * @param  interval  interval in milliseconds
   */
  public void setInterval(long interval) {
    this.interval = interval;
  }
}
