/*
 *  Copyright 2013 National Institute of Advanced Industrial Science and Technology
 *  
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *  
 *      http://www.apache.org/licenses/LICENSE-2.0
 *  
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

package org.sss.client;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.ref.WeakReference;
import java.rmi.RemoteException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.WeakHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.sss.common.GetStatus;
import org.sss.common.SentinelThreadPool;
import org.sss.common.SssServerRemote;
import org.sss.common.TaskInfo;
import org.sss.common.TaskStatus;
import org.sss.common.io.IOManager;
import org.sss.common.io.IOThread;
import org.sss.mapreduce.Cluster;
import org.sss.mapreduce.Configuration;
import org.sss.mapreduce.ConfigurationOwner;
import org.sss.mapreduce.Encodings;
import org.sss.mapreduce.GroupID;
import org.sss.mapreduce.Resources;
import org.sss.mapreduce.SssException;
import org.sss.mapreduce.StorageNode;
import org.sss.mapreduce.TupleGroupScaner;
import org.sss.mapreduce.datatype.Packable;
import org.sss.server.SssServerConfiguration; // Layer violation
import org.sss.util.Misc;
import org.sss.util.UnaryCallable;

/**
 * <code>SssClient</code> acts as handle to the SSS.
 */
public class SssClient implements ConfigurationOwner {
  static {
    // ***Caution! this is extremely dangerous!***
    // Installs a permissive SecurityManager process-wide; needed for the
    // RMI-based communication with SSS servers.
    Misc.setDangerousSecurityManagerToSystem();
  }
  // final: created once per class load and never reassigned (SLF4J convention).
  private static final Logger logger = SssClient.getLogger(SssClient.class);

  /**
   * Returns a logger named corresponding to the class passed as parameter.
   *
   * NOTE: This method is accessed by this module.
   *
   * @param  klass  the returned logger will be named after klass
   * @return logger object for the specified class
   */
  static Logger getLogger(Class<?> klass) {
    return LoggerFactory.getLogger(klass);
  }

  private final ClusterManager clusterManager;
  private final Cluster        cluster;
  private final Configuration  defaultConf;
  private final Set<String>    defaultJars;
  private final String[]       args;
  // Weak registries: waiters/engines are tracked only for best-effort cleanup
  // by the shutdown hook; this map must not keep them alive on its own.
  private final WeakHashMap<JobWaiter<?>, Void>
    waiters = new WeakHashMap<JobWaiter<?>, Void>();
  private final WeakHashMap<JobEngine, Void>
    engines = new WeakHashMap<JobEngine, Void>();
  private OutputStream jobinfo = null;       // lazily opened by writeInfo()
  private SentinelThreadPool iotp_r = null;  // lazily created by getIOTPRead()
  private SentinelThreadPool iotp_w = null;  // lazily created by getIOTPWrite()
  private final IOManager ioManager;
  private final SssServerConfiguration serverConf;
  private String logFileName = null;         // set by initJobInfoFilename(); may stay null
  private final Encodings encodings = new Encodings();

  /**
   * Create a <code>SssClient</code> object with default configuration file.
   */
  public SssClient(String[] args) throws SssException {
    this(SssClientParams.parseArgs(args, loadDefaultConfigFile()));
  }

  /**
   * Create a <code>SssClient</code> object.
   *
   * NOTE: This does not read default configuration file "conf/mapreduce.client.conf".
   * NOTE: This is for test modules.
   *
   * @param  params Parameters for SSS Client. This must not be null.
   * @see    org.sss.mapreduce.Cluster
   */
  public SssClient(SssClientParams params) throws SssException {
    this(Cluster.loadDefaultConfigFile(), SssServerConfiguration.readFile(), params);
  }

  /**
   * Create a <code>SssClient</code> object.
   *
   * NOTE: This does not read default configuration file "conf/mapreduce.client.conf".
   *
   * @param  cluster       SSS cluster information to be accessed by this client. This must not be null.
   * @param  serverConf    SSS Server Configuration. This must not be null.
   * @param  params        Parameters for SSS Client. This must not be null.
   * @throws SssException if client initialization fails
   * @see    org.sss.mapreduce.Cluster
   */
  public SssClient(Cluster cluster, SssServerConfiguration serverConf, SssClientParams params) throws SssException {
    if (cluster == null) {
      throw new IllegalArgumentException("cluster is null");
    }
    if (serverConf == null) {
      throw new IllegalArgumentException("server configuration is null");
    }
    if (params == null) {
      throw new IllegalArgumentException("client parameters is null");
    }
    registerShutdownHook(this);

    this.cluster = cluster;
    this.defaultConf = params.getConfiguration();
    this.defaultJars = params.getJars();
    this.args = params.getArgs();
    this.clusterManager = new ClusterManager(cluster, defaultConf);
    this.serverConf = serverConf;
    this.serverConf.setServerId(19999);
    /*
     * ID is used by writer to make unique key.
     * SssClient uses as ID the value which does not become
     * the same as ID of server if possible.
     */
    this.ioManager = new IOManager(serverConf.ioConf);

    initJobInfoFilename();
  }
  /**
   * Returns the {@link Cluster} of this <code>SssClient</code>.
   *
   * @return the {@link Cluster} of this <code>SssClient</code>
   */
  public Cluster getCluster() {
    return cluster;
  }
  /**
   * Returns the <code>ClusterManager</code> of this <code>SssClient</code>.
   *
   * @return the <code>ClusterManager</code> of this <code>SssClient</code>
   */
  public ClusterManager getClusterManager() {
    return clusterManager;
  }
  /**
   * Returns the <code>Configuration</code> of this <code>SssClient</code>.
   *
   * @return the <code>Configuration</code> of this <code>SssClient</code>
   */
  @Override
  public Configuration getConfiguration() {
    return defaultConf;
  }
  /**
   * Returns a copy of the <code>Configuration</code> of this <code>SssClient</code>.
   *
   * @return a copy of the <code>Configuration</code> of this <code>SssClient</code>
   */
  public Configuration getConfigurationCopy() {
    Configuration conf = new Configuration();
    conf.merge(defaultConf);
    return conf;
  }
  /**
   * Returns a set of <tt>String</tt> that represent jar files of this <code>SssClient</code>.
   *
   * @return an unmodifiable set of <tt>String</tt> that represent jar files of this <code>SssClient</code>
   */
  public Set<String> getJarFiles() {
    return Collections.unmodifiableSet(defaultJars);
  }
  /**
   * Get the arguments that are not known to SSS Client.
   *
   * NOTE(review): the internal array is exposed directly; callers could mutate
   * it. Kept as-is for compatibility — confirm no caller relies on identity
   * before changing to a defensive copy.
   *
   * @return  the rest of arguments.
   */
  public String[] getArgs() {
    return args;
  }

  /**
   * Get client log name.
   *
   * Maybe null (when "client.jobinfo.output" is disabled).
   */
  public String getLogFileName() {
    return logFileName;
  }

  /**
   * Get number of tuples of the specified TupleGroup.
   *
   * Submits a count task to every storage node (or a single node for a
   * broadcast group), waits for all results, then sums the per-node counts.
   *
   * @param gid ID of TupleGroup
   * @return number of tuples of the specified TupleGroup.
   * @throws SssException if a remote call fails, the wait is interrupted,
   *         or any node reports an exception
   */
  public long getNumberOfTuples(GroupID gid) throws SssException {
    UUID jobid = UUID.randomUUID();
    JobWaiter<GetStatus> waiter = new JobWaiter<GetStatus>(this, jobid);
    Map<StorageNode, GetStatus> status = null;
    ClusterManager clusterManager = this.getClusterManager();
    List<StorageNode> storageNodes = clusterManager.getStorageNodes();

    try {
      for (StorageNode sn: storageNodes) {
        ClusterManager.Server server = clusterManager.get(sn);
        try {
          server.get().submitCountTask(jobid, sn, getConfiguration(),
              gid, waiter.newListener(sn));
        } finally {
          server.unlock();
        }
        // A broadcast group is fully visible from any single node.
        if (gid.isBroadcast()) {
          break;
        }
      }
      status = waiter.await();
    } catch (RemoteException e) {
      throw new SssException(e);
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers up the stack can observe it.
      Thread.currentThread().interrupt();
      throw new SssException(e);
    }
    finally {
      waiter.dispose();
    }
    for (Map.Entry<StorageNode, GetStatus> e: status.entrySet()) {
      GetStatus s = e.getValue();
      if (s.exception != null) {
      // TODO: put in all exception?
        throw new SssException("Exception in " + e.getKey(), s.exception);
      }
    }
    long nTuples = 0;
    for (GetStatus s: status.values()) {
      nTuples += s.inputTupleCount;
    }
    return nTuples;
  }

  /**
   * Get all tuple group IDs in storage nodes.
   */
  public Set<GroupID> getAllTupleGroups() throws SssException {
    ClusterManager.Server s = clusterManager.getAny();
    try {
      // NOTE: "Tule" typo is in the remote interface; cannot be fixed here.
      return s.get().getAllTuleGroupIDs();
    } catch (RemoteException e) {
      throw new SssException(e);
    } finally {
      s.unlock();
    }
  }

  /**
   * Remove all tuples in the specified Tuple Group.
   *
   * @return number of removed tuples.
   */
  public long removeTupleGroup(GroupID gid) throws SssException {
    ClusterManager.Server s = clusterManager.getAny();
    try {
      return s.get().removeTupleGroup(gid);
    } catch (RemoteException e) {
      throw new SssException(e);
    } finally {
      s.unlock();
    }
  }

  /**
   * Scan the specified Tuple Group.
   *
   * Read tuples in the specified Tuple Group and call scaner.set method with each read tuple.
   *
   * @param keyClass         Class of key type.
   * @param valueClass       Class of value type.
   * @param input            Input <code>GroupID</code>
   * @param scaner           Listener on reading a tuple.
   * @throws SssException if the scan fails or the scanner cannot be closed
   */
  public <K extends Packable, V extends Packable>
  void scanTupleGroup(Class<K> keyClass, Class<V> valueClass, GroupID input, TupleGroupScaner<K, V> scaner) throws SssException {
    DataScaner<K, V> ds = new DataScaner<K, V>(this, keyClass, valueClass, input, scaner);
    try {
      ds.scan();
    }
    finally {
      try {
        ds.close();
      }
      catch (IOException e) {
        throw new SssException(e);
      }
    }
  }
  /**
   * Get the specified continuous task status, accumulated over all servers.
   *
   * @param jobID the job to query
   * @return the accumulated status, or null if no server knows the job
   */
  public TaskStatus getTaskStatus(final UUID jobID)
    throws SssException {

    List<TaskStatus> ss = clusterManager.callEachServerInParallel(
        new UnaryCallable<TaskStatus, SssServerRemote>() {
          @Override
          public TaskStatus call(SssServerRemote server) throws Exception {
            return server.getTaskStatus(jobID);
          }
        });

    TaskStatus collected = new TaskStatus(jobID);
    boolean valid = false;
    for (TaskStatus status: ss) {
      if (status != null) {
        assert jobID.equals(status.getJobID()):
          "jobID mismatch " + jobID.toString() + ", " + status.getJobID().toString();
        collected.accumulate(status, false);
        valid = true;
      }
    }
    return valid ? collected : null;
  }

  /**
   * Cancel task set.
   *
   * @param taskSetID identifier of the task set to cancel.
   * @return true if at least one server knew the task set and ended it
   */
  public boolean cancelTaskSet(final UUID taskSetID) throws SssException {
    List<Boolean> results = clusterManager.callEachServerInParallel(
        new UnaryCallable<Boolean, SssServerRemote>() {
          @Override
          public Boolean call(SssServerRemote server) throws Exception {
            TaskInfo ti = server.getTaskInfo();
            if (ti != null && ti.taskMap.containsKey(taskSetID)) {
              server.endTaskSet(taskSetID);
              return true;
            }
            return false;
          }
        });
    boolean result = false;
    for (Boolean r: results) {
      result = r || result;
    }
    return result;
  }

  /**
   * Get all task information, merged across all servers.
   *
   * Duplicate job IDs reported by several servers are collapsed via a Set
   * before being copied back into the returned TaskInfo's list values.
   */
  public TaskInfo getAllTaskInfo() throws SssException {
    List<TaskInfo> taskInfos = clusterManager.callEachServerInParallel(
        new UnaryCallable<TaskInfo, SssServerRemote>() {
          @Override
          public TaskInfo call(SssServerRemote server) throws Exception {
            return server.getTaskInfo();
          }
        });

    final Map<UUID, Set<UUID>> collected = new HashMap<UUID, Set<UUID>>();
    for (TaskInfo ti: taskInfos) {
      for (Map.Entry<UUID, List<UUID>> e: ti.taskMap.entrySet()) {
        Set<UUID> jobids = collected.get(e.getKey());
        if (jobids == null) {
          collected.put(e.getKey(), new HashSet<UUID>(e.getValue()));
        }
        else {
          jobids.addAll(e.getValue());
        }
      }
    }

    TaskInfo ti = new TaskInfo();
    for (Map.Entry<UUID, Set<UUID>> e: collected.entrySet()) {
      ti.taskMap.put(e.getKey(), new ArrayList<UUID>(e.getValue()));
    }

    return ti;
  }

  /**
   * Append job information properties to the job info log file.
   *
   * The output stream is opened lazily on first use; whether it appends to an
   * existing file is controlled by "client.jobinfo.append" (default true).
   * Does nothing when no log file name was configured.
   *
   * NOTE: This method used by only this module.
   *
   * @param props    properties to store
   * @param comments comment line written by Properties.store
   * @throws SssException if the file cannot be opened or written
   */
  synchronized void writeInfo(Properties props, String comments) throws SssException {
    if (logFileName == null) {
      return;
    }
    if (jobinfo == null) {
      try {
        this.jobinfo = new FileOutputStream(logFileName,
            defaultConf.getBoolean("client.jobinfo.append", true));
      }
      catch (IOException e) {
        throw new SssException(e);
      }
    }
    try {
      // TODO: require close?
      props.store(jobinfo, comments);
      jobinfo.flush();
    }
    catch (IOException e) {
      throw new SssException(e);
    }
  }

  /**
   * Add a <code>JobWaiter</code> to waiters list.
   *
   * @param  waiter <code>JobWaiter</code> to be added
   *
   * NOTE: This method is used by only this module.
   */
  synchronized void registerJobWaiter(JobWaiter<?> waiter) {
    waiters.put(waiter, null);
  }

  /**
   * Register JobEngine.
   */
  synchronized void registerJobEngine(JobEngine engine) {
    engines.put(engine, null);
  }

  /**
   * Initialize the job information file name.
   *
   * Uses "client.jobinfo.filename" when set (trimmed, non-empty); otherwise
   * generates a unique name. Leaves logFileName null when
   * "client.jobinfo.output" is false.
   */
  private void initJobInfoFilename() throws SssException {
    if (defaultConf.getBoolean("client.jobinfo.output", true)) {
      logFileName = defaultConf.get("client.jobinfo.filename");
      if (logFileName != null) {
        logFileName = logFileName.trim();
      }
      if (logFileName == null || logFileName.isEmpty()) {
        logFileName = String.format("sssjob-%s.log", UUID.randomUUID());
      }
    }
  }

  /**
   * Load client configuration from "mapreduce.client.properties" in the
   * configuration directory, if that file exists.
   */
  private static Configuration loadDefaultConfigFile() throws SssException {
    Configuration conf = new Configuration();
    File f = new File(Resources.getConfigDirectory(), "mapreduce.client.properties");
    if (f.isFile()) {
      conf.load(f.toString());
    }
    return conf;
  }

  /**
   * Cancel all jobs.
   */
  private synchronized void cancelAllJobs() {
    for (JobWaiter<?> w: waiters.keySet()) {
      w.cancel();
    }
  }

  /**
   * Dispose all registered job engines, logging (not propagating) failures
   * so that every engine gets a dispose attempt.
   */
  private synchronized void disposeAllJobEngines() {
    for (JobEngine engine: engines.keySet()) {
      try {
        engine.dispose();
      } catch (SssException e) {
        // Ignore Exception.
        logger.error("Error in dispose JobEngine.", e);
      }
    }
  }

  /**
   * Returns the I/O manager.
   * NOTE: this is used in only this module.
   */
  IOManager getIOManager() {
    return ioManager;
  }

  /**
   * Predicates to use stream protocol or not.
   */
  public boolean isStreamProtocol() {
    return ioManager.isStreamProtocol();
  }

  /**
   * Returns the write I/O thread pool, creating it lazily on first call.
   * NOTE: this is used in only this module.
   */
  synchronized SentinelThreadPool getIOTPWrite() {
    if (iotp_w == null) {
      this.iotp_w = IOThread.createPool(serverConf.ioConf, serverConf.output_threads_max,
          serverConf.outputtp_workqueue_limit, cluster.getStorageNodes(), "IOW", false, true);
    }
    return iotp_w;
  }

  /**
   * Returns the encodings registry.
   * NOTE: this is used in only this module.
   */
  Encodings getEncodings() {
    return encodings;
  }

  /**
   * Returns the read I/O thread pool, creating it lazily on first call.
   *
   * NOTE(review): unlike getIOTPWrite this is not synchronized — lazy init
   * here is not thread-safe. Kept as-is; confirm single-threaded use before
   * relying on it from multiple threads.
   */
  SentinelThreadPool getIOTPRead() {
    if (iotp_r == null) {
      this.iotp_r = IOThread.createPool(serverConf.ioConf, serverConf.input_threads_max,
          serverConf.inputtp_workqueue_limit, cluster.getStorageNodes(), "IOR", false, true);
    }
    return iotp_r;
  }

  /**
   * Register shutdown hook to this process.
   *
   * All clients share one JVM shutdown hook, installed on the first call;
   * clients are held via weak references so the hook does not leak them.
   */
  private static synchronized void registerShutdownHook(SssClient client) {
    clientList.add(new WeakReference<SssClient>(client));
    if (shutdownHook == null) {
      shutdownHook = new Thread(new ShutdownHook());
      Runtime.getRuntime().addShutdownHook(shutdownHook);
    }
  }
  private static Thread shutdownHook = null;  // assigned once under class lock
  private static final List<WeakReference<SssClient>> clientList = new ArrayList<WeakReference<SssClient>>();
  /**
   * Body of shutdown hook: cancels jobs and disposes job engines of every
   * still-live client.
   */
  private static class ShutdownHook implements Runnable {
    @Override
    public void run() {
      synchronized (SssClient.class) {
        for (WeakReference<SssClient> ref: clientList) {
          SssClient client = ref.get();
          if (client != null) {
            client.cancelAllJobs();
            client.disposeAllJobEngines();
          }
        }
      }
    }
  }
}
