/*
    Copyright 2010 Northbranchlogic, Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
 */

package com.norbl.runjrun;

import com.norbl.util.*;
import com.norbl.util.ec2.*;
import com.norbl.util.s3.*;
import com.norbl.util.sqs.*;
import com.norbl.util.sdb.*;
import com.xerox.amazonws.ec2.*;
import com.xerox.amazonws.sdb.SDBException;
import java.util.*;
import java.io.*;
import gnu.trove.*;
import org.jets3t.service.S3ServiceException;

/** <tt>Client</tt> contains methods for
 *  <ul>
 *  <li>Uploading jars to S3</li>
 *  <li>Launching and terminating EC2 instances.</li>
 *  <li>Submitting tasks for execution and retrieving 'cooked' tasks.</li>
 *  <li>Clearing task and compute node information in SDB.</li>
 *  </ul>
 *  These are all the operations needed to use <i>RunJRun</i>.
 *  You may want to use {@link Monitor} to watch the status of tasks
 *  and the compute nodes (<tt>Monitor</tt> also supports some management
 *  operations on tasks and nodes).
 *
 *
 * @author Barnet Wagman
 */
public class Client extends NodeTaskOps {

        // AWS credentials used by every service wrapper below.
    private String awsAccessID,awsSecretKey;

        // Low-level service wrappers: EC2 (instances), S3 (objects),
        // SDB (node/task state tables) and SQS (the task queue).
    private Ec2Ops eops;
    private S3Ops sops;
    private SdbOps dops;
    private SqsOps qops;

        // S3 bucket holding jars, raw tasks, cooked tasks and
        // serialized task exceptions.
    private String bucketName;
        // Key prefix under which 'cooked' (completed) task objects live.
    private String outputS3KeyPrefix;

        // Max time (millis) to block waiting for an S3 put to become
        // visible; see the full constructor's javadoc.
    private long maxS3PutWaitMillis;

        // Per-prefix counters used by buildDefaultS3Key() to make
        // generated S3 keys unique within this client instance.
    private HashMap<String,Integer> indexHt;


        /** Creates a <tt>Client</tt> object using default values for
         *  all parameters.  (See the other constructor for the full list of
         *  parameters; default values are defined in {@link Constants}.)
         *
         * @param awsAccessID AWS access key ID.
         * @param awsSecretKey AWS secret access key.
         * @param bucketName the S3 bucket that will be used to hold jars, raw
         *  task objects, cooked task objects, and serialized task exceptions.
         *  If this bucket does not exist, it will be created. <i>Note</i>
         *  that S3 bucket names must be unique across all S3 users.  If you
         *  specify a bucketname that already exists, an exception will be
         *  thrown.
         */
    public Client(String awsAccessID,String awsSecretKey,
                  String bucketName) {
            // Delegate to the full constructor, filling in every optional
            // parameter with its default from Constants; the trailing
            // 'true' requests that the bucket be created if missing.
        this(awsAccessID,
             awsSecretKey,
             Constants.TASKQUEUE_NAME_DEFAULT,
             bucketName,
             Constants.INPUT_S3KEY_PREFIX_DEFAULT,
             Constants.OUTPUT_S3KEY_PREFIX_DEFAULT,
             Constants.S3_RETRY_INTERVAL_MILLIS_DEFAULT,
             Constants.S3_MAX_WAIT_MILLIS_DEFAULT,
             Constants.SDB_RETRY_INTERVL_MILLIS_DEFAULT,
             Constants.SDB_MAX_WAIT_MILLIS_DEFAULT,
             Constants.PULSE_INTER_MILLIS_DEFAULT,
             Constants.MAX_S3_PUT_WAIT_MILLIS_DEFAULT,
             true);
    }

        /** Creates a client object.
         *
         * @param awsAccessID AWS access key ID.
         * @param awsSecretKey AWS secret access key.
         * @param taskQueueName the name of the SQS queue that will hold
         *  task description messages.
         * @param bucketName the S3 bucket used for jars and task objects.
         * @param inputS3KeyPrefix the prefix of raw task objects
         *  in S3.
         * @param outputS3KeyPrefix  the prefix of cooked task objects 
         *  in S3.         
         * @param s3RetryIntervalMillis the time interval (in milliseconds)
         *  between retries of failed S3 operations.
         * @param s3MaxWaitMillis the max total time (in milliseconds) which
         * may elapse while retries of failed S3 operations are attempted.
         * If this time is exceeded without a successful operation,
         * an exception is thrown.
         * @param sdbRetryIntervalMillis  the time interval (in milliseconds)
         *  between retries of failed SDB operations.
         * @param sdbMaxWaitMillis the max total time (in milliseconds) which
         * may elapse while retries of failed SDB operations are attempted.
         * If this time is exceeded without a successful operation,
         * an exception is thrown.
         * @param runnerNodePulseIntervalMillis A {@link RunnerNode} periodically
         *  updates a 'pulse' field in its SDB record. This variable defines
         *  the period (in milliseconds).
         * @param maxS3PutWaitMillis When an object is written to S3, the
         *  'write' method blocks until the object is actually available in S3.
         *  This variable specifies the maximum time (in millis) to wait.  If
         *  an object is not available within this amount of time, an exception
         * is thrown.
         * @param createBucket if true and the specified bucket does not
         *  already exist, it will be created in your default region.
         *   <i>Note</i> that S3 bucketnames must be unique across all S3 users.
         *  If you specify a bucketname that already exists (and someone else
         *  owns), an exception will be thrown.
         */
    public Client(String awsAccessID,
                  String awsSecretKey,
                  String taskQueueName,
                  String bucketName,
                  String inputS3KeyPrefix,
                  String outputS3KeyPrefix,
                  long s3RetryIntervalMillis,
                  long s3MaxWaitMillis,
                  long sdbRetryIntervalMillis,
                  long sdbMaxWaitMillis,
                  long runnerNodePulseIntervalMillis,
                  long maxS3PutWaitMillis,
                  boolean createBucket) {
        super();
        try {

                // Raw and cooked objects share a bucket, so identical
                // prefixes would let outputs overwrite inputs.
            if ( inputS3KeyPrefix.equals(outputS3KeyPrefix) )
                throw new RunJRunException("The s3 input and output " +
                    "key prefixes are both " + outputS3KeyPrefix +
                    "; they must be different.");

                // Route commons-logging to the no-op NilLog implementation
                // (silences logging from the S3 client library).
            System.setProperty("org.apache.commons.logging.Log",
                               "com.norbl.util.s3.NilLog");
            
            this.awsAccessID = awsAccessID;
            this.awsSecretKey = awsSecretKey;
            this.bucketName = bucketName;
            this.inputS3KeyPrefix = inputS3KeyPrefix;
            this.outputS3KeyPrefix = outputS3KeyPrefix;
            this.maxS3PutWaitMillis = maxS3PutWaitMillis;

                // Build the four AWS service wrappers.
            eops = new Ec2Ops(awsAccessID, awsSecretKey);
            sops = new S3Ops(awsAccessID, awsSecretKey,
                             s3RetryIntervalMillis,s3MaxWaitMillis);

            dops = new SdbOps(awsAccessID, awsSecretKey,
                              sdbRetryIntervalMillis,sdbMaxWaitMillis);

            qops = new SqsOps(awsAccessID, awsSecretKey, taskQueueName);

                // State tables (inherited fields) must exist before
                // super.init() is called, which wires them together.
            nodeStateTable = 
                new NodeStateTable(dops,eops,runnerNodePulseIntervalMillis);

            taskStateTable = new TaskStateTable(dops);

            super.init(eops, sops, dops, qops,
                        nodeStateTable, taskStateTable,
                        bucketName,inputS3KeyPrefix);

            taskStateTable.setOps(this);

            indexHt = new HashMap<String,Integer>();

                // Verify (or create) the working bucket last, after all
                // wrappers are ready.
            if ( !sops.bucketExists(bucketName) ) {
                if ( createBucket ) {
                    try {
                        sops.createBucket(bucketName);
                    }
                    catch(S3ServiceException sx) {
                        throw new RunJRunException("Could not create bucket " +
                                bucketName + "\n" + sx.getMessage());
                    }
                }
                else throw new RunJRunException("Bucket " + bucketName +
                            " does not exist.");
            }
        }
            // Any failure (including the RunJRunExceptions thrown above)
            // is surfaced as a RunJRunException.
        catch(Throwable sx) { throw new RunJRunException(sx); }
                
    }

        /** Closes the jets3t S3 service objects used to access S3.
         *  Using this method can free up resources used in http
         *  communications.  It is not usually necessary to
         *  use this method.
         */
    public void shutdown() {
        sops.shutdown();
    }

        // --------- Instance ops ---------------------------

        /** Requests EC2 instances.  This method returns as soon as
         *  the requests have been processed.  It does <i>not</i>
         *  wait for the instances to boot and become available for use.
         *  <p>
         *  Note that an EC2 instance is always created in the AWS region
         *  where the specified Amazon Machine Image (AMI) resides.
         *
         * @param instanceType the EC2 instance (hardware) type.
         * @param imageID the ID of an Amazon Machine Image (AMI).
         *  Obviously this must be an AMI that has <i>RunJRun</i> installed.
         * @param kernelID set this parameter to <tt>null</tt> to get the
         *  default kernel.
         * @param nInstances the number of instances to request.
         * @param zone an AWS region contains a number of "availability zones".
         *  Network communications within a zone <i>may</i> be faster than
         *  between zones.  This parameter lets you specify a zone.
         *  <b><i>Note</i></b> that specifying a zone may substantially
         *  increase the time it takes to get instances. <i>You may set this
         *  parameter to</i> <tt>null</tt>.  In that case, Amazon will choose
         *  zones for you.  You are likely to get instances in more than one
         *  zone.  Having instances in multiple zones does not affect
         *  <i>RunJRun</i> performance (unless your tasks directly communicate
         *  with each other).
         * @param params node configuration, passed to each instance as its
         *  EC2 user-data message string.
         *
         * @return IDs that uniquely identify your EC2 instances.
         *
         * @throws Ec2OpException
         */
    public List<String> requestInstances(InstanceType instanceType,
                                            String imageID,
                                            String kernelID,
                                            int nInstances,
                                            String zone,
                                            NodeParams params)
        throws Ec2OpException {
        try {
            return( Ec2Ops.getInstanceIDs(
                eops.requestInstances(instanceType,
                                          imageID,
                                          kernelID,
                                          nInstances,
                                          zone,
                                          params.toMessageString() ) ) );
        }
        catch(EC2Exception x) { throw new Ec2OpException(x); }
   }

        /** Requests EC2 instances, returning as soon as the requests have
         *  been processed; it does <i>not</i> wait for the instances to
         *  boot and become available for use.
         *  <p>
         *  Note that an EC2 instance is always created in the AWS region
         *  where the specified Amazon Machine Image (AMI) resides.<p>
         *
         *  The availability zone is set to <tt>null</tt>, so your instances
         *  may not all land in the same zone.  Instances in multiple zones
         *  do not affect <i>RunJRun</i> performance (unless your tasks
         *  communicate with each other directly).
         *
         * @param instanceType the EC2 instance (hardware) type.
         * @param imageID the ID of an Amazon Machine Image (AMI); it must
         *  have <i>RunJRun</i> installed.
         * @param nInstances the number of instances to request.
         * @return IDs that uniquely identify your EC2 instances.
         * @throws Ec2OpException
         */
   public List<String> requestInstances(InstanceType instanceType,
                                        String imageID,
                                        int nInstances)
       throws Ec2OpException {

           // Size the jvm heap for the instance type and build a default
           // node-parameter set from this client's credentials and bucket.
       int maxHeapGb = chooseMaxHeapGb(instanceType);
       NodeParams nodeParams = NodeParams.createDefault(awsAccessID,
                                                        awsSecretKey,
                                                        bucketName,
                                                        maxHeapGb);
           // null kernelID -> default kernel; null zone -> Amazon chooses.
       return requestInstances(instanceType,
                               imageID,
                               null,
                               nInstances,
                               null,
                               nodeParams);
   }

       /** Requests instances and monitors the request until the full
        *  number requested are running.<p>
        *
        *  While the number of running instances is fewer than requested,
        *  this method monitors the states of the requests and the
        *  instances.  If an instance terminates or a request fails,
        *  another instance is requested.  Once the requested count of
        *  running instances is reached, monitoring stops.<p>
        *
        *  Monitoring runs on background threads; this method returns
        *  immediately after launching them.
        *
        * @param instanceType the EC2 instance (hardware) type.
        * @param imageID the ID of an Amazon Machine Image (AMI); it must
        *  have <i>RunJRun</i> installed.
        * @param nInstances the number of instances to request.
        * @return the monitor tracking the request.
        * @throws Ec2OpException
        */
   public InstanceMonitor requestInstancesMonitored(InstanceType instanceType,
                                                    String imageID,
                                                    int nInstances)
        throws Ec2OpException {
        try {
            NodeParams nodeParams =
                NodeParams.createDefault(awsAccessID,
                                         awsSecretKey,
                                         bucketName,
                                         chooseMaxHeapGb(instanceType));

            InstanceSpec spec =
                new InstanceSpec(instanceType,imageID,nInstances,nodeParams);

                // Issue the initial requests, then hand the resulting
                // reservations to a monitor that tracks their state.
            List<ReservationDescription.Instance> reservations =
                spec.requestInstances(eops, nInstances);

            InstanceMonitor monitor =
                new InstanceMonitor(eops,
                                    Constants.INSTANCE_MONITOR_INTERVAL_MILLIS,
                                    reservations);
            monitor.setInstanceSpec(spec);

                // The monitor thread tracks/replaces instances; the watcher
                // thread shuts the monitor down once enough are running.
            new Thread(monitor).start();
            new Thread(new InstanceCountWatcher(
                           monitor,
                           nInstances,
                           Constants.INSTANCE_MONITOR_INTERVAL_MILLIS)).start();

            return monitor;
        }
        catch(EC2Exception ex) { throw new Ec2OpException(ex); }
   }

   /** Polls an {@link InstanceMonitor} until the desired number of
    *  instances are running, then shuts the monitor down.  The monitor
    *  is also shut down if polling dies on an error.
    */
   private class InstanceCountWatcher implements Runnable {

       private final InstanceMonitor monitor;
       private final int nWanted;
       private final long pollIntervalMillis;

       InstanceCountWatcher(InstanceMonitor m, int nDesired,
                                long interval) {
           this.monitor = m;
           this.nWanted = nDesired;
           this.pollIntervalMillis = interval;
       }

       public void run() {
           try {
               for (;;) {
                   if ( monitor.getNRunning() >= nWanted ) break;
                   Time.nap(pollIntervalMillis);
               }
           }
           catch(Throwable t) {
               System.out.println(StringUtil.throwableToString(t));
           }
           finally {
                   // Always stop the monitor, whether the count was
                   // reached or polling failed.
               monitor.shutdown();
           }
       }
   }


        /** Terminates the specified instances.  Does nothing if the list
         *  is empty.
         *
         * @param instanceIDs EC2 instance IDs to terminate.
         */
   public void terminateInstances(List<String> instanceIDs) {
        try {
            if ( instanceIDs.isEmpty() ) return;
            eops.terminateInstances(instanceIDs);
        }
        catch(Throwable t) {
                // Re-wrap in the narrowest runtime exception type available.
            if ( t instanceof SDBException ) throw new SdbAccessException(t);
            if ( t instanceof EC2Exception ) throw new Ec2OpException(t);
            throw new RunJRunException(t);
        }
   }

       /** Terminates a single EC2 instance.
        *
        * @param instanceID the ID of the instance to terminate.
        */
   public void terminateInstance(String instanceID) {
        try {
            eops.terminateInstance(instanceID);
        }
        catch(EC2Exception x) {
            throw new Ec2OpException(x);
        }
   }

        /** Gets the instance IDs of all running ec2 instances that are owned
         *  by the user (per <tt>awsAccessID</tt>) and that are running
         *  {@link RunnerNode}.
         *
         * @return instance IDs
         */
   public List<String> getNodeInstanceIDs() {

       List<String> ids = new ArrayList<String>();

           // Keep only nodes that are on an EC2 instance and whose
           // instance is in the 'running' state.
       for ( NodeState s : nodeStateTable.currentStates ) {
           if ( s.isOnEc2Instance() &&
                s.instanceState.equals(NodeState.InstanceState.running) ) {
               ids.add(s.instanceID);
           }
       }

       return ids;
   }

       /** If there are no tasks waiting (i.e. with status 'waiting', 'orphan'
        *  or 'missingRunnable'), this methods terminates
        *  any EC2 instances that are idle. If the task queue is
        *  not empty, this method does nothing.
        *  <p>
        *  NOTE(review): this implementation is unfinished (see the
        *  'CONTINUE HERE' marker below) — after determining that no tasks
        *  are waiting, no instances are actually terminated.
        */
   public void terminatedUnneededInstances() {
       try { 
               // Refresh node state until the table reports no more changes.
           while ( nodeStateTable.update() ) {
               Time.nap(100L);
           }

           taskStateTable.update(nodeStateTable.getNodeStates(), sops, qops);

               // Count tasks that still need a node.
           int nWaiting = 0;
           for ( TaskState s : taskStateTable.currentStates ) {
               if ( TaskState.Status.waiting.equals(s.status) ||
                    TaskState.Status.orphan.equals(s.status) ||
                    TaskState.Status.missingRunnable.equals(s.status) ) {
                   ++nWaiting;
               }
           }

           if ( nWaiting > 0 ) return;

           // CONTINUE HERE
       }
       catch(SDBException sx) { throw new SdbAccessException(sx); }
       catch(EC2Exception cx) { throw new Ec2OpException(cx); }

   }

        // ---------- Upload jars ---------------------------

        /** Uploads the jar files in the specified directory to S3.
         *  Only jars that are not already in S3 (with the specified prefix)
         *  or which have changed are uploaded.
         *
         * @param jarBucketName the bucket that will receive the jars.
         * @param jarS3KeyPrefix the S3 key for each jar is its filename
         *  prefixed with this string.
         * @param jarDir the jar source directory.  All jars in this directory
         *  will be uploaded, unless an identical object already exists in S3.
         */
     public void uploadJars(String jarBucketName, String jarS3KeyPrefix,
                            File jarDir) {        
        sops.uploadJars(jarBucketName, jarS3KeyPrefix, jarDir);
     }

        /** Uploads the jar files in the specified directory to S3, using
         *  the default jar key prefix.
         *  Only jars that are not already in S3 (with the default prefix)
         *  or which have changed are uploaded.
         *
         * @param jarBucketName the bucket that will receive the jars.
         * @param jarDir the jar source directory.
         */
     public void uploadJars(String jarBucketName, File jarDir) {
         uploadJars(jarBucketName,Constants.JAR_S3_KEY_PREFIX_DEFAULT,jarDir);
     }

        // -------- Submit methods -----------------------

        /** Submits a list of tasks for execution on <i>RunJRun</i>
         *  compute nodes.
         *
         * @param tasks these objects must be <tt>Serializable</tt> as well
         *  as <tt>Runnable</tt>.
         * @param s3ObjectNamePrefix <tt>submit()</tt> generates a random
         *  name for your task object which is guaranteed to be unique.
         *  You can specify a prefix for this name, or set this param
         *  to <tt>null</tt>.  The S3 key for each task object is
         *  &lt;default raw object prefix&gt;/[&lt;s3ObjectNamePrefix&gt;]/
         *  &lt;unique object name&gt;.
         * @return descriptions of each task.
         * @throws NotSerializableException
         */
    public List<TaskDescription> submit(List<Runnable> tasks,
                                        String s3ObjectNamePrefix)
        throws NotSerializableException {
        try {
            List<S3Bko> rawObjects = new ArrayList<S3Bko>();
            List<TaskDescription> descriptions =
                new ArrayList<TaskDescription>();

                // Build matching input/output keys (same timestamp) and a
                // description + serializable wrapper for each task.
            for ( Runnable task : tasks ) {
                long now = System.currentTimeMillis();
                String inKey = buildDefaultS3Key(task,
                                                 inputS3KeyPrefix,
                                                 now,
                                                 s3ObjectNamePrefix);
                String outKey = buildDefaultS3Key(task,
                                                  outputS3KeyPrefix,
                                                  now,
                                                  s3ObjectNamePrefix);
                descriptions.add(
                    new TaskDescription(bucketName, inKey, outKey));
                rawObjects.add(
                    new S3Bko(bucketName,inKey,(Serializable) task));
            }

                // Write all raw tasks to S3 first ...
            sops.add(rawObjects,maxS3PutWaitMillis);

                // ... then queue the descriptions and record each task
                // in the task state table.
            for ( TaskDescription d : descriptions ) {
                qops.queue.sendMessage(d.toMessageString());
                taskStateTable.recordNewTask(d,System.currentTimeMillis());
            }

            return descriptions;
        }
        catch(Throwable t) {
            if ( t instanceof NotSerializableException )
                throw (NotSerializableException) t;
            throw new RunJRunException(t);
        }
    }

        /** Submits a list of tasks for execution on <i>RunJRun</i>
         *  compute nodes, with no S3 object name prefix.
         *
         * @param tasks these objects must be <tt>Serializable</tt> as well
         *  as <tt>Runnable</tt>.
         * @return descriptions of each task.
         * @throws NotSerializableException
         */
    public List<TaskDescription> submit(List<Runnable> tasks)
        throws NotSerializableException {
        return submit(tasks, null);
    }

        /** Submits a task for execution on a <i>RunJRun</i>
         *  compute node.
         *
         * @param task this object must be <tt>Serializable</tt> as well
         *  as <tt>Runnable</tt>.
         * @param s3ObjectNamePrefix <tt>submit()</tt> generates a random
         *  name for your task object which is guaranteed to be unique.
         *  You can specify a prefix for this name, or set this param
         *  to <tt>null</tt>.  The S3 key for a task object is
         *  &lt;default raw object prefix&gt;/[&lt;s3ObjectNamePrefix&gt;]/
         *  &lt;unique object name&gt;.
         * @return description of the task (a one-element list).
         * @throws NotSerializableException
         */
    public List<TaskDescription> submit(Runnable task,
                                        String s3ObjectNamePrefix)
        throws NotSerializableException {
        List<Runnable> tasks = new ArrayList<Runnable>();
        tasks.add(task);
            // Bug fix: this previously called submit(task,s3ObjectNamePrefix)
            // -- i.e. this method itself -- causing infinite recursion and a
            // StackOverflowError.  Delegate to the List overload instead.
        return(submit(tasks,s3ObjectNamePrefix));
    }


        /** Submits a task for execution on a <i>RunJRun</i>
         *  compute node, with no S3 object name prefix.
         *
         * @param task this object must be <tt>Serializable</tt> as well
         *  as <tt>Runnable</tt>.
         *
         * @return description of the task (a one-element list).
         * @throws NotSerializableException
         */
    public List<TaskDescription> submit(Runnable task)
        throws NotSerializableException {
        return(submit(task,null));
    }

       // ------ Retrieve methods -------------------------

        /** Retrieves the 'cooked' version of a task object (from S3).
         *  This method blocks, polling S3, until the cooked task is
         *  available.
         *  <b>Warning</b>: this method can block forever.
         *
         * @param des description of the task to retrieve.
         * @return cooked task.
         * @throws S3AccessException
         */   
    public Runnable getCooked(TaskDescription des)
        throws S3AccessException {
        try {
            for (;;) {
                Runnable cooked = get(des);
                if ( cooked != null ) return cooked;
                    // Not in S3 yet; nap and poll again.
                Time.nap(Constants.GET_COOKED_TASK_NAP_MILLIS);
            }
        }
        catch(Exception x) {
            throw new S3AccessException(x);
        }
    }

        /** Retrieves the 'cooked' versions of task objects (from S3).
         *  This method blocks, polling S3, until all the cooked tasks
         *  are available.
         *  <b>Warning</b>: this method can block forever.
         *
         * @param des descriptions of the tasks to retrieve.
         * @return 'cooked' task objects.
         * @throws S3AccessException
         */
    public List<Runnable> getCooked(List<TaskDescription> des)
        throws S3AccessException {
        try {
            List<TaskDescription> needed =
                new ArrayList<TaskDescription>(des);
            List<Runnable> cooked = new ArrayList<Runnable>();

                // Each pass retrieves whatever is available and keeps the
                // rest for the next pass.  (This replaces the previous
                // gnu.trove index bookkeeping + getForIndexes(); behavior
                // is identical but the retained list is built directly.)
            while ( !needed.isEmpty() ) {
                List<TaskDescription> stillMissing =
                    new ArrayList<TaskDescription>();
                for ( TaskDescription d : needed ) {
                    Runnable r = get(d);
                    if ( r != null ) cooked.add(r);
                    else stillMissing.add(d);
                }
                needed = stillMissing;
                if ( !needed.isEmpty() ) {
                    Time.nap(Constants.GET_COOKED_TASK_NAP_MILLIS);
                }
            }
            return cooked;
        }
        catch(Exception x) { throw new S3AccessException(x); }
    }

        /** Deletes the 'cooked' versions of task objects from S3.
         * 
         * @param des descriptions of the tasks whose cooked objects are
         *  to be deleted.
         */
    public void deleteCooked(List<TaskDescription> des) {
        try {
            for ( TaskDescription d : des ) {
                    // Use the bucket recorded in the description (as
                    // getCooked() does) rather than this client's bucket
                    // field; the two are normally identical, but the
                    // description is authoritative for its own objects.
                sops.deleteObject(d.outputBucketName,
                                  d.outputS3Key,
                                  true);
            }
        }
            // Wrap in RunJRunException for consistency with the rest of
            // this class; it is a RuntimeException, so existing callers
            // catching RuntimeException are unaffected.
        catch(Throwable x) { throw new RunJRunException(x); }
    }


         // ------------------------------------------

         /** Clears the node and task tables.  This method also
          *  clears the SQS message queue of tasks and deletes any
          *  raw tasks in S3.  Cooked tasks are not affected.
          */
    public void cleanup() {
        try {
            clearAllNodes();
            clearAllTasksEverywhere();
        }
        catch(Exception xxx) { throw new RunJRunException(xxx); }
    }

        /** Returns the EC2 operations wrapper used by this client.
         *
         * @return the {@link Ec2Ops} instance.
         */
    public Ec2Ops getEops() { return(eops); }

        // ----------- Static utils -----------------

        /** Gets the max jvm heap for an instance type, in gigabytes.
         *  The heap is assigned all available ram for the type.
         *
         * @param instanceType the EC2 instance (hardware) type.
         * @return max heap size in GB
         */
    public static int chooseMaxHeapGb(InstanceType instanceType) {

            // Parallel arrays: supported instance types and the heap
            // (== available ram) for each, probed in the same order as
            // before: m1.small, m1.large, m1.xlarge, c1.medium, c1.xlarge,
            // m2.xlarge, m2.2xlarge, m2.4xlarge.
        InstanceType[] types = {
            InstanceType.DEFAULT,
            InstanceType.LARGE,
            InstanceType.XLARGE,
            InstanceType.MEDIUM_HCPU,
            InstanceType.XLARGE_HCPU,
            InstanceType.XLARGE_HMEM,
            InstanceType.XLARGE_DOUBLE_HMEM,
            InstanceType.XLARGE_QUAD_HMEM };
        int[] heapGb = { 1, 7, 15, 1, 7, 17, 34, 68 };

        for ( int i = 0; i < types.length; i++ ) {
            if ( types[i].equals(instanceType) ) return heapGb[i];
        }

        throw new RunJRunException("Undefined instanceType=" + instanceType);
    }
    
         // ------------------------------------------

    private String buildDefaultS3Key(Object r, String prefix, long curTime,
                                     String objectNamePrefix) {

        String pre = prefix  + "/";
        if ( objectNamePrefix != null )
            pre += objectNamePrefix + "-";
        pre += r.getClass().getSimpleName() + "-" +
                Time.toDateTimeMilliKeyString(curTime) + "-";

        return(pre +
               Integer.toString(StringUtil.getNextIndex(indexHt,pre)) +
               "." + S3Ops.GZSER);
    }

    private List<TaskDescription> getForIndexes(List<TaskDescription> src,
                                             int[] idxs) {

        List<TaskDescription> keep = new ArrayList<TaskDescription>();
        for ( int i : idxs ) {
            keep.add(src.get(i));
        }
        return(keep);
    }

    private Runnable get(TaskDescription des) throws Exception {

        if ( sops.objectExists(des.outputBucketName,
                               des.outputS3Key,
                               true) ) {
            return( (Runnable)
                    sops.getObject(des.outputBucketName, des.outputS3Key,
                                   true) );
        }
        else return(null);
    }
}
