#!/usr/bin/env python

import cPickle
import getpass
import glob
import optparse
import os
import sys
import subprocess
import time

class BadCommandLineError(Exception):
    """Command-line validation error carrying a process exit code.

    Attributes:
        msg: human-readable description of the problem.
        retcode: exit status the process should terminate with.
    """

    def __init__(self, msg, retcode):
        # Chain to Exception.__init__ so e.args is populated as callers
        # (and generic handlers/loggers) expect; the original skipped this.
        super(BadCommandLineError, self).__init__(msg)
        self.msg = msg
        self.retcode = retcode

    def __str__(self):
        return self.msg


class JobQueue(object):
    """Round-robin queue of job script tags awaiting submission.

    `script_count_dict` maps a script tag to the number of successful
    runs still required for that tag.  Job script filenames are rebuilt
    as '<tag>.<count>x.sh', matching the files in the scripts directory.
    """

    def __init__(self, script_count_dict):
        self._scd = script_count_dict

        # Sorted list of tags that still need runs.  sorted() replaces the
        # Python-2-only keys()/.sort() pair (dict.keys() is a view in Py3).
        self._incomplete = sorted(self._scd)

        # _nji: index into self._incomplete of the next job to hand out.
        self._nji = 0

    def update_completed(self, completed_jobs_dict):
        """Drop tags whose completed count has reached the required count.

        `completed_jobs_dict` maps tag -> number of completed runs.

        Raises:
            JobQueueComplete: when no incomplete tags remain.
        """
        for key in completed_jobs_dict:
            if key not in self._incomplete:
                continue

            if completed_jobs_dict[key] >= self._scd[key]:
                self._scd[key] = 0

                # Remove the finished tag while keeping the round-robin
                # cursor pointing at the same *next* tag.  (The original
                # advanced the cursor before removal when the finished tag
                # sat exactly at the cursor, which skipped the next tag,
                # and never adjusted it for removals before the cursor.)
                idx = self._incomplete.index(key)
                self._incomplete.pop(idx)
                if idx < self._nji:
                    self._nji -= 1

                if len(self._incomplete) == 0:
                    raise JobQueueComplete

                if self._nji >= len(self._incomplete):
                    self._nji = 0

    def get_next_n_jobs(self, n):
        """Return a sorted list of up to `n` script filenames, round-robin.

        Tags repeat within one call if `n` exceeds the number of
        incomplete tags is false -- n is clamped to that number first.
        """
        if n > len(self._incomplete):
            n = len(self._incomplete)

        joblist = []

        while len(joblist) < n:
            try:
                tag = self._incomplete[self._nji]
                job = '{0}.{1}x.sh'.format(tag, self._scd[tag])
            except IndexError:
                # Diagnostic for a cursor that ran off the end; should not
                # happen now that removal keeps _nji in range.  (stderr
                # instead of the original Python-2-only print statement.)
                sys.stderr.write('EXCEPTION {0} {1} {2}\n'.format(
                    len(self._incomplete), self._nji, len(joblist)))
                raise

            joblist.append(job)
            self._nji += 1
            if self._nji >= len(self._incomplete):
                self._nji = 0

        joblist.sort()
        return joblist

            
class JobQueueComplete(Exception):
    """Raised by JobQueue.update_completed once every tag has reached its goal count."""
    pass
        
            
def script_counts(scripts_dir):
    """Map each script tag to its required run count.

    Scans `scripts_dir` for files named 'tag.Nx.sh' and returns
    {tag: N} with N parsed as an int (the trailing 'x' is stripped).
    """
    counts = {}
    for path in glob.iglob(os.path.join(scripts_dir, '*.sh')):
        tag, count_field, _ext = os.path.basename(path).rsplit('.', 2)
        # count_field looks like '12x'; drop the trailing 'x'.
        counts[tag] = int(count_field[:-1])
    return counts

def check_queue():
    """Return the number of SLURM queue entries for the current user.

    Runs `squeue -a -u <user>` and counts the output lines containing
    the username (squeue prints one row per job plus a header; only the
    rows with the username match).

    Raises:
        OSError: if squeue exits with a non-zero status.
    """
    username = getpass.getuser()

    cmdargs = 'squeue -a -u {0}'.format(username)

    # universal_newlines=True makes stdout/stderr text on both Python 2
    # and 3; the original compared str against bytes under Python 3.
    showq_pipe = subprocess.Popen(cmdargs.split(),
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  universal_newlines=True)

    stdout, stderr = showq_pipe.communicate()

    if showq_pipe.returncode != 0:
        sys.stderr.write("Subprocess call failed -- `{0}':".format(cmdargs))
        sys.stderr.write(str(stderr) + '\n')
        sys.stderr.write("Aborting...\n")
        # Carry a message instead of the original bare `raise OSError`.
        raise OSError("`{0}' exited with status {1}".format(
            cmdargs, showq_pipe.returncode))

    lines = [line for line in stdout.split('\n') if username in line]

    return len(lines)
    
def update_status(status, output_dir):
    """Recount completed runs for each tag by scanning `output_dir`.

    Mutates `status` in place: status[tag] becomes the number of entries
    in `output_dir` whose path contains `tag` as a substring.

    NOTE(review): a tag that is a substring of another tag (or of the
    output_dir path itself) also matches those entries -- assumed not to
    occur with the tag.Nx.sh naming scheme; verify against the scripts.
    """
    dirlisting = glob.glob(os.path.join(output_dir, '*'))

    # sorted() replaces the Python-2-only keys()/.sort() idiom, which is
    # an AttributeError on Python 3 (dict.keys() returns a view).
    for key in sorted(status):
        status[key] = sum(1 for path in dirlisting if key in path)
        


def submit(joblist, scripts_dir):
    """Submit each script in `joblist` (relative to `scripts_dir`) via sbatch.

    Failed submissions are reported to stderr and skipped; the remaining
    jobs are still submitted.  An empty `joblist` is a no-op (the
    original crashed on joblist[0] in the summary line).
    """
    if not joblist:
        return

    for job in joblist:
        script = os.path.join(scripts_dir, job)
        cmdargs = 'sbatch -p shared {0}'.format(script)

        sbatch_pipe = subprocess.Popen(cmdargs.split(),
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)

        stdout, stderr = sbatch_pipe.communicate()

        if sbatch_pipe.returncode != 0:
            sys.stderr.write("Subprocess call failed -- `{0}':".format(cmdargs))
            sys.stderr.write(str(stderr) + '\n')
            sys.stderr.write("Continuing...\n")
            # The original `break` abandoned the rest of the batch even
            # though the message promised to continue.
            continue

    # Single formatted write works identically on Python 2 and 3
    # (the original used a Python-2-only print statement).
    sys.stdout.write('{0} jobs submitted b/t {1} and {2} at {3}\n'.format(
        len(joblist), joblist[0], joblist[-1], time.asctime()))
            

def main():
    '''
    slurm_manage_jobs.py [opts] scripts_dir output_dir

    Starts a daemon which monitors the SLURM shared queue and keeps it filled with jobs
    from `scripts_dir'. 
    
    The scripts in `scripts_dir' are expected to be named according to the form:
    
    tag.Nx.sh

    where `tag' is a unique identifier and `N' is the number of successful jobs required.

    '''

    op = optparse.OptionParser(usage=main.__doc__)

    opts, args = op.parse_args()

    # --- argument validation ------------------------------------------
    if len(args) < 2:
        raise BadCommandLineError("Please supply a path to a directory of MOAB scripts and the directory for output", -1)

    if not os.path.isdir(args[0]):
        raise BadCommandLineError("`{0}' is not a directory".format(args[0]), -2)

    if not os.path.isdir(args[1]):
        # Fixed format index: the original used {1} with a single format
        # argument, raising IndexError instead of reporting the bad path.
        raise BadCommandLineError("`{0}' is not a directory".format(args[1]), -2)

    scripts_dir = os.path.abspath(args[0])
    output_dir  = os.path.abspath(args[1])

    # status: tag -> number of completed runs observed in output_dir.
    status = {}
    for script in glob.iglob(os.path.join(scripts_dir, '*.sh')):
        tag, count, sh = os.path.basename(script).rsplit('.', 2)
        status[tag] = 0

    # goal: tag -> number of runs required (the N in tag.Nx.sh).
    goal = script_counts(scripts_dir)

    njobs_total = sum(goal.values())

    # Keep at most this many of our jobs in the SLURM queue at once.
    max_jobs = 1000

    job_queue = JobQueue(goal)

    # --- main polling loop --------------------------------------------
    while True:
        njobs_running = check_queue()

        open_slots = max_jobs - njobs_running

        # Explicit `> 0`: the original truthiness test also fired for
        # *negative* slot counts (queue above max_jobs), which handed
        # get_next_n_jobs a negative n and crashed submit on an empty
        # joblist.
        if open_slots > 0:
            sys.stdout.write('calling update_status {0}\n'.format(time.asctime()))
            update_status(status, output_dir)
            sys.stdout.write(
                'update_status call complete at {0}; {1}/{2} jobs complete\n'.format(
                    time.asctime(), sum(status.values()), njobs_total))

            job_queue.update_completed(status)

            joblist = job_queue.get_next_n_jobs(open_slots)

            submit(joblist, scripts_dir)

        sys.stdout.flush()
        time.sleep(60)



if __name__ == "__main__":
    try:
        sys.exit(main())
    # `except X as e` is valid on Python 2.6+ and required on Python 3;
    # the original `except X, e` form is a SyntaxError under Python 3.
    except BadCommandLineError as e:
        sys.stderr.write(str(e) + "\n")
        sys.stderr.write("See `hpc_manage_jobs2.py -h' for more.\n")
        sys.stderr.flush()
        sys.exit(e.retcode)
