#!/usr/bin/env python
"""
Generate scripts that will be passed as user data to EC2 instances,
and the local scripts that will start the instances themselves.
"""
import ConfigParser
import datetime
import logging
import optparse
import os
import signal
from subprocess import Popen, PIPE, STDOUT
import sys
import time
import traceback
#
import S3

# Module-level state, filled in by main(): parsed config file(s) and
# parsed command-line parameters.
g_settings = None
g_params = None

# shorthand: current local time as an ISO-8601 string
_isonow = lambda : datetime.datetime.now().isoformat()

# string with date of run, down to microsecond (fixed at import time so
# every script generated by one invocation shares the same S3 path)
g_run_date = _isonow()

# Logger setup; the level is chosen later in main() from the -v count.
g_log = logging.getLogger("run_ec2")
_h = logging.StreamHandler()
_h.setFormatter(logging.Formatter("%(levelname)-6s %(name)s.%(funcName)-20s | %(message)s"))
_h.setLevel(logging.NOTSET)
g_log.addHandler(_h)

# shorthand for building file paths
_opj = os.path.join

# These should match [headings] in the config file
SERVER = 'server'
MASTER = 'master'
CONSUMER = 'consumer'
WORKER = 'worker'

# For verbosity flags: suffix of the per-component option dest, e.g. 'consumer_vb'
VB_SFX = '_vb'

# Use for data file shebang
# NOTE(review): SHEBANG appears unused in this file -- write_user_data()
# uses NODE_HDR instead; confirm before removing.
SHEBANG = "#!/bin/sh"

# Special S3 key for doneness
DONE_KEY = '__done__'

def signal_stop(signo, frame):
    """Signal handler (installed in main() for HUP/INT/TERM): log the
    signal number and terminate everything via shutdown(), exit code 1."""
    g_log.warn("abort on signal = {sig}".format(sig=signo))
    shutdown(code=1)

class Parameters:
    """Mutable bag of run parameters with combined getter/setter accessors.

    Values normally come from the optparse options dict (see main()).
    List-valued entries accumulate appended values; see _setget().
    Attribute reads of unknown names without an underscore fall through
    to the values dict via __getattr__ (e.g. self.nw, self.norun).
    """
    # keys into self.values
    RES_ID = 'resid'      # list of EC2 reservation ids started so far
    SRV_IP = 'serverip'   # server instance IP address
    NW = 'nw' # num workers
    NN = 'nn' # num nodes
    NODE = 'node'         # index of the node currently being started
    DFILES = 'datafile'   # list of generated user-data file paths

    def __init__(self, values=None):
        """Initialize defaults, then overlay 'values' (a dict) if given.

        The default is None rather than a literal {} so a mutable default
        object is never shared across calls (classic Python pitfall).
        """
        self.values = { self.RES_ID: [ ],
                        self.SRV_IP: "",
                        self.NODE : "U",
                        self.NW : 0,
                        self.NN : 0,
                        self.DFILES : [ ] }
        if values:
            self.values.update(values)

    def _setget(self, value, key):
        """Getter/setter combo: with value=None, return values[key].
        Otherwise set it -- for list-valued keys a non-list value is
        appended while a list replaces the whole entry -- and return
        self so calls can be chained.
        """
        if value is None:
            return self.values[key]
        if isinstance(self.values[key], list) and not isinstance(value, list):
            self.values[key].append(value)
        else:
            self.values[key] = value
        return self # for chaining

    def workers_per_node(self, node=0):
        """Number of workers that node 'node' should run.  Workers are
        divided evenly; the first (workers % nodes) nodes get one extra."""
        workers, nodes = self.values[self.NW], self.values[self.NN]
        wpn = workers // nodes
        if node < workers - wpn * nodes:
            # this node gets an 'extra' worker
            wpn += 1
        return wpn

    def res_ids(self, value=None):
        """Get the reservation-id list, or append/replace (see _setget)."""
        return self._setget(value, self.RES_ID)

    def server_ip(self, value=None):
        """Get or set the server IP address."""
        return self._setget(value, self.SRV_IP)

    def cur_node(self, value=None):
        """Get or set the index of the node currently being started."""
        return self._setget(value, self.NODE)

    def last_run(self):
        """Index of the final run, as a string (runs - 1)."""
        return "{0:d}".format(self.values['runs'] - 1)

    def vb_flags(self, component):
        """Verbose flags for component, one of MASTER, CONSUMER, or WORKER."""
        key = component + VB_SFX
        return ' -v ' * self.values[key]

    def data_files(self, value=None):
        """Get the generated user-data file list, or append/replace."""
        return self._setget(value, self.DFILES)

    def __getattr__(self, key):
        # Fallback for attribute lookup misses: names containing '_' are
        # read from the instance dict (note: raises KeyError, not
        # AttributeError, when absent); all other names are read from the
        # values dict.
        if '_' in key:
            return self.__dict__[key]
        return self.values[key]

def bq(cmd, *args):
    """Run 'cmd' with 'args' and capture its combined stdout/stderr,
    like shell backquotes."""
    proc = Popen([cmd] + list(args), stdout=PIPE, stderr=STDOUT)
    out, _ = proc.communicate()
    return out

def get_server_ips():
    """Return a dictionary of all server ip addrs, keyed by reservation ID.

    Parses 'ec2din' (ec2-describe-instances) output: a RESERVATION line
    sets the current reservation id; an INSTANCE line whose AMI matches
    the configured server AMI and whose state field is 'running'
    contributes its address.  In --norun mode a fake one-entry mapping
    is returned so the caller's wait loop terminates immediately.
    """
    if g_params.norun:
        return { 'server-fake_reservation' : '0.0.0.0' }
    result = { }
    cur_res = None
    srv_ami = g_settings.get(SERVER, 'ami')
    for line in bq('ec2din').split('\n'):
        fields = tuple(line.split())
        if not fields:
            continue
        if fields[0] == 'RESERVATION':
            cur_res = fields[1]
        elif (cur_res and fields[0] == 'INSTANCE' and len(fields) > 5 and
              fields[2] == srv_ami and fields[5] == 'running'):
            # len(fields) guard: short records (e.g. TAG lines, truncated
            # INSTANCE lines) previously risked an IndexError here.
            # NOTE(review): fields[-4] is assumed to be the address column
            # of a running INSTANCE record -- confirm against the ec2din
            # output format of the tools version in use.
            result[cur_res] = fields[-4]
    return result

def start_component(section, vb=""):
    """Start an EC2 instance for config section 'section' (SERVER, MASTER,
    CONSUMER, or WORKER).

    Writes a user-data shell script to a timestamped local file, records
    that file in g_params (so shutdown() can remove it later), then runs
    'ec2run' with it.  'vb' is the verbosity-flag string for the
    component's command line.  Returns the reservation id parsed from the
    ec2run output; raises RuntimeError if none is found.
    """
    g_log.info("start {comp}".format(comp=section))
    ami, type_ = g_settings.get(section, 'ami'), g_settings.get(section, 'type')
    data_path = _opj('.', "{comp}-{ts:f}-data.sh".format(
        comp=section, ts=time.time()))
    # 'with' guarantees the file is closed even when write_user_data
    # raises (the previous open/close pair leaked the handle on error)
    with open(data_path, 'w') as data_file:
        write_user_data(data_file, section, vb=vb)
    g_params.data_files(data_path)
    action = ('ec2run', ami, '-t', type_, '-f', data_path)
    if g_params.norun:
        print("RUN: {action}".format(action=action))
        output = ["RESERVATION {comp}-fake_reservation".format(comp=section)]
    else:
        output = bq(*action).split('\n')
    rid = None
    for line in output:
        if line.startswith('RESERVATION'):
            rid = line.split()[1]
            break
    if rid is None:
        raise RuntimeError("Error starting {comp} instance: {msg}".format(
            comp=section, msg=output))
    g_log.info("run {comp} reservation = {rid}".format(comp=section, rid=rid))
    return rid

# Shell-script fragments assembled by write_user_data() into each
# instance's user-data (startup) script.
NODE_HDR = "#!/bin/bash"
# One-time init: fetch the latest source before the run loop.
# (NODE_INIT begins with a newline, which also terminates NODE_HDR above.)
NODE_INIT = """
export SWBASE="/sw/mtags-amqp/src"
cd $SWBASE
hg pull
hg update
"""
# Opens the per-run loop; the matching 'done' is appended as 'post' in
# write_user_data().  {runs} is filled from g_params.runs.
NODE_LOOP = """# Main loop
for (( i=0 ; i < {runs:d} ; ++i )); do
export DPIPE_RUN=$i
"""

# Path of the pipeline driver executable on the instance (relative to $SWBASE).
PROG = "./dpipe"
def write_user_data(ofile, comp, vb=""):
    """Write an EC2 user-data (startup) shell script to 'ofile' for
    component type 'comp' (one of SERVER, MASTER, CONSUMER, WORKER).

    The script is built from three parts: 'pre' (one-time init), 'cmd'
    (the body of the per-run loop opened by NODE_LOOP) and 'post' (text
    after the loop).  'vb' is a verbosity-flag string spliced into the
    component's command line.  In --norun mode the assembled script is
    printed to stdout for inspection instead of written (only NODE_HDR
    has been written to 'ofile' by then).
    """
    ofile.write(NODE_HDR)
    pre = NODE_INIT
    cmd = NODE_LOOP.format(runs=g_params.runs)
    post = "done # end of loop\n"
    consumer_csv_file = "consumer-out.csv"
    if comp == SERVER:
        # The server runs no pipeline program.  Instead it starts vmstat
        # and file-handle probes, waits until the consumer's final-run
        # output appears in S3, then uploads the probe data plus a
        # DONE_KEY marker that wait() polls for.
        vmstat_file = '/tmp/server-vmstat.out'
        filenr_file = '/tmp/server-filenr.csv'
        pre += "/usr/bin/vmstat 5 >{0} &\n".format(vmstat_file)
        pre += "vmjob=$!\n"
        cmd = """printf "alloc free_alloc max\n" >{0}
while [ 1 ]
do
  cat /proc/sys/fs/file-nr >>{0}
  sleep 5
done &
""".format(filenr_file)
        cmd += "fsjob=$!\n"
        # wait for consumer to finish
        cmd += s3_wait_final_script(consumer_csv_file)
        # kill probes
        cmd += "kill -9 $fsjob\nkill -9 $vmjob\n"
        # save probe data to S3
        # (run="" puts these above the per-run directories)
        post = s3_save_script(infile=vmstat_file, name='server-vmstat.out', run="")
        post += s3_save_script(infile=filenr_file, name='server-filenr.csv', run="")
        # save dummy file to S3 indicating all-done
        sdata = "datetime.datetime.now().isoformat()"
        post += s3_save_script(name=DONE_KEY, data=sdata, imports=("datetime",), run="")
    elif comp == CONSUMER:
        # Consumer: run dpipe in consumer mode each iteration, append the
        # run parameters as '#' comments to the CSV, then upload it.
        consumer_ofile = '/tmp/consumer-run$DPIPE_RUN.out'
        g_params.values['ofile'] = consumer_ofile
        cmd += "{prog} c --host={server} --stages={ns:d} --tasks={nt:d} " \
              "--workers={nw:d} --procs={np:d} --file={ofile} {vb_flags} " \
              "> /tmp/consumer-run$DPIPE_RUN.log 2>&1\n".format(
            prog=PROG, server=g_params.server_ip(), ns=g_params.ns,
            nt=g_params.nt, nw=g_params.nw, np=g_params.np,
            ofile=g_params.ofile, vb_flags=vb)
        # statements to dump params into comments at bottom of output file
        cmd += "cat >>{ofile} <<EOF\n".format(ofile=g_params.ofile)
        for key, value in g_params.values.iteritems():
            cmd += "# {key} = {value}\n".format(key=key, value=str(value))
        cmd += "EOF\n"
        # save output file to s3
        cmd += s3_save_script(infile=g_params.ofile, name=consumer_csv_file)
    elif comp == MASTER:
        # Master (producer): run dpipe in producer mode each iteration.
        cmd += "{prog} p --host={server} --tasks={nt:d} " \
              "--workers={nw:d} --procs={np:d} --size={sz:d} {vb_flags} " \
              "> /tmp/producer-run$DPIPE_RUN.log 2>&1\n".format(
            prog=PROG, server=g_params.server_ip(), nt=g_params.nt,
            nw=g_params.nw, np=g_params.np, sz=g_params.sz, vb_flags=vb)
        cmd += "sleep 10\n" # why not
    elif comp == WORKER:
        # Worker node: launch this node's share of the workers; all but
        # the last are backgrounded ('&') so the loop iteration blocks on
        # the final worker only.
        node = g_params.cur_node()
        wpn = g_params.workers_per_node(node)
        g_log.debug("wpn={0} workers={1:d} nodes={2:d}".
                    format(wpn, g_params.nw, g_params.nn))
        for worker in xrange(wpn):
            is_last = worker == wpn - 1
            cmd += "export DPIPE_NODE={n:d}-{w:d}\n".format(n=node, w=worker)
            cmd += "{prog} w --host={server} --stages={ns:d} " \
                  "--procs={np:d} --size={sz:d} --delay={slp:f} --sigma={sig:f} {tx} {vb_flags} "\
                  "> /tmp/worker{w:d}-run$DPIPE_RUN.log 2>&1 {bg}\n".format(
                prog=PROG, server=g_params.server_ip(), ns=g_params.ns,
                np=g_params.np, sz=g_params.sz, slp=g_params.slp, sig=g_params.sig,
                tx=('', '--tx')[g_params.tx], bg=('&','')[is_last], w=worker,
                vb_flags=vb)
    startup_script = "{pre}\n{cmd}\n{post}\n".format(pre=pre, cmd=cmd, post=post)
    if g_params.norun:
        # debug mode: show the would-be script instead of writing it
        dashes = ("=" * 60) + '\n'
        print("{d}{comp}\n{d}{script}{d}end {comp}\n".format(
            comp=comp, script=startup_script, d=dashes))
    else:
        ofile.write(startup_script)

def s3_path(run=None):
    """Return the S3 key prefix ("path") for this run.

    run=None  -> substitute the shell variable $DPIPE_RUN for the run part.
    run=""    -> omit the run part entirely, i.e. the prefix names the
                 pseudo-directory one level above the per-run directories.
    otherwise -> place the given run value in the path verbatim.
    """
    if run is None:
        run = "$DPIPE_RUN/"
    elif run:
        run += "/"
    label_str = g_params.label + '-' if g_params.label else ''
    return "{prefix}n{nn:d}w{nw:d}p{np:d}m{sz:d}d{slp}s{sig}/{dt}/{_run}".format(
        prefix=label_str, dt=g_run_date, _run=run, **g_params.values)

def s3_save_script(name='', infile=None, data=None, imports=None, run=None):
    """Build a shell snippet (an inline 'python -c' command) that uploads
    either the contents of local file 'infile' or the Python expression
    'data' to S3 as key 'name' under the prefix for this run (s3_path()).

    'imports' lists extra module names the data expression needs; 'run'
    is passed through to s3_path().  Raises ValueError when neither
    'infile' nor 'data' is supplied.
    """
    params = dict(akey=g_settings.get('auth', 'access_key'),
                  skey=g_settings.get('auth', 'secret_key'),
                  bucket='amqp_results',
                  item=s3_path(run=run) + name)
    if infile is not None:
        get_data = "data = S3.S3Object(open('{0}','r').read())".format(infile)
    elif data is not None:
        get_data = "data = {0}".format(data)
    else:
        raise ValueError("no data to save: 'infile' and 'data' both empty")
    imports_str = "," + ','.join(imports) if imports else ""
    lines = [
        "python -c \"import S3" + imports_str,
        "conn = S3.AWSAuthConnection('{akey}','{skey}')",
        get_data,
        "conn.put('{bucket}', '{item}', data, {{'x-amz-acl':'public-read',",
        "'Content-Type': 'text/plain' }})\"\n",
    ]
    script = '\n'.join(lines).format(**params)
    g_log.debug("s3-save script: " + script)
    return script

def s3_wait_final_script(name):
    """Build a shell snippet (an inline 'python -c' command) that polls
    S3 once per second until the final run's directory contains a key
    named 'name'.  The final run number is g_params.runs - 1, obtained
    via last_run().
    """
    params = dict(akey=g_settings.get('auth', 'access_key'),
                  skey=g_settings.get('auth', 'secret_key'),
                  bucket='amqp_results',
                  item=s3_path(run=g_params.last_run()) + name)
    body = '\n'.join((
        'python -c "import S3, time',
        "conn = S3.AWSAuthConnection('{akey}','{skey}')",
        "while conn.get('{bucket}', '{item}').http_response.status == 404:",
        "    time.sleep(1)"))
    script = body.format(**params) + '\n"\n'
    g_log.debug("s3-wait script: {0}".format(script))
    return script

def stop_instances(res_id_list):
    """Terminate every instance whose reservation id appears in
    'res_id_list'.  No-op under --norun or --noterm.  Raises RuntimeError
    when ec2-terminate-instances does not report 'shutting-down'.
    """
    if g_params.norun or g_params.noterm:
        return
    wanted = set(res_id_list)
    instances = [ ]
    stopme = False
    for line in bq('ec2din').split('\n'):
        fields = line.split()
        if not fields:
            continue
        if fields[0] == 'RESERVATION':
            # a RESERVATION record starts a new group; remember whether
            # its instances should be terminated
            stopme = fields[1] in wanted
        elif fields[0] == 'INSTANCE' and stopme:
            instances.append(fields[1])
    if instances:
        result = bq('ec2-terminate-instances', *instances).strip()
        if not result.endswith('shutting-down'):
            raise RuntimeError("Error stopping instance(s) {inst}: {reason}".
                               format(inst=instances, reason=result))

def startup():
    """Launch all instances: the server first (polling until its IP is
    known), then the master and consumer, then one worker reservation
    per node.  Every reservation id is recorded in g_params.
    Returns 0.
    """
    # the server goes first -- its IP is needed by every other component
    rid = start_component('server')
    g_params.res_ids(rid)
    g_log.info("wait for server to boot up")
    server_ip = None
    while server_ip is None:
        g_log.debug("wait 5 seconds for reservation {rid}".format(rid=rid))
        if not g_params.norun:
            time.sleep(5)
        server_ip = get_server_ips().get(rid)
    g_log.info("server is up. ip = {0}".format(server_ip))
    g_params.server_ip(server_ip)
    # master and consumer next
    for comp in (MASTER, CONSUMER):
        g_params.res_ids(start_component(comp, vb=g_params.vb_flags(comp)))
    # finally one worker reservation per node
    for node in xrange(g_params.nn):
        g_params.cur_node(node)
        g_params.res_ids(start_component(WORKER, vb=g_params.vb_flags(WORKER)))
    g_log.debug("reservation ids: {0}".format(' '.join(g_params.res_ids())))
    return 0

def wait(sleep_interval=30):
    """Poll S3 until the server uploads the DONE_KEY marker for this
    experiment, then return.

    Transient S3 errors are retried up to max_retries times (5s apart);
    after that the wait is abandoned.  In --norun mode just print what
    would be waited for.
    """
    g_log.info("wait for experiment to end")
    path = s3_path(run="")
    bucket = 'amqp_results'
    key = path + DONE_KEY
    conn = S3.AWSAuthConnection(g_settings.get('auth', 'access_key'),
                                g_settings.get('auth', 'secret_key'))
    if g_params.norun:
        print("wait for key {k} in bucket {b}".format(k=key, b=bucket))
    else:
        done, retries, max_retries = False, 0, 10
        while not done:
            g_log.debug("wait for key {k} in bucket {b}".format(k=key, b=bucket))
            try:
                status = conn.get(bucket, key).http_response.status
                done = status == 200
            # was a bare 'except:', which also swallowed the SystemExit
            # raised by the signal handler's shutdown() -- a signal during
            # this loop was silently treated as a retry.  Catch Exception
            # only, so SystemExit/KeyboardInterrupt propagate.
            except Exception:
                g_log.error("while waiting for experiment to end: {exc}".format(
                    exc=traceback.format_exc()))
                retries += 1
                if retries <= max_retries:
                    g_log.info("retry on error. errors={r} limit={m}".
                               format(r=retries, m=max_retries))
                    time.sleep(5)
                else:
                    g_log.error("retries exhausted, abort. total-errors={r}".
                                format(r=retries))
                    done = True
            if not done: time.sleep(sleep_interval)
    g_log.info("experiment ended")
    
def shutdown(code=0):
    """Terminate all recorded instances, delete the generated user-data
    files, and exit the process with status 'code'.

    Safe to call more than once: the reservation-id list is cleared after
    the instances are stopped.
    """
    instance_ids = g_params.res_ids()
    if instance_ids:
        g_log.info("stop running instances: {ids}".format(
            ids=' '.join(instance_ids)))
        stop_instances(instance_ids)
        g_params.res_ids([])
        g_log.info("done")
        g_log.info("remove data files")
        for dfile in g_params.data_files():
            try:
                os.unlink(dfile)
            except os.error as err:
                # include the OS error (previously bound but unused) so
                # removal failures are diagnosable from the log
                g_log.error("failed to remove '{0}': {1}".format(dfile, err))
    sys.exit(code)
    
def main(cmdline=None):
    """Entry point: parse options and config files, start the experiment,
    wait for it to finish, and shut everything down.

    'cmdline' defaults to sys.argv[1:].  Exits via shutdown() in all
    cases (sys.exit), so this function does not return normally.
    """
    global g_settings, g_params
    if cmdline is None:
        cmdline = sys.argv[1:]
    op = optparse.OptionParser(usage="%prog [options] config-file(s)..")
    op.add_option('--runs', dest='runs', default=1, type='int',
                  help="number of experiment runs (default=%default)")
    op.add_option('--nodes', dest='nn', default=-1, type='int',
                  help='num. machines to run on (required)')
    op.add_option('--stages', dest='ns', default=-1, type='int',
                  help='num. pipeline stages (required)')
    op.add_option('--tasks', dest='nt', default=-1, type='int',
                  help='num. tasks per worker-process (required)')
    op.add_option('--workers', dest='nw', default=-1, type='int',
                  help='num. workers (required)')
    op.add_option('--procs', dest='np', default=-1, type='int',
                  help='num. processes for this worker (required)')
    op.add_option('--delay', dest='slp', default=1.0, type='float',
                  help='seconds to sleep for each item of work (default=%default)')
    op.add_option('--sigma', dest='sig', default=0.0, type='float',
                  help='gauss std dev to randomize sleep times (default=%default)')
    op.add_option('--size', dest='sz', default=1024, type='int',
                  help='message size in bytes (default=%default)')
    op.add_option('--tx', dest='tx', default=False, action='store_true',
                  help='flag for transactional mode')
    op.add_option('-v', '--verbose', dest='vb', action='count', default=0,
                  help='Increase verbosity for this program')
    op.add_option('-C', dest=CONSUMER + VB_SFX, action='count', default=0,
                  help='Increase verbosity for consumer nodes')
    op.add_option('-M', dest=MASTER + VB_SFX, action='count', default=0,
                  help='Increase verbosity for master(producer) nodes')
    op.add_option('-W', dest=WORKER + VB_SFX, action='count', default=0,
                  help='Increase verbosity for worker nodes')
    op.add_option('--norun', dest='norun', action='store_true', default=False,
                  help='For debugging, print actions but do not perform them')
    op.add_option('--noterm', dest='noterm', action='store_true', default=False,
                  help='Do not terminate EC2 instances')
    op.add_option('--label', dest='label', action='store', default="",
                  help='Label for run (default=none)')
    options, args = op.parse_args(cmdline)
    # validate required options; op.error() exits the program
    if len(args) == 0:
        op.error("At least one configuration file is required")
    if options.nn < 1:
        op.error("--nodes is required, must be > 0")
    if options.ns < 1:
        op.error("--stages is required, must be > 0")
    if options.nt < 1:
        op.error("--tasks is required, must be > 0")
    if options.nw < 1:
        op.error("--workers is required, must be > 0")
    if options.np < 1:
        op.error("--procs per worker is required, must be > 0")
    if options.nn > options.nw:
        op.error("number of nodes cannot be more than number of workers")
    # map -v count to log level
    if options.vb == 0:
        g_log.setLevel(logging.WARN)
    elif options.vb == 1:
        g_log.setLevel(logging.INFO)
    else:
        g_log.setLevel(logging.DEBUG)
    g_params = Parameters(options.__dict__)
    # check workers/node: warn (but proceed) on uneven distribution
    if 1. * options.nw / options.nn != g_params.workers_per_node(0):
        g_log.warn("num. workers {w:d} is not an even multiple of num. nodes {n:d}".
                   format(w=options.nw, n=options.nn))
    g_log.debug("args look OK")
    # init signal handler
    for signo in signal.SIGHUP, signal.SIGINT, signal.SIGTERM:
        signal.signal(signo, signal_stop)
    # read static config options from file
    g_log.info("read config from {f}".format(f=' '.join(args)))
    g_settings = ConfigParser.ConfigParser()
    g_settings.read(args)
    # run
    try:
        startup()
        wait()
    # 'err' was bound but never used (the message comes from format_exc);
    # dropping the binding also avoids the Python-2-only comma syntax
    except Exception:
        msg = traceback.format_exc()
        g_log.error("error:\n{msg}".format(msg=msg))
    finally:
        shutdown()

# Script entry point (no-op on import).
if __name__ == '__main__':
    main()
