#!/usr/bin/python -S
"""
xmap_lib.py

TODO:

- Lots of error conditions:
  - nonexistent executables, etc.
  - permission denied on executables (and ones executed by ssh!)
  - executables that don't read stdin
  - typo in IP address/host name!

- Torn select() loop appears to have issues with closing (Python 2.5)

- Allow naming of worker processes?  (by machine)

- Two buffering modes:
  - Fill up stdin for all worker processes
  - OR, use the COUNT of pending tasks.  This will make the '+' key more
    responsive.  This is --buffer-size, or call it --task-buffer-size.

- Buffering
  - 1000 items received, 956 processed (= 44 pending)
  - I guess there is another state, you can either be in xmap memory, or you can
    be in the pipe

- Error conditions:
  - specified process that died, like ls
  - nothing on stdin?  It will just count forever
  - Ctrl-C -- needs to be handled in the event loop

- PGI_INPUT and PGI_OUTPUT == named pipes
  pipe:foo
  pipe:bar
  later, could be unix-socket:bar, etc.
  memory:foo  # shared memory

- Add input DATA RATE
  - in on_input, just count the string size
- Also OUTPUT data rate

- Grep out lines with PGI monitoring?  Maybe just print ':'
  do I need a line iterator then?
  - could have --stderr-regexp
  - then display live stats with this info?

- how to handle redirecting output to file?
- or input to file?
  - require cat?  This is the simplest solution, perhaps.
  - test if input is disk file, then spawn cat for them?
  - use stat.IS_REG
  - could start another thread

- Try a shuffle then reduce?

INTERFACE:

xmap: Press ? for help.
3 tasks in 2 seconds (1.5 tasks/s) using 4 processes
  pending {1054: 2}
  finished {1054: 3}
+) Increase max processes  r) add remote machine?

kill processes?  Or decrease?
tail logs?  you can do that in another shell
"""

__author__ = 'Andy Chu'


import collections
import subprocess
import sys
import time

from torn import base
from torn import child_process
from torn import ioloop

import streams  # this package
import util
from util import log


class ParallelPump(base.EventEmitter):
  """Wraps a stream of input values, and emits a stream of output values.

  It starts up to max_procs processes to process the events.

  Inputs:
    input: chunks to go to workers
    terminal: interactive control from user's keyboard

  Outputs:
    output: chunks after processing from workers
    status: progress
    TODO: multiple stderr streams

  Also supports demultiplexing output and status streams from children.
  The status stream can demux from input status streams.  And then will it also
  aggregate over them?

  You get num_finished on from each child stream.
  Each child stream initializes itself too.

  Then you can multiplex them into the UI?  Or factor out
  StatusState() -- holds the start time?

  What clock do you use for times?  Machines have different clocks.

  And that emits messages to the UI



  Emits:
    error: fatal error
  """
  EMITS = [
      'child',  # new child process, with PID and (optional) status stream
      'error',  # couldn't start child process
      ]

  def __init__(self, in_stream, argv_list, buffer_size, child_demux=None,
               multi_stderr=None, event_loop=None):
    """
    Args:
      in_stream: emits 'value' and 'close' events
      buffer_size: see options.buffer_size
    """
    base.EventEmitter.__init__(self, event_loop=event_loop)
    self.argv_list = argv_list
    self.buffer_size = buffer_size
    self.child_demux = child_demux

    self.multi_stderr = multi_stderr

    self.num_pending = 0
    self.num_finished = 0
    self.input_bytes = 0

    self.children = {}  # PID -> ChildProcess instance
    self.pending = {}  # PID -> num pending
    self.finished = {}  # PID -> num finished
    self.all_written = {}  # PID -> was last task fully written to stdin?
    self.start_times = collections.defaultdict(list) # PID -> list of start timeS
    self.stdout_closed = {}  # PID -> stdout closed
    self.all_stdout_closed = False

    in_stream.on('chunk', self.on_input)
    in_stream.on('close', self.on_input_close)
    self.in_stream = in_stream

    # for the UI
    self.status = streams.ValueStream()

    # after after one first iteration of event loop, sent our status.  Can't do
    # it here because no one is listening to status yet!
    self.event_loop.add_callback(self.init_status)

    # output stream
    self.output = streams.ChunkStream()

    # Keep track of pausing self.in_stream, so we don't register too many drain
    # events
    self.input_paused = False

  def init_status(self):
    now = time.time()
    self.status.emit('value', {
        'max_procs': self.argv_list.max_procs(),
        # this is approximate -- would it be better to start from the event loop
        # start time, or first record, or process start time?
        'start_time': now,
        'cur_time': now,
        })

  def _start_process(self):
    """Starts a new replica.

    Wraps stdout in a streams.TnetValueStream.
    """
    if self.multi_stderr:
      stderr = subprocess.PIPE  # pipe it into the process so we can redirect it
    else:
      stderr = None

    argv = self.argv_list.next()
    try:
      c = child_process.ChildProcess(argv, stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE, stderr=stderr)
    except OSError, e:
      self.emit('error', "Can't start child process %s: %s" % (argv, e))
      return

    log('Started process %d', c.pid)
    if self.multi_stderr:
      log('Redirecting stderr')
      self.multi_stderr.redirect(c.stderr, '%d.txt' % c.pid)

    # NOTE: PIDs can be reused, but that shouldn't matter here
    self.pending[c.pid] = 0
    self.finished[c.pid] = 0
    self.all_written[c.pid] = True  # trivially true on creation
    self.children[c.pid] = c
    self.stdout_closed[c.pid] = False

    c.on('exit', self.on_exit)

    # Wrap stdout so it emits 'values'

    # NOTE: We're never getting a close event for this stream, because the
    # process is always up.  We COULD sent a sentinel value on the input... or a
    # PGI "close" command.  That would make this cleaner at least.
    # Or I guess the proper thing would be to do stdin.close(), and that would
    # propagate all the way through, rather than relying on the "pending" count.

    stdout_chunks = streams.TnetValueStream(c.stdout)

    def on_output(value):
      self.on_output(value, c.pid)

    # If --child-demux was passed, then wrap the TNET stream with a multiplexer.
    # 'output', 'status'  == 1, 2
    child_status = None
    if self.child_demux:
      d = streams.Demultiplexer(stdout_chunks)

      # Create a chunk stream, and direct substream 1 to it.
      real_output = streams.ChunkStream()
      d.redirect(1, real_output)
      real_output.on('chunk', on_output)

      # Create a chunk stream for the status
      child_status = streams.ChunkStream()
      d.redirect(2, child_status)

    else:
      stdout_chunks.on('chunk', on_output)

    stdout_chunks.on('close', lambda: self.on_worker_stdout_close(c.pid))

    self.emit('child', c.pid, child_status)

    return c

  def on_worker_stdout_close(self, pid):
    """
    Called when each worker's stdout closes.  If all are closed, then we can
    emit 'close' for our 'output' and 'status' streams.

    Before close, the status stream will show the final status.

    We have to wait for all worker stdout to close, because the last 'status'
    event will come after the last value.
    lose the last 'status' streainput event
    """
    self.stdout_closed[pid] = True
    log('WORKER CLOSE %s', self.stdout_closed)
    if all(self.stdout_closed.itervalues()):
      self.all_stdout_closed = True
      self.maybe_close()

  def _pick_process(self):
    """Return either an EXISTING process, or create a new one.

    NOTE: If we wanted to "shuffle" here, we would depend on the input record,
    and read options.shuffle_key and options.partition_func.  And maybe
    max_procs would have to be exact?  Then that would determine which process
    to send it to.
    """
    # TODO: Keep track of how many are pending for each process?
    # TODO: We need to init the processes
    #   - measure how long it takes them to come up
    #   - have a timeout for init
    # Start a new process only if each existing process has 1 pending?

    # First search for any free processes
    # Or less than buffer size?
    for pid, count in self.pending.iteritems():
      if count == 0:
        log('Using PID %d since it has nothing pending', pid)
        return self.children[pid]

    max_procs = self.argv_list.max_procs()
    if len(self.children) < max_procs:
      log('# children %d, max procs: %d', len(self.children), max_procs)
      return self._start_process()

    # Pick the one with the fewest
    min_pid = min(self.pending, key=self.pending.get)
    log('min pid: %d', min_pid)
    return self.children[min_pid]

  def resume(self):
    log('RESUMING')
    self.in_stream.resume()

  def on_drain(self, pid):
    self.all_written[pid] = True

    # If ANY child stdin was drained, then we can resume our input stream, and
    # none of the others have to listen to it.
    log('REMOVING listeners')

    # NOTE: Some new processes may have never had 'drain' registered, but that's
    # OK with all_off()
    for child in self.children.itervalues():
      child.stdin.all_off('drain')

    # Don't need to resume if already paused
    if not self.input_paused:
      return

    log('RESUME input')
    print >>sys.stderr,'RESUME input'
    self.in_stream.resume()
    self.input_paused = False

  def pause_input(self):
    """
    Pause the input stream.  Also register 'drain' events, so that when any
    stdin is drained, we can resume.

    TODO: We could unpause when self.pending clears?  So check in on_output?
    """
    log('ALL FULL, pausing input stream')
    print >>sys.stderr, 'ALL FULL, pausing input stream'
    self.in_stream.pause()
    self.input_paused = True
    # Unpause when ANY worker stdin is drained
    for child in self.children.itervalues():
      child.stdin.on('drain', lambda: self.on_drain(child.pid))

  def on_input(self, value):
    """Called when we have a new input value."""
    log('value %s', value)
    assert value, 'on_input Got empty value'

    # TODO: could turn this into get_or_start
    # if it's a new process, you could update all your state
    # pass in self.pending?

    c = self._pick_process()
    self.input_bytes += len(value)

    # Write value to process stdin
    all_written = c.stdin.write(value)
    if not all_written:
      #print >>sys.stderr, 'NOT ALL WRITTEN %d' % c.pid
      self.all_written[c.pid] = False

      # Every time you try to a single process, check if its stdin full.  Then
      # check if this caused the state where ALL processes have full stdin.
      #
      # If so, pause.  Then register a drain event for ALL children, since
      # one being unblocked is enough for us to resume.

      if not self.input_paused:
        # if there's not at least one worker stdin that isn't full
        if not any(self.all_written.values()) :
          self.pause_input()

    self.num_pending += 1
    self.pending[c.pid] += 1
    # Hm, this isn't quite accurate because the next event loop tick is the one
    # that writes it
    self.start_times[c.pid].append(time.time())

    # Close it for now -- later we will need to close all at the end of the
    # whole stream
    # TODO: just flush it?
    #c.stdin.close()

  def on_output(self, value, pid):
    """Called when we have a new output value."""
    start_time = self.start_times[pid].pop(0)

    # ISSUE: This doesn't actually time how long it took, because it counts
    # queue time, and there's no way to know how long it sits in the processes
    # queue.
    #
    # For front end PGI, we care about latency, so we only queue one at a time.
    # For xmap, we care about throughput, so we queue multiple.  TODO: get rid
    # of this?  I guess if we want timing, PGI apps will have to return it in
    # the response.
    
    elapsed = time.time() - start_time
    log('pid: %d elapsed: %f', pid, elapsed)

    self.output.emit('chunk', value)

    self.num_finished += 1
    self.num_pending -= 1
    self.finished[pid] += 1
    self.pending[pid] -= 1

    if self.num_pending == 0:
      self.maybe_close()

  def maybe_close(self):
    if not self.all_stdout_closed:
      log("** Couldn't close because all worker stdout not closed: %d",
          self.stdout_closed)
      return

    if self.num_pending != 0:
      log("** Couldn't close num_pending == %d", self.num_pending)
      return

    log('QUEUE CLOSE')
    # Just to be safe, close on the next tick.  TODO: examine if this is
    # necessary.
    self.event_loop.add_callback(self.close)
  
  def close(self):
    """Called when the ParallelPump will emit no more outputs.
    # todo: rename end?
    """
    # Show the final number of tasks, so we aren't off by one
    if util._verbose > 0:
      self.emit_status()

    log('EMIT CLOSING (for all outputs)')
    self.output.emit('close')
    self.status.emit('close')

  def on_exit(self, exit_code, signum):
    """Deal with (unexpected?) process  death."""
    log('Child exited with code %s, signal %s', exit_code, signum)

  def on_input_close(self):
    # Now we know that there are no more values that are coming.  So we can
    # close all the input streams of the children.
    for child in self.children.itervalues():
      log('Closing stdin for %s', child)
      child.stdin.close()

  def on_user_input(self, chunk):
    print >>sys.stderr, 'Got %r' % chunk
    # NOTE: This only works if we haven't buffered all the inputs already!  SO
    # we need the backpressure.
    if chunk == '+':
      message = self.argv_list.add_replica()
      print >>sys.stderr, message

  def emit_status(self):
    s = {
        'cur_time': time.time(),
        'num_pending': self.num_pending,
        'num_finished': self.num_finished,
        'pending': self.pending,
        'finished': self.finished,
        }
    self.status.emit('value', s)


class XmapTool(object):
  """Wires a ParallelPump's outputs to stdout, a UI object, and the event
  loop, and stops the loop when everything is done."""

  def __init__(self, p, ui, stdout, cleanup_func, output, status_out, loop=None):
    """
    Args:
      p: ParallelPump (the input)
      ui: receives status values (on_value/on_close) and per-child status
        (on_child_chunk/on_child_close)
      stdout: output; must support write(), and a 'drain' event for
        backpressure
      cleanup_func: called once when output is done, or on fatal error
      output, status_out: integer stream IDs, or None for default
        (stderr/stdout)
      loop: event loop; defaults to ioloop.instance()
    """
    self.p = p
    self.ui = ui
    self.stdout = stdout
    self.cleanup_func = cleanup_func
    self.loop = loop or ioloop.instance()

    # e.g. Couldn't start process
    p.on('error', self.on_error)

    # TODO: These should BOTH be set to a stream ID, or NEITHER
    if output is not None and status_out is not None:
      m = streams.Multiplexer()
    else:
      m = None

    if m:
      m.redirect(p.output, output)
      # If multiplexing, we have to encode the status stream as TNET.  Turning
      # it from a value stream to a chunk stream.
      encoded_status = streams.TnetOutput(p.status)
      m.redirect(encoded_status, status_out)

      # Write the mux output to stdout
      m.on('chunk', self.echo)
      m.on('close', self.done)

    else:
      p.output.on('chunk', self.echo)
      p.output.on('close', self.done)

      # This could really be self.pipe() -- pipe the status stream into the UI
      # object
      p.status.on('value', self.ui.on_value)
      p.status.on('close', self.ui.on_close)

      p.on('child', self.maybe_register)

    # Schedule a callback to show the status of processes
    if util._verbose > 0:
      periodic = ioloop.PeriodicCallback(self.p.emit_status, 1000)
      periodic.start()

    # Whether the LAST write to stdout was fully flushed; if not, done()
    # waits for 'drain' before stopping the loop.
    self.all_written = True

  def maybe_register(self, pid, stream):
    """If the child has a demuxed status stream, forward it to the UI."""
    if stream:
      stream.on('chunk', lambda chunk: self.ui.on_child_chunk(pid, chunk))
      stream.on('close', lambda: self.ui.on_child_close(pid))

  def echo(self, chunk):
    """Write an output chunk to stdout, remembering whether it was flushed."""
    log('CHUNK %r', chunk)
    self.all_written = self.stdout.write(chunk)

  def done(self):
    """Called when the output stream closes: clean up and stop the loop."""
    log('DONE')

    self.cleanup_func()

    # This signals that we're not going to send any more values on stdout -- I
    # guess it's closed automatically on process exit, but just do it for
    # cleanliness.
    # NOTE: If we add this, Python gives an error message: "close failed in file
    # object destructor"
    #stdout.close()
    if self.all_written:
      self.loop.stop()
    else:
      # Pending stdout bytes: wait for them to flush before stopping
      self.stdout.on('drain', self.loop.stop)

  def on_error(self, msg):
    """Fatal error from the pump: clean up and stop the event loop.

    BUG FIX: was 'def on_error(msg)' (missing self), which raised TypeError
    when invoked as a bound method; also referenced the undefined name
    'loop' instead of self.loop.
    """
    log('FATAL: %s', msg)
    self.cleanup_func()
    self.loop.stop()