#!/usr/bin/python -S
"""
xmap.py

Command line driver.
"""

import errno
import Queue
import optparse
import sys
import threading
import termios
import tty
try:
  import multiprocessing  # Python 2.6 only
except ImportError:
  multiprocessing = None

import tnet

from torn import child_process
from torn import log as torn_log
from torn import ioloop
from torn import stream  # for Stdin/Stdout

# this package
import streams
import ui
import util
from util import log
import xmap_lib


class Error(Exception):
  """Base class for user-visible errors raised by this module.

  Caught at the bottom of the file: the message in args[0] is printed to
  stderr and the process exits with status 1.
  """
  pass


def cpu_count():
  """Number of CPUs on this machine, or None when it can't be detected.

  Detection relies on the multiprocessing module (Python 2.6+); when the
  import at the top of the file failed, detection is unavailable.
  """
  if multiprocessing is None:
    return None
  return multiprocessing.cpu_count()


class ArgvList(object):
  """
  Abstracts the policy of choosing a command line for a new worker process.
  """
  def max_procs(self):
    """Maximum number of processes that can be started."""
    raise NotImplementedError

  def next(self):
    """Return the argv (list of strings) for the next process to start.

    Subclasses may raise if no further processes can be started.
    """
    raise NotImplementedError

  def add_replica(self):
    """Call when the user wants to add a replica of a process.

    Returns:
      A status message (string) to show the user.
    """
    raise NotImplementedError


class UniformList(ArgvList):
  """
  Variable size pool of identical command lines, all on the same machine.
  """
  def __init__(self, argv, max_procs):
    self.argv = argv
    self._max_procs = max_procs

  def max_procs(self):
    """Current cap on the number of worker processes."""
    return self._max_procs

  def next(self):
    """Every worker runs the same command line."""
    return self.argv

  def add_replica(self):
    """Raise the process cap by one and report the new value."""
    self._max_procs = self._max_procs + 1
    return 'Max processes = %d' % self._max_procs


class MixedList(ArgvList):
  """
  Fixed size pool with different command lines.

  A typical use of this is to spawn more copies of xmap, with varying capacities.

  GOAL: if 2 machines have wildly different capacities, they should each be
  fully utilized and the whole job should complete in the minimum time.
  """
  def __init__(self, argv_list):
    self.argv_list = argv_list
    self.i = 0  # index of the next unused command line

  def max_procs(self):
    """The pool size is fixed by the supplied list."""
    return len(self.argv_list)

  def next(self):
    """Hand out each command line exactly once, in order."""
    if self.i < self.max_procs():
      chosen = self.argv_list[self.i]
      self.i += 1
      return chosen
    raise AssertionError("Trying to start too many processes.")

  def add_replica(self):
    """Replicas can't be added to a heterogeneous pool."""
    return ("Can't increase the number of processes with "
            "--argv-list/--argv-strings")


def make_options():
  """Build the optparse parser describing the xmap command line.

  Returns:
    optparse.OptionParser with all xmap flags registered.
  """
  parser = optparse.OptionParser()

  # INPUTS and OUTPUTS
  # can be @1, @2, etc. to enable multiplexing
  parser.add_option(
      '-o', '--output', dest='output', type='str', default='-',
      help='Output from worker processes.  Defaults to stdout, but could '
           'be sent to a (remote) file.')
  parser.add_option(
      '--status-out', dest='status_out', type='str', default='-',
      help='The status stream gives info on task completion progress.  By '
           'default it is displayed on the terminal.  You can pass @1 to '
           'multiplex it onto stdout, e.g. for network operation.')

  parser.add_option(
      '-i', '--input', dest='input', type='str', default='-',
      help='Input to send to worker processes.')
  parser.add_option(
      # BUG FIX: "Interacitve" -> "Interactive" in the help text.
      '--control-in', dest='control_in', type='str', default='-',
      help='Interactive control input stream.  By default this is the '
           'connected terminal.')

  parser.add_option(
      # BUG FIX: added the missing space after "workers." -- the adjacent
      # string literals used to concatenate to "workers.For example".
      '-d', '--child-demux', dest='child_demux', type='str', default='',
      help='Demux the list of given streams from stdout of the child workers.  '
           'For example, if the workers were started with --output @1 '
           '--status @2, then the parent xmap process should be started with '
           '--child-demux output,status')

  # Degenerate case: If there is only 1 process, xmap is still useful because it
  # turns a PGI process into a unix filter.
  # TODO: Should xmap be run as a PGI-style app too?  Or only a unix
  # start-and-stop app?
  parser.add_option(
      '-P', '--max-procs', dest='max_procs', type='int', default=0,
      help='Number of PGI processes to use.  Attempts to detect and use the '
           'number of CPUs on this machine if not specified.  If there are '
           'fewer records than processes, then only one process per record '
           'will be used.')

  # TODO: For symmetry, should we have --argv-list - and --input /foo/bar to
  # take processes from stdin and inputs from a named file?
  parser.add_option(
      '--argv-list', dest='argv_list', type='str', default='',
      help='Name of a file that contains a TNET-encoded list of N command lines '
           '(which are themselves lists of strings).  xmap will start N worker '
           'processes: one with each command line.  Terminal control of the '
           'number of processes will be disabled.  Not compatible with '
           '--max-procs.')

  parser.add_option(
      '--argv-strings', dest='argv_strings', type='str', default='',
      help='Name of a file that contains N command lines, each on a line, with '
           'arguments separated by spaces.  This is exactly like --argv-list, '
           'except for the parsing rules.  The shell is NOT invoked; instead the '
           'processes are spawned directly.  If you need arguments containing '
           'spaces then use --argv-list.')

  parser.add_option(
      # BUG FIX: the help text used to end "in xmap " (trailing space, no
      # period); now ends with a proper sentence.
      '-b', '--buffer-size', dest='buffer_size', type='int', default=10,
      help='Maximum number of values to queue on to each PGI '
           'process.  Setting this to 1 is generally not optimal, because '
           'a worker process would have be blocked and do a context switch '
           "after every value.  Having it be infinite isn't desirable either, "
           'as a very large input stream would consume all memory in xmap.')

  parser.add_option(
      '--log-dir', dest='log_dir', type='str', default='',
      help='Redirect worker stderr into files in this directory (named by '
           'PID).  Must exist.')

  # This could be in a wrapper for each process, but for the purposes of
  # cleaning up the pipes, it's easier if it's here.
  parser.add_option(
      '-n', '--named-pipes', dest='named_pipes', action='store_true',
      default=False,
      help='For each process, create a pair of named pipes, and use those for '
           'communication rather than stdin/stdout of process.')

  parser.add_option(
      '--no-term', dest='terminal', action='store_false',
      default=True,
      help="Disable terminal control of xmap")

  parser.add_option(
      '--no-animate', dest='animate', action='store_false',
      default=True,
      help="Disable ANSI animation in xmap UI")

  parser.add_option(
      '-v', '--verbose', dest='verbose', type='int', default=1,
      help='Verbosity level for logging.  0 is completely silent; 1 is the '
           'default; 2 shows debug logs; 3 shows event loop logs.')

  return parser


def _ParseArgvStrings(f):
  lists = []
  for line in f:
    line = line.strip()
    if not line:
      continue
    if line.startswith('#'):
      continue
    # split by space only
    lists.append(line.split())
  return lists


def _ParseStreamId(flag_value):
  if flag_value == '-':
    return None
  if flag_value.startswith('@'):
    return int(flag_value[1:])
  raise Error('Invalid stream ID: %r' % flag_value)


def main(argv):
  """Parse flags, wire up streams and the process pool, then run the loop.

  argv is sys.argv[1:].  Raises Error for user-visible configuration
  problems (conflicting flags, unopenable files).
  """
  (options, argv) = make_options().parse_args(argv)

  util._verbose = options.verbose

  # 2 gets xmap logs; 3 gets torn logs
  if util._verbose >= 3:
    torn_log.install_default_logger()

  loop = ioloop.instance()

  # Needed for exit event to work
  child_process.RegisterSignalHandlers()

  if options.log_dir:
    # NOTE: Later we could replace this with a single INTERLEAVED file, rather
    # multiple files.  It won't require changing ParallelPump.

    # Worker stderr is multiplexed onto a queue and drained into per-PID
    # files by a separate thread, so disk writes happen off the event loop.
    disk_chan = Queue.Queue()
    multi_stderr = streams.QueueMultiplexer(disk_chan)
    disk_thread = streams.DiskThread(disk_chan, options.log_dir)
    log('Starting disk thread; writing to %s', options.log_dir)
    t = threading.Thread(target=disk_thread.run)
    t.start()
  else:
    multi_stderr = None
    disk_chan = None

  if options.max_procs:
    max_procs = options.max_procs
  else:
    # Fall back to CPU detection; use a single process if that fails.
    max_procs = cpu_count()
    if max_procs:
      log('Detected %d CPUs', max_procs)
    else:
      max_procs = 1
      log("Couldn't detect number of CPUs; using %d", max_procs)

  # stdin for this process
  stdout = stream.Stdout()
  stdin = stream.Stdin()
  t = streams.TnetValueStream(stdin)

  # --argv-strings and --argv-list are mutually exclusive, and neither may
  # be combined with --max-procs or a literal command line.
  used = 0
  if options.argv_strings:
    used += 1
  if options.argv_list:
    used += 1
  if used == 2:
    raise Error("Can't combine --argv-strings and --argv-list")
  if used == 1:
    if options.max_procs:
      raise Error("--max-procs not compatible with --argv-strings/--argv-list")
    if argv:
      raise Error("Got extra arguments '%s' with --argv-strings/--argv-list" %
                  ' '.join(argv))

  # Build the ArgvList policy object from whichever flag was given.
  if options.argv_strings:
    try:
      f = open(options.argv_strings)
    except IOError, e:
      raise Error("Error opening '%s'" % options.argv_strings)
    a = _ParseArgvStrings(f)
    f.close()
    argv_list = MixedList(a)
  elif options.argv_list:
    try:
      f = open(options.argv_list)
    except IOError, e:
      raise Error("Error opening '%s'" % options.argv_list)
    a = tnet.load(f)
    f.close()
    argv_list = MixedList(a)
  else:
    if not argv:
      raise Error('No command specified.')
    argv_list = UniformList(argv, max_procs)

  # Open the connecting terminal and pass it to the process manager
  # NOTE: Do this change AFTER all option error checking, so we don't change the
  # terminal and then have to restore.
  if options.terminal:
    log('Setting cbreak mode on controlling terminal')
    try:
      term_handle = open("/dev/tty")
    except IOError, e:
      # This will happen if you SSH in, and there is no controlling terminal
      if e.errno != errno.ENXIO:
        raise
      log('Error opening terminal; disabling terminal control')
      term_handle = None
      terminal = None
      old_term = None
    else:
      terminal = stream.ReadableStream(term_handle)
      # Save the terminal state so cleanup() can restore it on exit.
      old_term = termios.tcgetattr(term_handle)
      tty.setcbreak(term_handle.fileno())
  else:
    log('Terminal control disabled')
    term_handle = None
    terminal = None
    old_term = None

  # TODO: Parse 'output,status' to a dict {output: 1 status: 2}
  child_demux = options.child_demux
  p = xmap_lib.ParallelPump(t, argv_list, options.buffer_size,
                            child_demux=child_demux, multi_stderr=multi_stderr)

  # Keystrokes from the terminal drive interactive control of the pump.
  if terminal:
    terminal.on('data', p.on_user_input)

  def cleanup():
    """Clean up resources constructed in this function."""

    # Restore terminal settings
    # TODO: This appears not to be called if you Ctrl-C during torn's poll()
    # call.  Then the terminal is hosed.
    if term_handle:
      termios.tcsetattr(term_handle, termios.TCSADRAIN, old_term)

    # Stop the disk thread
    if disk_chan:
      disk_chan.put(streams.SENTINEL)

  output = _ParseStreamId(options.output)
  status_out = _ParseStreamId(options.status_out)

  xmap_ui = ui.XmapUi(animate=options.animate)
  tool = xmap_lib.XmapTool(p, xmap_ui, stdout, cleanup, output, status_out)

  # Runs until the loop is stopped; cleanup() is passed to XmapTool above.
  loop.start()


if __name__ == '__main__':
  # Error is the only expected failure mode: print its message (args[0]) to
  # stderr and exit with status 1.  Anything else gets a full traceback.
  try:
    sys.exit(main(sys.argv[1:]))
  except Error, e:
    print >> sys.stderr, e.args[0]
    sys.exit(1)
