#!/usr/bin/python -S
"""
Usage:
  fly [options] run [--] <dir> <args>...
  fly [options] stop <dir>
  fly [options] state <dir>
  fly -h | --help
  fly --version

Actions:
  run      Run the given command.  If a process doesn't exist already, it will
           start it.  It won't start more than one process per state directory.

  stop     Stop a process.  The only reason to use this is to reclaim resources
           on your machine.

  state    Show process states.

Options:
  -t --timeout=SECS        How long to wait for a lock.
  -p --poll-interval=SECS  How often to check for failed processes.

Example:

  $ fly run -- ~/fly/count --sleep 1   # Run 'count', passing it a flag.
  $ fly stop ~/fly/count               # Stop it
  $ fly state ~/fly/                   # Show state

A command 'fly run -- ~/fly/count' is usually wrapped in a shell script called
'count'.
"""

# TODO:
# 
#   - ISSUE: If you Ctrl-C, then data can be sitting around in the pipe without
#     getting read.  And then it will appear the NEXT time.  Need to fix this up.
# 
#   - Mitigated for now
#     - BUG: same issue as xmap.  CHILD process stdout can get blocked with
#       EWOULDBLOCK.  Can the child use blocking I/O and the parent use
#       nonblocking?
# 
#   - Proxy stdin.  It should just be a field like argv?  I guess there are tools
#     which take small stdin.  This requires more "wrapping" of main().  main()
#     should probably look like main(argv, stdin) then.
#     - stdin_boundary
#     - stdin
# 
#   - Test out fly with xmap.
#     - Use case: Load some data in memory and do a join?
#     - That's a use case for xmap alone.
#   - Load some big data and do queries from the command line.
#     - Some pre-aggregations?  e.g. guar pattern stats
#     - If it takes really long, should you init the process?  I think that's
#       better.
#
#   - ISSUE: If the child process unexpectedly exits (e.g. a Python stack trace),
#     then the fly error can be obscure -- you can get "Interrupted system call"
#     on tnet read.  This is with count_.py.
# 
# Helpers:
#   - Install script.  Generates:
#     fly run state/PROG -- "$@"
# 
# IDEA: You could proxy it?  Fly itself could start as a PGI process.  For a
# given run, it would record all processes that it started.  And then you can
# kill them, and kill fly.  Is it fly_?
# Or maybe fly?
# 
# 
# Concurrency:
# 
#   Uses <state-dir>/mutex.
# 
# User considerations:
# 
#   One process per user, per machine.  So the state root could be in ~ if it's
#   local dir.  If ~ is NFS, then it should be somewhere else on local disk.

__author__ = 'Andy Chu'


import errno
import fcntl
import optparse
import os
import select
import shutil
import signal
import subprocess
import sys
import threading
import time

import tnet


class Error(Exception):
  """Base exception for all fly-specific failures (exit status 121)."""


# ANSI terminal escape sequences used by ColorOutputWriter.
ANSI_BOLD = '\033[1m'
ANSI_RESET = '\033[0;0m'
ANSI_RED = '\033[31m'


# Module-global OutputWriter instance; assigned in main() before anything
# else runs, since all logging and error reporting goes through it.
_output_writer = None


class OutputWriter(object):
  """Base class for writers of fly's own diagnostic output.

  Subclasses (PlainOutputWriter, ColorOutputWriter) provide log(),
  error(), and stderr() methods.
  """

  def __init__(self, verbose):
    # verbose: when False, log() messages are suppressed (errors still shown).
    self.verbose = verbose


class PlainOutputWriter(OutputWriter):
  """Writes fly's diagnostic output to stderr, without terminal colors."""

  def log(self, s):
    # Verbose-only diagnostics.
    if self.verbose:
      self._log(s)

  def _log(self, s):
    sys.stderr.write('fly: ' + s + '\n')

  error = _log  # errors are always shown, regardless of verbosity

  def stderr(self, s):
    # Pass-through of the worker's stderr; no newline is appended.
    sys.stderr.write(s)


class ColorOutputWriter(OutputWriter):
  """Writes fly's diagnostic output to stderr with ANSI color codes."""

  def log(self, s):
    # Verbose-only diagnostics.
    if self.verbose:
      self._log(s)

  def _log(self, s):
    # Bold red 'fly: ' prefix; the message itself is unstyled.
    sys.stderr.write(ANSI_BOLD + ANSI_RED + 'fly: ' + ANSI_RESET + s + '\n')

  error = _log  # errors are always shown, regardless of verbosity

  def stderr(self, s):
    # Worker stderr is shown in bold; no newline is appended.
    sys.stderr.write(ANSI_BOLD + s + ANSI_RESET)


def log(msg, *args):
  """%-format msg with args (if any) and hand it to the global writer."""
  formatted = msg % args if args else msg
  _output_writer.log(formatted)


def IsRunning(pid):
  """Return True if a process with the given PID exists.

  Unix signal 0 is a pseudo-signal: kill() performs its existence and
  permission checks but delivers nothing.
  """
  try:
    os.kill(pid, 0)
  except OSError as e:
    if e.errno == errno.ESRCH:  # no such process
      return False
    if e.errno == errno.EPERM:
      # BUG FIX: EPERM means the process exists but is owned by another
      # user; the old code raised here instead of reporting it as running.
      return True
    raise
  return True


def ReadPid(pid_name):
  """Return the integer PID stored in the file pid_name.

  Returns None if the file doesn't exist (i.e. no worker was recorded).
  Raises Error if the file exists but doesn't hold an integer.
  """
  try:
    f = open(pid_name)
  except IOError:
    # Missing PID file: caller treats this as "no live process".
    return None
  try:
    pid_str = f.read()
  finally:
    # BUG FIX: close even if read() raises (the handle used to leak).
    f.close()

  try:
    return int(pid_str)
  except ValueError:
    raise Error('Invalid contents of PID file %r: %r' % (pid_name, pid_str))


def GetWorkerState(bundle_dir):
  """Look up the live PGI worker for a bundle.

  Returns None when live/pid.txt is absent (assume no process is running
  and one must be created); otherwise returns (pid, in_fd, out_fd,
  err_fd) with freshly opened descriptors for the three named pipes.
  """
  live_dir = os.path.join(bundle_dir, 'live')

  # NOTE: You don't need the PID in a lot of cases.  The PID file also marks
  # the state as "finished", since it is written AFTER stdin/etc.
  pid = ReadPid(os.path.join(live_dir, 'pid.txt'))
  if pid is None:
    return None

  # Open the three pipes in the same order they were created.
  fds = [os.open(os.path.join(live_dir, pipe_name), os.O_RDWR)
         for pipe_name in ('stdin', 'stdout', 'stderr')]
  return (pid, fds[0], fds[1], fds[2])


def StartAndCreateState(bundle_dir):
  """Create <bundle_dir>/live with named pipes and launch the PGI worker.

  Runs <bundle_dir>/command with stdin/stdout/stderr connected to three
  named pipes under 'live', and records the child's PID in live/pid.txt.
  Returns (pid, in_fd, out_fd, err_fd).
  """
  # 'command' must be a symlink to the program (or the program itself)
  unix_argv = [os.path.join(bundle_dir, 'command')]
  log('Starting %s', unix_argv)

  # No running process should be creating a directory with this name on the
  # machine.  We can't use the CHILD (PGI) process ID because we need to create
  # the named pipes in this dir before we know its PID!
  pipe_dir = os.path.join(bundle_dir, 'live')
  try:
    os.mkdir(pipe_dir)
  except OSError, e:
    if e.errno == errno.EEXIST:
      # pid.txt doesn't exist but 'live' does.  This is technically an
      # inconsistent state, but we can easily bring it back to consistent.
      pass
    else:
      raise

  in_name = os.path.join(pipe_dir, 'stdin')
  out_name = os.path.join(pipe_dir, 'stdout')
  err_name = os.path.join(pipe_dir, 'stderr')

  # NOTE(review): if the pipes survived in an existing 'live' dir (EEXIST
  # branch above), mkfifo will raise EEXIST here -- confirm that path.
  os.mkfifo(in_name)
  os.mkfifo(out_name)
  os.mkfifo(err_name)

  # TODO: Open for read or write only?
  in_fd = os.open(in_name, os.O_RDWR)
  out_fd = os.open(out_name, os.O_RDWR)
  err_fd = os.open(err_name, os.O_RDWR)

  # Add PGI marker variables so the child knows it's run under fly.
  env = dict(os.environ)
  env['PGI'] = '1'
  # Suggestion -- child processes can override it
  env['PGI_BOUNDARY'] = '__pgi_end__'

  # It appears we don't need to daemonize the PGI process, because it's
  # connected to named pipes rather than the terminal.  So it shouldn't die when
  # the terminal goes away.
  p = subprocess.Popen(unix_argv, stdin=in_fd, stdout=out_fd, stderr=err_fd, env=env)

  # Written AFTER the pipes exist, so the presence of pid.txt implies a
  # fully-created state dir (see GetWorkerState).
  pid_name = os.path.join(pipe_dir, 'pid.txt')
  f = open(pid_name, 'w')
  f.write(str(p.pid) + '\n')
  f.close()

  return p.pid, in_fd, out_fd, err_fd


# Bytes requested per os.read() when draining the worker's pipes.
# TODO: Make it the pipe size?
CHUNK_SIZE = 4096

def ReadLines(fd, boundary, lines, write_func):
  """Drain a nonblocking fd, emitting complete lines until the boundary.

  Args:
    fd: nonblocking descriptor to read from.
    boundary: terminator line (includes the trailing newline).
    lines: buffered, not-yet-consumed lines from a previous call.
    write_func: called with each complete line seen before the boundary.

  Returns (done, lines): done is True once the boundary line was seen;
  lines holds unconsumed data (after the boundary, that is the trailer
  the caller parses as a tnet record).
  """
  done = False
  while not done:
    try:
      chunk = os.read(fd, CHUNK_SIZE)
    except OSError as e:
      if e.errno != errno.EWOULDBLOCK:
        # BUG FIX: unexpected errors used to be silently swallowed, then
        # execution fell through and used a stale/unbound 'chunk'.
        raise
      log('EWOULDBLOCK')
      break
    if not chunk:
      # BUG FIX: EOF (writer closed the pipe) used to spin forever.
      break

    log('%d byte chunk', len(chunk))
    log('chunk end %r', chunk[-20:])

    # Rejoin a partial line buffered by the previous iteration, so a line
    # (or the boundary itself) split across chunks is seen whole.
    if lines and not lines[-1].endswith('\n'):
      chunk = lines.pop() + chunk
    lines += chunk.splitlines(True)

    remaining = []
    for i, line in enumerate(lines):
      if line.endswith(boundary):
        log('PGI END DONE')
        done = True
        remaining = lines[i+1:]  # trailer bytes, kept for the caller
        break
      if line.endswith('\n'):
        write_func(line)
      else:
        # Trailing partial line: keep it buffered instead of emitting it,
        # so it is written exactly once when complete.
        remaining = [line]
    # BUG FIX: written lines used to stay in 'lines' and were re-emitted
    # on the next chunk; and 'done' never exited the outer loop, so
    # trailer data read later could be passed to write_func.
    lines = remaining
  return done, lines


# BUG: I think we are setting the CHILD's fd too somehow.  Need to sort it out.
# (Plausible cause: O_NONBLOCK lives on the open file description, which the
# child inherits -- TODO confirm.)
def SetNonblocking(fd):
  """Turn on O_NONBLOCK for the given descriptor."""
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFL)
  fcntl.fcntl(fd, fcntl.F_SETFL, old_flags | os.O_NONBLOCK)


def SetBlocking(fd):
  """Clear O_NONBLOCK so reads and writes on fd block again."""
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFL)
  fcntl.fcntl(fd, fcntl.F_SETFL, old_flags & ~os.O_NONBLOCK)

def Run(argv, bundle_dir, writer, exit_fd, poll_thread):
  """Send one request to the bundle's PGI worker and proxy its response.

  Reuses a live worker if one exists, otherwise starts one.  Writes a
  tnet-encoded request (argv, cwd, env) to the worker's stdin pipe, then
  either copies a whole buffered response or streams stdout/stderr until
  a boundary line appears.

  Args:
    argv: arguments forwarded to the worker.
    bundle_dir: bundle state directory.
    writer: OutputWriter used to display the worker's stderr.
    exit_fd: pipe read end that becomes readable when poll_thread detects
      the worker died.
    poll_thread: ProcessPoller; given the worker PID and started here.

  Returns the exit status reported by the worker.
  """
  # Desired invariant:
  #
  # EITHER:
  # 1) There are no PGI processes and the 'live' dir is empty
  # 2) There is one PGI process and the 'live' dir contains 3 named pipes, and a
  # 'pid.txt' file.
  #
  # Inconsistent conditions that need to be fixed:
  #
  # 1) There is a 'live' dir (and possibly some named pipes), but no process is
  # up.  This can be checked by IsRunning().
  # 2) There is a process up, but no 'live' dir.  This can't be detected.  Thus,
  # in the 'shutdown' action, we should make sure the process is really killed
  # before removing the state dir.

  result = GetWorkerState(bundle_dir)
  start_new = True
  if result:
    pid, in_fd, out_fd, err_fd = result
    # Check that it's running
    if IsRunning(pid):
      start_new = False
      log('Reusing existing process with PID %d', pid)
    else:
      # Stale pid.txt: clean up so StartAndCreateState can recreate 'live'.
      log('pid.txt contains %d but process is not up; cleaning up', pid)
      RemoveLive(bundle_dir)

  if start_new:
    pid, in_fd, out_fd, err_fd = StartAndCreateState(bundle_dir)
    log('Started new process with PID %d', pid)

  # Start watching the pid now
  poll_thread.set_pid(pid)
  poll_thread.start()

  env = dict(os.environ)  # os.environ is apparently not a plain dict
  request = {'argv': argv, 'cwd': os.getcwd(), 'env': env}
  req_str = tnet.dumps(request)
  log('Writing request %r', req_str)
  os.write(in_fd, req_str)

  # Hm this is dumb:
  # http://stackoverflow.com/questions/2804543/read-subprocess-stdout-line-by-line
  #p.stdin.flush()

  #outfile = os.fdopen(out_fd)
  #errfile = os.fdopen(err_fd)

  log('Waiting for initial TNET record on stdout')

  # TODO: See bug above.  For the 'stdout' case, catch EINTR here?
  obj = tnet.loadfd(out_fd)

  if 'stdout' in obj:  # Whole response was returned
    sys.stdout.write(obj['stdout'])

    # Hm -- should we process stderr in the same way?  Some things like
    # "Segmentation fault" are not controlled by the application.  It isn't
    # likely that they can redirect it to stderr.

    # Should we just look at ACTUAL?  I think that may make more sense.  We
    # could enter the select loop.  Need some examples.
    s = obj.get('stderr')
    if s:
      writer.stderr(s)

    status = obj.get('status', 0)

  elif 'stdout_boundary' in obj:  # Response will be streamed

    boundary = obj['stdout_boundary']
    boundary += '\n'
    log('stdout_boundary = %r', boundary)

    log('Setting stdout/stderr nonblocking')
    SetNonblocking(out_fd)
    SetNonblocking(err_fd)

    # Enter select loop
    stdout_done = False
    stdout_lines = []
    stderr_done = False
    stderr_lines = []

    readable = []
    while not stdout_done or not stderr_done:
      fd_set = [out_fd, err_fd, exit_fd]
      log('asking : %s', fd_set)
      try:
        readable, _, _ = select.select(fd_set, [], [])
      except select.error, e:
        # If SIGCHLD occurs while we're waiting, EINTR will happen.  Are there
        # any other cases where EINTR happens?
        if e.args[0] == errno.EINTR:
          # TODO: Add pid to message
          # TODO: Make sure pipes are empty?
          # TODO: Clean up PID file and so forth
          log('Got EINTR')
          try:
            RemoveLive(bundle_dir)
          finally:
            raise Error('Got unexpected child death (EINTR)')
        # NOTE(review): a select.error other than EINTR is not re-raised, so
        # the stale 'readable' from the previous iteration is reused below --
        # confirm this is intended.

      log('readable: %s', readable)

      # If we're not the parent, we should get an error here, rather than when
      # waiting for select().
      # TODO: We could disable interrupts in select() and rely on the pipe?
      if exit_fd in readable:
        try:
          RemoveLive(bundle_dir)
        finally:
          raise Error('Got unexpected child death (exit_fd = %d)' % exit_fd)

      # ISSUE: we can read more than we were entitled to!
      if err_fd in readable:
        # stderr should be unbuffered, so just read one line.  We can't use
        # readline() because of buffering again.  Doh.
        # NOTE(review): on EWOULDBLOCK this 'break' exits the outer select
        # loop (not just the char loop) and drops any partially-read line;
        # other OSErrors fall through with 'line' possibly unset -- confirm.
        try:
          line = ''
          while True:
            c = os.read(err_fd, 1)
            line += c
            if c == '\n':
              break
        except OSError, e:
          if e.errno == errno.EWOULDBLOCK:
            log('EWOULDBLOCK')
            break

        if line.endswith(boundary):
          log('STDERR DONE')
          stderr_done = True

        if not stderr_done:
          writer.stderr(line)

        #stderr_done, stderr_lines = ReadLines(
        #    err_fd, boundary, stderr_lines, writer.stderr)

      # TODO: Could add a flag to buffer stdout like stderr, at the expense of
      # speed.
      if out_fd in readable:
        stdout_done, stdout_lines = ReadLines(
            out_fd, boundary, stdout_lines, sys.stdout.write)

    # Whatever followed the boundary is the tnet trailer carrying the status.
    rest = ''.join(stdout_lines)
    log('Reading trailer from %r', rest)
    trailer, rest = tnet.loads_prefix(rest)
    if rest:
      log('WARNING: Unexpected trailing bytes on stdout: %r', rest)
    status = trailer.get('status', 0)

  else:
    raise Error(
        "Expected 'stdout' or 'stdout_boundary' to be in response %r" % obj)

  # set back to blocking (a no-op in the whole-response path, where the fds
  # were never made nonblocking)
  SetBlocking(out_fd)
  SetBlocking(err_fd)

  log('Done')
  return status


def RemoveLive(bundle_dir):
  """Delete <bundle_dir>/live, ignoring errors.

  ignore_errors covers ENOENT ('live' doesn't exist -- fine to ignore)
  and an occasional errno 39 "directory not empty", which may be an
  rmtree bug; swallowing it appears safe.
  """
  doomed = os.path.join(bundle_dir, 'live')
  shutil.rmtree(doomed, ignore_errors=True)
  log('Removed %s', doomed)


def Stop(bundle_dir):
  """
  This can also be implemented in shell like:

  $ find <dir> -name pid.txt | xargs cat | xargs kill
  """
  live_dir = os.path.join(bundle_dir, 'live')
  pid_filename = os.path.join(live_dir, 'pid.txt')

  try:
    f = open(pid_filename)
  except IOError, e:
    if e.errno == errno.ENOENT:
      log("%r doesn't exist", pid_filename)
    else:
      log("Couldn't open %r", pid_filename)
  else:
    pid_str = f.read()
    f.close()
    try:
      pid = int(pid_str)
    except ValueError:
      raise Error("Invalid PID file contents %r" % pid_str)

    try:
      log('Sending SIGTERM to %d', pid)
      os.kill(pid, signal.SIGTERM)
    except OSError, e:
      if e.errno == errno.ESRCH:
        log("Tried to kill PID %d but it isn't running.", pid)
    else:
      for i in xrange(10):
        if not IsRunning(pid):
          log('Process %d successfully stopped.', pid)
          break
        time.sleep(0.1)
      else:
        log("Process %d didn't stop after 1 second.", pid)

  # Remove it even if we didn't kill a process
  RemoveLive(bundle_dir)


def State(root_dir):
  """Print a status table for all bundles under root_dir.

  Args:
    root_dir: the PARENT of fly bundles

  Returns 0 on success, 1 when no bundles were found.
  """
  rows = []
  for entry in os.listdir(root_dir):
    bundle_dir = os.path.join(root_dir, entry)
    # Only dirs whose 'command' is a symlink count as bundles.
    try:
      target = os.readlink(os.path.join(bundle_dir, 'command'))
    except OSError:
      continue
    pid = ReadPid(os.path.join(bundle_dir, 'live/pid.txt'))
    rows.append((bundle_dir, target, pid))

  if not rows:
    _output_writer.error("No bundles in %r" % root_dir)
    return 1

  # TODO: print structured version of this table too
  template = '%5s  %s  %-20s  %s'
  sys.stdout.write(template % ('PID', '?', 'command', 'bundle') + '\n')
  for bundle_dir, target, pid in rows:
    if pid:
      # TODO: flag non-running ('N') bundles for cleanup
      running = 'Y' if IsRunning(pid) else 'N'
      sys.stdout.write(template % (pid, running, target, bundle_dir) + '\n')
    else:
      sys.stdout.write(template % (' ', ' ', target, bundle_dir) + '\n')
  return 0


class Mutex(object):
  """Advisory file lock on <bundle_dir>/mutex, serializing fly invocations.

  The mutex file must already exist (it is part of the bundle
  installation); see the module docstring's "Concurrency" section.
  """

  def __init__(self, bundle_dir):
    path = os.path.join(bundle_dir, 'mutex')
    try:
      # Need to open for write to do lockf() !
      self.f = open(path, 'w')
    except IOError:
      raise Error("Invalid installation: %r doesn't exist" % path)

  def acquire(self):
    # Blocks until the exclusive lock becomes available.
    log('Waiting for lock on %r', self.f)
    fcntl.lockf(self.f.fileno(), fcntl.LOCK_EX)
    # For some reason, flock() create locks that are inherited by the child
    # process!  (even though documentation says otherwise).  So it is unusable
    # here.  Not sure what the real difference is between lockf and flock, but
    # lockf works.
    #fcntl.flock(self.f.fileno(), fcntl.LOCK_EX)

  def release(self):
    # Closing the file releases the lockf() lock held on it.
    self.f.close()
    log('RELEASED mutex')


_wait_count = 0  # number of children reaped so far (for logging)

def SigchldHandler(signum, frame):
  """SIGCHLD handler: reap any exited children with os.wait().

  Python runs signal handlers on its interpreter loop, not from the actual C
  signal handler, so we can do real work here.
  """
  # signum is always 17, probably not useful

  # BUG: if there is an exception in the child (file doesn't exist), then
  # subprocess calls os.waitpid, reaping the child.  we get the signal here but
  # have nothing to reap.

  global _wait_count
  log('WAITING (count = %d)', _wait_count)

  # In case signals are coalesced, do it multiple times
  # I think this is correct in Torn, but is actually not needed here, because we
  # only start one child process!  TODO: fix
  while True:
    try:
      pid, status = os.wait()
    except OSError, e:
      if e.errno == errno.ECHILD:  # "No child processes"
        break
      else:
        raise
    log('pid = %s, status =%s', pid, status)
    _wait_count += 1

  log('WAITED %d times', _wait_count)


class ProcessPoller(threading.Thread):
  """Polls to see if a given PID is running.

  It's annoying to have to poll the child process.  The other option was to
  create a "fly parent" process that was the parent of all PGI processes.  Its
  job would be to handle SIGCHLD and notify the fly clients.  See README.txt
  for details on why we didn't choose that.
  """

  def __init__(self, poll_interval, exit_pipe_write_fd):
    threading.Thread.__init__(self)
    self.pid = None  # filled in later via set_pid(), once the worker is known
    self.poll_interval = poll_interval  # seconds between liveness checks
    self.exit_pipe_write_fd = exit_pipe_write_fd  # written when the PID dies

  def set_pid(self, pid):
    """This is set after construction."""
    self.pid = pid

  def run(self):
    # Runs as a daemon thread (see main()), so it dies with the process.
    assert self.pid is not None
    while True:
      time.sleep(self.poll_interval)
      log('Checking PID %d', self.pid)
      # send signal 0
      if not IsRunning(self.pid):
        # Wake up the select() loop in the main thread.
        os.write(self.exit_pipe_write_fd, 'x')


def CreateOptionsParser():
  """Build the optparse parser for fly's command-line flags."""
  p = optparse.OptionParser()

  p.add_option(
      '-p', '--poll-interval', dest='poll_interval', type='float', default=0.5,
      help='However often to poll for the existence of the worker process.  This '
           'is only necessary for protection against PGI processes that '
           'improperly exit, rather than reporting errors through stdout/stderr. '
           "If you're sure that the PGI process won't exit unexpectedly, it's "
           'safe to set this to 0 to disable polling.')
  p.add_option(
      '-t', '--timeout', dest='timeout', type='int', default=3,
      help='Number of seconds to wait to acquire a lock.')
  # TODO: Timeout action?  You could launch a real copy of the original process
  # after a timeout?

  # TODO: Hook verbose up
  p.add_option(
      '-v', '--verbose', dest='verbose', action='store_true', default=False,
      help='Show verbose logging output.')

  p.add_option(
      '--color', dest='color', default='always',
      choices=['none', 'always'],
      help='Whether to print stderr in color.')

  # NOTE: stdout protocol is detected by the first record from the PGI process.
  p.add_option(
      '--stdin', dest='stdin', default='none',
      choices=['none', 'record', 'streaming'],
      help='How to handle stdin.  "none" disallows stdin.  "record" buffers '
           'the entire stdin stream in the fly process, then passes it '
           'to the PGI process.  "streaming" tells fly to stream its stdin '
           'to the PGI process stdin.')

  return p


def main(argv):
  """fly entry point; returns the process exit status.

  argv is sys.argv[1:]: option flags, then <action> <state-dir> [args...].
  Raises Error for usage problems and unexpected worker death.
  """
  (options, argv) = CreateOptionsParser().parse_args(argv)

  # This has to come first because it's used for handling all errors.
  global _output_writer
  if options.color == 'always':
    _output_writer = ColorOutputWriter(options.verbose)
  elif options.color == 'none':
    _output_writer = PlainOutputWriter(options.verbose)
  else:
    raise AssertionError  # unreachable: optparse 'choices' restricts --color

  try:
    action = argv[0]
    dir_name = argv[1]
  except IndexError:
    raise Error('Usage: fly <action> <state dir> [args]')

  # We need to call os.wait() to avoid zombie processes.  This is because we
  # NEVER call p.wait() on the subprocess handle, as most programs do.
  # (Instead, this fly process dies and orphans the PGI process.)
  #
  # To keep the logic simple, we don't try to notify of process death in the
  # signal handler.  This would only happen on invocation 1 because that's the
  # only time that fly is the parent of the PGI process.
  signal.signal(signal.SIGCHLD, SigchldHandler)

  # The poll thread writes to this pipe when the worker dies; Run() watches
  # the read end in its select() loop.
  exit_pipe_read_fd, exit_pipe_write_fd = os.pipe()

  if options.poll_interval:
    poll_thread = ProcessPoller(options.poll_interval, exit_pipe_write_fd)
    poll_thread.setDaemon(True)
  else:
    # NOTE(review): with --poll-interval=0, poll_thread is never assigned, so
    # the 'run' action below raises NameError -- confirm and fix.
    log('Not checking for PGI process death')

  if action == 'run':
    bundle_dir = dir_name
    m = Mutex(bundle_dir)
    m.acquire()  # TODO: can add a timeout on acquisition
    try:
      return Run(argv[2:], bundle_dir, _output_writer, exit_pipe_read_fd,
                 poll_thread)
    finally:
      # TODO: technically this could be released when stdin is done writing, so
      # another process could write on stdin.  But do that later.
      m.release()

  elif action == 'stop':
    bundle_dir = dir_name
    m = Mutex(bundle_dir)
    m.acquire()
    try:
      return Stop(bundle_dir)
    finally:
      m.release()

  elif action == 'state':
    return State(dir_name)

  else:
    raise Error('Unknown action %r' % action)

  # Other actions:
  # - Stop all processes?  Nah that should be separate, can be done with "find"
  # over state dir root.

  # Other actions:
  # - Stop all processes?  Nah that should be separate, can be done with "find"
  # over state dir root.


if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv[1:]))
  except Error, e:
    # _output_writer is assigned at the top of main(), before any Error can
    # be raised, so it is safe to use here.
    _output_writer.error(e.args[0])
    # A somewhat random 7-bit return value.  This is used to distinguish fly
    # errors from errors in the called process.
    sys.exit(121)
