#!/usr/bin/python -S
"""
stream.py

Stream abstraction.
"""

__author__ = 'Andy Chu'


import errno
import fcntl
import os
import sys

import base
import ioloop
import log


# system events
# Aliases for the event loop's bitmask constants so this module can test
# `events & READ` etc. without spelling out ioloop.IOLoop each time.
READ = ioloop.IOLoop.READ    # fd is readable
WRITE = ioloop.IOLoop.WRITE  # fd is writable
ERROR = ioloop.IOLoop.ERROR  # error / hangup on the fd


def _SetNonblocking(fd):
  fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)


# Default pipe size on Linux, and also the size Tornado uses.  Might want to
# customize this.
# Empirically, doing something like 'seq 10000 | examples/cat.py | wc' takes
# ~2200 ticks.  BUT really we should count context switches, not ticks.
_CHUNK_SIZE = 4096  # bytes per os.read() call in AbstractReadable.__call__

class AbstractReadable(base.EventEmitter):
  """Base class for nonblocking readable streams (pipes or sockets).

  On construction the fd is put in nonblocking mode and registered with
  the event loop for READ system events; the loop then invokes this object
  (via __call__) when the fd is ready, and the corresponding USER events
  ('data', 'close') are emitted.  pause()/resume() toggle registration.

  Subclasses must implement fileno().
  """
  EMITS = [
      'data',  # bytes of data
      'close',  # stream closed (by writer)
      ]

  def __init__(self, event_loop=None):
    # NOTE: fileno() must already be valid here -- subclasses that wrap an
    # object (e.g. ReadableStream) assign it before calling up.
    base.EventEmitter.__init__(self, event_loop=event_loop)
    _SetNonblocking(self.fileno())
    # Whether we are listening (i.e. registered with the event loop for READ).
    self.waiting = False
    # When event emitters created, they generally register themselves as
    # handlers with the event loop.
    self.resume()

  def fileno(self):
    # Subclasses return the OS-level file descriptor to watch.
    raise NotImplementedError

  def resume(self):
    """Register yourself with the event loop to receive SYSTEM events.

    Idempotent: calling while already registered is a no-op (the event
    loop can't register the same fd twice).
    """
    if self.waiting:
      return
    self.waiting = True
    log.trace('AbstractReadable registering %s', self.fileno())
    self.event_loop.add_handler( self.fileno(), self, ioloop.IOLoop.READ)

  def pause(self):
    """Stop receiving READ events for this fd (inverse of resume())."""
    self.event_loop.remove_handler(self.fileno())
    self.waiting = False

  def pipe(self, dest, end=True):
    """Pipe this readable stream into a writable stream.

    Implements flow control: when dest's buffer fills up (its write()
    returns False) the source is paused, and resumed again on dest's
    'drain' event -- so the source's buffer doesn't balloon.

    Args:
      dest: writable stream (AbstractWritable interface).
      end: if True (default), end/close dest when this source emits
        'end'/'close'.  Pass end=False to keep the destination open.

    Returns:
      dest, to allow unix-like chaining: A.pipe(B).pipe(C).

    Test case:

    seq 10000 | ./cat.py | slow.py

    where slow.py only reads 10 bytes/sec.
    Make sure that cat.py doesn't get ballooned.  Print its buffer size.
    """
    source = self
    def on_data(chunk):
      # Back-pressure: stop reading until dest drains its buffer.
      all_written = dest.write(chunk)
      if not all_written:
        source.pause()

    source.on('data', on_data)

    def on_drain():
      source.resume()

    dest.on('drain', on_drain)

    # NOTE: node.js has BOTH end and close events
    # one calls dest.end(), the other calls dest.destroy()
    def on_close():
      cleanup()  # remove listeners
      dest.close()

    def on_end():
      cleanup()  # remove listeners
      dest.end()

    if end:
      # When the input stream is ended, end the output stream
      source.on('end', on_end)
      source.on('close', on_close)

    def cleanup():
      """Remove all the event listeners that were added."""
      source.remove_listener('data', on_data);
      dest.remove_listener('drain', on_drain);

      #source.remove_listener('end', onend);
      if end:
        source.remove_listener('close', on_close);

      #source.remove_listener('error', onerror);
      #dest.remove_listener('error', onerror);

      source.remove_listener('end', cleanup);
      source.remove_listener('close', cleanup);

      dest.remove_listener('end', cleanup);
      dest.remove_listener('close', cleanup);

    source.on('end', cleanup)
    source.on('close', cleanup)

    dest.on('end', cleanup)
    dest.on('close', cleanup)

    #dest.emit('pipe', source);

    # Allow for unix-like usage: A.pipe(B).pipe(C)
    return dest

  def __call__(self, fd, events):
    """Called by the event loop when a SYSTEM event is received; produces
    USER events ('data', 'close')."""
    # TODO: dispatch on READ -> data, etc.

    # TODO: what about errors -- I think I have to check for broken pipe by
    # catching IOError here.  turn that into an event?

    # The fd is nonblocking, so os.read() raises EWOULDBLOCK instead of
    # blocking once the kernel buffer is drained.
    log.trace('AbstractReadable __call__  fd: %s events: %s', fd, events)

    bytes = ''

    if events & READ:
      log.trace('event: READ')
      # Hm, is there any way not to have at least 2 read() calls here?  Check
      # EWOULDBLOCK?  I think you don't need to empty the buffer here.  But if
      # you get a close() event, then you need to empty the buffer first... hm.
      while 1:
        log.trace('read %d', _CHUNK_SIZE)
        try:
          chunk = os.read(fd, _CHUNK_SIZE)
        except OSError, e:
          if e.errno == errno.EWOULDBLOCK:
            log.trace('EWOULDBLOCK')
            break
          else:
            raise
        if not chunk:
          # EOF: os.read() returned 0 bytes.
          break
        bytes += chunk
      log.trace('read %d bytes this tick', len(bytes))
      # We got some bytes; emit data event.
      # NOTE(review): this emits even when bytes is '' (immediate EOF) --
      # listeners may want to guard against empty chunks.
      self.emit('data', bytes)

    # ERROR means the pipe was closed, presumably.
    if events & ERROR:
      # Is this right?
      log.trace("ERROR event on read, removing handler for fd %s, emit 'close'",
          self.fileno())
      self.emit('close')
      self.event_loop.remove_handler(self.fileno())

    # emit close event
    #if not bytes:
    #  log.trace(
    #      'GOT 0 bytes, removing handler for %s and emitting close event',
    #      self.fileno())
    #  self.event_loop.remove_handler(self.fileno())
    #  self.emit('close')
    #  return

class Stdin(AbstractReadable):
  """Readable stream over this process's own standard input."""

  def fileno(self):
    """Return the OS file descriptor underlying sys.stdin."""
    stdin_file = sys.stdin
    return stdin_file.fileno()


class ReadableStream(AbstractReadable):
  """Readable stream over an existing Python file-like object.

  The wrapped object should be a pipe or a socket -- NOT a regular disk
  file, since the event loop can't usefully wait for I/O on a disk file.
  """

  def __init__(self, f, event_loop=None):
    # Stash the wrapped object first: the base __init__ calls fileno().
    self.f = f
    AbstractReadable.__init__(self, event_loop=event_loop)

  def fileno(self):
    """Return the wrapped object's OS file descriptor."""
    wrapped = self.f
    return wrapped.fileno()


class AbstractWritable(base.EventEmitter):
  """Events: drain, error, close, pipe
  """
  EMITS = [
      'drain',  # our (user-space) buffer was emptied (after we previously
                # returned False after write())
      ]

  def __init__(self, event_loop=None):
    base.EventEmitter.__init__(self, event_loop=event_loop)
    _SetNonblocking(self.fileno())

    self.buf = ''
    self.closed = False  # close() called?

    # When event emitters created, they generally register themselves as
    # handlers with the event loop.
    self.waiting = False  # waiting to write?
    self.needs_drain = False

  def resume(self):
    """resume notifications for the fd ready to write."""
    log.trace('registering fd %s for write', self.fileno())
    self.event_loop.add_handler(self.fileno(), self, ioloop.IOLoop.WRITE)
    self.waiting = True

  def pause(self):
    self.event_loop.remove_handler(self.fileno())
    self.waiting = False

  def fileno(self):
    raise NotImplementedError

  def _try_write(self):
    """Returns whether ALL bytes were written (to the kernel)."""
    log.trace('trying to write %d bytes', len(self.buf))
    assert self.buf

    # Attempt to write our entire buffer to the fd, no matter how big it is.
    try:
      n = os.write(self.fileno(), self.buf)
    except OSError, e:
      # this happens if you do seq 10000 | examples/cat.py | wc
      if e.errno == errno.EWOULDBLOCK:
        return False  # we have something in the buffer
      else:
        raise

    # Then remove what was written from our buffer.
    if n < len(self.buf):
      log.trace('Wrote %d out of %d', n, len(self.buf))
      self.buf = self.buf[n:]
      return False
    else:
      # Everything was written.  Reset buffer; remove handler, let application
      # know (if they registered something on 'drain').
      self.buf = ''
      # NOTE: we might not have ever resume()d!  epoll doesn't mind this though.
      self.pause()
      if self.needs_drain:  # we returned False to the user previously
        log.trace('DRAIN')
        self.emit('drain')
        self.needs_drain = False
      return True

  def __call__(self, fd, events):
    """Notified by the event loop that the fd is writeable."""

    log.trace('Writing more on next tick (%d bytes left)', len(self.buf))
    all_written = self._try_write()

    # Only close after you've emptied the buffer!
    if all_written and self.closed:
      fd = self.fileno()
      log('AbstractWritable CLOSING %d', fd)
      os.close(fd)

  def write(self, chunk):
    """
    Return whether ALL bytes were written (on this tick).  If they weren't, then
    the application may want to register a "drain" event, in order to be
    notified when 'chunk' was fully written (into the kernel).

    Internally: if we don't write all the bytes, then we register this fd for
    notification in the event loop, and on the next tick we'll write it.
    """
    if self.closed:
      # TODO: better exception?
      raise RuntimeError("Can't write to closed stream")
    assert isinstance(chunk, str)  # NOT unicode
    assert chunk, 'got chunk of length 0'

    log.trace('write() %d bytes', len(chunk))

    # Python 2.5+ all optimize this so doing it in a loop isn't quadratic.
    # It's easier to append to this buffer that to manage a list of chunks.
    self.buf += chunk
    all_written = self._try_write()
    if not all_written:
      if not self.waiting:  # can't register a fd twice!
        self.resume()
      self.needs_drain = True
    return all_written

  def close(self):
    #if self.closed:
    #  raise RuntimeError("Stream already closed")
    # If we wrote everything, then close it right here.  Otherwise, mark it as
    # closed and make sure we'll eventually call.
    if self.waiting:
      self.closed = True
    else:
      fd = self.fileno()
      log.trace('AbstractWritable close() %d', fd)
      os.close(fd)


class Stdout(AbstractWritable):
  """Writable stream over this process's own standard output."""

  def fileno(self):
    """Return the OS file descriptor underlying sys.stdout."""
    stdout_file = sys.stdout
    return stdout_file.fileno()


class WriteableStream(AbstractWritable):
  """Writable stream over an existing Python file-like object.

  The wrapped object should be a pipe or a socket.
  """

  def __init__(self, f, event_loop=None):
    # Stash the wrapped object first: the base __init__ calls fileno().
    self.f = f
    AbstractWritable.__init__(self, event_loop=event_loop)

  def fileno(self):
    """Return the wrapped object's OS file descriptor."""
    wrapped = self.f
    return wrapped.fileno()


class DiskFileStream(AbstractReadable):
  """Placeholder for a readable stream backed by a disk file (unimplemented).

  Regular files can't be waited on with epoll, so design ideas sketched here:

  - Turn the file into a pipe via a self-pipe: read the file in another
    thread and write the bytes into the pipe.
  - Or hand chunks over through a Queue.Queue.

  How many disk threads should be started -- separate reading and writing
  threads?  Two might make sense; then there can be 2 outstanding reads.

  Alternatively, just fork 'cat' and read its stdout: no GIL, runs in other
  processes (possibly a pool of cat processes) -- though any security
  implications would need thought.
  """
