#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""FUSE filesystem for App Engine.

Usage:
  aefs.py host:9999 path/to/mountpoint
"""

__author__ = 'bslatkin@gmail.com (Brett Slatkin)'

import logging
logging.basicConfig(
    level=logging.INFO, format='AEFS %(levelname)-8s] %(message)s')

import cStringIO
import collections
import errno
import hashlib
import httplib
import os
import socket
import stat
import sys
import time
import urlparse

import fuse
import simplejson

# TODO:
# - Add option for separate shared keys for reading and writing
# - Add option for using Google account login for read/write
# - Cache reads/writes to temporary disk
# - Consider having a second HTTP connection always open for uploads
# - Support HTTPS for read and write

################################################################################

DEBUG = False

if DEBUG:
  logging.getLogger().setLevel(logging.DEBUG)

# Cache file attributes locally.
ENABLE_ATTR_CACHE = True

# How many times to retry opening a connection before giving up.
MAX_CONNECTION_RETRIES = 3

# How long to wait for a connection before giving up on it, in seconds.
CONNECTION_TIMEOUT = 5

################################################################################

class LRUCache(object):
  """Least-recently used cache."""

  def __init__(self):
    """Initializer."""
    self.cache = {}

  def get(self, key):
    """Retrieves an item from the cache, None if missing."""
    return self.cache.get(key)

  def insert(self, key, value):
    """Adds an item to the cache."""
    # TODO: Make this actually an LRU
    self.cache[key] = value

  def purge(self, key):
    """Purges an item from the cache; does nothing if the key is not present."""
    if key in self.cache:
      del self.cache[key]


class AttrCache(object):
  """Caches Inode attributes."""

  def __init__(self, getattr_inode):
    """Initializer.

    Args:
      getattr_inode: Callable that takes an Inode number or path and returns an
        attribute dictionary; should raise an OSError if the Inode is unknown
        or unreachable.
    """
    self.attr_cache = LRUCache()
    self.path_cache = LRUCache()
    self.getattr_inode = getattr_inode

  def access(self, path_or_number, mode):
    """Determines if the inode or path is accessible with the given mode."""
    attr = self.getattr(path_or_number)
    if mode == os.F_OK:
      return
    mode_bits = attr['st_mode']
    if (mode_bits & mode or
        mode_bits & (mode << 3) or
        mode_bits & (mode << 6)):
      return
    raise OSError(errno.EACCES, 'Permission denied')

  def getattr(self, path_or_number):
    """Get attributes of an Inode or path."""
    # TODO: Cache paths that are missing.
    path = None
    number = None
    if isinstance(path_or_number, basestring):
      path = path_or_number
      number = self.path_cache.get(path)
    else:
      number = path_or_number

    attr = self.attr_cache.get(number)
    if attr is None:
      attr = self.getattr_inode(number or path) 
      assert attr is not None
      self.insert(path, attr)
    logging.debug('Cached access for %s: %r', path_or_number, attr)
    return attr

  def insert(self, path, attr):
    """Inserts an inode and path's attributes into the cache."""
    if not ENABLE_ATTR_CACHE:
      return
    number = attr['st_ino']
    if path is not None:
      self.path_cache.insert(path, number)
    self.attr_cache.insert(number, attr)

  def purge(self, path_or_number):
    """Purges an Inode from the cache."""
    if isinstance(path_or_number, basestring):
      number = self.path_cache.get(path_or_number)
      if number is None:
        # Unknown path being purged.
        logging.debug('Purge request for unknown path=%s', path_or_number)
      else:
        self.path_cache.purge(path_or_number)
        logging.debug('Purged %s from attribute cache', path_or_number)
    else:
      number = path_or_number
    self.attr_cache.purge(number)


class Inode(object):
  """An open Inode whose data has been cached locally."""

  def __init__(self, number, read_inode, write_inode):
    """Initializer.

    Args:
      number: This Inode's unique number.
      read_inode: Callable that takes an Inode number and returns its entire
        content as a string.
      write_inode: Callable that takes an Inode number and a data string and
        writes it to the server as the entire contents of that Inode.
    """
    self.number = number
    self.read_inode = read_inode
    self.write_inode = write_inode
    self.buffer = cStringIO.StringIO()
    self.dirty = False
    self.was_read = False  # If the cache has been populated yet.

  def flush(self):
    """Flushes this Inode to the server if needed. Returns True when writing."""
    if self.was_read and self.dirty:
      self.write_inode(self.number, self.buffer.getvalue())
      self.dirty = False

  def length(self):
    """Returns the current length of the buffer or None if not known."""
    if not self.was_read:
      return None
    buffer.seek(0, 2)
    return buffer.tell()

  def read(self, size, offset):
    """Reads data from the Inode."""
    if not self.was_read:
      self.buffer.write(self.read_inode(self.number))
      self.was_read = True
    self.buffer.seek(offset)
    return self.buffer.read(size)

  def truncate(self, size):
    """Truncates the data to the given size."""
    if size == 0:
      # Speed-up zero-truncate case, where we don't need to read the contents.
      self.was_read = True
    elif not self.was_read:
      self.read(0, 0)
    self.buffer.truncate(size)

  def write(self, data, offset):
    """Writes data into the cache."""
    if not self.was_read:
      self.read(0, 0)
    self.buffer.seek(offset)
    self.buffer.write(data)
    self.dirty = True
    return len(data)


class FdCache(object):
  """A mapping of file descriptors to cached Inodes."""

  def __init__(self):
    """Initializer."""
    self.fd_number_map = {}  # Maps FD numbers to Inode numbers
    self.inode_cache = {}  # Maps Inode numbers to Inode cache instances
    self.inode_ref_count = {}  # Maps Inode numbers to their reference count

  def get(self, fd):
    """Returns an Inode cache instance by FD number, None if missing."""
    number = self.fd_number_map.get(fd)
    if not number:
      return None
    inode = self.inode_cache.get(number)
    if not inode:
      return None
    return inode

  def get_inode(self, number):
    """Returns an Inode cache instance directly by number, None if missing."""
    return self.inode_cache.get(number)

  def insert(self, inode):
    """Inserts a new Inode cache instance."""
    logging.info('Inserting inode=%d into FdCache', inode.number)
    self.inode_cache[inode.number] = inode

  def map(self, fd, number):
    """Maps an FD number to Inode number.

    Returns:
      True if the Inode is present, False if it needs to be populated.
    """
    self.fd_number_map[fd] = number
    self.inode_ref_count[number] = self.inode_ref_count.get(number, 0) + 1
    return number in self.inode_cache

  def unmap(self, fd):
    """Unmaps an FD number to Inode number."""
    number = self.fd_number_map[fd]
    ref_count = self.inode_ref_count[number]
    ref_count -= 1
    if ref_count == 0:
      logging.info('Evicting inode=%d from FdCache', number)
      del self.inode_ref_count[number]
      if number in self.inode_cache:
        # The cached Inode may not be present if the insert never happened.
        del self.inode_cache[number]
    else:
      self.inode_ref_count[number] = ref_count


def purge_cache(method, num_args=1):
  """Decorator purges an entry in the AttrCache after the wrapped call.

  Args:
    method: The name of the method to call, or a callable function to execute
      before the purge occurs.
    num_args: Number of positional arguments that contain path names that need
      to be purged after this operation completes.
  """
  def wrapped(self, *args, **kwargs):
    try:
      if callable(method):
        return method(self, *args, **kwargs)
      else:
        return self._dispatch_rpc(method, *args, **kwargs)
    finally:
      # Purge the files and their parent directories from the cache.
      all_paths = set(args[:num_args])
      for path in list(all_paths):
        dir_path, base_path = os.path.split(path)
        all_paths.add(dir_path)
      for path in all_paths:
        self.attr_cache.purge(path)
  return wrapped


class AeFs(object):
  """App Engine file system."""

  def __init__(self, host_port):
    """Initializer.

    Args:
      host_port: String in the form 'example.com:1234'. Port is optional.
    """
    self.host_port = host_port
    self.base_path = '/fuse/'
    self.conn = None
    self.fd_next = 0  # The next FD number to hand out
    self.fd_cache = FdCache()
    self.attr_cache = AttrCache(lambda n: self._dispatch_rpc('getattr', n))

  def __call__(self, op, *args):
    """Dispatches operation hooks to this class's methods."""
    method = getattr(self, op, None)
    try:
      if not method:
        raise OSError(EFAULT, 'Could not find method "%s"' % op)
      return method(*args)
    except AssertionError:
      logging.exception('Failed assertion in %s(%s)', op, args)
      raise
    except OSError, e:
      if e.errno == errno.ENOSYS:
        logging.debug('Not implemented: %s(*%s)', op, args)
      elif e.errno in (errno.ENOENT, errno.ENODATA, errno.EEXIST):
        # The allowed errnos here should be expected by the caller as a
        # sign of success (e.g., "the file does not exist").
        logging.debug('Not found: %s(*%s)', op, args)
      else:
        logging.exception('Unexpected error')
      raise

  def __getattr__(self, name):
    """Default handler will dispatch to the server and return its value."""
    def closure(*args, **kwargs):
      return self._dispatch_rpc(name, *args, **kwargs)
    return closure

  def _dispatch_internal(self, verb, path, payload=None, headers={}):
    """Dispatches a request to the server.

    Args:
      verb: String of HTTP verb to use.
      path: Path on server to use.
      payload: If not None, the body to use for the request.
      headers: Dictionary of extra headers to send.

    Returns:
      (status_code, data) where status_code is an integer and data is a string.
    """
    for i in xrange(MAX_CONNECTION_RETRIES):
      try:
        if not self.conn:
          logging.debug('Opening connection to %s', self.host_port)
          self.conn = httplib.HTTPConnection(self.host_port)
      except (IOError, OSError, socket.error, httplib.HTTPException), e:
        logging.exception('Could not open server connection')
        raise OSError(errno.EFAULT, str(e))

      try:
        self.conn.request(verb, path, payload, headers)
        resp = self.conn.getresponse()
        status_code = resp.status
        data = resp.read()
      except (httplib.ImproperConnectionState, httplib.NotConnected), e:
        logging.debug('Connection dropped. %s: %s', e.__class__.__name__, e)
        self.conn = None
        continue  # Connection retry loop
      except httplib.HTTPException, e:
        logging.exception('Encountered HTTP exception for %s %s',
                          verb, path)
        raise OSError(errno.EFAULT, str(e))

      logging.debug('Received status_code = %s, headers = %r, data = %r',
                    status_code, resp.getheaders(), data)
      return status_code, data
    else:  # End of the for loop
      raise OSError(errno.EFAULT, 'Too many connection retries')

  def _dispatch_rpc(self, method, *args, **kwargs):
    """Dispatches an RPC to the server side with the given arguments.

    Args:
      method: Method name to invoke.
      *args: Positional arguments.

    Returns:
      Scalar, list, or dictionary, depending on the method.

    Raises:
      OSError with an errno set based on any server-side error messages.
    """
    uid, gid, unused_pid = fuse.fuse_get_context()
    payload = simplejson.dumps({
      'args': args,
      'kwargs': kwargs,
      'uid': uid,
      'gid': gid,
    })
    logging.debug('Sending %s(*%s, **%s)', method, args, kwargs)
    status_code, data = self._dispatch_internal(
        'POST', self.base_path + method, payload=payload)
    if status_code != 200:
      raise OSError(errno.EFAULT, 'Bad response status: %s' % status_code)

    result_dict = simplejson.loads(data)
    if not isinstance(result_dict, dict):
      raise OSError(errno.EFAULT, 'Server returned non-dictionary response')

    error_tuple = result_dict.get('error')
    result_value = result_dict.get('result')
    if not ('result' in result_dict or error_tuple):
      raise OSError(errno.EFAULT, 'Server response malformed')

    if not DEBUG:
      logging.info('%s(*%s, **%s) -> %s', method, args, kwargs,
                   error_tuple or result_value)

    if error_tuple:
      if (not isinstance(error_tuple, list) or
          len(error_tuple) != 2 or
          not isinstance(error_tuple[0], (int, long)) or
          not isinstance(error_tuple[1], basestring)):
        raise OSError(errno.EFAULT, 'Malformed error returned by server')
      raise OSError(*error_tuple)

    return result_value

  def _read_inode(self, number):
    """Reads an Inode number, returning its entire contents as a string."""
    attr = self.attr_cache.getattr(number)
    if attr['st_size'] == 0:
      # Don't bother downloading zero-sized entries, which happens a lot
      # after mknod() calls initially create a file.
      logging.info('Skipping fetch of zero-sized inode=%d', number)
      return ''

    logging.info('Fetching content for inode=%d', number)
    status_code, data = self._dispatch_internal(
      'GET', '%sread?inode=%d' % (self.base_path, number))
    if status_code == 404:
      raise OSError(errno.ENOENT, 'Inode %d returned status %d' %
                    (number, status_code))
    elif status_code != 200:
      raise OSError(errno.EFAULT, 'Inode %d returned error status %d' %
                    (number, status_code))
    return data

  def _write_inode(self, number, data):
    """Writes the data of an entire Inode from a string."""
    upload_url = self._dispatch_rpc('get_blob_uploadurl')
    parts = urlparse.urlsplit(upload_url)
    relative_path = parts.path
    host_port = parts.netloc

    boundary = hashlib.sha1(str(time.time())).hexdigest()
    headers = {
      'Content-Type': 'multipart/form-data; boundary="%s"' % boundary,
    }
    payload = UPLOAD_PAYLOAD % {
      'boundary': boundary,
      'base_path': 'unknown',
      'inode': number,
      'file_data': data,
    }

    # Clear the connection so we can override the host/port
    self.conn = None
    old_host_port = self.host_port
    self.host_port = host_port
    logging.info('Uploading %d bytes for inode=%d', len(data), number)
    try:
      status_code, data = self._dispatch_internal(
          'POST', relative_path, payload=payload, headers=headers)
    finally:
      self.conn = None
      self.host_port = old_host_port

    # Always purge the stat cache after writes!
    self.attr_cache.purge(number)

    if status_code != 303:
      # TODO: Hack for now 303 means success, 302 means error.
      raise OSError(errno.EFAULT, 'Bad status on write: %d' % status_code)

  ### Fuse plug-in life-cycle
  def init(self, path):
    """Set up the root path for this mount."""
    socket.setdefaulttimeout(CONNECTION_TIMEOUT)
    try:
      stat_dict = self._dispatch_rpc('getattr', '/')
    except OSError, e:
      if e.errno == errno.ENOENT:
        self._dispatch_rpc('mkdir', '/', 0777)
    else:
      if not stat.S_ISDIR(stat_dict['st_mode']):
        raise OSError(errno.ENOTDIR, 'Not a directory: %s' % path)
    logging.info('Mounted %s on %s', sys.argv[1], sys.argv[2])

  def destroy(self, path):
    """Destroys this module when the filesystem is umounted."""
    if self.conn:
      self.conn.close()

  ### FUSE functions that are unused or unimplemented.
  def not_implemented(self, *args):
    """Method is not implemented."""
    raise OSError(errno.ENOSYS, 'Not implemented')

  lock = not_implemented

  def releasedir(self, path, fd):
    """Release a directory that was opened before."""
    pass

  ### FUSE functions that do pass through but update local caches.
  chmod = purge_cache('chmod')
  chown = purge_cache('chown')
  link = purge_cache('link', num_args=2)
  mkdir = purge_cache('mkdir')
  mknod = purge_cache('mknod')
  rmdir = purge_cache('rmdir')
  rename = purge_cache('rename', num_args=2)
  symlink = purge_cache('symlink', num_args=2)
  unlink = purge_cache('unlink')
  utimens = purge_cache('utimens')

  ### FUSE functions that do not use default behavior.
  def access(self, path, mode):
    """Checks access restrictions on a path."""
    self.attr_cache.access(path, mode)

  def create(self, path, mode):
    """Creates a file for writing, potentially overwriting it."""
    return self.open(path, os.O_CREAT | os.O_TRUNC, mode=mode)

  def fgetattr(self, path, fd):
    """Get an attribute for an open file."""
    inode = self.fd_cache.get(fd)
    assert inode
    return self.attr_cache.getattr(inode.number)

  def flush(self, path, fd):
    """Flushes a file handle."""
    inode = self.fd_cache.get(fd)
    if not inode:
      return
    else:
      inode.flush()

  def fsync(self, path, datasync, fd):
    """Flushes a file handle to disk (in this FS it's equivalent to flush)."""
    self.flush(path, fd)

  def ftruncate(self, path, length, fd):
    """Truncate an open file."""
    inode = self.fd_cache.get(fd)
    assert inode
    inode.truncate(length)

  def getattr(self, path, unused=None):
    """Gets a path or Inode's attributes."""
    return self.attr_cache.getattr(path)

  @purge_cache
  def open(self, *args, **kwargs):
    """Open a new file descriptor."""
    number = self._dispatch_rpc('open_path', *args, **kwargs)
    fd = self.fd_next
    self.fd_next += 1
    if not self.fd_cache.map(fd, number):
      inode = Inode(number, self._read_inode, self._write_inode)
      self.fd_cache.insert(inode)
    return fd

  def release(self, path, fd):
    """Release a file-handle that was opened before."""
    self.fd_cache.unmap(fd)

  def read(self, path, size, offset, fd):
    """Read bytes from a file descriptor."""
    inode = self.fd_cache.get(fd)
    assert inode
    logging.info('read(%r, %d, %d, inode=%d)', path, size, offset, inode.number)
    return inode.read(size, offset)

  def readdir(self, path, *args):
    """Reads the contents of a directory."""
    results = self._dispatch_rpc('readdir', path, *args)
    for name, stat_dict, offset in results:
      self.attr_cache.insert(os.path.join(path, name), stat_dict)
    return results

  @purge_cache
  def truncate(self, path, length, fd=None):
    """Truncates a path on disk."""
    logging.info('truncate(%r, %d)', path, length)
    number = self._dispatch_rpc('open_path', path, 0)
    inode = self.fd_cache.get_inode(number)
    if inode is None and length == 0:
      # Optimized case; can truncate server-side.
      self._dispatch_rpc('truncate', path, length)
    else:
      fd = self.open(path, 0)
      try:
        self.ftruncate(None, length, fd)
        self.flush(None, fd)
      finally:
        self.release(None, fd)

  @purge_cache
  def write(self, path, data, offset, fd):
    """Write bytes to an open file descriptor."""
    inode = self.fd_cache.get(fd)
    assert inode

    # Depending on the default write block size on your system this call
    # can get _very_ chatty.
    logging_func = logging.debug
    if offset == 0:
      logging_func = logging.info
    logging_func('write(%r, %d, offset=%d, inode=%d)',
                 path, len(data), offset, inode.number)
    return inode.write(data, offset)

################################################################################
# Constant templates

"""Content-Type: multipart/form-data; boundary="%(boundary)s"
Content-Length: %(content_length)s
"""

UPLOAD_PAYLOAD = \
"""--%(boundary)s\r
Content-Disposition: form-data; name="inode"\r
\r
%(inode)s\r
--%(boundary)s\r
Content-Disposition: form-data; name="file"; filename="%(base_path)s"\r
Content-Type: application/octet-stream\r
\r
%(file_data)s\r
--%(boundary)s--"""

################################################################################

def main(argv):
  # TODO: Better parameter sanitizing.
  command = fuse.FUSE(AeFs(argv[1]), argv[2], foreground=True, nothreads=True)


if __name__ == '__main__':
  main(sys.argv)
