#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""FUSE filesystem backend on App Engine.

                 +---------+
                 |  Inode  |
                 +---------+
                     /'\
                      |
                      |
           +----------+------------+
           |                       |
    +------+-------+      +--------+-------+
    | DeletedInode |      | AttributeEntry |
    +--------------+      +----------------+

Each file blob has a corresponding Inode. The Inode has N associated base paths
("foo.txt") and dir paths ("/bar/meep/") to indicate where the file is hard
linked. This gives a simple query to find a file by name (WHERE dir_path = ...
AND base_path = ...) and makes it easy to recursively query for existence of
files beneath a path (WHERE dir_path >= ...). Inode numbers are auto-assigned
by the Datastore.

When blobs are overwritten, the old blob reference is preserved transactionally
in a DeletedInode entry as a child of the Inode. These DeletedInode entities
are garbage collected later, and could possibly be restored. A special
DeletedInode child entity (with the name "deleted") acts as a tombstone
for Inodes that have had all path mounts removed but still may be accessible
by Inode number; this is necessary for Inode resurrection cases, where a file
is opened, then unlinked, then relinked to a new location.

Each Inode has a single set of permissions, owner, and group. Inodes have one or
more child AttributeEntry for each extended attribute associated with them.
Attributes are updated transactionally under the Inode and are shared across all
hard links.
"""

__author__ = 'bslatkin@gmail.com (Brett Slatkin)'

import cgi
import datetime
import errno
import itertools
import logging
import os
import simplejson
import stat
import time

from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util

################################################################################
# TODOs
# - P1: Properly translate errno and open flags across mac/linux constants
# - P1: Add datastore error handling
# - P1: Disallow chmod/chown/writes/reads based on uid/gid
# - P2: Add support for different mountpoints and namespaces (for shared mounts)
# - P2: Support file locking
# - P3: Add option for random-access block-based blobs using an extended attribute
# - P4: Allow for extended attributes more than 1MB in size.

################################################################################
# Config

# When True, FuseEndpoint raises the log level to DEBUG per-request and echoes
# request/response payloads to the logs.
DEBUG = False

# Maximum length of a path in characters.
MAX_PATH_LENGTH = 500

# Number of entries in a directory to return at a time when listing contents.
DIRECTORY_CHUNK_SIZE = 200

# How many DeletedInode instances to garbage collect at a time.
GARBAGE_COLLECTION_CHUNK_SIZE = 100

# How old DeletedInode instances should be before being cleaned up.
# NOTE(review): 1 second is aggressive; presumably tuned low for testing.
GARBAGE_COLLECTION_AGE_SECONDS = 1

################################################################################
# Helpers

def make_stamp(date_time):
  """Converts a datetime.datetime to a UNIX timestamp.

  The (naive) datetime is interpreted in local time, which matches the
  datetime.datetime.now() values stored on Inodes elsewhere in this module.

  Args:
    date_time: Naive datetime.datetime instance.

  Returns:
    Integer seconds since the epoch. int() replaces the Python-2-only
    long(); Python 2 auto-promotes to long when the value requires it, so
    behavior is unchanged while the helper stays portable.
  """
  return int(time.mktime(date_time.timetuple()))

################################################################################
# Models

class Inode(db.Model):
  """Represents an Inode on disk.

  ID of the entity is auto-assigned for uniqueness.
  """

  # Parallel lists forming (dir_path[i], base_path[i]) pairs, one per
  # hardlink of the file. dir_path is the containing directory (used for
  # directory listings); base_path is the file's name within that directory.
  dir_path = db.StringListProperty()
  base_path = db.StringListProperty()

  # UNIX stat bits for the file.
  mode = db.IntegerProperty(required=True, indexed=False)

  # Owning user and group IDs of the file.
  uid = db.IntegerProperty(required=True, indexed=False)
  gid = db.IntegerProperty(required=True, indexed=False)

  # Last modified time.
  mtime = db.DateTimeProperty(required=True, indexed=False)

  # Link/unlink/Inode change times.
  ctime = db.DateTimeProperty(auto_now=True, indexed=False)

  # How big the file is, including all blocks.
  size = db.IntegerProperty(default=0, indexed=False)

  # BlobKeys that comprise the file. For now this is just one. Keep these
  # properties indexed so we can do proper garbage collection.
  blobs = db.ListProperty(blobstore.BlobKey)

  # If this Inode is a symlink, the value it points to (not necessarily
  # a path, since apps often put stupid stuff in symlinks).
  target = db.StringProperty(indexed=False)

  def number(self):
    """Returns the Inode number of this inode."""
    return self.key().id()

  def find_link_index(self, path):
    """Finds the Inode tuple index of the given link path, -1 if missing."""
    wanted_dir, wanted_base = os.path.split(path)
    link_pairs = zip(self.dir_path, self.base_path)
    for index, (link_dir, link_base) in enumerate(link_pairs):
      if link_dir == wanted_dir and link_base == wanted_base:
        return index
    return -1

  def stat(self):
    """Returns a dictionary of 'stat' attributes for this Inode."""
    # Access times are not tracked, so st_atime mirrors st_mtime.
    modified_stamp = make_stamp(self.mtime)
    return {
      'st_mode': self.mode,
      'st_ino': self.number(),
      'st_dev': 0,
      'st_nlink': len(self.dir_path),
      'st_uid': self.uid,
      'st_gid': self.gid,
      'st_size': self.size,
      'st_atime': modified_stamp,
      'st_mtime': modified_stamp,
      'st_ctime': make_stamp(self.ctime),
    }


class DeletedInode(db.Model):
  """Marker indicating an inode that has been deleted.

  In the same entity group as the Inode for transactionality. Has the key name
  of KEY_NAME if deleted by unlinking. Otherwise, may have an ID in the case
  the blob was truncated one or more times.

  Instances are reaped by the GarbageCollector cron handler, which deletes the
  referenced blobs (and the parent Inode too, when the key name is KEY_NAME).
  """

  # Reserved key-name for when an Inode has been deleted by having all hard
  # links removed. Acts as a tombstone: get_inode_or_die treats an Inode with
  # this child as nonexistent unless allow_deleted=True.
  KEY_NAME = 'deleted'

  # Last path the file had before deletion. Assumes that deletion means
  # hardlinks count goes to zero.
  path = db.StringProperty()

  # BlobKeys that comprise the file that should be deleted.
  blobs = db.ListProperty(blobstore.BlobKey)

  # Set to the time when the file was deleted. Used to ensure that garbage
  # collection only happens for files beyond a certain age.
  delete_time = db.DateTimeProperty(auto_now_add=True)


class AttributeEntry(db.Model):
  """Extended attribute of an Inode.

  Key name is the name of the attribute. Will always be a child of the
  Inode model that has the attribute, so attribute updates can run in the
  Inode's entity group transaction (see setxattr).
  """

  # Flags from <sys/xattr.h>; clients pass these through setxattr's 'flags'
  # argument to request create-only or replace-only semantics.
  XATTR_NOFOLLOW = 0x0001
  XATTR_CREATE = 0x0002
  XATTR_REPLACE = 0x0004

  # The data stored in the attribute. TextProperty caps this near 1MB
  # (see module TODO P4).
  value = db.TextProperty()

################################################################################
# Base operation functionality

class VfsOp(object):
  """Base class and helpers for VFS operations."""

  def __init__(self, uid, gid):
    """Initializer.

    Args:
      uid: UID this operation should run as.
      gid: GID this operation should run as.
    """
    self.args = ()
    self.kwargs = {}
    self.uid = uid
    self.gid = gid

  def name(self):
    """Returns the name of this operation."""
    return self.__class__.__name__

  def __call__(self, *args, **kwargs):
    """Run the method, potentially adding debug tracing, etc."""
    self.args = args
    self.kwargs = kwargs
    logging.debug('%s(*args=%r, **kwargs=%r) Entering',
                  self.name(), self.args, self.kwargs)
    try:
      try:
        return self.run(*args, **kwargs)
      except AssertionError:
        logging.exception('%s(*args=%r, **kwargs=%r) Assertion failed',
                          self.name(), self.args, self.kwargs)
        self.invalid_request()
      except OSError, e:
        logging.debug('Encountered OSError(%s)', e.args)
        raise
      except:
        logging.exception('%s(*args=%r, **kwargs=%r) Error',
                          self.name(), self.args, self.kwargs)
        raise
    finally:
      logging.debug('%s(*args=%r, **kwargs=%r) Exiting',
                    self.name(), self.args, self.kwargs)

  def assert_path_missing(self, path):
    """Asserts that a path does not exist, raising an EEXIST if it does."""
    try:
      old_inode = self.get_inode_or_die(path)
    except OSError, e:
      if e.errno != errno.ENOENT:
        raise
    else:
      raise OSError(errno.EEXIST, 'File exists: %s' % path)

  def delegate(self, op_class, *args, **kwargs):
    """Delegates one VfsOp to another."""
    op = op_class(self.uid, self.gid)
    return op(*args, **kwargs)

  def get_inode_or_die(self, path_or_number, allow_deleted=False):
    """Retrieves an Inode by path or number, without any access restriction.

    Raises ENOENT if the file is not present.
    """
    original_path_or_number = path_or_number
    was_inode_lookup = isinstance(path_or_number, (int, long))
    if was_inode_lookup:
      inode = Inode.get_by_id(path_or_number)
    elif len(path_or_number) > MAX_PATH_LENGTH:
      raise OSError(errno.ENAMETOOLONG,
                    'Max path length is %d' % MAX_PATH_LENGTH)

    logging.debug('get_inode_or_die(%s) Entering', path_or_number)
    try:
      logging.debug('Resolving Inode(%s)', path_or_number)

      # Fetch the target Inode if not already found by its number previously.
      if not was_inode_lookup:
        directory, base = os.path.split(path_or_number)
        query = (
            Inode.all()
            .filter('dir_path =', directory)
            .filter('base_path =', base))

        # Note: We could find a matching file that has the same base_name in
        # another directory and still match this query. We need to actually grab
        # N inodes here and then look to see which one has the matching tuple in
        # its list properties.
        inode = None
        for result in query:
          for found_dir, found_base in zip(result.dir_path, result.base_path):
            if found_dir == directory and found_base == base:
              inode = result
              break

      if not inode:
        logging.debug('get_inode_or_die(%s) Inode not found', path_or_number)
        raise OSError(errno.ENOENT, 'Matching Inode not found')
      elif not allow_deleted:
        # Look for deletion records, which override existence.
        deleted = DeletedInode.get_by_key_name(
            DeletedInode.KEY_NAME, parent=inode.key())
        if deleted:
          logging.debug('get_inode_or_die(%s) Inode = %s; DeletedInode = %s',
                        path_or_number, inode, deleted)
          raise OSError(
              errno.ENOENT, 'Inode %d already deleted' % inode.number())

      logging.debug('get_inode_or_die(%s) Inode = %s',
                    path_or_number, inode)
      return inode
    finally:
      logging.debug('get_inode_or_die(%s) Exiting', path_or_number)

  def invalid_request(self):
    """Request arguments were invalid."""
    raise OSError(
        errno.EINVAL,
        'Arguments invalid: %s(*args=%r, **kwargs=%r)' %
            (self.name(), self.args, self.kwargs))

  def update_link(self, path, new_path=None, delete_old=True):
    """Updates a path link for an inode.

    When called with no arguments, this is unlink().
    When called with a new path, this is rename().
    When called with a new path and delete_old=False, this is link().

    Args:
      path: The current path of the file; must exist.
      new_path: The new path to add for the file. May be none if being unlinked.
      delete_old: The old 'path' argument should be removed from the Inode,
        potentially garbage collecting the file.
    """
    original_inode = self.get_inode_or_die(path)
    number = original_inode.number()

    def txn():
      inode = Inode.get_by_id(number)
      if not inode:
        raise OSError(errno.ENOENT, 'Inode does not exist: %d' % number)

      # How many hardlinks we had at the beginning
      original_hard_links = len(inode.dir_path)

      # Unlink the old file.
      if delete_old:
        old_index = inode.find_link_index(path)
        if old_index == -1:
          raise OSError(
              errno.ENOENT, 'Path unlinked during transaction: %s' % path)
        else:
          del inode.dir_path[old_index]
          del inode.base_path[old_index]

      # Add a new link.
      if new_path:
        new_dir_path, new_base_path = os.path.split(new_path)
        # TODO: Add an assertion for de-duping on the server side, even though
        # FUSE should de-dupe links client-side.
        inode.dir_path.append(new_dir_path)
        inode.base_path.append(new_base_path)

      if not inode.dir_path:
        # Last unlink of the file, mark it for future cleanup.
        deleted_inode = DeletedInode(
            parent=inode.key(),
            key_name=DeletedInode.KEY_NAME,
            path=path,
            blobs=inode.blobs)
        deleted_inode.put()
      elif original_hard_links == 0:
        # File has been resurrected, so delete any garbage collection markers.
        # Blind delete will not report error status on failure.
        db.delete(db.Key.from_path(
            DeletedInode.kind(), DeletedInode.KEY_NAME,
            parent=inode.key()))

      inode.mtime = datetime.datetime.now()
      inode.put()

    db.run_in_transaction(txn)

  def run(self, *args, **kwargs):
    raise NotImplementedError(
        '%s.run was not implemented' % self.__class__.__name__)

################################################################################
# VFS operations

class chmod(VfsOp):
  """Change permission bits for a file."""

  def run(self, path, mode):
    original_inode = self.get_inode_or_die(path)
    number = original_inode.number()

    def txn():
      inode = Inode.get_by_id(number)
      if not inode:
        raise OSError(errno.ENOENT, 'Inode does not exist: %d' % number)
      inode.mode &= ~07777  # Clear permission bits.
      inode.mode |= (07777 & mode)  # Add permission bits in allowed range.
      inode.mtime = datetime.datetime.now()
      inode.put()
    db.run_in_transaction(txn)


class chown(VfsOp):
  """Change user and group owner of a file."""

  def run(self, path, uid, gid):
    # Look up the Inode number first; the transaction re-fetches by number.
    number = self.get_inode_or_die(path).number()

    def txn():
      inode = Inode.get_by_id(number)
      if inode is None:
        raise OSError(errno.ENOENT, 'Inode does not exist: %d' % number)
      inode.uid = uid
      inode.gid = gid
      inode.mtime = datetime.datetime.now()
      inode.put()

    db.run_in_transaction(txn)


class getattr(VfsOp):
  """Returns the attribute bits of a path.

  NOTE: The class name deliberately shadows the getattr builtin at module
  scope because FuseEndpoint dispatches RPC method names via globals().
  """

  def run(self, path_or_number):
    return self.get_inode_or_die(path_or_number).stat()


class getxattr(VfsOp):
  """Retrieves the value of an extended attribute with the given key."""

  def run(self, path, key, position=0):
    """Returns the attribute's value, or raises if the attribute is absent."""
    inode = self.get_inode_or_die(path)
    attr = AttributeEntry.get_by_key_name(key, parent=inode.key())
    if not attr:
      # errno.ENODATA is not defined on every platform (e.g. Mac OS exposes
      # ENOATTR instead), so referencing it directly could crash with
      # AttributeError. Look it up defensively. The getattr builtin cannot
      # be used here because the getattr VfsOp class shadows it at module
      # scope, hence the __dict__ lookup.
      missing_errno = errno.__dict__.get(
          'ENODATA', errno.__dict__.get('ENOATTR', errno.ENOENT))
      raise OSError(
          missing_errno, 'Attribute "%s" does not exist for path %s' %
          (key, path))
    return attr.value


class get_blobkey(VfsOp):
  """Finds a BlobKey header given an Inode."""

  def run(self, number):
    inode = self.get_inode_or_die(number)
    if inode.size == 0:
      # Zero-size files have no backing blob at all.
      return None
    # Only a single backing blob is supported for now (see module TODOs);
    # AssertionError is translated to EINVAL by VfsOp.__call__.
    assert len(inode.blobs) == 1
    return inode.blobs[0]


class get_blob_uploadurl(VfsOp):
  """Returns a URL to use for uploading a blob and write to an Inode."""

  def run(self):
    # The Blobstore upload-complete callback lands on WriteEndpoint, which
    # is routed at /fuse/write.
    return blobstore.create_upload_url('/fuse/write')


class link(VfsOp):
  """Adds a hardlink for a file at the given path."""

  def run(self, path, target):
    # The new link location must be vacant; the target file must exist.
    self.assert_path_missing(path)
    self.update_link(target, new_path=path, delete_old=False)


class listxattr(VfsOp):
  """Lists all extended attributes associated with a path."""

  def run(self, path):
    inode = self.get_inode_or_die(path)
    # Attribute names are the key names of the AttributeEntry children.
    names = []
    for entry_key in AttributeEntry.all(keys_only=True).ancestor(inode):
      names.append(entry_key.name())
    return names


class mkdir(VfsOp):
  """Makes a new directory at the given path."""

  def run(self, path, mode):
    if len(path) > MAX_PATH_LENGTH:
      raise OSError(errno.ENAMETOOLONG,
                    'Max path length is %d' % MAX_PATH_LENGTH)
    self.assert_path_missing(path)
    parent_dir, name = os.path.split(path)
    # Directories are just Inodes with the S_IFDIR type bit set.
    inode = Inode(dir_path=[parent_dir],
                  base_path=[name],
                  mode=mode | stat.S_IFDIR,
                  mtime=datetime.datetime.now(),
                  uid=self.uid,
                  gid=self.gid)
    inode.put()


class mknod(VfsOp):
  """Creates a new inode that's unlinked."""

  def run(self, path, mode, unused_device=None):
    # Exclusive-create semantics are exactly open_path with O_CREAT|O_EXCL.
    create_flags = os.O_CREAT | os.O_EXCL
    return self.delegate(open_path, path, create_flags, mode)


class opendir(VfsOp):
  """Retrieve an Inode number for a directory."""

  def run(self, path):
    return self.get_inode_or_die(path).number()


class open_path(VfsOp):
  """Opens a path and determines its inode number."""

  def run(self, path, flags, mode=0):
    if len(path) > MAX_PATH_LENGTH:
      raise OSError(errno.ENAMETOOLONG,
                    'Max path length is %d' % MAX_PATH_LENGTH)
    try:
      inode = self.get_inode_or_die(path)
      if flags & os.O_EXCL and flags & os.O_CREAT:
        raise OSError(errno.EEXIST, 'Path %s already exists' % path)
    except OSError, e:
      if flags & os.O_CREAT and e.errno == errno.ENOENT:
        dir_path, base_path = os.path.split(path)
        mode |= stat.S_IFREG
        inode = Inode(
            dir_path=[dir_path],
            base_path=[base_path],
            mode=mode,
            mtime=datetime.datetime.now(),
            uid=self.uid,
            gid=self.gid)
        inode.put()
      else:
        raise

    # If zero-length truncation should happen, do it now.
    if flags & os.O_TRUNC:
      self.delegate(truncate, path, 0)

    return inode.number()


class readdir(VfsOp):
  """Reads a directory, starting after an optional offset key prefix."""

  def run(self, path, unused_offset):
    """Lists directory entries for 'path'.

    Returns:
      A list of (name, stat_dict, 0) tuples including '.' and '..'. The
      offset argument is currently ignored; the whole listing is returned.
    """
    results = []  # (path, stat_dict, 0) tuples

    # Include the current and parent Inode entries in the results.
    directory_inode = self.get_inode_or_die(path)
    results.append(('.', directory_inode.stat(), 0))
    if path == '/':
      # Root nodes have '..' entries pointing to themselves.
      results.append(('..', directory_inode.stat(), 0))
    else:
      parent, current = os.path.split(path)
      parent_inode = self.get_inode_or_die(parent)
      results.append(('..', parent_inode.stat(), 0))

    # Page through all Inodes linked into this directory, keyed on Inode
    # number so each page resumes strictly after the previous page's last key.
    last_inode = None
    while True:
      query = Inode.all().filter('dir_path =', path)
      if last_inode:
        query.filter('__key__ >', db.Key.from_path(Inode.kind(), last_inode))
      query.order('__key__')
      inode_list = query.fetch(DIRECTORY_CHUNK_SIZE)
      if inode_list:
        last_inode = inode_list[-1].number()
      else:
        break

      # Inodes may be mounted in multiple places, so traverse its list
      # of link points to find the actual base-names that match.
      for inode in inode_list:
        for match_dir, match_base in itertools.izip(
            inode.dir_path, inode.base_path):
          if match_dir == path and match_base:
            results.append((match_base, inode.stat(), 0))
            # Don't break; there may be multiple matches for one Inode.

    return results


class readlink(VfsOp):
  """Returns the target path of the symlink at the path."""

  def run(self, path):
    inode = self.get_inode_or_die(path)
    # Calling readlink on a non-symlink is a caller error; the
    # AssertionError becomes EINVAL in VfsOp.__call__.
    assert stat.S_ISLNK(inode.mode)
    return inode.target


class rmdir(VfsOp):
  """Deletes a directory if it's already empty."""

  def run(self, path):
    # Normalized path with a trailing slash, used as the exclusive lower
    # bound when range-scanning for entries nested below this directory.
    adjusted_path = unicode(path)
    if not path.endswith('/'):
      adjusted_path += u'/'

    inode = self.get_inode_or_die(path)
    if not stat.S_ISDIR(inode.mode):
      raise OSError(errno.ENOTDIR, 'Not a directory: %s' % path)

    # Check for immediate children.
    children = (
      Inode.all()
      .filter('dir_path =', path)
      .fetch(2))
    empty = len(children) == 0

    # Only when no immediate children exist, also check for entries nested
    # deeper below this directory. The previous code tested `if not empty`,
    # which re-evaluated emptiness on the subdirectory scan and allowed a
    # directory with immediate children (but no deeper descendants) to be
    # removed while non-empty.
    if empty:
      # u'\ufffd' is the conventional Datastore upper bound for prefix range
      # queries. The previous literal u'\xEF\xBF\xBD' was the UTF-8 byte
      # sequence of U+FFFD mis-written as three code points, which sorts too
      # low and missed descendants with high-codepoint names.
      subdir_children = (
          Inode.all()
          .filter('dir_path >', adjusted_path)
          .filter('dir_path <', adjusted_path + u'\ufffd')
          .fetch(2))
      empty = len(subdir_children) == 0

    if not empty:
      raise OSError(errno.ENOTEMPTY, 'Directory not empty: %s' % path)

    self.update_link(path)


class symlink(VfsOp):
  """Symlinks a target path to another location."""

  def run(self, path, target):
    if len(path) > MAX_PATH_LENGTH or len(target) > MAX_PATH_LENGTH:
      raise OSError(errno.ENAMETOOLONG,
                    'Max path length is %d' % MAX_PATH_LENGTH)
    self.assert_path_missing(path)
    dir_path, base_path = os.path.split(path)
    inode = Inode(
        dir_path=[dir_path],
        base_path=[base_path],
        mode=stat.S_IFLNK | 0777,
        target=target,
        mtime=datetime.datetime.now(),
        uid=self.uid,
        gid=self.gid)
    inode.put()


class rename(VfsOp):
  """Moves a file to another location."""

  def run(self, old, new):
    for candidate in (old, new):
      if len(candidate) > MAX_PATH_LENGTH:
        raise OSError(errno.ENAMETOOLONG,
                      'Max path length is %d' % MAX_PATH_LENGTH)
    # The destination must be vacant; moving is a link swap on the Inode.
    self.assert_path_missing(new)
    self.update_link(old, new)


class setxattr(VfsOp):
  """Sets an extended attribute for a path."""

  def run(self, path, key, value, flags, position=0):
    """Creates or replaces attribute 'key', honoring XATTR_CREATE/REPLACE."""
    original_inode = self.get_inode_or_die(path)
    number = original_inode.number()

    def txn():
      inode = Inode.get_by_id(number)
      if not inode:
        raise OSError(errno.ENOENT, 'Inode does not exist: %d' % number)

      attr = AttributeEntry.get_by_key_name(key, parent=inode.key())
      if flags & AttributeEntry.XATTR_CREATE and attr:
        raise OSError(errno.EEXIST, 'Attribute "%s" exists for path %s' %
                      (key, path))
      elif flags & AttributeEntry.XATTR_REPLACE and not attr:
        # errno.ENOATTR only exists on some platforms (e.g. BSD/Mac); on
        # Linux the attribute reference itself raised AttributeError. Look
        # the constant up defensively, falling back to ENODATA (Linux's
        # missing-attribute errno) and finally ENOENT.
        missing_errno = errno.__dict__.get(
            'ENOATTR', errno.__dict__.get('ENODATA', errno.ENOENT))
        raise OSError(
            missing_errno, 'Attribute "%s" does not exist for path %s' %
            (key, path))
      elif not attr:
        attr = AttributeEntry(key_name=key, parent=inode.key())

      attr.value = db.Text(value)
      attr.put()

    db.run_in_transaction(txn)


class statfs(VfsOp):
  """Returns the filesystem stats."""

  def run(self, path):
    # TODO: Use some actual quota accounting here.
    return {
        'f_bsize': 512,
        'f_blocks': 1048576,
        'f_bavail': 1048576,
    }


class truncate(VfsOp):
  """Truncates a file to a given length."""

  def run(self, path_or_number, length):
    """Truncates the file; only length == 0 is supported.

    A non-zero length asserts, which VfsOp.__call__ converts into EINVAL.
    """
    original_inode = self.get_inode_or_die(path_or_number)
    number = original_inode.number()

    def txn():
      inode = Inode.get_by_id(number)
      if not inode:
        raise OSError(errno.ENOENT, 'Inode does not exist: %d' % number)
      assert length == 0
      # Special case: the blob has been deleted by being truncated to zero.
      # No keyname on the DeletedInode entry because we do not actually want
      # to delete the inode, only delete the blob that was referenced before.
      # Guard on inode.blobs (matching write_blob) so truncating an already
      # empty file -- e.g. open() with O_TRUNC right after creation -- does
      # not churn out DeletedInode entities with nothing to collect.
      if inode.blobs:
        deleted_inode = DeletedInode(
            parent=inode.key(),
            blobs=inode.blobs)
        if isinstance(path_or_number, basestring):
          deleted_inode.path = path_or_number
        deleted_inode.put()
      inode.blobs = []
      inode.size = 0
      inode.mtime = datetime.datetime.now()
      inode.put()

    db.run_in_transaction(txn)


class write_blob(VfsOp):
  """Writes a blob to an Inode."""

  def run(self, number, size, blobkeys):
    def txn():
      inode = Inode.get_by_id(number)
      if inode is None:
        raise OSError(errno.ENOENT, 'Inode does not exist: %d' % number)

      # Any blobs being displaced are queued for garbage collection.
      displaced = inode.blobs
      if displaced:
        DeletedInode(parent=inode.key(), blobs=displaced).put()

      # Swap in the freshly uploaded content.
      inode.blobs = blobkeys
      inode.size = size
      inode.mtime = datetime.datetime.now()
      inode.put()

    db.run_in_transaction(txn)


class unlink(VfsOp):
  """Unlink a hardlink, potentially marking the file for garbage collection."""

  def run(self, path):
    # update_link with no new path drops this link; removing the last link
    # writes the DeletedInode tombstone for the garbage collector.
    self.update_link(path)


class utimens(VfsOp):
  """Sets the accessed and modified time for a file.

  Only the modified time is persisted; Inode.stat() reports st_atime from
  the stored mtime.
  """

  def run(self, path, times):
    """Sets timestamps.

    Args:
      path: Path of the file to touch.
      times: (atime, mtime) pair of UNIX timestamps; atime is ignored.
    """
    original_inode = self.get_inode_or_die(path)
    number = original_inode.number()
    # Convert in local time to mirror make_stamp(), which reads timestamps
    # back out with time.mktime() on a local timetuple (and matches the
    # datetime.datetime.now() values stored elsewhere). The previous
    # utcfromtimestamp() call skewed stored mtimes by the UTC offset.
    mtime_dt = datetime.datetime.fromtimestamp(times[1])

    def txn():
      inode = Inode.get_by_id(number)
      if not inode:
        raise OSError(errno.ENOENT, 'Inode does not exist: %d' % number)
      inode.mtime = mtime_dt
      inode.put()

    db.run_in_transaction(txn)

################################################################################
# FUSE RPCs

class FuseEndpoint(webapp.RequestHandler):
  """Receives JSON-RPCs from AeFs."""

  def post(self, method):
    if DEBUG:
      logging.getLogger().setLevel(logging.DEBUG)
      logging.debug('Received Fuse.%s(%s)', method, self.request.body)
    try:
      try:
        payload = simplejson.loads(self.request.body)
        if not isinstance(payload, dict):
          raise OSError(errno.EFAULT, 'Request was not a dictionary')

        args = payload.get('args')
        kwargs = payload.get('kwargs')
        uid = payload.get('uid')
        gid = payload.get('gid')
        if (not isinstance(args, list) or
            not isinstance(kwargs, dict) or
            not isinstance(uid, (int, long)) or
            not isinstance(gid, (int, long))):
          raise OSError(errno.EFAULT, 'Request arguments were malformed')

        # Un-unicodize the keyword arguments or Python will barf.
        kwargs = dict((str(k), v) for k, v in kwargs.iteritems())

        op_class = globals().get(method)
        if not op_class or not issubclass(op_class, VfsOp):
          raise OSError(errno.EFAULT, 'Invalid method "%s"' % method)

        op = op_class(uid, gid)
        out = op(*args, **kwargs)
        out_key = 'result'
      except EnvironmentError, e:
        out = [e.errno, e.strerror]
        out_key = 'error'
      except Exception, e:
        logging.exception('Unhandled exception. method=%r, payload=%r',
                          method, payload)
        out = [errno.EFAULT, str(e)]
        out_key = 'error'
        self.response.set_status(500)
    finally:
      payload = simplejson.dumps({out_key: out})
      if DEBUG:
        logging.debug('Sent JSON response: %s', payload)
        logging.getLogger().setLevel(logging.INFO)
      self.response.out.write(payload)

################################################################################
# Read/write endpoints, which are special

class ReadEndpoint(webapp.RequestHandler):
  """Serves a BlobKey for a given inode."""

  def get(self):
    number = self.request.get('inode')
    op = get_blobkey(None, None)  # TODO: Include UID/GID
    try:
      try:
        blob_key = op(int(number))
      except OSError, e:
        if e.errno == errno.ENOENT:
          self.response.set_status(404)
        else:
          raise
    except:
      logging.exception('Unexpected error finding blobkey for inode = %s',
                        number)
      self.response.set_status(500)

    if blob_key:
      self.response.headers[blobstore.BLOB_KEY_HEADER] = blob_key
      del self.response.headers['Content-Type']
    else:
      # This case handles when the file exists but it has no associated blobs,
      # which can happen right at file creation time or after truncation.
      self.response.headers['Content-Type'] = 'text/plain'


class WriteEndpoint(webapp.RequestHandler):
  """Receives upload complete notifications.

  Expects two multipart/form-data fields: One BlobKey file, and 'inode', with
  the file number of the Inode to overwrite with this blob.
  """

  STATUS_KEY = 'X-Fuse-Gae-Upload-Status'

  # TODO: Remove the hack of overriding 302 and 303 status codes to mean
  # failure and success, respectively.

  def post(self):
    # Blobstore wants a location for the redirect, doesn't need to be valid
    self.response.headers['Location'] = 'http://www.example.com'
    number = self.request.get('inode')
    if not number:
      logging.error('Could not find inode number in request')
      self.response.set_status(302)
      self.response.headers[self.STATUS_KEY] = 'Missing inode'
      return

    key = blobstore.BlobKey(self.request.POST['file'].type_options['blob-key'])
    blob_info = blobstore.get(key)
    if not blob_info:
      logging.error('Could not find blob info in request')
      self.response.set_status(302)
      self.response.headers[self.STATUS_KEY] = 'Missing blob_info'
      return

    number = long(number)
    content_type = blob_info.content_type
    size = blob_info.size
    logging.info('Upload successful. Inode=%s, Size=%r, Type=%r BlobKey=%r',
                 number, size, content_type, key)
    op = write_blob(None, None)  # TODO: Include UID/GID
    try:
      op(number, size, [key])
    except OSError, e:
      logging.exception('Could not record new blob write')
      self.response.set_status(302)
      self.response.headers[self.STATUS_KEY] = 'Error recording blob write'
      return

    # Success.
    self.response.set_status(303)

################################################################################
# Cron jobs

class GarbageCollector(webapp.RequestHandler):
  """Queries for and collects DeletedInode garbage."""

  def get(self):
    """Deletes a bounded batch of aged DeletedInode entities and their blobs."""
    collection_time = (
        datetime.datetime.now() -
        datetime.timedelta(seconds=GARBAGE_COLLECTION_AGE_SECONDS))
    deleted_list = (
        DeletedInode.all()
        .filter('delete_time <', collection_time)
        .fetch(GARBAGE_COLLECTION_CHUNK_SIZE))
    for original_deleted_inode in deleted_list:
      try:
        def txn():
          # Re-fetch inside the transaction; a concurrent GC run may have
          # already collected this entity.
          deleted_inode = db.get(original_deleted_inode.key())
          if not deleted_inode:
            return

          # Remove the parent if this is an Inode tombstone. Use the
          # KEY_NAME constant rather than the previously duplicated
          # 'deleted' string literal.
          if deleted_inode.key().name() == DeletedInode.KEY_NAME:
            db.delete(deleted_inode.key().parent())

          # Clean the Blobs. Failures are logged but do not abort, so one
          # bad blob cannot wedge collection of the rest.
          for blob_key in deleted_inode.blobs:
            try:
              blobstore.delete(blob_key)
            except blobstore.Error:
              logging.exception('Could not delete BlobKey %r', blob_key)

          db.delete(deleted_inode.key())

        # Use .get() so a missing SERVER_SOFTWARE (e.g. local tools) cannot
        # KeyError; the previous direct indexing assumed a GAE runtime.
        if 'Development' not in os.environ.get('SERVER_SOFTWARE', ''):
          db.run_in_transaction(txn)
        else:
          # TODO: Remove this hack once the blob transaction bug is fixed.
          txn()
      except db.Error:
        logging.exception('Could not delete %r', original_deleted_inode.key())

################################################################################

# WSGI wiring: the read/write endpoints must be listed before the catch-all
# /fuse/<method> route, which dispatches its capture group as a VfsOp name.
application = webapp.WSGIApplication(
  [
    ('/fuse/read', ReadEndpoint),
    ('/fuse/write', WriteEndpoint),
    ('/fuse/([a-z_-]+)', FuseEndpoint),
    ('/cron/garbage', GarbageCollector),
  ], debug=DEBUG)

def main():
  """CGI entry point for the App Engine runtime."""
  util.run_wsgi_app(application)

if __name__ == '__main__':
  main()
