#!/usr/bin/env python

"""
FUSE for Google Docs.

A cloud-based filesystem using Google Docs storage for the back end. (FUSE, GData API, Python)

Deps:
  python2.4 or so
  python-fuse
  gdata api v14:  http://code.google.com/p/gdata-python-client/downloads/detail?name=gdata-2.0.14.zip&can=2&q=

To use:
  Download the source.
  Create a .gdfs_user file with a single line: your Google Docs username (same as your gmail username, for example test222443@gmail.com):
    echo test222443@gmail.com > .gdfs_user
  Then run
    mkdir target_dir
    gdfs.py target_dir
  You'll be prompted for your password (you will probably need to enter an application-specific password), and a gdata authentication token will be stored in .gdfs_token (so you don't have to enter the password next time).
  Your docs storage will be mounted at target_dir.
  To unmount
    fusermount -u target_dir

NOTES/CAVEATS:
  Due to some strangeness in the gdata API, gdfs doesn't support files in the root directory (target_dir above), so you must make subdirectories! For example:
    mkdir target_dir/sub_dir
    cp my_file target_dir/sub_dir
  Timestamps are controlled by the server, so if you use rsync, use
    rsync -rv
  Filesystem interactions are currently logged to /tmp/gdfs.log. Very useful for debugging.

Todos: 
  faster mknod (no upload?)
  faster unlink
  compress + encrypt (use encfs for now)
  threading?
"""

__author__ = 'Nathaniel Fairfield <nathanielfairfield@gmail.com>'

import calendar
import errno
import getpass
import logging
import os
import stat
import tempfile
import time

import fuse
import gdata.docs.client
from fuse import Fuse

# Declare which python-fuse API generation this filesystem implements.
fuse.fuse_python_api = (0, 2)
# Logging to /tmp
logging.basicConfig(filename='/tmp/gdfs.log', level=logging.DEBUG)
# Timestamps are 'Z' (UTC).  Force the whole process into UTC so that
# time.mktime() in TimeStringToUnixTime interprets server timestamps as UTC.
os.environ['TZ'] = 'Z'
time.tzset()

# Keep only the low 11 bits of the open(2) flags so the masked value can be
# looked up in FLAGS_MAP below; larger flags (e.g. O_LARGEFILE) are dropped.
FLAGS_MASK = 0x7FF
# Map masked open(2) flag values to the equivalent Python file-mode strings.
FLAGS_MAP = {
  0x0: 'r',    # O_RDONLY
  0x1: 'w',    # O_WRONLY
  0x2: 'r+',   # O_RDWR
  0x401: 'a',  # O_WRONLY | O_APPEND (O_APPEND is 0x400 on Linux)
  0x402: 'a+', # O_RDWR | O_APPEND
}
        
# Bug in GetDocList: the entry.GetEditLink, etc., all change if you call GetDocList with a uri, versus without.  The ones with a uri are broken.
# feed1 = client.GetDocList()
# feed2 = client.GetDocList('/feeds/default/private/full/folder%3Aroot/contents')
# # Find the same entry
# entry1.GetEditLink() != entry2.GetEditLink()
# 
# The use of MakeHackUri is bad because it doesn't allow the use of eTags.
def MakeHackUri(entry):
  """Build a full-feed URI for entry directly from its resource id.

  Works around the GetDocList edit-link breakage described above.  URIs
  built this way cannot carry eTags.
  """
  resource_id = entry.resource_id.text
  return '/feeds/default/private/full/%s' % resource_id.replace(':', '%3A')

def TimeStringToUnixTime(s):
  """Convert a gdata timestamp string to Unix time.

  Args:
    s: a UTC timestamp of the form '2010-12-04T22:42:09.612Z'.
  Returns:
    The corresponding Unix time as an int (fractional seconds dropped).
  """
  logging.info(s)
  year, month, day = map(int, s[:10].split('-'))
  hour, minute, second = map(int, s[11:19].split(':'))
  # calendar.timegm interprets the tuple as UTC, so the result no longer
  # depends on the process-wide TZ hack at module load (time.mktime does).
  return int(calendar.timegm((year, month, day, hour, minute, second, 0, 0, 0)))

class GDStat(fuse.Stat):
  def __init__(self):
    self.st_mode = stat.S_IFDIR | 0755
    self.st_ino = 0
    self.st_dev = 0
    self.st_nlink = 2
    self.st_uid = os.getuid()
    self.st_gid = os.getgid()
    self.st_size = 4096
    self.st_atime = 0
    self.st_mtime = 0
    self.st_ctime = 0

class GDFS(fuse.Fuse):
  def __init__(self, email, *args, **kw):
    super(GDFS, self).__init__(*args, **kw)

    # Disable multithreaded until reads, etc., are thread safe.
    self.multithreaded = False

    self.client = gdata.docs.client.DocsClient()
    self.client.ssl = True  # Force all API requests through HTTPS
    self.client.http_client.debug = False  # True for debugging HTTP requests
    try:
      token_string = open('.gdfs_token').read()
      self.client.auth_token = gdata.gauth.token_from_blob(token_string)
    except:
      token = self.client.RequestClientLoginToken(
          email, getpass.getpass(), 'gdfs')
      self.client.auth_token = token
      open('.gdfs_token', 'w').write(gdata.gauth.token_to_blob(token))
    # Stat cache is a map of the form {path: stat}
    self.stat_cache = {}
    # {path: entry}
    self.entry_cache = {}
    # local tmp file cache
    self.file_cache = {}
    self.modified_files = {}

  def getattr(self, path):
    logging.debug('getattr "%s"' % path)
    self.stat_cache.setdefault('/', GDStat())
    if path in self.stat_cache:
      return self.stat_cache[path]
    else:
      logging.debug('  stat cache miss for "%s"' % path)
      if not path in self.entry_cache: 
        logging.debug('  entry cache miss for "%s"' % path)
        dirname, basename = os.path.split(path)
        if dirname == '/':
          feed_uri = '/feeds/default/private/full/folder%3Aroot/contents'
        else:
          feed_uri = self.entry_cache[dirname].content.src
        feed_uri += '?title=' + basename 
        # BUG There is a bug in title-exact -- if its specified, then it
        # returns all entries, ignoring the folder!
        ## If using title-exact, scan through all the returned values to find
        ## the one thats in our folder.
        #feed_uri += '&title-exact=true'
        #for entry in feed.entry:
        #  logging.info('  yo ' + entry.title.text)
        #  for folder in entry.InFolders():
        #    logging.info('   in ' + folder.title)
        feed = self.client.GetDocList(feed_uri)
        if not feed.entry or feed.entry[0].title.text != basename:
          return -errno.ENOENT
        self.entry_cache[path] = feed.entry[0]
      entry = self.entry_cache[path]
        
      st = GDStat()
      try: st.st_atime = TimeStringToUnixTime(entry.lastViewed.text)
      except: pass
      st.st_mtime = TimeStringToUnixTime(entry.updated.text)
      st.st_ctime = TimeStringToUnixTime(entry.published.text)

      if entry.GetDocumentType() != 'folder':
        st.st_mode = stat.S_IFREG | 0755
        st.st_nlink = 1
        st.st_size = int(entry.quota_bytes_used.text)
      self.stat_cache[path] = st
      return st

  def readdir(self, path, unused_offset):
    logging.debug('readdir "%s", %s' % (path, unused_offset))
    yield fuse.Direntry('.')
    yield fuse.Direntry('..')
    if path == '/': # Root
      feed_uri = '/feeds/default/private/full/folder%3Aroot/contents'
    else:
      feed_uri = self.entry_cache[path].content.src
    # Get entries 100 at a time.
    while True:
      feed = self.client.GetDocList(feed_uri)
      for entry in feed.entry:
        # Filter out non-folders in the root directory, because something is
        # busted in GetDocList in root.
        if path == '/' and entry.GetDocumentType() != 'folder':
          continue
        entry_path = os.path.join(path, entry.title.text)
        logging.debug('Adding entry cache: "%s"' % entry_path)
        self.entry_cache[entry_path] = entry
        yield fuse.Direntry(entry.title.text)
      next_link = feed.GetNextLink()
      if not next_link:
        break
      feed_uri = next_link.href

  def open(self, path, flags):
    logging.debug('open "%s" %d' % (path, flags))
    if os.path.dirname(path) == '/':
      return -errno.EACCES
    if not path in self.entry_cache:
      return -errno.ENOENT
    f, tmp_path = tempfile.mkstemp()
    os.close(f)
    self.file_cache[path] = tmp_path
    self.client.Download(self.entry_cache[path], tmp_path)
    flags = flags & FLAGS_MASK
    logging.debug('  flags masked to %d' % flags)
    return open(tmp_path, FLAGS_MAP[flags])

  def read(self, path, size, offset, f):
    logging.debug('read "%s" %d %d' % (path, size, offset))
    f.seek(offset)
    buf = f.read(size)
    return buf

  def mkdir(self, path, unused_mode):
    logging.debug('mkdir: "%s"' % path)
    if path in self.entry_cache:
      return -errno.EEXIST
    dirname, basename = os.path.split(path)
    if dirname == '/':
      self.entry_cache[path] = self.client.Create('folder', basename)
    elif dirname in self.entry_cache:
      self.entry_cache[path] = self.client.Create('folder', basename,
                                                  self.entry_cache[dirname])
    else:
      return -errno.ENOENT
    return 0

  def rmdir(self, path):
    logging.debug('rmdir: "%s"' % path)
    if path in self.entry_cache:
      # NOTE fuse already checks if this is a directory
      feed_uri = self.entry_cache[path].content.src
      feed = self.client.GetDocList(feed_uri)
      if feed.entry:
        return -errno.ENOTEMPTY
      hack_uri = MakeHackUri(self.entry_cache[path])
      response = self.client.Delete(hack_uri, force=True)
      if (response.reason != 'OK'):
        logging.debug('  delete failed?: ' + path)
        return -errno.EFAULT
      del self.entry_cache[path]
      del self.stat_cache[path]
    else:
      logging.debug('  "%s" not in ' % (path, str(self.entry_cache.keys())))
      return -errno.ENOENT
    return 0

  def unlink(self, path):
    logging.debug('unlink: "%s"' % path)
    if path in self.entry_cache:
      hack_uri = MakeHackUri(self.entry_cache[path]) # + '?delete=true'
      response = self.client.Delete(hack_uri, force=True)
      if (response.reason != 'OK'):
        logging.debug('  delete failed?: ' + path)
        return -errno.EFAULT
      del self.entry_cache[path]
      del self.stat_cache[path]
    else:
      logging.debug('  "%s" not in ' % (path, str(self.entry_cache.keys())))
      return -errno.ENOENT
    return 0

  def mknod(self, path, unused_mode, unused_dev):
    logging.debug('mknod %s' % path)
    if os.path.dirname(path) == '/':
      return -errno.EACCES
    if path in self.entry_cache:
      return -errno.EEXIST
    # Create a local file, since Upload wants something to sink its teeth
    # into.
    f, tmp_path = tempfile.mkstemp()
    os.close(f)
    logging.debug('  tmp_path ' + tmp_path)
    dirname, basename = os.path.split(path)
    logging.debug('  dirname ' + dirname)
    folder_entry = None
    if dirname in self.entry_cache:
      folder_entry = self.entry_cache[dirname]
    self.entry_cache[path] = self.client.Upload(tmp_path, basename,
                                                folder_or_uri=folder_entry,
                                                content_type='application/pdf')
    os.unlink(tmp_path)
    return 0

#  def create(self, path, mode, dev):
#    return self.mknod(path, mode, dev)

  def write(self, path, buf, offset, f):
    logging.debug('write ' + path)
    if path in self.file_cache:
      f.seek(offset)
      f.write(buf)
      self.modified_files[path] = True
      return len(buf)
    else:
      return -errno.ENOENT

  def flush(self, path, f = None):
    if f:
      f.flush()

  def truncate(self, path, length):
    logging.debug('truncate %s %d' %(path, length))
    if not path in self.entry_cache:
      return -errno.ENOENT
    if not path in self.file_cache:
      f, tmp_path = tempfile.mkstemp()
      os.close(f)
      self.file_cache[path] = tmp_path
      self.client.Download(self.entry_cache[path], tmp_path)
    open(self.file_cache[path], 'w').truncate(length)
    self.modified_files[path] = True
    return 0

  def release(self, path, unused_flags, f):
    logging.debug('release ' + path)
    f.close()
    if path in self.file_cache:
      if path in self.modified_files:
        ms = gdata.data.MediaSource(file_path=self.file_cache[path],
                                    content_type='application/pdf')
        self.entry_cache[path] = self.client.Update(self.entry_cache[path],
                                                    media_source=ms,
                                                    force=True)
        del self.stat_cache[path]
        del self.modified_files[path]
      logging.debug('  unlink local ' + self.file_cache[path])
      os.unlink(self.file_cache[path])
      del self.file_cache[path]
    else:
      return -errno.ENOENT

  def rename(self, pathfrom, pathto):
    logging.debug('rename from %s to %s' % (pathfrom, pathto))
    if not pathfrom in self.entry_cache:
      return -errno.ENOENT
    if pathfrom == pathto:
      return -errno.EEXIST
    elif os.path.dirname(pathfrom) == os.path.dirname(pathto):
      # If the target exists, remove it first.
      if pathto in self.entry_cache:
        logging.debug('  unlinking before name update')
        self.unlink(pathto) 
      logging.debug('  name update')
      entry = self.entry_cache[pathfrom]
      entry.title.text = os.path.basename(pathto) 
      self.entry_cache[pathto] = self.client.Update(entry)
      del self.entry_cache[pathfrom]
      try: del self.stat_cache[pathfrom]
      except KeyError: pass
    else:
      dirto = os.path.dirname(pathto)
      if dirto == '/':
        return -errno.EACCES
      if pathfrom in self.entry_cache and dirto in self.entry_cache:
       # If the target exists, remove it first.
        if pathto in self.entry_cache:
          logging.debug('  unlinking before full move')
          self.unlink(pathto) 
        logging.debug('  full move')
        self.entry_cache[pathto] = self.client.Move(
            self.entry_cache[pathfrom], self.entry_cache[dirto])
        del self.entry_cache[pathfrom]
        try: del self.stat_cache[pathfrom]
        except KeyError: pass
      else:
        return -errno.EEXIST

def main():
  """Read the username from .gdfs_user, build the GDFS, and hand off to FUSE."""
  usage = 'GDFS ' + Fuse.fusage
  try:
    user_file = open('.gdfs_user')
    try:
      # strip() drops the trailing newline that `echo user > .gdfs_user`
      # leaves behind; without it the login request carries '\n' in the name.
      username = user_file.read().strip()
    finally:
      user_file.close()
  except IOError:
    raise Exception("Could not read username from .gdfs_user")
  gdfs = GDFS(username,
              version = "%prog " + fuse.__version__,
              usage = usage,
              dash_s_do='setsingle')
  gdfs.parse(errex=1)
  gdfs.main()

# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
  main()
