#!/usr/bin/python

import hashlib
import httplib2
import logging
import store
import StringIO
import gdata.auth
import gdata.docs.client
import platform
import re
import time
import zlib

import filehandles

from gdata.data import MediaSource
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from os.path import expanduser

def DoRequestWithExponentialBackoff(label, f):
  """Call f() until it succeeds, backing off exponentially on gdata errors.

  Recognized throttling/transient errors (rate limit, transient 500/400)
  are logged at debug level; anything else that is still a RequestError is
  logged at info level but retried all the same.  Non-RequestError
  exceptions are logged and re-raised.

  Args:
    label: human-readable description of the request, used in log messages.
    f: zero-argument callable that performs the request.

  Returns:
    Whatever f() returns on its first successful call.

  Raises:
    Whatever non-RequestError exception f() raises.
  """
  delay = 1
  while True:
    try:
      logging.debug(label)
      return f()
    except gdata.client.RequestError as err:
      throttled = ((err.status == 503 and 'rateLimitExceeded' in err.body) or
                   (err.status == 500 and 'An unknown error' in err.body) or
                   (err.status == 400 and 'Invalid request' in err.body))
      if throttled:
        logging.debug("rate limit exceeded, sleeping %d" % delay)
      else:
        logging.info("Unrecognized error %s, retrying anyway after %d: %s" %
                     (label, delay, err))
      time.sleep(delay)
      delay *= 2
    except Exception as err:
      logging.error("%s failed as %s" % (label, err))
      raise


class GdriveStore(store.StoreInterface):
    """Store backend that keeps backup data in Google Drive via the gdata API.

    Layout on Drive: one folder named `collection_name` (optionally nested
    under `parent_collection_name`) containing two subfolders:
      * "gsync data"     -- content chunks, named data-<md5>-<length>
      * "gsync metadata" -- zlib-compressed blobs, named
                            metadata-<version>-<timestamp>
    """

    def __init__(self, collection_name, parent_collection_name=None):
        """Run the OAuth2 flow and locate or create the folder hierarchy.

        Args:
          collection_name: name of the top-level backup folder.
          parent_collection_name: optional folder to nest the backup under.

        Raises:
          Exception: if several folders share `collection_name`, or an
            existing backup folder does not contain exactly one
            "gsync data" and one "gsync metadata" subfolder.
        """
        # Monotonically increasing counter used to make temporary upload
        # names unique within this process (see PutChunk).
        self.unique_id = 0
        self.cached_metadata = {}  # version number -> metadata Resource
        self.cached_data = {}      # (md5, length) -> chunk Resource

        # Set up a Flow object to be used if we need to
        # authenticate. This sample uses OAuth 2.0, and we set up the
        # OAuth2WebServerFlow with the information it needs to
        # authenticate. Note that it is called the Web Server Flow,
        # but it can also handle the flow for native applications
        # <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
        # The client_id client_secret are copied from the API Access
        # tab on the Google APIs Console
        # <http://code.google.com/apis/console>. When creating
        # credentials for this application be sure to choose an
        # Application type of "Installed application".
        FLOW = OAuth2WebServerFlow(
            client_id='759325989888.apps.googleusercontent.com',
            client_secret='6aiiiCsLIFppgHgh_0VFu2jy',
            scope=['https://docs.google.com/feeds/',
                   # It's unclear whether we want this or not.  It'll give us
                   # MD5 checksums, unlike the pure docs API.
                   'https://www.googleapis.com/auth/drive.file',
                   ],
            user_agent='gsync/1.0')

        # If the Credentials don't exist or are invalid run through
        # the native client flow. The Storage object will ensure that
        # if successful the good Credentials will get written back to
        # a file.
        storage = Storage(expanduser('~/.gsync.oauthstorage'))
        credentials = storage.get()
        if credentials is None or credentials.invalid:
            credentials = run(FLOW, storage)

        # Hand the oauth2client tokens over to the gdata auth layer so the
        # legacy DocsClient can make authorized requests.
        auth2token = gdata.gauth.OAuth2Token(
            client_id='759325989888.apps.googleusercontent.com',
            client_secret='6aiiiCsLIFppgHgh_0VFu2jy',
            scope=['https://docs.google.com/feeds/',
                   'https://www.googleapis.com/auth/drive.file', ],
            access_token=credentials.access_token,
            refresh_token=credentials.refresh_token,
            user_agent='gsync/1.0')

        self.client = auth2token.authorize(gdata.docs.client.DocsClient())

        all_collections = self.client.GetAllResources(
            uri='https://docs.google.com/feeds/default/private/full/-/folder')

        backup_collections = []
        parent_collection = None
        for c in all_collections:
            if c.title.text == collection_name:
                backup_collections.append(c)
            if parent_collection_name and c.title.text == parent_collection_name:
                parent_collection = c
        if len(backup_collections) > 1:
            raise Exception('multiple folders with name')
        if len(backup_collections) < 1:
            # First run: create the whole folder hierarchy from scratch.
            if parent_collection_name and not parent_collection:
                parent_collection = self.client.CreateResource(
                    gdata.docs.data.Resource(type='folder',
                                             title=parent_collection_name))
            self.backup_collection = self.client.CreateResource(
                gdata.docs.data.Resource(type='folder', title=collection_name),
                collection = parent_collection)
            self.data_collection = self.client.CreateResource(
                gdata.docs.data.Resource(
                    type='folder', title="gsync data"),
                collection = self.backup_collection)
            self.metadata_collection = self.client.CreateResource(
                gdata.docs.data.Resource(
                    type='folder', title="gsync metadata"),
                collection = self.backup_collection)
        else:
            # Existing repository: find the two expected subfolders.
            self.backup_collection = backup_collections[0]
            logging.debug("backup collection " + self.backup_collection.content.src + "/contents/-/folder")
            subfolders = self.client.GetAllResources(
                uri = self.backup_collection.content.src + "/-/folder")
            data_collections = []
            metadata_collections = []
            for c in subfolders:
                if c.title.text == "gsync metadata":
                    metadata_collections.append(c)
                if c.title.text == "gsync data":
                    data_collections.append(c)
            if len(data_collections) != 1 or len(metadata_collections) != 1:
                raise Exception(
                    "corrupted repository %d %d"
                    % (len(data_collections), len(metadata_collections)))
            self.data_collection = data_collections[0]
            self.metadata_collection = metadata_collections[0]

    def ListMetadata(self):
        """List the metadata versions stored on the server.

        Also rebuilds the cached_metadata map used by GetMetadata().

        Returns:
          A dict mapping metadata version number (int) to the timestamp
          string embedded in the filename.
        """
        logging.debug("metadata collection " + self.metadata_collection.content.src)
        md = self.client.GetAllResources(
            uri = self.metadata_collection.content.src)
        toreturn = {}
        self.cached_metadata = {}
        for i in md:
            match = re.match(r'metadata-(\d*)-(.*)', i.title.text)
            if match:
                version = int(match.group(1))
                toreturn[version] = match.group(2)
                self.cached_metadata[version] = i
        return toreturn

    def GetMetadata(self, version_number):
        """Fetch and decompress the metadata blob for the given version.

        Args:
          version_number: integer version, as returned by ListMetadata().

        Returns:
          The decompressed metadata contents as a string.

        Raises:
          Exception: if the version does not exist on the server.
        """
        if version_number not in self.cached_metadata:
            # Cache miss: the version may have appeared since the last
            # listing, so refresh before giving up.
            self.ListMetadata()
        if version_number not in self.cached_metadata:
            raise Exception("metadata %d not found" % version_number)
        return zlib.decompress(
            self.client.DownloadResourceToMemory(
                self.cached_metadata[version_number]))

    def PutMetadata(self, version_number, contents):
        """Attempt to store `contents` as metadata version `version_number`.

        The blob is zlib-compressed (level 9) and uploaded unconverted.
        It should return False if the version already exists (and also
        forbid concurrent puts of the same new version), but that check
        is not implemented yet.
        """
        # TODO(dpeng): Is this actually implementable??
        contents = zlib.compress(contents, 9)
        filename = ('metadata-%d-%s'
                    % (version_number, time.asctime(time.gmtime())))
        self.client.CreateResource(
            gdata.docs.data.Resource(
                type='file',
                title=filename),
            # '?convert=false' keeps Drive from converting the upload
            # into a Google Docs document.
            create_uri=(self.metadata_collection.GetResumableCreateMediaLink().href
                        + '?convert=false'),
            media=MediaSource(
                file_handle = StringIO.StringIO(contents),
                content_type='application/json',
                content_length = len(contents),
                file_name = filename))
        return True  #TODO(dpeng): locking

    def PutChunk(self, filehandle_factory, expected_md5, expected_length):
        """Upload a chunk, verify its md5/length, then rename it into place.

        The chunk is first uploaded under a unique temporary name; once the
        checksum and byte count are confirmed, it is renamed to its final
        data-<md5>-<length> name so readers never see a partial chunk.

        Args:
          filehandle_factory: zero-argument callable returning a fresh file
            handle with the chunk contents (called once per upload attempt,
            so retries re-read from the start).
          expected_md5: hex md5 digest the contents must hash to.
          expected_length: byte length the contents must have.

        Raises:
          Exception: if the uploaded bytes do not match the expected length
            or checksum.
        """
        finalfilename = "data-%s-%d" % (expected_md5, expected_length)
        tempfilename = ("temp-file-%s-%s-%d"
                        % (platform.node(), time.asctime(time.localtime()), self.unique_id))
        # NOTE: originally indented with a tab; normalized to spaces to
        # avoid mixed-indentation breakage.
        self.unique_id = self.unique_id + 1

        wrappedfh_ref = []  # side return path for the lambda to give us back the wrappedfh
        def CreateLambda():
            # Wrap a fresh handle in a checksummer so we can validate what
            # was actually sent after the upload completes.
            wrappedfh = filehandles.ChecksummingFilehandle(
                filehandle_factory(), hashlib.md5())
            wrappedfh_ref.append(wrappedfh)
            return self.client.CreateResource(
                gdata.docs.data.Resource(type='file', title=tempfilename),
                create_uri=(self.data_collection.GetResumableCreateMediaLink().href
                            + '?convert=false'),
                media=MediaSource(
                    file_handle = wrappedfh,
                    content_type='application/octet-stream',
                    content_length = expected_length,
                    file_name = tempfilename))
        resource = DoRequestWithExponentialBackoff(
            "Creating %s -> %s" % (tempfilename, finalfilename), CreateLambda)
        # Inspect the handle from the last (successful) upload attempt.
        wrappedfh = wrappedfh_ref.pop()
        if expected_length != wrappedfh.bytes_read:
            e = Exception("wrong length %d vs %d"
                          % (expected_length, wrappedfh.bytes_read))
            logging.error(e)
            raise e
        if expected_md5 != wrappedfh.hasher.hexdigest():
            e = Exception("wrong checksum %s vs %s, "
                          % (expected_md5, wrappedfh.hasher.hexdigest()))
            logging.error(e)
            raise e

        def RenameLambda():
            myresource = resource
            attempt = 0
            while True:
                try:
                    myresource.title.text = finalfilename
                    self.client.UpdateResource(myresource)
                    return
                except gdata.client.RequestError as e:
                    # Sometimes the etag changes just on its own: refetch
                    # the resource to pick up the new etag and retry.
                    if not (e.status == 412 and 'Mismatch: etags = ' in e.body):
                        logging.error(e)
                        raise
                    original_etag = myresource.etag
                    myresource = self.client.GetResource(myresource)
                    new_etag = myresource.etag
                    logging.info("ETag for attempt %d of %s before %s after %s"
                                 % (attempt, finalfilename, original_etag, new_etag))
                    attempt = attempt + 1
        DoRequestWithExponentialBackoff(
            "Renaming %s to %s" % (tempfilename, finalfilename), RenameLambda)

    def GetChunk(self, md5sum, expected_length, filehandle):
        """Fetch the chunk indexed by (md5sum, expected_length) into filehandle.

        Streams the download in 1 MiB reads, verifying both the byte count
        and the md5 checksum against the chunk's name.

        Raises:
          Exception: if the chunk is unknown or the downloaded bytes have
            the wrong length or checksum.
          gdata.client.RequestError: on a non-200 HTTP response.
        """
        chunk = (md5sum, expected_length)
        if chunk not in self.cached_data:
            self.ListChunks()
        if chunk not in self.cached_data:
            raise Exception("No such chunk %s length %d" % (md5sum, expected_length))
        server_response = self.client.request(
            'GET', self.cached_data[chunk].content.src)
        if server_response.status != 200:
            # Call-form raise (equivalent to the legacy `raise Cls, arg`
            # statement, but also valid Python 3 syntax).
            raise gdata.client.RequestError({'status': server_response.status,
                                             'reason': server_response.reason,
                                             'body': server_response.read()})
        byte_count = 0
        hasher = hashlib.md5()
        bytes_read = server_response.read(1048576)
        while bytes_read:
            byte_count += len(bytes_read)
            hasher.update(bytes_read)
            filehandle.write(bytes_read)
            bytes_read = server_response.read(1048576)
        if byte_count != expected_length:
            raise Exception("Oops, corrupted file %d vs %d length"
                            % (expected_length, byte_count))
        if md5sum != hasher.hexdigest():
            raise Exception("Oops, corrupted file %s vs %s checksum"
                            % (md5sum, hasher.hexdigest()))

    def ListChunks(self):
        """List the chunks stored on the server.

        Also rebuilds the cached_data map used by GetChunk().

        Returns:
          A list of (md5sum, length) tuples, one per stored chunk.
        """
        md = self.client.GetAllResources(
            uri = self.data_collection.content.src)
        toreturn = []
        self.cached_data = {}
        for i in md:
            match = re.match(r'data-([0-9a-fA-F]*)-(\d*)', i.title.text)
            if match:
                chunk = (match.group(1), int(match.group(2)))
                toreturn.append(chunk)
                self.cached_data[chunk] = i
        return toreturn
