#!/usr/bin/python

import filehandles;
import localstore;

import os;
import gflags
import hashlib;
import json;
import logging
import sys
import time;

from stat import *
import threadpool

# Command-line flags (python-gflags).
FLAGS = gflags.FLAGS

# Size of the worker pool used by BackupDirectory for chunk uploads.
gflags.DEFINE_integer('num_threads', 10,
                      'Number of threads to use')

def SplitChunks(fh, chunksize):
    """Split the stream `fh` into chunks of at most `chunksize` bytes.

    Returns a list of (md5_hexdigest, chunk_length) tuples in stream
    order.  The final chunk may be shorter than `chunksize`; an empty
    stream yields an empty list.
    """
    chunks = []
    eof = False
    while not eof:
        digest = hashlib.md5()
        remaining = chunksize
        while remaining > 0:
            # Read at most 1 MiB at a time so a huge chunksize does not
            # force one enormous read().
            data = fh.read(min(1048576, remaining))
            if not data:
                eof = True
                break
            digest.update(data)
            remaining -= len(data)
        consumed = chunksize - remaining
        if consumed > 0:
            chunks.append((digest.hexdigest(), consumed))
    return chunks

def PutChunkAndClose(store, fh_factory, length, md5):
    """Upload one chunk to `store`.

    Runs on a worker thread (queued by BackupDirectory).  `fh_factory` is
    a zero-argument callable producing a filehandle positioned at the
    chunk; it is handed to store.PutChunk, which is responsible for
    consuming and closing the handle.
    """
    store.PutChunk(fh_factory, md5, length)
    # FIX: pass lazy %-style args instead of pre-formatting with "%" so the
    # string is only built when DEBUG logging is actually enabled.
    logging.debug("closing md5 %s %d", md5, length)

# Ok, this is baffling, but we must force python to make a copy of
# full_path, offset, and length for the factory.
def MakeFilehandleFactory(full_path, offset, length):
    """Return a zero-arg callable that opens `full_path`, seeks to
    `offset`, and wraps the handle so reads stop after `length` bytes.

    Built in its own function scope so each factory captures its own
    copies of the arguments (rather than loop variables by reference).
    """
    def make_handle():
        handle = open(full_path, "rb")
        handle.seek(offset)
        return filehandles.LimitedFilehandle(handle, length)
    return make_handle

def BackupDirectory(store, localpath, status=None):
    """Walk `localpath` recursively and upload each file's chunks to `store`.

    Files are split into 1 GiB chunks (hashed in 1 MiB reads); chunks whose
    (md5, length) pair is already in the store are skipped.  Uploads run on
    a pool of FLAGS.num_threads worker threads; this function blocks until
    every queued upload has finished.

    Args:
      store: chunk store exposing ListChunks() and PutChunk() (see
        localstore).
      localpath: root directory to back up.
      status: optional callable(path, percent_done) invoked once per file.

    Returns:
      (all_dirs, all_files): all_dirs is a list of directory paths (each
      ending in os.sep); all_files maps full path -> {"link": target} for
      symlinks or {"chunks": [(md5, length), ...], "size_bytes": n} for
      regular files.
    """
    storedchunks = set(store.ListChunks())

    all_dirs = []
    all_files = {}
    executor = threadpool.ThreadPool(FLAGS.num_threads, 1)
    for root, dirs, files in os.walk(localpath):
        if not root.endswith(os.sep):
            root += os.sep
        all_dirs.append(root)
        for name in files:
            full_path = os.path.join(root, name)
            if (status != None):
                status(full_path, 0.0)
            # lstat so symlinks are recorded as links, not followed.
            stat = os.lstat(full_path)
            if (S_ISLNK(stat.st_mode)):
                all_files[full_path] = { "link" : os.readlink(full_path) }
            else:
                try:
                    fh = open(full_path, "rb");
                    # 1048576 * 1024 bytes = 1 GiB per chunk.
                    chunks = SplitChunks(fh, 1048576 * 1024)
                    fh.close()
                    offset = 0
                    for chunk in chunks:
                        (md5, length) = chunk
                        if chunk not in storedchunks:
                            logging.info("Putting %d bytes of %s at offset %d: %s",
                                         length, full_path, offset, md5)
                            # The worker re-opens the file via the factory so
                            # each queued upload gets its own filehandle.
                            executor.putRequest(threadpool.WorkRequest(
                                PutChunkAndClose,
                                args = [store, MakeFilehandleFactory(full_path, offset, length), length, md5]))
                            # NOTE(review): the chunk is marked stored as soon
                            # as it is queued, before the upload completes; a
                            # failed upload would still be skipped later in
                            # this run -- confirm this is intended.
                            storedchunks.add(chunk)
                        else:
                            logging.debug("Already have %d bytes of %s at offset %d",
                                          length, full_path, offset)
                        offset += length
                    all_files[full_path] = { "chunks" : chunks,
                                             "size_bytes" : stat.st_size}
                except IOError as (errno, strerror):
                    print("Couldn't open {0}: {1}".format(full_path, strerror))
    executor.wait()
    return (all_dirs, all_files)

def ConvertLocalFilenameToRemote(localfilename, localprefix, remoteprefix):
    """Map a local path under `localprefix` to its remote equivalent under
    `remoteprefix`, translating os.sep into '/'.

    Both prefixes must be directory-terminated (os.sep / '/'), and
    `localfilename` must lie under `localprefix`.
    """
    assert localprefix.endswith(os.sep)
    assert remoteprefix.endswith('/')
    # The prefix itself maps straight onto the remote prefix.
    if localfilename == localprefix:
        return remoteprefix
    assert localfilename.startswith(localprefix)
    relative = localfilename[len(localprefix):]
    return remoteprefix + relative.replace(os.sep, '/')

def ConvertRemoteFilenameToLocal(remotefilename, localprefix, remoteprefix):
    """Map a remote path under `remoteprefix` to its local equivalent under
    `localprefix`, translating '/' into os.sep.

    Inverse of ConvertLocalFilenameToRemote; both prefixes must be
    directory-terminated, and `remotefilename` must lie under
    `remoteprefix`.
    """
    assert localprefix.endswith(os.sep)
    assert remoteprefix.endswith('/')
    # The prefix itself maps straight onto the local prefix.
    if remotefilename == remoteprefix:
        return localprefix
    assert remotefilename.startswith(remoteprefix)
    relative = remotefilename[len(remoteprefix):]
    return localprefix + relative.replace('/', os.sep)

def AllParentDirectories(remoteprefix):
    """Return every ancestor directory of `remoteprefix`.

    `remoteprefix` must end in '/'.  The result starts with the root '/',
    then lists the remaining ancestors deepest-first, each ending in '/'.
    The prefix itself is not included:
    '/a/b/c/' -> ['/', '/a/b/', '/a/'].
    """
    assert remoteprefix.endswith('/')
    trimmed = remoteprefix.rstrip('/')

    parents = ['/']
    ancestor = trimmed.rsplit('/', 1)[0]
    while ancestor:
        parents.append(ancestor + '/')
        ancestor = ancestor.rsplit('/', 1)[0]
    return parents
    
def SyncDirectory(store, localpath, remotepath, status=None):
    """Back up `localpath` into `remotepath` in `store` as a new snapshot.

    Uploads the directory's chunks (via BackupDirectory), then writes a
    new metadata version that carries forward every previous entry
    outside `remotepath` and replaces everything under it.

    Args:
      store: store exposing chunk and metadata operations (see localstore).
      localpath: local source directory; must be a directory.
      remotepath: remote destination directory name.
      status: optional progress callable, forwarded to BackupDirectory.

    Returns:
      The integer version number of the newly written snapshot.

    Raises:
      Exception: if `localpath` is not a directory, or the previous
        snapshot already has a *file* named `remotepath`.
    """
    localprefix = localpath
    if not localprefix.endswith(os.sep):
        localprefix += os.sep
    remoteprefix = remotepath
    if not remoteprefix.endswith('/'):
        remoteprefix += '/'

    stat = os.lstat(localprefix)
    if (not S_ISDIR(stat.st_mode)):
        raise Exception("Source is not a directory")
    (dirs, files) = BackupDirectory(store, localprefix, status)
    existing_backups = store.ListMetadata()
    if (len(existing_backups) > 0):
        # Versions are integers; the new snapshot is last + 1 and starts
        # from the last snapshot's metadata.
        last_version = max(existing_backups.keys())
        last_metadata = json.loads(store.GetMetadata(last_version))
        next_version = last_version + 1
    else:
        next_version = 1;
        last_metadata = { "files": {},
                          "dirs": [ '/' ] }
    next_metadata = { }
    if remoteprefix in last_metadata["files"]:
        raise Exception("Backup already contains a file named %s, "
                    "cannot overwrite with directory" % remoteprefix)

    # Carry over all dirs/files that live outside remoteprefix; the whole
    # subtree under remoteprefix is replaced by this backup's contents.
    next_metadata["dirs"] = [x for x in last_metadata["dirs"]
                             if not x.startswith(remoteprefix) and not x == remoteprefix]
    next_metadata["files"] = {}
    for k,v in last_metadata["files"].iteritems():
        if not k.startswith(remoteprefix):
            next_metadata["files"][k] = v
    next_metadata["dirs"].extend([ConvertLocalFilenameToRemote(x, localprefix, remoteprefix) for x in dirs])
    for k, v in files.iteritems():
        next_metadata["files"][ConvertLocalFilenameToRemote(k, localprefix, remoteprefix)] = v

    # Create all upstream directories
    for d in AllParentDirectories(remoteprefix):
        if d not in next_metadata["dirs"]:
            next_metadata["dirs"].append(d)

    next_metadata["dirs"].sort()
    store.PutMetadata(next_version, json.dumps(next_metadata))
    return next_version

def RestoreSnapshot(store, remotepath, localpath, version):
    """Restore snapshot `version` of remote directory `remotepath` into
    local directory `localpath`.

    Recreates the directory tree, symlinks, and file contents by fetching
    each file's chunks from `store` in order.

    Raises:
      Exception: if the snapshot does not contain directory `remotepath`.
    """
    localprefix = localpath
    if not localprefix.endswith(os.sep):
        localprefix += os.sep
    remoteprefix = remotepath
    if not remoteprefix.endswith('/'):
        remoteprefix += '/'

    md = json.loads(store.GetMetadata(version))
    if remoteprefix not in md["dirs"]:
        raise Exception("Backup does not contain a directory %s"
                        % remoteprefix)

    # Create the directory tree first so every file has a parent directory.
    for d in md["dirs"]:
        if d.startswith(remoteprefix):
            d_converted = ConvertRemoteFilenameToLocal(d, localprefix, remoteprefix)
            if not os.path.isdir(d_converted):
                os.makedirs(d_converted)

    for k, v in md["files"].iteritems():
        if k.startswith(remoteprefix):
            filename = ConvertRemoteFilenameToLocal(k, localprefix, remoteprefix)
            if "link" in v:
                # NOTE(review): os.symlink fails if `filename` already
                # exists -- presumably restores target a fresh tree; confirm.
                os.symlink(v["link"], filename)
            else:
                # BUG FIX: the output handle was opened but never closed,
                # leaking one file descriptor per restored file; `with`
                # closes it even if GetChunk raises.
                with open(filename, "wb") as fh:
                    for (md5, length) in v["chunks"]:
                        logging.info("Retrieving %d bytes of %s to %s", length, k, filename)
                        store.GetChunk(md5, length, fh)

def PrintStatus(file, percent):
    """Progress callback for BackupDirectory/SyncDirectory.

    Prints a one-line status and rewinds the cursor so the next call
    overwrites the line in place.  The trailing commas are Python 2
    print-statement syntax that suppresses the newline.
    NOTE(review): parameter name shadows the Python 2 builtin `file`.
    """
    print "Processing: {0}... {1}% done".format(file, percent),
    sys.stdout.flush()
    # "\033[80D" is the ANSI escape moving the cursor 80 columns left.
    print "\033[80D",

if __name__ == '__main__':
    # Demo/smoke-test entry point: back up /boot and restore the snapshot.
    store = localstore.LocalStore("/tmp")
    # BUG FIX: the old code did `version = BackupDirectory(...)` -- but
    # BackupDirectory returns a (dirs, files) tuple, not a version -- and
    # then called RestoreSnapshot(store, version) with two arguments where
    # four are required.  SyncDirectory is the call that performs a backup
    # and returns a version number usable by RestoreSnapshot.
    # NOTE(review): restore target chosen as /tmp/restore to avoid writing
    # over /boot -- confirm the intended demo paths.
    version = SyncDirectory(store, "/boot", "/boot/", PrintStatus)
    RestoreSnapshot(store, "/boot/", "/tmp/restore", version)
