#!/usr/bin/python
from __future__ import with_statement
import StringIO
import tempfile
from pgdump_rsync.dump_file import DumpFile
try:
    import librsync
except ImportError:
    # Bare "except:" here used to hide any failure (even KeyboardInterrupt
    # or a genuine bug inside librsync); only a missing module should
    # trigger the fallback to the copy bundled with duplicity.
    import duplicity.librsync as librsync


class Config(object):
    """Tunable knobs for the dump synchronization.

    min_rsync_size   -- entries smaller than this many bytes (on either
                        side) are fetched whole instead of rsync'd;
                        defaults to 64 KiB.
    blobs_ignore_hash -- whether blob hash comparison may be skipped;
                        defaults to False.
    """

    def __init__(self, min_rsync_size=None, blobs_ignore_hash=None):
        if min_rsync_size is None:
            min_rsync_size = 2 ** 16  # 64 KiB default threshold
        if blobs_ignore_hash is None:
            blobs_ignore_hash = False
        self.min_rsync_size = min_rsync_size
        self.blobs_ignore_hash = blobs_ignore_hash


def sync_dumps(old_dump_filename, server, target_filename, config):
    """Build a new dump at target_filename by syncing against an old dump.

    Reads the previous dump from old_dump_filename, fetches the new
    dump's metadata and entries from `server`, and writes the result to
    target_filename.  Entries that are unchanged (same size and hash)
    are copied from the old dump instead of being transferred; changed
    entries go through librsync delta transfer (see _sync_file).

    :param old_dump_filename: path of the previous dump to sync against.
    :param server: source-side object providing get_metadata(),
        get_entry_data(), get_size_and_hash(), calc_delta(),
        get_blob_entries() and get_blob_data().
    :param target_filename: path where the new dump is written.
    :param config: Config instance (min_rsync_size, blobs_ignore_hash).
    """
    with open(old_dump_filename, "rb") as old_fd, \
         open(target_filename, "wb+") as target_fd:

        # Read and prepare old_dump
        old_dump = DumpFile(old_fd)
        old_dump.calc_sizes()
        old_dump.calc_blobs_toc()

        # Get new dump metadata and write it to target_file
        metadata = server.get_metadata()
        target_fd.write(metadata)
        target_fd.seek(0)
        new_dump = DumpFile(target_fd)

        # For each entry:
        #   If it doesn't exist in old_dump, just fetch it whole.
        #   If it exists but sizes differ, sync.
        #   If sizes match but hashes differ, sync.
        #   Otherwise reuse the data from old_dump.
        #   Then write it to the target file.
        for new_entry in new_dump.get_data_entries():
            old_entry = old_dump.find_entry(lambda e: e.tag == new_entry.tag and
                                                      e.section == new_entry.section)
            if not old_entry:
                # Entry does not exist in the old dump: fetch it whole.
                data = server.get_entry_data(new_entry.dump_id)
                new_entry.size = len(data)  # update entry size, needed to write
            else:
                new_entry.size, new_entry.hash = server.get_size_and_hash(new_entry.dump_id)
                # NOTE: when old_entry.hash is unset, the left operand is the
                # falsy hash itself, which won't equal the new hash and thus
                # forces a sync (the safe direction).
                if old_entry.size != new_entry.size or \
                    (old_entry.hash and old_dump.calc_hash(old_entry)) != new_entry.hash:
                    data = _sync_file(old_dump, old_entry, new_entry, server, config)
                else:  # Size and hash match, the data didn't change.
                    data = old_dump.read_data(old_entry)
            new_dump.write_data(new_entry, data)

        if new_dump.write_blobs_header():
            for blob in server.get_blob_entries():
                old_blob = old_dump.find_blob(blob.oid)
                if not old_blob:
                    # Blob does not exist in the old dump: fetch it whole.
                    data = server.get_blob_data(blob.oid)
                else:
                    # config.blobs_ignore_hash skips the (possibly expensive)
                    # hash comparison and trusts size alone.  This option was
                    # previously accepted by Config but never honored here.
                    hash_differs = (not config.blobs_ignore_hash and
                                    (old_blob.hash and
                                     old_dump.calc_blob_hash(old_blob)) != blob.hash)
                    if old_blob.size != blob.size or hash_differs:
                        data = server.get_blob_data(blob.oid)
                    else:  # Size (and hash) match, the blob didn't change.
                        data = old_dump.read_blob(old_blob.oid)
                new_dump.write_blob(blob, data)
            new_dump.write_blobs_footer()
        # else: the dump doesn't have blobs

def _sync_file(old_dump, old_entry, new_entry, server, config):
    """Fetch the new contents of one dump entry via librsync delta transfer.

    Entries below config.min_rsync_size (on either side) are transferred
    whole.  Otherwise a librsync signature of the old data is sent to the
    server, which answers with a delta that is applied locally on top of
    the old data.  Returns the new data as a byte string.
    """
    if new_entry.size < config.min_rsync_size or old_entry.size < config.min_rsync_size:
        # Too small for delta transfer to pay off: just get it whole.
        return server.get_entry_data(new_entry.dump_id)
    old_data = old_dump.read_data(old_entry)
    tmp_file = tempfile.TemporaryFile()
    tmp_file.write(old_data)
    tmp_file.seek(0)

    # Reading SigFile drives librsync's signature job over tmp_file.
    sig_file = librsync.SigFile(tmp_file)
    # Ask the server for the delta between its data and our signature.
    delta = server.calc_delta(new_entry.dump_id, sig_file.read())
    del sig_file

    # Write tmp_file again: SigFile closes its underlying file when it is
    # deleted, so the first temporary file is no longer usable here.
    tmp_file = tempfile.TemporaryFile()
    tmp_file.write(old_data)
    tmp_file.seek(0)

    # Apply the patch: PatchedFile reads the old data plus the delta and
    # yields the reconstructed new data.
    target_file = librsync.PatchedFile(tmp_file, StringIO.StringIO(delta))
    return target_file.read()


if __name__ == "__main__":
    # Does a "local" sync.
    # It doesn't really make sense (you'd be better off doing
    # cp <last-dump> <target-file>); it exists purely for testing.

    import sys
    from server import SyncServer

    if len(sys.argv) != 4:
        # sys.stderr.write instead of the Python 2 "print >>" statement:
        # identical output, and the file stays parseable by Python 3 tools.
        sys.stderr.write("Usage: %s <last-dump> <prev-dump> <target-file>\n" % sys.argv[0])
        sys.exit(1)  # usage error must exit non-zero (was sys.exit(0))

    last_dump, old_dump, target_file = sys.argv[1:4]

    server = SyncServer(last_dump)
    config = Config(min_rsync_size=2**16, blobs_ignore_hash=False)
    sync_dumps(old_dump, server, target_file, config)
