#!/usr/bin/python

import struct
from collections import namedtuple
from hashlib import md5

# struct format of the fixed-size file header: 5-byte magic ("PGDMP"),
# major/minor/revision version bytes, integer byte width, offset byte
# width and archive format code.
HEADER_FMT = "5sBBBBBB"

# Archive section names, indexed by (section_code - 1) in read_toc_entry.
SECTIONS = ["None", "PreData", "Data", "PostData"]

# Meaning of the one-byte offset flag stored with each TOC entry.
OFFSET_FLAGS = ["None", "POS_NO_SET", "POS_SET", "NO_DATA"]

# Block type tag for a table-data block in the data section.
BLK_TYPE_DATA = 1

# Block type tag for the large-objects (blobs) block.
BLK_TYPE_BLOBS = 3

# Maps an integer/offset byte width to the matching struct format character.
SIZE_TO_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'}

# Parsed fixed-size file header (see HEADER_FMT above).
FileHeader = namedtuple("FileHeader", "magic vmaj vmin vrev int_size offset_size format")

# Parsed dump header: compression level, creation timestamp parts,
# database name, server/pg_dump versions and the number of TOC entries.
DumpHeader = namedtuple("DumpHeader", "compression sec min hour day mon yr isdst "
                                      "dbname remote_version pgdump_version toc_count")


class TocEntry(object):
    """Wraps one TOC entry, adding mutable size/hash bookkeeping fields.

    The immutable entry fields live in an internal namedtuple and are
    exposed transparently through attribute access; ``size`` and ``hash``
    are filled in later (see DumpFile.calc_sizes / calc_hash).
    """

    _TocEntry = namedtuple("TocEntry", "dump_id data_dumper tableoid oid tag desc "
                                      "section defn drop_statement copy_statement "
                                      "namespace tablespace owner with_oids dependencies "
                                      "extra_offset_flag extra_offset")

    def __init__(self, *args, **kargs):
        # Field values go straight into the namedtuple; the two derived
        # attributes start out unknown.
        self._tuple = self._TocEntry(*args, **kargs)
        self.size = None
        self.hash = None

    def __getattr__(self, name):
        # Only reached for attributes not found on the instance itself:
        # fall through to the underlying namedtuple's fields.
        return getattr(self._tuple, name)

    def __str__(self):
        combined = (self._tuple, self.size, self.hash)
        return str(combined)


class BlobTocEntry(object):
    """Bookkeeping record for a single large object (blob): its oid, the
    byte size of its stored data, its offset inside the dump file, and an
    optional digest of the data."""

    def __init__(self, oid, size, offset, hash=None):
        self.oid, self.size = oid, size
        self.offset, self.hash = offset, hash


class DumpFile(object):
    """Reader/patcher for a PostgreSQL pg_dump custom-format archive.

    The constructor parses the file header, the dump header and the whole
    TOC from *fd*; calc_sizes() and calc_blobs_toc() then walk the data
    section to locate every entry's data block and every blob.
    """

    # NOTE: "version" is deliberately NOT listed as a slot -- it is
    # provided by the property below, and naming it in __slots__ too
    # raises "ValueError: 'version' in __slots__ conflicts with class
    # variable" at class-creation time, making the module unimportable.
    __slots__ = [
        "fd",          # underlying file object (open it in binary mode)
        "file_header", # Full file_header, just in case
        "dump_header", # Full dump_header, just in case
        "toc",         # list of TocEntry read from the archive's TOC
        "data_start",  # offset where TOC ends and data begins
        "blobs_toc",   # List of BlobTocEntry with size and offset for each blob
        ]

    def __init__(self, fd):
        """Parses headers and the TOC; *fd* must be positioned at offset 0."""
        self.fd = fd
        self.read_file_header()
        self.read_dump_header()
        self.toc = []
        self.blobs_toc = [] # populated by calc_blobs_toc()
        for x in xrange(self.toc_count):
            self.toc.append(self.read_toc_entry())
        self.data_start = self.fd.tell() # save full TOC and header size

    def _read_int(self):
        """Reads one archive integer: a sign byte followed by int_size
        little-endian bytes. Returns the (possibly negated) value."""
        format = "<B" + SIZE_TO_FORMAT[self.int_size]
        sign, ret = struct.unpack(format, self.fd.read(self.int_size + 1))
        if sign:
            return -ret
        return ret

    def _read_string(self):
        """Reads a length-prefixed string; non-positive length yields ""."""
        length = self._read_int()
        if length > 0:
            return self.fd.read(length)
        return ""

    def read_all_metadata(self):
        """Returns the raw bytes of everything before the data section."""
        self.fd.seek(0)
        return self.fd.read(self.data_start)

    def read_file_header(self):
        """Reads and validates the fixed-size "PGDMP" file header.

        Raises RuntimeError on a short read or a wrong magic string.
        """
        length = struct.calcsize(HEADER_FMT)
        s = self.fd.read(length)
        if len(s) != length:
            raise RuntimeError("Invalid header")

        header = FileHeader(*struct.unpack(HEADER_FMT, s))
        if header.magic != "PGDMP":
            raise RuntimeError("Error, not a valid pg_dump file")
        self.file_header = header
        return header

    @property
    def int_size(self):
        # Byte width of integers stored in this archive.
        return self.file_header.int_size

    @property
    def offset_size(self):
        # Byte width of file offsets stored in this archive.
        return self.file_header.offset_size

    @property
    def version(self):
        """Archive format version as a (major, minor, rev) tuple."""
        return self.file_header.vmaj, self.file_header.vmin, self.file_header.vrev

    @property
    def toc_count(self):
        # Number of TOC entries, taken from the dump header.
        return self.dump_header.toc_count

    def read_dump_header(self):
        """Reads the dump header (compression, timestamp, db info, TOC count)."""
        values = {}
        for x in "compression sec min hour day mon yr isdst".split():
            values[x] = self._read_int()
        # The timestamp is stored struct-tm style: years since 1900,
        # months counted from 0 -- normalize both.
        values["yr"] += 1900
        values["mon"] += 1
        for x in "dbname remote_version pgdump_version".split():
            values[x] = self._read_string()
        values["toc_count"] = self._read_int()
        ret = DumpHeader(**values)
        self.dump_header = ret
        return ret

    def read_toc_entry(self):
        """Reads one TOC entry at the current file position."""
        fields = {}
        fields["dump_id"] = self._read_int()
        fields["data_dumper"] = self._read_int()
        # tableoid/oid are stored as decimal strings; prepending "0"
        # makes an empty string parse as 0.
        fields["tableoid"] = int("0" + self._read_string())
        fields["oid"] = int("0" + self._read_string())
        fields["tag"] = self._read_string()
        fields["desc"] = self._read_string()
        fields["section"] = SECTIONS[self._read_int() - 1]
        fields["defn"] = self._read_string()
        fields["drop_statement"] = self._read_string()
        fields["copy_statement"] = self._read_string()
        fields["namespace"] = self._read_string()
        fields["tablespace"] = self._read_string()
        fields["owner"] = self._read_string()
        fields["with_oids"] = self._read_string()
        # Dependencies are dump_id strings terminated by an empty string.
        fields["dependencies"] = []
        while 1:
            s = self._read_string()
            if not s:
                break
            fields["dependencies"].append(s)
        fields["extra_offset_flag"] = struct.unpack("B", self.fd.read(1))[0]
        fields["extra_offset"] = struct.unpack(SIZE_TO_FORMAT[self.offset_size],
                                               self.fd.read(self.offset_size))[0]
        return TocEntry(**fields)

    def calc_sizes(self):
        """Walks every data block and stores its on-disk size -- chunk data
        plus every chunk-length prefix -- in toc_entry.size."""
        for toc_entry in (tc for tc in self.toc if tc.extra_offset):
            self.fd.seek(toc_entry.extra_offset)

            # Reads and validates block type and dump_id
            blk_type = struct.unpack("B", self.fd.read(1))[0]
            dump_id = self._read_int()
            if dump_id != toc_entry.dump_id:
                raise RuntimeError("Corrupted dump!")
            if blk_type != BLK_TYPE_DATA:
                continue # Skip BLK_TYPE_BLOBS

            # A data block is a sequence of length-prefixed chunks ended
            # by a zero length; each prefix is a sign byte + int_size.
            size = self._read_int()
            total_size = 1 + self.int_size
            while size:
                self.fd.seek(size, 1) # Skip
                total_size += size
                size = self._read_int()
                total_size += 1 + self.int_size
            toc_entry.size = total_size

    def calc_blobs_toc(self):
        """Builds blobs_toc: one BlobTocEntry (oid, size, offset) per blob."""
        self.blobs_toc = []

        # Find blobs entry
        blobs_toc_entry = self.find_entry(lambda tc: tc.tag == "BLOBS" and
                                                     tc.desc == "BLOBS" and tc.extra_offset)
        if not blobs_toc_entry:
            return # The dump doesn't have blobs?

        self.fd.seek(blobs_toc_entry.extra_offset)

        # Reads and validates blk_type and dump_id
        blk_type = struct.unpack("B", self.fd.read(1))[0]
        dump_id = self._read_int()
        if blk_type != BLK_TYPE_BLOBS:
            raise RuntimeError("Error, in BLOBS's offset is found a block of wrong type: %s" % blk_type)
        if dump_id != blobs_toc_entry.dump_id:
            raise RuntimeError("Corrupted dump!")

        oid = self._read_int()
        # First blob's data starts right after blk_type (1 byte), dump_id
        # (sign byte + int) and oid (sign byte + int).
        blob_offset = (blobs_toc_entry.extra_offset +
                       1 + 1 + self.int_size + 1 + self.int_size)
        assert blob_offset == self.fd.tell()
        blob_size = 0
        while oid:
            # Each blob is a chunk sequence terminated by a zero length;
            # blob_size counts the chunk data and every length prefix.
            size = self._read_int()
            blob_size += 1 + self.int_size
            while size:
                self.fd.seek(size, 1) # Skip forward
                blob_size += size
                size = self._read_int()
                blob_size += 1 + self.int_size
            self.blobs_toc.append(BlobTocEntry(oid=oid, size=blob_size, offset=blob_offset))
            oid = self._read_int()
            # The next blob starts after this blob's bytes plus the next
            # oid field we just consumed.
            blob_offset += blob_size + 1 + self.int_size
            blob_size = 0
            assert blob_offset == self.fd.tell()

    def write_blobs_header(self):
        """Rewrites the blk_type/dump_id header of the BLOBS block.

        Returns True on success, False when the dump has no blobs entry.
        """
        blobs_toc_entry = self.find_entry(lambda tc: tc.tag == "BLOBS" and
                                                     tc.desc == "BLOBS" and tc.extra_offset)
        if not blobs_toc_entry:
            return False # The dump doesn't have blobs?
        self.fd.seek(blobs_toc_entry.extra_offset)
        # blk_type byte, then dump_id as sign byte (0 = positive) + int.
        fmt = "<BB" + SIZE_TO_FORMAT[self.int_size]
        self.fd.write(struct.pack(fmt, BLK_TYPE_BLOBS, 0, blobs_toc_entry.dump_id))
        return True

    def write_blob(self, blob, data):
        """Writes blob to the file. Does not seek, assumes we are in the right position"""
        # The oid is written as sign byte (0 = positive) + integer.
        fmt = "<B" + SIZE_TO_FORMAT[self.int_size]
        self.fd.write(struct.pack(fmt, 0, blob.oid))
        self.fd.write(data)

    def write_blobs_footer(self):
        """Writes zero oid, to identify end of blobs"""
        fmt = "<B" + SIZE_TO_FORMAT[self.int_size]
        self.fd.write(struct.pack(fmt, 0, 0))

    def read_blob(self, oid):
        """Returns the blob's raw stored bytes (chunk-length prefixes
        included). Requires calc_blobs_toc() to have run first."""
        blob_te = self.find_blob(oid)
        self.fd.seek(blob_te.offset)
        return self.fd.read(blob_te.size)

    def find_blob(self, oid):
        """Finds a blob's TOC entry; returns None when oid is unknown.

        Raises RuntimeError if the oid appears more than once.
        """
        blob_te = [te for te in self.blobs_toc if te.oid == oid]
        if len(blob_te) == 0:
            return None
        if len(blob_te) != 1:
            raise RuntimeError("More than one blob with the same oid: %s" % oid)
        return blob_te[0]

    def read_data(self, entry):
        """Returns the entry's raw stored bytes (chunk-length prefixes
        included). Requires calc_sizes() to have filled entry.size."""
        self.fd.seek(entry.extra_offset + 1 + 1 + self.int_size) # 1+1+int_size --> skip blk_type and dump_id
        return self.fd.read(entry.size)

    def write_data(self, entry, data):
        """Writes data for that entry; data must be exactly entry.size bytes."""
        if len(data) != entry.size:
            raise RuntimeError("Invalid data size, it should be %s bytes" % entry.size)
        self.fd.seek(entry.extra_offset)
        fmt = "<BB" + SIZE_TO_FORMAT[self.int_size]
        self.fd.write(struct.pack(fmt, BLK_TYPE_DATA, 0, entry.dump_id))
        self.fd.write(data)

    def calc_hash(self, entry):
        """Calculates the md5 hash for the given entry's data. Updates the
        entry itself and returns the digest."""
        data = self.read_data(entry)
        entry.hash = md5(data).digest()
        return entry.hash

    def calc_blob_hash(self, blob_entry):
        """Calculates the md5 hash for the given blob's data. Updates the
        blob_entry itself and returns the digest."""
        data = self.read_blob(blob_entry.oid)
        blob_entry.hash = md5(data).digest()
        return blob_entry.hash

    def get_data_entries(self):
        """Returns TOC entries that carry data, excluding the BLOBS entry."""
        return [te for te in self.toc if te.extra_offset and te.tag != "BLOBS"]

    def find_entry(self, toc_predicate):
        """Finds an entry matching the predicate. Only one allowed.

        Returns None when nothing matches; raises RuntimeError when more
        than one entry matches.
        """
        tes = [te for te in self.toc if toc_predicate(te)]
        if not tes:
            return None # Not Found
        if len(tes) != 1:
            raise RuntimeError("More than one toc_entry matching")
        return tes[0]

if __name__ == "__main__":
    import sys

    fd = open(sys.argv[1])
    dump_file = DumpFile(fd)
#    print dump_file.file_header
#    print dump_file.dump_header
#    print dump_file.toc
    dump_file.calc_sizes()
    dump_file.calc_blobs_toc()
    #print dump_file.block_sizes
    #print dump_file.blobs_toc

    for x in dump_file.toc:
        if x.extra_offset and x.tag != "BLOBS" and x.desc == "TABLE DATA":
            print x

##     dump_header = read_dump_header(fd, header)
##     print dump_header
##
##     toc_entries = []
##     for x in xrange(dump_header.toc_count):
##         toc_entry = read_toc_entry(fd, header.int_size, header.offset_size)
##         toc_entries.append(toc_entry)
##         print toc_entry
