#! /usr/bin/python
#
# Stephen Poletto (spoletto)
# Peter Wilmot (pbwilmot)
# CSCI1580 - Web Search
# Spring 2011 - Brown University
#
# This program takes in 2 files:
# a postings list file to be compressed
# and a file to output the compressed
# postings to.

from bitarray import bitarray
import random
import sys
import re
import os

# Each encoded value is emitted as 4-bit words: 1 flag bit + 3 data bits.
WORD_SIZE = 4
CHUNK_SIZE = WORD_SIZE - 1  # data bits per 4-bit word
BYTE_SIZE = 8
# Bits accumulated here until whole bytes can be flushed to disk.
bit_buffer = bitarray()
# Maps term ID -> cumulative byte offset of that term's record in the body.
termID_to_position_in_file = { 0 : 0 }
bytes_written = 0
curr_term = 1

# Scratch file for the compressed body; the header (term byte offsets)
# is written ahead of it at the end.
tmp_file_name = "tmp" + str(random.randint(0,100000)) + ".dat"
# Bit patterns for the record separator, EOF marker and docID/positions
# colon. new_line and colon are rebound in __main__ for positional input.
new_line = '10001000'
eof = '1000100010001000'
colon = ''

def write_term_positions_to_file(compressed_file, final_dst):
    """Write the header (gap-encoded term byte offsets), a separator,
    then the compressed posting body copied from the temp file.

    Closes both compressed_file and final_dst and removes the temp file.
    """
    # Gap-encode the byte positions (first value is absolute). Iterate
    # the keys in sorted order: Python 2 dict iteration order is not
    # guaranteed, and the gaps are only valid when offsets are visited
    # in increasing term-ID order.
    differences = []
    prev_position = 0
    for term_id in sorted(termID_to_position_in_file.keys()):
        position = termID_to_position_in_file[term_id]
        differences.append(position - prev_position)
        prev_position = position
    for num in differences:
        int_to_byte_int(num, final_dst)

    # The header always ends with the two-char separator '10001000',
    # regardless of what new_line is currently set to for the body.
    global new_line
    old = new_line
    new_line = '10001000'
    write_separator_char(final_dst)
    new_line = old

    # Copy the compressed body from the temp file. Open in binary mode
    # and copy fixed-size chunks: the data is raw bytes, so text-mode
    # readline() could mangle or mis-split it.
    compressed_file.close()
    compressed_file = open(tmp_file_name, 'rb')
    while 1:
        copy_buffer = compressed_file.read(65536)
        if not copy_buffer:
            break
        final_dst.write(copy_buffer)

    compressed_file.close()
    os.remove(tmp_file_name)
    final_dst.close()

def flush_bit_buffer(compressed_file):
    """Pad the pending bits out to a whole byte and write them out."""
    global bit_buffer
    # fill() appends zero bits until the length is a multiple of 8.
    bit_buffer.fill()
    assert bit_buffer.length() % BYTE_SIZE == 0
    write_bit_buffer_to_file(compressed_file)

def write_colon(compressed_file):
    """Append the colon marker bits and flush any completed bytes.

    In non-positional mode colon is the empty string, so this is a
    harmless no-op for the buffer contents.
    """
    global bit_buffer
    bit_buffer.extend(bitarray(colon))
    write_bit_buffer_to_file(compressed_file)

def write_separator_char(compressed_file):
    """Append the record separator, pad to a byte boundary, and record
    the byte offset at which the next term's record will begin."""
    global bit_buffer
    global termID_to_position_in_file
    global curr_term
    bit_buffer.extend(bitarray(new_line))
    flush_bit_buffer(compressed_file)
    # After the flush, bytes_written points just past this record.
    termID_to_position_in_file[curr_term] = bytes_written
    curr_term += 1

def write_bit_buffer_to_file(compressed_file):
    """Drain every complete byte from the global bit buffer to the file,
    leaving any partial byte (fewer than 8 bits) pending."""
    global bit_buffer
    global bytes_written
    while bit_buffer.length() >= BYTE_SIZE:
        head, bit_buffer = bit_buffer[:BYTE_SIZE], bit_buffer[BYTE_SIZE:]
        head.tofile(compressed_file)
        bytes_written += 1

def finalize_file(compressed_file):
    """Append the end-of-file marker and flush the remaining bits."""
    global bit_buffer
    bit_buffer.extend(bitarray(eof))
    flush_bit_buffer(compressed_file)

def int_to_byte_int(num, file):
    """Encode num as a sequence of 4-bit words and flush them to file.

    num's binary representation is left-padded with zeros to a multiple
    of CHUNK_SIZE (3) bits and split into 3-bit chunks, most significant
    first. Each chunk becomes a 4-bit word whose leading bit is '0' for
    a continuation chunk and '1' on the final chunk (the stop marker).
    """
    global bit_buffer
    bits = bin(num)[2:]
    # Left-pad so the length is an exact multiple of CHUNK_SIZE.
    remainder = len(bits) % CHUNK_SIZE
    if remainder:
        bits = '0' * (CHUNK_SIZE - remainder) + bits
    assert len(bits) % CHUNK_SIZE == 0
    num_chunks = len(bits) / CHUNK_SIZE  # Python 2 integer division
    for i in range(num_chunks):
        chunk = bits[i * CHUNK_SIZE:(i + 1) * CHUNK_SIZE]
        # Final chunk carries a '1' stop bit as its MSB; others get '0'.
        flag = '1' if i == num_chunks - 1 else '0'
        word = bitarray(flag + chunk)
        assert word.length() == WORD_SIZE
        bit_buffer.extend(word)
        write_bit_buffer_to_file(file)

def add_differences_to_buffer(differences, compressed_file):
    """Encode each gap value, then terminate the record with a separator."""
    for gap in differences:
        int_to_byte_int(gap, compressed_file)
    write_separator_char(compressed_file)

def process_positional_posting(posting_list, compressed_file):
    """Compress a positional postings file.

    Each input line holds whitespace-separated entries of the form
    'docID:pos1,pos2,...'. Doc IDs and positions are gap-encoded
    (first value absolute, later values as differences from the
    previous one), with a colon marker written between a doc ID and
    its positions and a separator written after each line.

    NOTE(review): unlike process_non_positional_posting, this never
    calls finalize_file -- confirm the positional format intentionally
    omits the EOF marker.
    """
    doc_pattern = re.compile(r'([0-9]+):')
    pos_pattern = re.compile(r'[0-9]+:(.*)')
    for line in posting_list.readlines():
        prev_docID = None
        for doc in line.split():
            docID = int(doc_pattern.findall(doc)[0])
            positions = [int(k) for k in pos_pattern.findall(doc)[0].split(',')]

            # Gap-encode the doc ID (the first one is written absolute).
            if prev_docID is None:
                docID_diff = docID
            else:
                docID_diff = docID - prev_docID
            prev_docID = docID
            int_to_byte_int(docID_diff, compressed_file)
            write_colon(compressed_file)

            # Gap-encode the positions within this document.
            prev_pos = None
            for pos in positions:
                if prev_pos is None:
                    positional_diff = pos
                else:
                    positional_diff = pos - prev_pos
                prev_pos = pos
                int_to_byte_int(positional_diff, compressed_file)
        write_separator_char(compressed_file)

def process_non_positional_posting(posting_list, compressed_file):
    """Compress a non-positional postings file.

    Each line is a list of doc IDs; they are gap-encoded (first value
    absolute) and written as one record, then the EOF marker is
    appended after the last line.
    """
    number = re.compile(r'\b[0-9]+\b')
    for line in posting_list.readlines():
        ids = [int(tok) for tok in number.findall(line)]
        # First ID is absolute; the rest are gaps from the previous ID.
        differences = []
        for idx, value in enumerate(ids):
            if idx == 0:
                differences.append(value)
            else:
                differences.append(value - ids[idx - 1])
        add_differences_to_buffer(differences, compressed_file)
    finalize_file(compressed_file)

if __name__ == "__main__":
    if (len(sys.argv) != 3):
        print ""
        print "usage: createIndex <postings_list.dat.np | postings_list.dat.p> <compressed list>"
        print ""
        sys.exit()

    # Initial setup:

    postinglistFile = open(sys.argv[1], "r")
    compressedFile = open(tmp_file_name, 'wb')

    if(sys.argv[1].endswith('.dat.np')):
        process_non_positional_posting(postinglistFile, compressedFile)

    elif(sys.argv[1].endswith('.dat.p')):
        colon = '10001000'
        new_line = '1000100010001000'
        process_positional_posting(postinglistFile, compressedFile)

    else:
        print ""
        print 'the postings list must end with either .dat.np or .dat.p'
        print 'postings_list.dat.np | postings_list.dat.p> <compressed list>'
        print ""
        sys.exit()

    write_term_positions_to_file(compressedFile, open(sys.argv[2], 'wb'))

    # Cleanup
    postinglistFile.close()
    compressedFile.close()