#! /usr/bin/python
#
# Stephen Poletto (spoletto)
# Peter Wilmot (pbwilmot)
# CSCI1580 - Web Search
# Spring 2011 - Brown University


from bitarray import bitarray
import sys

WORD_SIZE = 4
CHUNK_SIZE = WORD_SIZE - 1
BYTE_SIZE = 8
NO_SPECIAL_CHAR = -1
NEWLINE = 0
SPECIAL_CHAR = '1000'
curr_file_offset = 0
largest_term = 0
header_ends = 0


# Print the results to stdout
def print_results(docIDs):
    """Write docIDs to stdout space-separated, terminated by a newline.

    An empty list produces just the newline, matching the original
    output format exactly.
    """
    # str.join handles the no-trailing-separator case directly and
    # avoids shadowing the builtin `id` in a manual loop.
    sys.stdout.write(" ".join(str(doc_id) for doc_id in docIDs))
    sys.stdout.write('\n')

def num_special_chars(bv):
    """Count consecutive SPECIAL_CHAR words in bv, starting at the
    current global read offset.  The offset itself is left untouched;
    only a local cursor is advanced."""
    marker = bitarray(SPECIAL_CHAR)  # built once, outside the loop
    cursor = curr_file_offset
    run_length = 0
    while bv[cursor:cursor + WORD_SIZE] == marker:
        run_length += 1
        cursor += WORD_SIZE
    return run_length

def get_next_word(bv):
    """Return the next WORD_SIZE-bit word of bv and advance the global
    read cursor past it."""
    global curr_file_offset
    start = curr_file_offset
    curr_file_offset = start + WORD_SIZE
    return bv[start:start + WORD_SIZE]

def print_positions(positions):
    """Write the position list to stdout comma-separated, with no
    trailing separator and no newline.  An empty list writes nothing.
    """
    # str.join replaces the manual all-but-last loop; an empty list
    # yields "" so nothing is written, same as the original.
    sys.stdout.write(",".join(str(pos) for pos in positions))

def find_positions(bv, lastDocID, bytes_to_read):
    # Decode and print the positional postings for one term.
    #
    # bv holds the variable-length-encoded term section; lastDocID is
    # the first docID (already decoded by the caller); bytes_to_read is
    # the byte length of the section (0 means the caller read to EOF).
    # Output is "docID:pos,pos,... docID:pos,..." ending in a newline.
    # Advances the global curr_file_offset as it consumes bv.
    global curr_file_offset
    sys.stdout.write(str(lastDocID) + ":")
    positions = []
    while True:

        # Runs of the reserved '1000' word act as separators; the run
        # length distinguishes the delimiter kinds handled below.
        special = num_special_chars(bv)

        if special == 7:
            # Terminator of the form "num:0\n" (per the original
            # encoder's convention).
            curr_file_offset += 7*WORD_SIZE
            # The most recently decoded value is the gap to the next
            # docID, not a position — pop it and adjust lastDocID.
            popped = positions.pop()
            lastDocID += popped
            lastDocID -= positions[-1]
            print_positions(positions)
            sys.stdout.write(" ")
            sys.stdout.write(str(lastDocID) + ":")
            positions = [0]
            print_positions(positions)
            sys.stdout.write('\n')
            break

        if special == 5:
            # Back up two words and re-count: a 5-run may be the tail
            # of a 7-run, which marks the special ":0\n" ending.
            curr_file_offset -= 2*WORD_SIZE
            if (num_special_chars(bv) == 7):
            # This is a special case for
            # :0\n
                curr_file_offset += 7*WORD_SIZE
                positions = [0]
                print_positions(positions)
                sys.stdout.write('\n')
                break
        elif special >= 4:
            # Have we read the full term section?
            if curr_file_offset >= (bytes_to_read-4)*BYTE_SIZE:
                curr_file_offset += 4*WORD_SIZE
                print_positions(positions)
                sys.stdout.write('\n')
                break
        elif special >= 2:
            # Two-word separator: end of this document's position list.
            curr_file_offset += 2*WORD_SIZE
            # As in the 7-run case, the last decoded value is the
            # docID gap rather than a position.
            popped = positions.pop()
            lastDocID += popped
            lastDocID -= positions[-1]
            print_positions(positions)
            sys.stdout.write(" ")
            positions = []
            sys.stdout.write(str(lastDocID) + ":")
            continue
        # Decode one variable-length value: each 4-bit word carries 3
        # payload bits; a leading 1 bit marks the final word of the value.
        full_word = get_next_word(bv)
        curr_word = full_word
        full_word = full_word[1:WORD_SIZE]
        while (curr_word[0] != 1):
            curr_word = get_next_word(bv)
            full_word.extend(curr_word[1:WORD_SIZE])
        curr_diff_val = int(full_word.to01(), 2)
        # Positions are delta-encoded; keep a running cumulative sum.
        if len(positions) == 0:
            positions.append(curr_diff_val)
        else:
            positions.append(curr_diff_val + positions[-1])

def process_query_positional(query):
    # Look up termID `query` in the positional index file and print its
    # posting list ("docID:pos,... docID:pos,...") to stdout.
    global curr_file_offset
    bv = bitarray()
    curr_file_offset = 0
    bytes_to_read = 0
    if (query < largest_term):
        # The section length is the gap to the next term's file offset.
        bytes_to_read = termID_to_position_in_file[query+1] - termID_to_position_in_file[query]
        file.seek(termID_to_position_in_file[query]+header_ends)
        bv.fromfile(file, bytes_to_read)
    else:
        # Last term in the file: read until EOF
        # (bytes_to_read stays 0 in this case).
        file.seek(termID_to_position_in_file[query]+header_ends)
        bv.fromfile(file)

    # Decode the first docID (variable-length: 3 payload bits per
    # 4-bit word; a leading 1 bit marks the final word).
    full_word = get_next_word(bv)
    curr_word = full_word
    full_word = full_word[1:WORD_SIZE]
    while (curr_word[0] != 1):
        curr_word = get_next_word(bv)
        full_word.extend(curr_word[1:WORD_SIZE])
    initialDocID = int(full_word.to01(), 2)
    # There is necessarily a colon following, so increment the file pointer
    curr_file_offset += 2*WORD_SIZE
    find_positions(bv, initialDocID, bytes_to_read)

def process_query_nonpositional(query):
    """Look up termID `query` in the non-positional index file and
    print its space-separated docID list to stdout."""
    global curr_file_offset
    curr_file_offset = 0
    bv = bitarray()
    file.seek(termID_to_position_in_file[query] + header_ends)
    if (query < largest_term):
        # The next term's offset bounds this term's section.
        section_len = termID_to_position_in_file[query + 1] - termID_to_position_in_file[query]
        bv.fromfile(file, section_len)
    else:
        # Final term in the file: read until EOF.
        bv.fromfile(file)

    docIDs = []
    # A run of exactly two reserved words terminates the docID list.
    while num_special_chars(bv) != 2:
        # Decode one variable-length value: 3 payload bits per word;
        # a leading 1 bit marks the last word of the value.
        curr = get_next_word(bv)
        payload = curr[1:WORD_SIZE]
        while (curr[0] != 1):
            curr = get_next_word(bv)
            payload.extend(curr[1:WORD_SIZE])
        gap = int(payload.to01(), 2)
        # docIDs are gap-encoded relative to the previous docID.
        docIDs.append(gap if not docIDs else gap + docIDs[-1])
    curr_file_offset += 2*WORD_SIZE
    print_results(docIDs)

if __name__ == "__main__":
    if (len(sys.argv) != 2):
        print ""
        print "usage: query <compressed_list.dat.np | compressed_list.dat.p>"
        print ""

    file = open(sys.argv[1], 'rb')
    bv = bitarray()
    buffer = bitarray()
    termID_to_position_in_file = {}
    curr_term = 0
    byte_position = 0
    first_term = True
    while True:
        if (bv[curr_file_offset:].length() < WORD_SIZE):
            bv.fromfile(file, 1)
        full_word = get_next_word(bv)
        if full_word == bitarray('1000'):
            if (bv[curr_file_offset:].length() < WORD_SIZE):
                bv.fromfile(file, 1)
            next_word = bv[curr_file_offset:curr_file_offset + WORD_SIZE]
            if next_word == bitarray('1000'):
                header_ends = file.tell()
                break
        curr_word = full_word
        full_word = full_word[1:WORD_SIZE]
        while (curr_word[0] != 1):
            if (bv[curr_file_offset:].length() < WORD_SIZE):
                bv.fromfile(file, 1)
                # Read a byte
            curr_word = get_next_word(bv)
            full_word.extend(curr_word[1:WORD_SIZE])

        if first_term:
            byte_position = int(full_word.to01(), 2)
            first_term = False
        else:
            byte_position += int(full_word.to01(), 2)
        termID_to_position_in_file[curr_term] = byte_position

        curr_term += 1
    curr_file_offset = 0
    largest_term = curr_term

    # Main run loop.
    # Read input from stdin and process
    # each incoming query.
    if sys.argv[1].endswith('.dat.np'):
        while True:
            try:
                query = raw_input()
                process_query_nonpositional(int(query.rstrip('\n')))
            except EOFError:
                break
    elif sys.argv[1].endswith('.dat.p'):
        while True:
            try:
                query = raw_input()
                process_query_positional(int(query.rstrip('\n')))
            except EOFError:
                break