#! /usr/bin/python
#
# Stephen Poletto (spoletto)
# Peter Wilmot (pbwilmot)
# CSCI1580 - Web Search
# Spring 2011 - Brown University
#
# Given a list of stop words and
# a collection of documents, generates
# an inverted index and associated
# title index.

from positionalIndex import *
from porterStemmer import *
import marshal
import string
import sys
import re
    
# Validate command-line arguments.  On misuse, print usage to stderr and
# exit with a non-zero status (the original exited 0, which hides the
# error from calling shell scripts).
if len(sys.argv) != 5:
    sys.stderr.write("\n")
    sys.stderr.write("usage: createIndex <stopwords> <collection> <index> <titleindex>\n")
    sys.stderr.write("\n")
    sys.exit(1)

# Initial setup.
CHUNK_SIZE = 100000  # approximate number of bytes to read per iteration
collectionFile = open(sys.argv[2], "r")
titleIndexFile = open(sys.argv[4], 'wb')
stemmer = PorterStemmer()
index = PositionalIndex()

# Load the stop words into a set so that membership tests during page
# processing are O(1) instead of a linear scan of a list.  The file is
# opened with a context manager so it is closed even on error.
with open(sys.argv[1], "r") as stopWordsFile:
    stopWords = set(line.rstrip("\n") for line in stopWordsFile)

# Patterns for the page markup, compiled once at import time rather than
# on every call.
ID_RE = re.compile(r'<id>(\d*)</id>')
TITLE_RE = re.compile(r'<title>(.*?)</title>', re.DOTALL)
TEXT_RE = re.compile(r'<text>(.*?)</text>', re.DOTALL)
TOKEN_RE = re.compile(r'\b[a-z0-9]+\b')

def process_page(page):
    """Index the contents of one <page> element.

    Extracts the document id, title and body text, lower-cases and
    tokenizes the title together with the body, stems every non-stop
    word and inserts it into the global positional index.  Also writes
    an "id title" line to the title index file.
    """
    page_id = ID_RE.findall(page)[0]          # avoid shadowing builtin `id`
    title = TITLE_RE.findall(page)[0]
    text = TEXT_RE.findall(page)[0]
    # Prepend the title so its words are indexed along with the body.
    text = (title + '\n' + text).lower()
    # Normalize whitespace-like separators to spaces before tokenizing.
    # (str.replace works in both Python 2 and 3; string.replace was
    # deprecated and removed in Python 3.)
    for sep in ('\r', '\t', '\n', '_'):
        text = text.replace(sep, ' ')
    currWordNumber = 1
    for word in TOKEN_RE.findall(text):
        # Stop words are skipped entirely; positions only advance for
        # indexed words, preserving the original numbering scheme.
        if word in stopWords:
            continue
        keyword = stemmer.stem(word, 0, len(word) - 1)
        index.insert(keyword, page_id, currWordNumber)
        currWordNumber += 1
    titleIndexFile.write(page_id + " " + title + "\n")

# Main run loop.
# Stream the collection in chunks and index every complete
# <page>...</page> element.  A trailing partial page is carried over in
# `chunk` until the rest of it is read.
#
# NOTE: the original partition-based loop never discarded processed text
# when a complete page was followed by no further "<page>" tag, so the
# next chunk would re-index pages it had already seen.  Tracking explicit
# offsets and always slicing off consumed text fixes that.
chunk = ""
while True:
    lines = collectionFile.readlines(CHUNK_SIZE)
    if not lines:
        # EOF
        break
    chunk += "".join(lines)
    while True:
        start = chunk.find("<page>")
        if start == -1:
            # No page opens in the buffer; nothing before a "<page>"
            # tag is indexable, so drop it.
            chunk = ""
            break
        end = chunk.find("</page>", start)
        if end == -1:
            # Incomplete page: keep it (from its opening tag onward)
            # for the next read.
            chunk = chunk[start:]
            break
        process_page(chunk[start + len("<page>"):end])
        chunk = chunk[end + len("</page>"):]
collectionFile.close()

# Write the index out to disk: marshal the raw positional-index
# dictionary (marshal version 2) to the path given on the command line.
# The context manager guarantees the file is flushed and closed even if
# serialization fails.
with open(sys.argv[3], 'wb') as serializedIndex:
    marshal.dump(index.dict, serializedIndex, 2)

# Cleanup
titleIndexFile.close()