#! /usr/bin/python
#
# Stephen Poletto (spoletto)
# Peter Wilmot (pbwilmot)
# CSCI1580 - Web Search
# Spring 2011 - Brown University
#
# Given a list of stop words,
# a collection of documents, and
# a list of important features,
# generates a vector representation
# of the features present in each
# document in the collection.

from porterStemmer import *
import string
import sys
import re

CHUNK_SIZE = 100000
    
if (len(sys.argv) != 5):
    print ""
    print "usage: vecrep <stopWords> <fullCollection> <features> <outputfile>"
    print ""
    sys.exit()

# Initial setup: open the four files named on the command line and build
# the stemmer plus the two lookup tables used by process_page.
stopWordsFile = open(sys.argv[1], "r")
collectionFile = open(sys.argv[2], "r")
featuresFile = open(sys.argv[3], "r")
outputFile = open(sys.argv[4], 'wb')
stemmer = PorterStemmer()

# Stop words: one word per line -> a set for O(1) membership tests.
stopWords = set(line.rstrip("\n") for line in stopWordsFile)
stopWordsFile.close()

# Features: one term per line; the term's line number becomes its
# numeric feature ID.
features = {}
featureID = 0
for line in featuresFile:
    features[line.rstrip("\n")] = featureID
    featureID += 1
featuresFile.close()

# Patterns hoisted to module level so they are compiled once, not on
# every page.
_DOC_ID_RE = re.compile(r'<id>(\d*)</id>')
_TITLE_RE = re.compile(r'<title>(.*?)</title>', re.DOTALL)
_TEXT_RE = re.compile(r'<text>(.*?)</text>', re.DOTALL)
_WORD_RE = re.compile(r'\b[a-z0-9]+\b')

def process_page(page):
    """Build the feature vector for one <page>...</page> region and
    append it to outputFile as:

        docID sum_d featureID:count featureID:count ...

    where sum_d is the sum of squared term counts (presumably used
    later for vector-length normalization).  Reads the module globals
    stopWords, features, stemmer, and outputFile."""
    docID = _DOC_ID_RE.findall(page)[0]
    title = _TITLE_RE.findall(page)[0]
    text = _TEXT_RE.findall(page)[0]

    # Searchable text is the title plus the body, lowercased, with
    # whitespace variants and underscores normalized to spaces.
    # (str.replace replaces the deprecated string.replace() calls,
    # which were removed from the string module in Python 3.)
    text = (title + '\n' + text).replace('\r', ' ').lower()
    text = text.replace('\t', ' ').replace('\n', ' ').replace('_', ' ')

    # Count occurrences of each feature term, skipping stop words and
    # stemming before the feature lookup.
    feature_id_to_occurrence_count = {}
    for word in _WORD_RE.findall(text):
        # If the word is in stopwords, ignore it.
        if word in stopWords:
            continue
        keyword = stemmer.stem(word, 0, len(word) - 1)
        # Only keywords that are known features contribute to the vector.
        if keyword in features:
            fid = features[keyword]
            feature_id_to_occurrence_count[fid] = \
                feature_id_to_occurrence_count.get(fid, 0) + 1

    # Need to write out the vector representation as
    # d sum_d f_i:occ_i ...
    outputFile.write(str(docID) + " ")
    sum_d = sum(count * count
                for count in feature_id_to_occurrence_count.values())
    outputFile.write(str(sum_d) + " ")
    for fid, count in feature_id_to_occurrence_count.items():
        outputFile.write(str(fid) + ":" + str(count) + " ")
    outputFile.write("\n")

# Main run loop.
# Stream the collection in chunks and carve complete <page>...</page>
# regions out of a carry-over buffer.  BUGFIX: the original never
# trimmed `chunk` after the last complete page in the buffer was
# processed, so whenever a read boundary fell between pages every page
# already in the buffer was re-processed (duplicate output lines) and
# the buffer grew without bound.  Here `chunk` always holds only the
# not-yet-consumed tail of the input.
chunk = ""
while True:
    lines = collectionFile.readlines(CHUNK_SIZE)
    if not lines:
        # EOF
        break
    chunk += "".join(lines)
    while True:
        before, found, rest = chunk.partition("<page>")
        if not found:
            # No page starts in the buffered text.  Keep only a tail
            # long enough to hold a "<page>" tag split across the
            # read boundary; everything else is inter-page filler.
            chunk = chunk[-len("<page>"):]
            break
        page, closed, after = rest.partition("</page>")
        if not closed:
            # Page is incomplete; buffer it and read more input.
            chunk = "<page>" + rest
            break
        process_page(page)
        # Consume the processed page so it is never re-parsed.
        chunk = after
collectionFile.close()

# Cleanup
outputFile.close()

