#!/usr/bin/python
import nltk
import json
import sys
import getopt
import codecs
import struct
import timeit


# Indexer configuration flags (module-level constants).
IGNORE_STOPWORDS = True  # toggling the option for ignoring stopwords
IGNORE_NUMBERS = True  # toggling the option for ignoring numbers
IGNORE_SINGLES = True  # toggling the option for ignoring single character tokens
RECORD_TIME = True  # toggling for recording the time taken for indexer
BYTE_SIZE = 4  # each docID is packed as a 4-byte unsigned int ('I') in the postings file


def index(sourcedocument_file, dictionary_file, postings_file):
    '''
    Indexer which produces a dictionary file and a postings file.

    Params:
        sourcedocument_file: JSON-lines input; each line is a JSON object
                             whose "content" field holds the document text
        dictionary_file:     output dictionary of terms, one
                             "term df byte_offset" line per term
        postings_file:       output binary postings file (4-byte docIDs)
    '''

    stemmer = nltk.stem.porter.PorterStemmer()
    # a set gives O(1) membership tests in the per-token hot loop
    # (nltk returns a list, which would be O(n) per lookup)
    stopwords = set(nltk.corpus.stopwords.words('english'))
    docs_indexed = 0  # counter for the number of docs indexed
    dictionary = {}  # key: term, value: postings list of ascending docIDs

    with codecs.open(sourcedocument_file, "r", "utf-8") as source:
        # docIDs are assigned sequentially from 1 in file order
        for docID, line in enumerate(source, start=1):
            document = json.loads(line)["content"]
            tokens = nltk.word_tokenize(document)  # word tokens of this document
            for word in tokens:
                term = word.lower()  # casefolding
                if IGNORE_STOPWORDS and term in stopwords:
                    continue
                if IGNORE_NUMBERS and is_number(term):
                    continue
                term = stemmer.stem(term)  # stemming
                # endswith() is safe on an empty string, unlike term[-1]
                if term.endswith("'"):
                    term = term[:-1]  # remove trailing apostrophe
                if not term:
                    continue  # term vanished after normalisation; skip it
                if IGNORE_SINGLES and len(term) == 1:
                    continue

                postings = dictionary.get(term)
                if postings is None:
                    dictionary[term] = [docID]  # first occurrence of this term
                elif postings[-1] != docID:
                    # docIDs arrive in ascending order, so comparing the tail
                    # is enough to keep the postings list duplicate-free
                    postings.append(docID)
            docs_indexed += 1

    byte_offset = 0  # running byte offset of each term's postings block

    # context managers guarantee both output files are flushed and closed
    # even if a write fails partway through
    with codecs.open(dictionary_file, 'w', encoding='utf-8') as dict_file, \
         open(postings_file, 'wb') as post_file:

        # first line of the dictionary records which docIDs were indexed
        dict_file.write('Indexed from docIDs:')
        for i in range(1, docs_indexed + 1):
            dict_file.write(str(i) + ',')
        dict_file.write('\n')

        # build dictionary file and postings file
        for term, postings_list in dictionary.items():
            df = len(postings_list)  # document frequency == postings length

            # write each posting as a 4-byte unsigned int
            for docID in postings_list:
                post_file.write(struct.pack('I', docID))

            # dictionary line: term, document frequency, postings pointer
            dict_file.write(term + " " + str(df) + " " + str(byte_offset) + "\n")
            byte_offset += BYTE_SIZE * df


def is_number(token):
    '''
    Return True if the token can be parsed as a number, else False.

    Commas are stripped first, so thousand-separated values such as
    "1,000" also count as numbers.
    Param:
        token:  token string
    '''
    stripped = token.replace(",", "")  # drop comma separators before parsing
    try:
        float(stripped)
    except ValueError:
        return False
    return True


def print_usage():
    '''
    Print the proper command usage for this script to stdout.
    '''
    template = "usage: {} -s sourcedocument_file -d dictionary-file -p postings-file"
    print(template.format(sys.argv[0]))


if __name__ == '__main__':
    # Command line: -s source docs, -d dictionary output, -p postings output.
    document_file = dictionary_file = postings_file = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 's:d:p:')
    # if illegal arguments, print usage to user and exit
    except getopt.GetoptError:
        print_usage()
        sys.exit(2)
    # bind each parsed option to its target filename
    for o, a in opts:
        if o == '-s':
            document_file = a
        elif o == '-d':
            dictionary_file = a
        elif o == '-p':
            postings_file = a
        else:
            # unreachable given the option spec above; exit explicitly
            # instead of `assert False`, which is stripped under -O
            print_usage()
            sys.exit(2)

    # all three arguments are mandatory; `is None` is the idiomatic test
    if document_file is None or dictionary_file is None or postings_file is None:
        print_usage()
        sys.exit(2)

    if RECORD_TIME:
        start = timeit.default_timer()  # start time
    index(document_file, dictionary_file, postings_file)  # call the indexer
    if RECORD_TIME:
        stop = timeit.default_timer()  # stop time
        print('Indexing time:' + str(stop - start))  # print time taken