# Module-level state for an authorship-clustering run.
#
# mu vectors: one per chunk; each mu vector consists of a dictionary for
# synsets and a dictionary for TF (term-frequency) words.
# Layout: {author1: [chunkData, ...], author2: [chunkData, ...], ...}
unclustered_mu = {}
iterm_mu = {}
clustered_mu = {}

# author_name -> amount of chunks associated with that author
authors = {}
Julia_authors = set()

# Chunk size in lines. Probably between 500 and 1500 for novels:
# http://answers.google.com/answers/threadview?id=608972
# says that a modern novel runs ~250 words/page, 25 lines/page,
# 20 pages/chapter.
amt_lines = 200
max_authors = 6

# NLTK stoplist turned into a dictionary for easy (O(1)) membership tests.
stopwords = {}

# Additional, irrelevant stopwords beyond the NLTK list.
extended_stopList = {}

orig_chunk_list = []
chunk_list = []

# Clustering thresholds.
eps = 0.05
theta1 = 0.7071  # NOTE(review): presumably cos(45 degrees) — confirm intent
theta2 = 0.3

results = []
num_chunks_per_author = dict()

Julia_words_encountered = set()

# Since for small documents word occurrence is very slight, it is multiplied
# by a constant.
# Justification: since we are increasing the word amount (i.e. squaring it),
# the amount of documents should probably also go up, so multiply the second
# log by the constant, and then in the division after, multiply the amount of
# chunks per author by constant / numAuthors.
constant = 10