from nltk import *
import globals
import functions
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords as st

"""
Comments:
Right now we are taking the first synset for that part of speech, we should
think of a better way to choose which synset is appropriate for that word

using some statistics from Brown corpus (?) find the most common use of the word
and use the synset related to that

other than that we should be good
"""

#for each file that the user inputs
def process(pair, ch):
    """Process one input document: split it into chunks and accumulate
    per-author / per-chunk statistics into the shared `globals` module.

    Args:
        pair: (path, author) tuple -- pair[0] is the file path to read,
              pair[1] is the author label.
        ch:   chunk-size parameter forwarded to functions.make_chunks.

    Side effects (module-level state in `globals`):
        - authors[author]               += 1 (documents seen per author)
        - num_chunks_per_author[author] += number of chunks in this file
        - unclustered_mu[author]         gains one ChunkData per chunk
        - chunk_list                     gains one ChunkData per chunk
        - Julia_words_encountered        gains every "word.pos" key seen
    """
    # Close the input file as soon as the chunks are built (the old code
    # leaked the handle).  make_chunks must return a materialized sequence
    # -- len() is taken below -- so closing here is safe.
    with open(pair[0], 'r') as doc:
        chunks = functions.make_chunks(doc, ch)

    author = pair[1]

    # number of documents per author
    globals.authors[author] = globals.authors.get(author, 0) + 1

    # number of chunks per author
    globals.num_chunks_per_author[author] = \
        globals.num_chunks_per_author.get(author, 0) + len(chunks)

    # Build the stoplist ONCE as a set: the old code re-read the stopwords
    # corpus (st.words("english")) for every single token, which dominated
    # the runtime of the inner loop.
    stop_words = set(st.words("english"))

    chunk_no = 0
    print(len(chunks))
    for chunk in chunks:
        print("=============================== %d" % chunk_no)
        chunk_no += 1
        # Maps every synonym form "word.pos" back to the "word.pos" key
        # whose Synset entry in this_chunk.synsets owns its count.
        words_encountered = dict()
        tot_words = 0
        this_chunk = functions.ChunkData(author)

        for sent in chunk:
            words = word_tokenize(sent)
            tot_words += len(words)
            pos = pos_tag(words)

            for word in words:
                word = word.lower()

                # Stoplist words only contribute to plain term frequency;
                # if they even have synonyms, that's irrelevant here.
                if word in stop_words:
                    this_chunk.tf[word] = this_chunk.tf.get(word, 0) + 1
                    continue

                # Skip words in the extended stoplist entirely.
                if word in globals.extended_stopList:
                    continue

                # Find the POS tag assigned to this token in this sentence.
                for w, n in pos:
                    if w != word:
                        continue

                    # Reduce Penn-Treebank tags to the 4 WordNet POS classes.
                    p = functions.resolve_pos(n)
                    if p == "other":
                        # Not one of the 4 POS classes we care about: keep
                        # scanning (mirrors the original control flow).
                        continue

                    key = w + '.' + p
                    if key in words_encountered:
                        # Seen before (directly or as a synonym of an
                        # earlier word): bump its weight on the owning
                        # synset entry in place.
                        orig = words_encountered[key]
                        this_chunk.synsets[orig].syn_freqz[key] += 1
                    else:
                        # First sighting: register the word, then register
                        # every synonym (all synsets for this POS,
                        # concatenated -- "option 2") so later occurrences
                        # of any of them map back to this entry.
                        globals.Julia_words_encountered.add(key)
                        syn_set = functions.Synset()
                        synonyms = set()
                        # NOTE(review): `lemma_names` is an attribute in
                        # NLTK 2.x but a method in 3.x -- confirm version.
                        for sy in wn.synsets(w, p):
                            for lemma in sy.lemma_names:
                                synonyms.add(lemma + '.' + p)

                        # Use a name distinct from the outer loop variable;
                        # the old code shadowed `word` here.
                        for syn_word in synonyms:
                            words_encountered[syn_word] = key
                            syn_set.syn_freqz[syn_word] = 0

                        # BUGFIX: always map the word back to itself.  When
                        # wn.synsets() yields no lemma spelled exactly `w`
                        # (no synsets, or case-differing lemmas), the old
                        # code never put `key` in words_encountered, so a
                        # repeat occurrence OVERWROTE the entry with a fresh
                        # count of 1 instead of incrementing it.
                        words_encountered[key] = key
                        syn_set.syn_freqz[key] = 1
                        this_chunk.synsets[key] = syn_set
                    break

        # Per-chunk post-processing: normalize the synset weights and the
        # term frequencies, then file the chunk under its author.
        this_chunk.synsets = functions.normalize1(this_chunk.synsets)
        this_chunk.synsets = functions.flatten(this_chunk.synsets)
        this_chunk.tf = functions.normalize2(this_chunk.tf, tot_words)
        globals.unclustered_mu.setdefault(author, []).append(this_chunk)
        globals.chunk_list.append(this_chunk)

    # (debug) dump every chunk's synsets:
    # for chu in globals.chunk_list:
    #     for key, val in chu[0].synsets.items():
    #         print(key, val)