from __future__ import division
from nltk import *
from nltk.corpus import stopwords
import globals, math

def get_docs(num):
    """Interactively collect document descriptors from the user.

    Prompts `num` times; each round asks for a file path and an author
    number.  Returns a list of [path, author_number] pairs, both kept
    as the raw strings the user typed.
    """
    docs = []
    for _ in range(int(num)):
        path = raw_input("What is the path to document:\t")
        author = raw_input("What author number is this?:\t")
        docs.append([path, author])
    return docs


#for now use the built in nltk stoplist, but might want to make a more comprehensive one
#include: conjunctions, interjections, prepositions, pronouns
def make_stoplist():
    """Load NLTK's English stopword list into globals.stopwords.

    Every stopword is mapped to 1 so later code can test membership.
    TODO: consider a more comprehensive custom list (conjunctions,
    interjections, prepositions, pronouns).
    """
    for stop in stopwords.words('english'):
        globals.stopwords[stop] = 1
    

#array of array of sentences (required for POS tagging)
def make_chunks(file, ch):
    """Split a document into chunks of sentences (needed for POS tagging).

    Parameters:
        file -- iterable of strings; joined and sentence-tokenized with
                NLTK's sent_tokenize.
        ch   -- 'Y' to group sentences into chunks of globals.amt_lines,
                'N' reserved for one-chunk-per-file (not implemented yet).

    Returns a list of chunks, each chunk a list of sentence strings.
    Returns [] for 'N' or an invalid option.

    Fixes over the previous version:
      * `chunks` is initialized up front, so non-'Y' options no longer
        raise UnboundLocalError on the final return.
      * every amt_lines-th sentence used to be silently dropped at the
        flush point, and the trailing partial chunk was discarded; both
        are now kept.
    """
    chunks = []
    sentences = sent_tokenize(''.join(file))
    if ch == 'Y':
        txt = []
        for sentence in sentences:
            txt.append(sentence)
            # flush a full chunk once it reaches the configured size
            if len(txt) == globals.amt_lines:
                chunks.append(txt)
                txt = []
        # keep the trailing partial chunk instead of losing it
        if txt:
            chunks.append(txt)
    elif ch == 'N':
        print("This is to implement")
        # Here we would take each input file as a chunk; perhaps forget this
        # function and just loop over each document by this author, saving
        # each as a chunk in an array.
    else:
        print("Invalid option")

    return chunks

#def find_synset():
    #use the wordNet framework (i.e. keep synset info for the word under which you saved the synset)

#def synset_cosine_metric():

#reduce penn-treebank tags to those in WordNet
def resolve_pos(p):
    """Collapse a Penn-Treebank POS tag to a WordNet POS letter.

    Returns 'a' (adjective), 'n' (noun), 'r' (adverb), 'v' (verb), or
    "other" for anything else.  Proper nouns (NNP/NNPS) deliberately
    fall through to "other".
    """
    # adjectives: JJ, JJR, JJS
    if p[0] == 'J':
        return 'a'
    # common nouns only; proper nouns are ignored
    if p in ("NN", "NNS"):
        return 'n'
    # adverbs
    if p in ("RB", "RBR", "RBS"):
        return 'r'
    # verbs: VB, VBD, VBG, VBN, VBP, VBZ
    if p[0] == 'V':
        return 'v'
    return "other"


def normalize1(syns):
    """Normalize each Synset's weight map into a distribution summing to 1.

    Parameters:
        syns -- dict {orig_word: Synset-like object with a .syn_freqz dict}

    Mutates each Synset's syn_freqz in place (weights divided by their
    total) and also returns a new dict with the same keys/objects.
    Raises ZeroDivisionError if a syn_freqz is empty or sums to 0 —
    presumably callers guarantee non-empty maps; TODO confirm.

    Fixes over the previous version: no longer shadows the builtin `sum`,
    uses the builtin sum() instead of a manual accumulation loop, and uses
    .items()/.values() so the code runs on both Python 2 and 3.
    """
    ret = dict()
    for orig_word, syn_set in syns.items():
        total = sum(syn_set.syn_freqz.values())
        for word, weight in syn_set.syn_freqz.items():
            syn_set.syn_freqz[word] = weight / total
        ret[orig_word] = syn_set
    return ret

def flatten(syns):
    """Merge all Synset weight maps into one flat {word: weight} dict.

    Parameters:
        syns -- dict whose values are Synset-like objects with .syn_freqz

    If the same word occurs in several synsets, the one seen last during
    iteration wins (same behavior as before).

    Fix: uses .values()/.items() instead of itervalues()/iteritems() so
    the function is portable across Python 2 and 3.
    """
    fin_syn = dict()
    for sset in syns.values():
        for word, weight in sset.syn_freqz.items():
            fin_syn[word] = weight
    return fin_syn
    

def normalize2(tf, count):
    """Return a new dict with every term frequency divided by `count`.

    Parameters:
        tf    -- dict {term: frequency}
        count -- divisor (true division — the file imports
                 `from __future__ import division`)

    The input dict is not modified.  Fix: uses .items() (portable across
    Python 2/3) and a dict comprehension instead of a manual loop.
    """
    return {key: val / count for key, val in tf.items()}

#Given the computed centroids (from lloyd), we determine which chunks are 
#near enough to the centroids. Override condition: If there is only one chunk in an
#author, don't delete it because we don't want to lose a cluster
#parameter: centroids, chunks/author
#output: updated chunk_list (i.e. those close enough)
def regroup_chunks(inter_centroids, chunksInAuth, amtIter):
    """Reassign every chunk in globals.chunk_list to its nearest centroid.

    For each chunk, computes the cosine similarity between the chunk's
    synset-weight vector and each author's centroid (only words present
    in both vectors contribute), then lets determine_cat pick the author
    the chunk is close enough to, if any.

    Parameters:
        inter_centroids -- dict {author: {orig_word: weight}} centroid vectors
        chunksInAuth    -- dict {author: [chunks]} current cluster contents
        amtIter         -- iteration counter; 0 on the first pass

    Side effect: replaces globals.chunk_list with the chunks that were
    close enough to some centroid (plus chunks that are the sole member
    of their cluster, kept so a cluster is never emptied).
    """
    
    #print inter_centroids
    temp_chunkList = []
    
    for chunk in globals.chunk_list:
        similarity = dict()
        for author in inter_centroids.iterkeys():
            similarity[author] = 0
            # running squared norms of the chunk / centroid vectors,
            # restricted to the shared words (used for cosine normalization)
            chunk_i = 0
            centr_i = 0
            for orig_word, or_freq in inter_centroids[author].iteritems():
                #only consider synsets that are in both (as per paper)
                #since things changed-
                """
                so- each word is always mapped to a unique synset, as such if a word appears in both
                the chunk and the centroid then the synset is in both as required.
                """
                #print orig_word
                if(chunk.synsets.has_key(orig_word)):
                    #print or_freq, chunk.synsets[orig_word]
                    # dot-product contribution of this shared word
                    similarity[author] += or_freq * chunk.synsets[orig_word]
                    centr_i += or_freq**2
                    chunk_i += chunk.synsets[orig_word]**2
            
            """
            we are not clustering based on TF
            
            for sWord, tf in inter_cetroids[author]["tf"].iteritems():
                if(chunk[1].has_key(sWord)):
                    similarity[author] += tf * chunk[1][word]

                #no matter whether the word is both
                centr_i += tf**2
                chunk_i += chunk[1][word]**2
            """

            #normalize the similarities
            #print chunk_i, centr_i
            if(chunk_i == 0 or centr_i == 0):
                #very large value should not be close to anything
                # NOTE(review): determine_cat treats *larger* cosine values as
                # closer (sims < theta1 means "not close enough"), so this huge
                # sentinel actually looks maximally close to this author and
                # blocks assignment to every other author — confirm intended.
                similarity[author] = 1000000000
            else:
                # cosine similarity: dot-product / (||centroid|| * ||chunk||);
                # -1/2 is -0.5 thanks to `from __future__ import division`
                similarity[author] *= (centr_i**(-1/2) * chunk_i**(-1/2))
        
        #print similarity
        new_author = determine_cat(similarity)
       # print "new author", new_author, "old author", chunk.OrigAuthor
#        sim_sorted = sorted(similarity.iteritems(),key = itemgetter(1), reverse=True)
#       
        #print chunksInAuth, chunk.tempAuth, new_author, amtIter
        if(not(new_author == "")):
            #print "chunk appended"
            # chunk was claimed by an author: move it to that cluster
            chunk.tempAuth = new_author
            temp_chunkList.append(chunk)
        elif(not(amtIter == 0) and len(chunksInAuth[chunk.tempAuth]) == 1):
        #ie by deleting this chunk from this cluster we would make it empty so we dont
        #second condi- because the assigned author names change from cluster name
            temp_chunkList.append(chunk)
        elif(new_author == "" and not(amtIter == 0) and len(chunksInAuth[chunk.tempAuth]) == 1):
            # NOTE(review): unreachable — reaching this branch implies
            # new_author == "" (first test failed), and the previous elif
            # already rejected the other two conjuncts, so this condition can
            # never hold.
            chunk.tempAuth = ""
            
    
    #only includes chunks that are close enough to a centroid
    globals.chunk_list = temp_chunkList
    

#returns the author to which the chunk is closest; returns the empty string if the
#chunk isn't close enough to any core

#for each author- if chunk is close to that author AND far from the rest
#then assign to that author
#determines category based on inner product
def determine_cat(sims):
    """Pick the author this chunk's similarities uniquely select.

    A chunk belongs to author A iff its cosine similarity to A is at least
    globals.theta1 (close enough — cosine 1 means a 0-degree angle) AND its
    similarity to every other author is below globals.theta2 (far enough
    from the rest — cosine 0 means orthogonal).

    Parameters:
        sims -- dict {author: cosine similarity}

    Returns the winning author, or "" when no author satisfies both
    conditions.  On a tie (several authors qualify) the one with the
    highest similarity wins.

    Fixes over the previous version:
      * the multi-candidate branch called sorted(candidates.iteritems, ...)
        (missing call parentheses) with `itemgetter` never imported, so it
        crashed; it also sorted ascending, which would have returned the
        *least* similar candidate.
      * candidates.keys()[0] was Python-2-only; max() works everywhere.
    """
    candidates = dict()
    for auth in sims:
        # must be close enough to this author...
        if sims[auth] < globals.theta1:
            continue
        # ...AND far enough from all of the others
        if all(sims[oth] < globals.theta2 for oth in sims if oth != auth):
            # save multiple winners in the event of a tie
            candidates[auth] = sims[auth]

    if len(candidates) > 1:
        print("more than one candidate here was possible")
    if candidates:
        # highest-similarity candidate wins
        return max(candidates, key=candidates.get)
    return ""
 
class Synset(object):
    """A bag of synonym weights, keyed by word."""

    def __init__(self):
        # word -> accumulated weight for this synset
        self.syn_freqz = {}

    def synPrint(self):
        # debug helper: dump the current weight map
        print(self.syn_freqz)
        

class ChunkData(object):
    """Per-chunk bookkeeping used by the clustering pipeline."""

    def __init__(self, auth):
        # {orig_word: weight} synset vector for this chunk
        self.synsets = dict()
        # the author this chunk originally came from
        self.OrigAuthor = auth
        # {word: term frequency} for this chunk
        self.tf = dict()
        # the author this chunk is currently assigned to
        self.tempAuth = auth

    def __str__(self):
        # Bug fix: the previous version returned a tuple
        # ("<TF Dict: ", self.tf, ">"), which made str(chunk) raise
        # TypeError.  Return an actual string instead.
        return "<TF Dict: %s>" % (self.tf,)


class stats(object):
    """Container for per-comparison classification statistics."""

    def __init__(self, nAuth, stat):
        # number of authors involved in the comparison
        self.num_auths = nAuth
        # dictionary per author; each keeps a list of what percent of a
        # document (ordinally) was classified to this author
        self.per_auth_stats = stat

#return a dictionary with {word:freq} per author
#use the same weighting scheme as TFIDF (IDF here is the amount of chunks)
#this should be for the clusters computed in cluster()
def make_tf_characteristic_vectors():
    """Build a TF-IDF-style characteristic vector per clustered author.

    Uses the clusters in globals.clustered_mu (the chunks we are most
    confident about).  For each author, counts in how many of that
    author's chunks each word appears (TF), and across all authors which
    chunks contain each word (the document-frequency sets used as IDF).

    Returns:
        (tf_auth, docs_containing) where
        tf_auth         -- {author: {word: weighted score}}; the score is
                           (log2(tf) + log2(total_chunks / doc_freq))
                           divided by the author's cluster size
        docs_containing -- {word: set of chunks containing the word}

    Fix over the previous version: the document-frequency set only
    recorded a chunk when the word was globally new OR already counted
    for this author — the first per-author occurrence of a globally-known
    word was silently skipped, understating document frequency and
    skewing the IDF term.  The chunk is now recorded unconditionally.
    """
    tf_auth = dict()
    docs_containing = dict()
    for auth in globals.clustered_mu:
        tf_auth[auth] = dict()
        for chunk in globals.clustered_mu[auth]:
            for word in chunk.tf:
                # per-author chunk count for this word
                tf_auth[auth][word] = tf_auth[auth].get(word, 0) + 1
                # record the chunk in the word's document-frequency set
                docs_containing.setdefault(word, set()).add(chunk)

    # weight counts into TF-IDF-like scores; counts are >= 1 so log2 is safe
    for auth in globals.clustered_mu:
        cluster_size = len(globals.clustered_mu[auth])
        for word in tf_auth[auth]:
            tf_auth[auth][word] = (
                math.log(tf_auth[auth][word], 2)
                + math.log(len(globals.orig_chunk_list) / len(docs_containing[word]), 2)
            ) / cluster_size

    return tf_auth, docs_containing



"""
Unused artifacts
"""

#TO BE IMPLEMENTED BY JULIA
#Makes a characteristic vector for each author based on the synsets
#inside the chunks associated with each author
#
#returns a dictionary with keys authors, values the characteristic vector for said author
#vector is a dictionary {orig_word:Synset_object}
def compute_centroids():
    """UNUSED / BROKEN — listed under "Unused artifacts" above.

    Intended (per the TODO comments) to build a characteristic vector per
    author from the synsets of that author's chunks, returning
    {author: {orig_word: Synset_object}}.

    NOTE(review): this function cannot run as written:
      * `for author in globals.authors.iteritems()` iterates (key, value)
        tuples but then uses `author` as a dict key/index;
      * `assoc_synset` is referenced but never defined (NameError);
      * `inter_ci["occurence"]` is read/incremented but never initialized
        (KeyError);
      * `inter_ci[author][key][type]` indexes with the *builtin* `type`.
    Kept only as a reference for the intended algorithm.
    """
    
    inter_ci = dict()
    loc = dict()
    for author in globals.authors.iteritems():
        inter_ci[author] = dict()
        loc = dict()
        for mu_chunk in globals.unclustered_mu[author]:
            #all synsets for this chunk for this author:
            #mu_chunk[0]
            new_this_chunk = ChunkData(author)
            for orig_word, syn_set in mu_chunk.synsets.iteritems():
                if(not(new_this_chunk.synsets.has_key(orig_word))):
                    new_this_chunk.synsets[orig_word] = syn_set
                    loc[orig_word] = 1
                else:
                    # NOTE(review): `assoc_synset` is undefined from here on
                    Fsynset = new_this_chunk.synsets[assoc_synset["synset"]]
                    for syn in Fsynset.iterkeys():
                        if(not(syn == "synset")):
                            #this should not be necessary
                            if(not(assoc_synset.has_key(syn))):
                                break
                            else:
                                Fsynset[syn] += assoc_synset[syn]
                    
                    inter_ci[author][assoc_synset["synset"]] = Fsynset
                    inter_ci["occurence"][assoc_synset["synset"]]+=1
            
            """
            #We are not using TF for the clustering!
            #all TF for this chunk for this author
            #mu_chunk[1]
            for word in mu_chunk[1].iterkeys():
                if(not(inter_ci[author].has_key(word))):
                    inter_ci[author][word] = mu_chunk[1][word]
                    inter_ci[author][word]["type"] = 1
                    inter_ci["occurence"][word] = 1
                else:
                    inter_ci[author][word] += mu_chunk[1][word]
                    inter_ci["occurence"][word] += 1
            """
            
        #normalization
        #dividing by occurence, maybe want to divide by total amount of chunks or something
        #i.e. i am averaging over occurences, not over document size
        for key in inter_ci[author].iterkeys():
            if(inter_ci[author][key][type] == 0):
                inter_ci[author]["synsets"][key] = inter_ci[author][key]/inter_ci["occurence"][key]
    
    return inter_ci


#returns whether the newly computed centroid vector for each author is the same 
#as the one computed previously- this would indicate convergence
#params are dictionaries (per author) of synonym words (see explanation in regroup_chunks for why it
#is acceptable to use the words and the synsets interchangeably)
def same(new, old):
    """Return True when the newly computed centroids match the old ones.

    Parameters:
        new, old -- dicts {author: {word: weight}} (words stand in for
                    synsets; see the note above regroup_chunks)

    A centroid is considered unchanged when no new word appeared and no
    word's weight moved by globals.eps or more.  Convergence of Lloyd's
    iteration is declared when every author's centroid is unchanged.

    Fix over the previous version: the movement test was
    `old - new >= eps`, which only detected *decreases*; a weight that
    grew by more than eps was treated as converged.  abs() now catches
    movement in either direction.  (Words that disappeared from a
    centroid are still not detected — same as before; TODO confirm that
    is acceptable.)
    """
    for author in new:
        for nword, nfreq in new[author].items():
            # a brand-new synset word means the centroid obviously changed
            if nword not in old[author]:
                return False
            # significant movement in either direction => not converged
            if abs(old[author][nword] - nfreq) >= globals.eps:
                return False
    return True


#seems to be a misguided function

"""
Returns a stats object for this comparison
num_authors
has a dictionary per author with a list to each which has percentages
for each document how much was mapped to that author
"""
def display():
    
    same = dict()
    diff = dict()
    for auth in globals.Julia_authors:
        same[auth] = 0
        diff[auth] = dict()
    
    print "diff", diff
    print "same", same
    for res in globals.results:
        print res
        if(res[0] == res[1]):
            same[res[0]]+=1/globals.num_chunks_per_author[res[0]]
        else:
            print "here", res
            if(not(diff[res[0]].has_key(res[1]))):
                diff[res[0]][res[1]] = 1/globals.num_chunks_per_author[res[0]]
            else:
                diff[res[0]][res[1]] += 1/globals.num_chunks_per_author[res[0]]
    
    print "Percent of chunks that remained in the same category"
    for key,amt in same.iteritems():
        print key, amt
    
    print "Percent of chunks that were mapped to a different author"
    for auth in diff.iterkeys():
        print "for ", auth
        for new_auth, amt in diff[auth].iteritems():
            print amt," were mapped to ", new_auth
            

def equals(chunks_left):
    """Return True when the last six entries of chunks_left are all equal.

    Returns False when fewer than six entries exist ("not enough data
    yet").  Presumably used as a fixed-point/convergence check on the
    history of remaining-chunk counts — confirm with the caller.
    """
    if len(chunks_left) < 6:
        return False
    tail = chunks_left[-6:]
    # equivalent to the original walk: every one of the last six entries
    # must equal the most recent entry
    return all(entry == tail[-1] for entry in tail)