from __future__ import division
from nltk import *
import globals
import functions, math
from operator import itemgetter

#Use the clustering results to figure out which author each chunk belongs to.
#Here we use the TF of the chunks that make up the centroid for that author.
def anti_plagiarize():
    
    print "anti_plagiarize check!!"
    #{author:{word:freq ...}, auth2:{}..}
    auth_tf_vect, occsAcrossAuths = functions.make_tf_characteristic_vectors()
    
    print len(globals.orig_chunk_list)
    for auth in globals.clustered_mu:
        print auth, len(globals.clustered_mu[auth])
    
    for chunk in globals.orig_chunk_list:
        score = dict()
        for auth in auth_tf_vect.iterkeys():
            score[auth] = 0
            for mot, val in chunk.tf.iteritems():
                if(auth_tf_vect[auth].has_key(mot)):
                    #print mot, val, len(occsAcrossAuths[mot])
                    #print math.log(val,2), math.log(len(globals.orig_chunk_list)/len(occsAcrossAuths[mot]) ,2)
                    #score[auth] += auth_tf_vect[auth][mot]*(math.log(val**2,2) + globals.constant*math.log(len(globals.orig_chunk_list)/len(occsAcrossAuths[mot]) ,2))
                    score[auth] += auth_tf_vect[auth][mot]*(math.log(val,2) + math.log(len(globals.orig_chunk_list)/len(occsAcrossAuths[mot]) ,2))
            """
            #Here we are not considering synsets!

            score[author] = 0
            for synset in chunk[0].itervalues():
                if(globals.clustered_mu[author]["synset"].has_key(synset["synset"])): 
                    for word in globals.clustered_mu[author]["synset"].iterkeys():
                        if(not(word == "synset")):
                            score[author] += globals.clustered_mu[author]["synset"][word]*synset[word]
            """

        sorted_score = sorted(score.iteritems(),key=itemgetter(1),  reverse=True)
        print sorted_score, chunk.tempAuth
        globals.results.append([sorted_score[0][0], chunk.OrigAuthor])