#from __future__ import division
import json
import nltk

def calcKeywords(in_data):
    """Attach up to 7 keywords to each cluster in *in_data*, in place.

    For every cluster, tokenizes each node's ``title``, keeps words tagged
    as nouns (Brown-corpus unigram tags NN/NNS/NNP/NNPS) that are not in
    the stopword list, stems them with the Lancaster stemmer, counts stem
    frequencies, down-weights stems whose surface form appears in the
    common-word rate table (keyword/nouns.txt), and stores the shortest
    original surface form of each of the top-7 stems on
    ``cluster.keywords``.

    Args:
        in_data: mapping of cluster key -> cluster object; each cluster
            must expose ``.nodes`` (iterable of dict-like nodes) and gets
            a ``.keywords`` list assigned.  Mutated in place.
    """
    # Load the common-word rate table and stopword list.  `with` ensures
    # the handles are closed even if json.load raises.
    with open("keyword/nouns.txt") as f:
        commonRate = json.load(f)
    with open("keyword/stopwords.txt") as f:
        stopwords = set(json.load(f))

    cluster_data = in_data
    useful_pos = {"NN", "NNS", "NNP", "NNPS"}  # set: O(1) membership per token
    tokenizer = nltk.word_tokenize
    stemmer = nltk.stem.lancaster.LancasterStemmer()
    # NOTE: training a unigram tagger on the full Brown corpus is slow;
    # it is deliberately built once per call, not once per node.
    tagger = nltk.UnigramTagger(nltk.corpus.brown.tagged_sents())

    for cluster_key in cluster_data:
        originalWords = {}  # stem -> shortest surface form seen so far
        count = {}          # stem -> raw, then rate-weighted, frequency
        for node in cluster_data[cluster_key].nodes:
            # Nodes without a usable title contribute nothing.  Narrowed
            # from a bare `except:` which also swallowed KeyboardInterrupt.
            try:
                tmpstr = node['title']
            except (KeyError, TypeError):
                tmpstr = ""
            tokens = tokenizer(tmpstr.lower())
            for word, tag in tagger.tag(tokens):
                if tag not in useful_pos or word in stopwords:
                    continue
                stemmed = stemmer.stem(word)
                # Keep the shortest surface form as the display word.
                if stemmed not in originalWords or len(originalWords[stemmed]) > len(word):
                    originalWords[stemmed] = word
                count[stemmed] = count.get(stemmed, 0) + 1
        # Down-weight stems whose surface form is common in general
        # English (higher rate -> lower weighted score).
        for stem in count:
            if originalWords[stem] in commonRate:
                count[stem] = count[stem] / commonRate[originalWords[stem]]
        # Top 7 stems by weighted score, mapped back to surface forms.
        top_stems = sorted(count, key=count.get, reverse=True)[:7]
        cluster_data[cluster_key].keywords = [originalWords[s] for s in top_stems]
    print('Got Keywords')