import nltk
from operator import itemgetter, attrgetter
from nltk.stem.lancaster import LancasterStemmer
class Word:
    """Record tying a stemmed token to its score and original surface form."""

    def __init__(self):
        # Normalized (stemmed) form of the token.
        self.stemmed_word = ''
        # Original, unstemmed form as it appeared in the text.
        self.origin_word = ''
        # Occurrence count; later rescaled to a ranking score.
        self.num = 0
class GetKeyWords:
    """Extract a ranked keyword list for each cluster in ``cluster_data``.

    Pipeline per cluster: tokenize node titles, keep only noun tokens
    (POS filter), stem them, count frequencies, drop stopwords,
    down-weight globally common nouns using the counts loaded by
    ``nounloader``, then store the top 7 original word forms on the
    cluster object as ``keywords``.
    """

    def __init__(self):
        # key -> cluster object; populated via dataLoader().
        self.cluster_data={}
        self.tokenizer= nltk.word_tokenize
        self.stemmer=LancasterStemmer()
        self.pos_tagger=nltk.pos_tag
        # Penn Treebank noun tags: common/proper, singular/plural.
        self.useful_pos=["NN","NNS","NNP","NNPS"]
        # lowercase word -> global frequency; populated via nounloader().
        self.noun={}
        self.stopWordList=[
            "a", "about", "above", "across", "afore", "aforesaid", "after", "again", "against", "agin",
            "ago", "aint", "albeit", "all", "almost", "alone", "along", "alongside", "already", "also",
            "although", "always", "am", "american", "amid", "amidst", "among", "amongst", "an", "and",
            "anent", "another", "any", "anybody", "anyone", "anything", "are", "aren't", "around", "as",
            "aslant", "astride", "at", "athwart", "away", "b", "back", "bar", "barring", "be",
            "because", "been", "before", "behind", "being", "below", "beneath", "beside", "besides", "best",
            "better", "between", "betwixt", "beyond", "both", "but", "by", "c", "can", "cannot",
            "can't", "certain", "circa", "close", "concerning", "considering", "cos", "could", "couldn't", "couldst",
            "d", "dare", "dared", "daren't", "dares", "daring", "despite", "did", "didn't", "different",
            "directly", "do", "does", "doesn't", "doing", "done", "don't", "dost", "doth", "down",
            "during", "durst", "e", "each", "early", "either", "em", "english", "enough", "ere",
            "even", "ever", "every", "everybody", "everyone", "everything", "except", "excepting", "f", "failing",
            "far", "few", "first", "five", "following", "for", "four", "from", "g", "gonna",
            "gotta", "h", "had", "hadn't", "hard", "has", "hasn't", "hast", "hath", "have",
            "haven't", "having", "he", "he'd", "he'll", "her", "here", "here's", "hers", "herself",
            "he's", "high", "him", "himself", "his", "home", "how", "howbeit", "however", "how's",
            "i", "id", "if", "ill", "i'm", "immediately", "important", "in", "inside", "instantly",
            "into", "is", "isn't", "it", "it'll", "it's", "its", "itself", "i've", "j",
            "just", "k", "l", "large", "last", "later", "least", "left", "less", "lest",
            "let's", "like", "likewise", "little", "living", "long", "m", "many", "may", "mayn't",
            "me", "mid", "midst", "might", "mightn't", "mine", "minus", "miss", "ms", "mr",
            "mrs", "more", "most", "much", "must", "mustn't", "my", "myself", "n", "n't",
            "near", "'neath", "need", "needed", "needing", "needn't", "needs", "neither", "never", "nevertheless",
            "new", "next", "nigh", "nigher", "nighest", "nisi", "no", "no-one", "nobody", "none",
            "nor", "not", "nothing", "notwithstanding", "now", "o", "o'er", "of", "off", "often",
            "on", "once", "one", "oneself", "only", "onto", "open", "or", "other", "otherwise",
            "ought", "oughtn't", "our", "ours", "ourselves", "out", "outside", "over", "own", "p",
            "past", "pending", "per", "perhaps", "plus", "possible", "present", "probably", "provided", "providing",
            "public", "q", "qua", "quite", "r", "rather", "re", "real", "really", "respecting",
            "right", "round", "s", "same", "sans", "save", "saving", "second", "several", "shall",
            "shalt", "shan't", "she", "shed", "shell", "she's", "short", "should", "shouldn't", "since",
            "six", "small", "so", "some", "somebody", "someone", "something", "sometimes", "soon", "special",
            "still", "such", "summat", "supposing", "sure", "t", "than", "thank", "thanks", "that",
            "that'd", "that'll", "that's", "the", "thee", "their", "theirs", "their's", "them", "themselves",
            "then", "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've", "thine",
            "this", "tho", "those", "thou", "though", "three", "thro'", "through", "throughout", "thru",
            "thyself", "till", "to", "today", "together", "too", "touching", "toward", "towards", "true",
            "'twas", "'tween", "'twere", "'twill", "'twixt", "two", "'twould", "u", "under", "underneath",
            "unless", "unlike", "until", "unto", "up", "upon", "us", "used", "usually", "v",
            "versus", "very", "via", "vice", "vis-a-vis", "w", "wanna", "wanting", "was", "wasn't",
            "way", "we", "we'd", "well", "were", "weren't", "wert", "we've", "what", "whatever",
            "what'll", "what's", "when", "whencesoever", "whenever", "when's", "whereas", "where's", "whether", "which",
            "whichever", "whichsoever", "while", "whilst", "who", "who'd", "whoever", "whole", "who'll", "whom",
            "whore", "who's", "whose", "whoso", "whosoever", "will", "with", "within", "without", "wont",
            "would", "wouldn't", "wouldst", "x", "y", "ye", "yet", "you", "you'd", "you'll",
            "your", "you're", "yours", "yourself", "yourselves", "you've", "z",",",".","[","]","(",")","{","}","!",":"
        ]

    def dataLoader(self, data):
        """Install the cluster dictionary to be processed by main()."""
        self.cluster_data = data

    def nounloader(self):
        """Load global noun frequencies from keyword/noun.txt into self.noun.

        Each line is tab-separated; field 1 is the word and field 2 its
        count (field 0 is unused -- presumably a row index; TODO confirm).
        FIX: the original opened the file without ever closing it; a
        ``with`` block now guarantees the handle is released.
        """
        path = 'keyword/noun.txt'
        with open(path, 'r') as f:
            # Iterating the file object replaces the manual
            # while(1)/readline/break loop and stops cleanly at EOF.
            for line in f:
                fields = line.split('\t')
                self.noun[fields[1]] = int(fields[2])

    def main(self):
        """Compute and attach a ``keywords`` list (top 7) to each cluster."""
        # Build the stopword set ONCE: membership tests inside the loop
        # were O(n) scans of a ~470-element list in the original.
        stopwords = set(self.stopWordList)
        for key in self.cluster_data:
            # Cluster '300' is deliberately skipped -- TODO confirm why.
            if key == '300':
                continue
            wordDic = {}                 # stemmed word -> occurrence count
            origin_stemmed_wordDic = {}  # stemmed word -> surface form kept
            for node in self.cluster_data[key].nodes:
                token_str = node['title']
                tokens = self.tokenizer(token_str)
                # Keep only tokens tagged as nouns.
                noun_tokens = [tok for tok, tag in self.pos_tagger(tokens)
                               if tag in self.useful_pos]
                for original in noun_tokens:
                    stemmed = self.stemmer.stem(original)
                    wordDic[stemmed] = wordDic.get(stemmed, 0) + 1
                    current = origin_stemmed_wordDic.get(stemmed)
                    # Keeps the SHORTEST surface form seen (ties favor the
                    # newer token), matching the original ternary.
                    # NOTE(review): if the longest form was intended here,
                    # the original comparison was inverted -- confirm.
                    if current is None or len(original) <= len(current):
                        origin_stemmed_wordDic[stemmed] = original
            wordList = []
            for stemmed in wordDic:
                if stemmed in stopwords:
                    continue
                word = Word()
                word.stemmed_word = stemmed
                word.num = wordDic[stemmed]
                word.origin_word = origin_stemmed_wordDic[stemmed]
                wordList.append(word)
            # Down-weight words that are globally common nouns.
            for word in wordList:
                lowered = word.origin_word.lower()
                if lowered in self.noun:
                    # FIX: force true division -- under Python 2 the original
                    # int/int truncated most scores to 0, flattening the
                    # ranking. float() keeps Python 3 behavior unchanged.
                    word.num = float(word.num) / self.noun[lowered]
            # Two stable sorts: score descending, ties broken alphabetically
            # by stemmed word.
            wordList.sort(key=attrgetter('stemmed_word'))
            wordList.sort(key=attrgetter('num'), reverse=True)
            # The top 7 original word forms become the cluster's keywords.
            self.cluster_data[key].keywords = [w.origin_word for w in wordList[:7]]


# FIX: `print 1` is Python 2-only statement syntax and a SyntaxError on
# Python 3; the call form prints the same thing on both versions.
# NOTE(review): this looks like leftover debug output -- consider removing.
print(1)