from nltk.parse import stanford
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize,word_tokenize
from nltk.corpus import wordnet
from nltk import word_tokenize, pos_tag
from nltk.stem import WordNetLemmatizer

import pymongo
from pymongo import MongoClient
import gridfs

# MongoDB connection to a local server; word gradings are looked up in the
# 'voca_packed' collection of the 'vocavola' database.
conn =  pymongo.MongoClient('127.0.0.1', 27017)
db = conn['vocavola']
collection = db['voca_packed']

# Deduplicated English stopwords plus punctuation tokens; both are stripped
# from lemmatized text before grading (see Sentence.grade_text).
list_stopWords=list(set(stopwords.words('english')))
english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%','\'s']

class Sentence(dict):
    """A dict-backed container for one piece of raw text.

    The text is stored under the ``"sentence"`` key. Helpers tokenize it,
    lemmatize it, and grade it against the ``word_index`` values stored in
    the module-level ``voca_packed`` MongoDB collection.
    """

    def __init__(self, sentence):
        """Store the raw text under the ``"sentence"`` key."""
        super().__init__()
        self.update({"sentence": sentence})

    def sent_token(self):
        """Split the stored text into sentences and cache the list under
        the ``"sent_token"`` key."""
        sentence = self.get('sentence')
        self.update({"sent_token": sent_tokenize(sentence)})

    def get_wordnet_pos(self, treebank_tag):
        """Map a Penn Treebank POS tag to the matching WordNet POS constant.

        Returns None for tags with no WordNet equivalent; callers fall
        back to NOUN.
        """
        if treebank_tag.startswith('J'):
            return wordnet.ADJ
        if treebank_tag.startswith('V'):
            return wordnet.VERB
        if treebank_tag.startswith('N'):
            return wordnet.NOUN
        if treebank_tag.startswith('R'):
            return wordnet.ADV
        return None

    def lemmatize_sentence(self):
        """POS-tag the stored text and return its tokens lemmatized with
        the tag-appropriate WordNet POS."""
        sentence = self.get('sentence')
        lemmatizer = WordNetLemmatizer()
        res = []
        for word, pos in pos_tag(word_tokenize(sentence)):
            # WordNet has no POS for e.g. determiners; default to NOUN.
            wordnet_pos = self.get_wordnet_pos(pos) or wordnet.NOUN
            res.append(lemmatizer.lemmatize(word, pos=wordnet_pos))
        return res

    def _word_index(self, word):
        """Return the integer ``word_index`` stored for *word*, or 0 when
        the word is unknown or the lookup fails."""
        try:
            doc = collection.find_one({'word': word})
            return int(doc['word_index'])
        except (TypeError, KeyError, ValueError):
            # doc is None (word not in DB), 'word_index' missing, or its
            # value is not convertible to int.
            return 0
        except Exception:
            # Best-effort grading (as the original intended): treat DB or
            # driver errors as "unknown word" rather than aborting, but no
            # longer swallow KeyboardInterrupt/SystemExit via a bare except.
            return 0

    def grade_text(self):
        """Lemmatize the stored text, drop punctuation and stopwords, and
        return the list of word_index grades (0 for unknown words)."""
        words = [w.lower() for w in self.lemmatize_sentence()]
        # Build the ignore set once so membership tests are O(1) per word.
        ignore = set(english_punctuations) | set(list_stopWords)
        content_words = [w for w in words if w not in ignore]
        return [self._word_index(w) for w in content_words]