import string
import nltk
import os
from nltk.collocations import *
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk import SnowballStemmer

def nat_lang(strng):
    """Normalize a single token via WordNet lemmatization.

    Uses the module-level ``lmt`` lemmatizer. With no POS tag supplied,
    ``lemmatize`` defaults to treating the token as a noun.
    (A Snowball-stemmer variant was previously available here as a
    commented-out alternative; removed as dead code.)

    :param strng: raw token — callers pass lowercased text (the corpus
        is lowercased before tokenization).
    :return: the lemmatized form of the token.
    """
    return lmt.lemmatize(strng)


# --- Corpus loading and NLP setup (runs at import time) ---
module_path = os.path.dirname(__file__)
# os.path.join (not string concatenation) so the path stays relative when
# dirname(__file__) is '' — the old `module_path + '/data.csv'` resolved to
# the absolute path '/data.csv' in that case.
# `with` guarantees the file handle is closed even if read() raises.
f = open(os.path.join(module_path, 'data.csv'), 'r', encoding="gbk")
with f:
    raw = f.read()
lmt = WordNetLemmatizer()
stemmer = SnowballStemmer("english")

# English stopwords plus ASCII punctuation, as a set: O(1) membership per
# token instead of an O(n) list scan for every word in the corpus.
stop_words = set(stopwords.words('english')) | set(string.punctuation)

# Lowercase, tokenize, drop stopwords/punctuation, lemmatize each token.
tokens = nltk.word_tokenize(raw.lower())
output = [nat_lang(w) for w in tokens if w not in stop_words]

# Build bigrams and trigrams over the cleaned token stream.
bgs = nltk.bigrams(output)
tgs = nltk.trigrams(output)

# Merge the two frequency distributions and print the 50 most common
# n-grams, ordered by descending count then lexicographically,
# separated by '$$$$'.
fdist_bgs = nltk.FreqDist(bgs)
fdist_tgs = nltk.FreqDist(tgs)
print(*sorted((fdist_bgs + fdist_tgs).items(), key=lambda t: (-t[1], t[0]))[:50], sep='$$$$')


