import re

from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer

from nltk.stem.snowball import SnowballStemmer


# Shared NLP resources, built once at import time and reused by the
# helpers below.
lemmatizer = WordNetLemmatizer()  # WordNet-based lemmatizer (defaults to noun POS)
stemmer = SnowballStemmer('english')  # English Snowball ("Porter2") stemmer

def remove_digit(sentence):
    """Return *sentence* with every run of decimal digits deleted."""
    return re.sub(r'\d+', '', sentence)

def remove_punctuation(sentence):
    """Return *sentence* with every character in the set below deleted.

    The set covers ASCII and full-width CJK punctuation. Note that the
    hyphen is deliberately absent: callers deal with '-' themselves
    (typically by replacing it with a space).
    """
    punctuation = r"""[·’!"\#$%&\'()＃！（）*+,./:;<=>?\@，：?￥★、…．＞【】［］《》？“”‘’\[\\]^_`{|}~]+"""
    # maketrans with a third argument maps every listed character to None,
    # i.e. deletes it — one C-level pass over the string.
    return sentence.translate(str.maketrans('', '', punctuation))


stop_words = set([remove_punctuation(s) for s in stopwords.words('english')+['']])

def wordlevel_lemmatize(sentence):
    """Lemmatize *sentence* word by word, dropping stop words.

    Punctuation is stripped, hyphens become spaces, the text is
    lower-cased, and each remaining token that is not a stop word is
    lemmatized with the module-level WordNet lemmatizer.

    Returns the space-joined lemmas as a single string. Empty tokens
    produced by consecutive spaces are removed because '' is a member of
    ``stop_words``.
    """
    sentence = remove_punctuation(sentence).replace('-', ' ')
    # Fix: use the idiomatic `word not in` membership test (was `not word in`).
    return ' '.join(lemmatizer.lemmatize(word)
                    for word in sentence.lower().split(' ')
                    if word not in stop_words)

def lemmatize_dict(sentence):
    """Lemmatize *sentence* and build a character-offset alignment map.

    Returns a tuple ``(lemmatized, l_dict)`` where ``lemmatized`` is the
    space-joined lemmas of the non-stop-word tokens, and ``l_dict`` maps
    ``(start, end)`` character spans in the lemmatized output to
    ``(start, end)`` spans in the lower-cased input sentence.

    Hyphenated tokens are split on '-' and each part is lemmatized and
    mapped separately, so one input token can contribute several entries.

    NOTE(review): the stop-word test runs on the raw token, before
    punctuation stripping — e.g. "the," would NOT be filtered. Confirm
    this is intended.
    """
    sentence = sentence.lower()
    pointer = 0   # scan position in the (lowered) input sentence
    l_dict = {}   # (out_start, out_end) -> (in_start, in_end)
    res = ''      # lemmatized output, built incrementally
    for word in sentence.split(' '):
        if word not in stop_words:
            # Split the token into its bare parts (hyphens become spaces
            # after punctuation removal).
            compound_word = remove_punctuation(word).replace("-", " ").split(' ')
            # `pos` marks the token's start in the input; `pointer` then
            # advances through the token part by part.
            pos = pointer = sentence.find(word, pointer)
            for every_word in compound_word:
                lemma_word = lemmatizer.lemmatize(every_word)
                # Next hyphen inside the current token, if any: it ends
                # the current part's span in the input.
                hyphen_pos = sentence.find('-', pointer, pos+len(word))
                if hyphen_pos != -1:
                    # len(res) is read BEFORE appending, so the key is the
                    # lemma's span in the output string.
                    l_dict[(len(res), len(res) + len(lemma_word))] = (pointer, hyphen_pos)
                    pointer = hyphen_pos + 1
                else:
                    # Last (or only) part: its span runs to the token's end.
                    l_dict[(len(res), len(res) + len(lemma_word))] = (pointer, pos+len(word))
                    pointer = pos + len(word)
                res = f'{res}{lemma_word} '
                # Skip separator characters so `pointer` rests on the next
                # character of interest.
                while pointer < len(sentence) and sentence[pointer] in [' ', '-']:
                    pointer += 1
    return res.strip(), l_dict


def wordlevel_stem(sentence):
    """Snowball-stem every whitespace-separated token of *sentence*.

    The text is lower-cased, punctuation is stripped, and hyphens become
    spaces before stemming. Unlike ``wordlevel_lemmatize``, stop words are
    NOT filtered here.
    """
    cleaned = remove_punctuation(sentence.lower()).replace('-', ' ')
    return ' '.join(map(stemmer.stem, cleaned.split(' ')))


def old_lemmatize_dict(sentence):
    """Legacy variant of ``lemmatize_dict``.

    Returns ``(lemmatized, l_dict)`` where ``l_dict`` maps the start
    offset of each lemma in the output string to the start offset of the
    corresponding token in the cleaned (lower-cased, punctuation-stripped)
    input. Hyphens are removed from tokens before lemmatizing; only start
    offsets are recorded, not spans.
    """
    cleaned = remove_punctuation(sentence.lower())
    offset_map = {}
    lemmas = []
    out_len = 0   # length the output string has grown to so far
    cursor = 0    # scan position in the cleaned input
    for token in cleaned.split(' '):
        if token in stop_words:
            continue
        start = cleaned.find(token, cursor)
        offset_map[out_len] = start
        cursor = start + len(token)
        if cursor < len(cleaned) and cleaned[cursor] == ' ':
            cursor += 1
        lemma = lemmatizer.lemmatize(token.replace("-", ""))
        lemmas.append(lemma)
        out_len += len(lemma) + 1  # +1 for the joining space
    return ' '.join(lemmas), offset_map


if __name__ == '__main__':
    # Quick smoke test of the stemming pipeline.
    demo = 'Meera Hahn-me is testing the result of runned transition'
    print(wordlevel_stem(demo))
