# -*- coding: UTF-8 -*-

from nltk.corpus import stopwords, wordnet
from nltk.stem import WordNetLemmatizer
import re
import nltk
from nltk.stem.snowball import SnowballStemmer
import nltk.data  

# Pre-trained Punkt sentence splitter for English (loads a pickled model from
# nltk_data at import time).
para_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')  
# Snowball ("Porter2") stemmer for English.
stemmer = SnowballStemmer("english")
# WordNet-based lemmatizer; used by lemmatize() below.
lemmatizer = WordNetLemmatizer()
# English stopword set; used by remove_stopwords() below.
stopworddic = set(stopwords.words('english'))

# Split a paragraph into sentences with the pre-loaded Punkt tokenizer.
def split_sentence(paragraph):
    """Return the list of sentences found in *paragraph*."""
    sentences = para_tokenizer.tokenize(paragraph)
    return sentences

# Verbose-mode regex used for word tokenization (tried left to right).
# BUG FIX: the original punctuation class ended in ":-_", which inside a
# character class is the RANGE ':' (0x3A) .. '_' (0x5F) and so accidentally
# matched '<', '=', '>', '@', uppercase letters, '[', ']', '^', etc.
# The '-' is now placed last in the class so it is a literal hyphen.
pattern = r"""(?x)                 # set flag to allow verbose regexps
              (?:[A-Z]\.)+         # abbreviations, e.g. U.S.A.
              |\d+(?:\.\d+)?%?     # numbers, incl. decimals and percentages
              |\w+(?:[-']\w+)*     # words w/ optional internal hyphens/apostrophes
              |\.\.\.              # ellipsis
              |[.,;"'?():_`-]      # punctuation kept as separate tokens
          """

# Punctuation / symbol tokens to drop after tokenization.  A frozenset gives
# O(1) membership tests (remove_characters checks `word in characters` once
# per token); the original list scanned linearly on every lookup.
characters = frozenset({',', '.', ':', ';', '?',
                        '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%',
                        '-', '...', '^', '{', '}'})

# Case normalization.
def to_lower_case(text):
    """Return *text* converted to lower case."""
    lowered = text.lower()
    return lowered

# Tokenize one sentence with the module-level regexp pattern.
def sentence_segment(text):
    """Split *text* into word/number/punctuation tokens."""
    tokens = nltk.regexp_tokenize(text, pattern)
    return tokens

# Part-of-speech tagging.
def pos_tag(word_list):
    """Tag each token in *word_list* with NLTK's default POS tagger."""
    tagged = nltk.pos_tag(word_list)
    return tagged

# Strip punctuation tokens.
def remove_characters(word_list):
    """Return *word_list* without the punctuation tokens in `characters`."""
    return [tok for tok in word_list if tok not in characters]

# Stemming via Snowball, with the project's trailing-'e' exception (see the
# note near the bottom of the file): words already ending in 'e' are kept
# unchanged instead of being stemmed.
def stem(word_list):
    """Stem every token in *word_list* with the Snowball stemmer.

    Tokens ending in 'e' are returned as-is (deliberate project quirk).
    Fix: uses str.endswith instead of word[-1], which raised IndexError
    when an empty-string token slipped through.
    """
    return [word if word.endswith('e') else stemmer.stem(word)
            for word in word_list]

# Lemmatization: POS-tag each token, map the Penn Treebank tag to a WordNet
# part of speech, and lemmatize with it (noun is the fallback).
def lemmatize(word_list):
    """Return the WordNet lemma of every token in *word_list*."""
    # First letter of the Treebank tag -> WordNet POS constant.
    tag_to_pos = {
        'J': wordnet.ADJ,
        'V': wordnet.VERB,
        'N': wordnet.NOUN,
        'R': wordnet.ADV,
    }
    return [
        lemmatizer.lemmatize(word, pos=tag_to_pos.get(tag[:1], wordnet.NOUN))
        for word, tag in nltk.pos_tag(word_list)
    ]

# Stopword filtering.
def remove_stopwords(word_list):
    """Return *word_list* without English stopwords."""
    kept = [tok for tok in word_list if tok not in stopworddic]
    return kept

# Full sentence preprocessing pipeline; returns a flat list of tokens.
def sentence_preprocess(sentence):
    """Lowercase, tokenize, strip punctuation, stem, and drop stopwords."""
    tokens = sentence_segment(to_lower_case(sentence))
    tokens = remove_characters(tokens)
    tokens = stem(tokens)
    return remove_stopwords(tokens)

def sentence_preprocess_1(sentence):
    """Like sentence_preprocess, but without the stemming step."""
    tokens = sentence_segment(to_lower_case(sentence))
    return remove_stopwords(remove_characters(tokens))

def sentence_preprocess_2(word_list):
    """Stem an already-tokenized list, then drop stopwords."""
    return remove_stopwords(stem(word_list))

def word_preprocess(word):
    """Preprocess a single word; return '' if it is filtered out entirely."""
    tokens = sentence_preprocess(word)
    return tokens[0] if tokens else ''

# Paragraph preprocessing: one preprocessed token list per sentence (2-D list).
def paragraph_preprocess(paragraph):
    """Split *paragraph* into sentences and preprocess each one."""
    return [sentence_preprocess(s) for s in split_sentence(paragraph)]
    
# NOTE: rationale for the trailing-'e' special case above — the intended rule
# is `wnl.lemmatize(word) if wnl.lemmatize(word).endswith('e') else porter.stem(word)`,
# i.e. keep words whose lemma ends in 'e' rather than stemming them.
if __name__ == '__main__':
    # Smoke-test the preprocessing pipeline on a short paragraph.
    test_text = 'I was just a kid, and loved it very much! What a fantastic song! My name is Tom. I am a boy. I like soccer!'
    print(paragraph_preprocess(test_text))
    import utils
    mashups_raw = utils.load_json('mashup.json')
    print(len(mashups_raw))
    # NOTE(review): remove_useless_mashup is not defined or imported anywhere
    # in this file, so this line raises NameError at runtime. It presumably
    # lives in `utils` — confirm and qualify as utils.remove_useless_mashup.
    mashups = remove_useless_mashup(mashups_raw)
    print(len(mashups))
    apis_raw = utils.load_json('apis.json')
    print(len(apis_raw))
    # NOTE(review): remove_useless_api is likewise undefined here (NameError).
    apis = remove_useless_api(apis_raw)
    print(len(apis))