import spacy
import re

"""
Pre-trained word vectors (GloVe 6B) can be downloaded from:
https://www.kaggle.com/datasets/anindya2906/glove6b
"""
nlp = spacy.load('en_core_web_sm')  # small English pipeline, loaded once at import time


def clean_text(text):
    """Normalize raw English text before tokenization.

    Removes URLs, expands common contractions, replaces runs of digits
    (followed by a space) with the placeholder ``NUM``, and finally strips
    every character except spaces, digits and ASCII letters.

    :param text: raw input string
    :return: cleaned string
    """
    # Strip URLs first so their punctuation never reaches the rules below.
    text = re.sub(r"https?://\S+", "", text)
    # Specific contractions go first, case-insensitively, so the generic
    # suffix rules below can't mangle them (lowercase "won't" previously
    # fell through to the n't rule and became "wo not").
    text = re.sub(r"what's", "what is", text, flags=re.IGNORECASE)
    text = re.sub(r"won't", "will not", text, flags=re.IGNORECASE)
    text = re.sub(r"can't", "can not", text, flags=re.IGNORECASE)
    text = re.sub(r"\'s", " ", text)
    text = re.sub(r"\'ve", " have", text)
    text = re.sub(r"n't", " not", text)
    text = re.sub(r"i'm", "i am", text, flags=re.IGNORECASE)
    text = re.sub(r"\'re", " are", text)
    text = re.sub(r"\'d", " would", text)
    text = re.sub(r"\'ll", " will", text)
    text = re.sub(r"e - mail", "email", text)
    # Raw string (the original "\d+ " was an invalid escape sequence) and a
    # trailing space so NUM does not fuse with the following word.
    text = re.sub(r"\d+ ", "NUM ", text)
    text = re.sub(r"<br />", '', text)
    # Drop everything except space, digits and ASCII letters: control
    # characters, ASCII punctuation, and all codepoints above U+007A.
    text = re.sub(r'[\u0000-\u0019\u0021-\u002f\u003a-\u0040\u005b-\u0060\u007b-\uffff]', '', text)

    return text


def tokenize(s):
    """Run the spaCy pipeline over *s* and return the lemmas of every
    token that is neither a stop word nor whitespace-only."""
    return [
        token.lemma_
        for token in nlp(s)
        if not token.is_stop and token.lemma_.strip()
    ]


# Pipeline: 1) clean the text  2) drop punctuation  3) tokenize  4) lemmatize
def tokenize_en(text):
    """
    Tokenizes English text from a string into a list of strings (tokens)
    """
    return tokenize(clean_text(text))


if __name__ == "__main__":
    # Quick smoke test on a sample tweet.
    sample_tweet = 'attacks Horrific attack on wife by muslim in Italy http://t.co/nY3l1oRZQb LiveLeak #News'
    print(tokenize_en(sample_tweet))



