import re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords

# Sample tweet-like text used by the demo below. It deliberately contains
# every artifact clean() strips: an @-mention, a hashtag, an HTML entity,
# cashtags ($TSLA, $AAPL), a shortened URL, and redundant whitespace.
input_str = '  @hello   #goodbug &1234; \
   RT @Amila #Test\nTom\'s newly listed Co &amp; Mary\'s unlisted  \
    Group to supply tech for nlTK.\nh $TSLA $AAPL http://t.co/x3wu2u32ush'


def clean(input_str):
    """Clean a raw tweet-like string and return the filtered text.

    Removal steps, in order:
      1. HTML entities (``&amp;``), hashtags (``#tag``) and mentions (``@user``)
      2. Ticker/cashtag symbols (``$TSLA``)
      3. Hyperlinks
      4. Words of 1-2 characters (abbreviations and leftovers)
      5. Redundant whitespace

    The result is tokenized with NLTK and English stopwords are dropped.
    Each intermediate stage is printed for inspection.

    :param input_str: raw text to clean
    :return: the cleaned, space-joined text
    """
    # Strip HTML entities, hashtags and @-mentions in one pass.
    text_no_special_entities = re.sub(r'&\w*;|#\w*|@\w*', '', input_str)
    print("去除特殊标签后：", text_no_special_entities, '\n')

    # Strip ticker/cashtag symbols such as $TSLA.
    text_no_tickers = re.sub(r'\$\w*', '', text_no_special_entities)
    print("去掉价值符号后：", text_no_tickers, '\n')

    # Strip hyperlinks. \S+ stops at the first whitespace; the previous
    # greedy pattern (https?:\/\/.*\/\w*) deleted everything up to the
    # last slash on the line, wiping any text that followed a URL.
    text_no_hyperlinks = re.sub(r'https?://\S+', '', text_no_tickers)
    print("去除超链接：", text_no_hyperlinks, '\n')

    # Drop words of 1-2 characters (abbreviations, stray fragments).
    text_no_small_words = re.sub(r'\b\w{1,2}\b', '', text_no_hyperlinks)
    print("去掉名词缩写：", text_no_small_words, '\n')

    # Collapse runs of whitespace and trim leading/trailing spaces.
    text_no_whitespace = re.sub(r'\s\s+', ' ', text_no_small_words)
    text_no_whitespace = text_no_whitespace.strip(' ')
    print("去掉多余空格:", text_no_whitespace, '\n')

    tokens = word_tokenize(text_no_whitespace)
    print("分词结果:", tokens, '\n')

    # Build the stopword set once: O(1) membership per token instead of
    # re-reading the whole corpus list on every iteration.
    stopword_set = set(stopwords.words('english'))
    list_no_stopwords = [i for i in tokens if i not in stopword_set]
    print("去停用词：", list_no_stopwords, '\n')

    text_filtered = ' '.join(list_no_stopwords)
    print('过滤后:', text_filtered)

    # Return the result instead of discarding it (was a bare `pass`).
    return text_filtered


if __name__ == '__main__':
    # Run the demo only when executed as a script, not when imported.
    clean(input_str)
