from torchtext.data.utils import get_tokenizer
import torchtext

# Basic-English tokenizer: lowercases and splits on whitespace/punctuation.
tokenizer = get_tokenizer('basic_english')
text = 'Could have done better.Could'
print(tokenizer(text))

from torchtext.vocab import vocab
from collections import Counter, OrderedDict

# Bag-of-words: count token frequencies.
counter = Counter(tokenizer(text))
# Counter.most_common() already yields (token, count) pairs sorted by
# descending frequency (ties keep insertion order, same as the stable
# manual sort it replaces), so no explicit sorted() call is needed.
ordered_dict = OrderedDict(counter.most_common())

# Build the vocabulary object, reserving an index for unknown tokens.
# Use the `vocab` factory imported above — previously it was imported
# but unused, and the fully qualified torchtext.vocab.vocab was called.
vocab_object = vocab(ordered_dict, specials=["<unk>"])
# Out-of-vocabulary lookups fall back to the <unk> index.
vocab_object.set_default_index(vocab_object["<unk>"])

# Quick check: look up the index of a known token.
print(vocab_object['done'])

import string


def create_vocabulary(text_list):
    """Build a torchtext vocabulary from a list of raw strings.

    Tokenizes each string, removes punctuation tokens, and builds a
    frequency-ordered vocabulary with an ``<unk>`` fallback index.

    Parameters
    ----------
    text_list : list[str]
        Raw input strings.

    Returns
    -------
    tuple
        ``(vocab_object, clean_text_list, clean_index_list)`` where
        ``vocab_object`` maps tokens to indices (unknown tokens map to
        the ``<unk>`` index), ``clean_text_list`` holds each input with
        punctuation removed and tokens re-joined by spaces, and
        ``clean_index_list`` holds the corresponding index lists
        (an empty list for a text with no surviving tokens).
    """
    # Punctuation tokens to drop (the tokenizer emits them as
    # single-character tokens).
    punctuation = set(string.punctuation)

    clean_text_list = []     # cleaned string per input text
    clean_tokens_lists = []  # cleaned token list per input text
    counter = Counter()      # overall token frequencies
    for text in text_list:
        clean_tokens = [w for w in tokenizer(text) if w not in punctuation]
        clean_tokens_lists.append(clean_tokens)
        clean_text_list.append(' '.join(clean_tokens))
        counter.update(clean_tokens)

    # Build the vocabulary in descending-frequency order, with an
    # <unk> special that out-of-vocabulary lookups fall back to.
    ordered_dict = OrderedDict(counter.most_common())
    vocab_object = torchtext.vocab.vocab(ordered_dict, specials=["<unk>"])
    vocab_object.set_default_index(vocab_object["<unk>"])

    # Convert each token list to indices. Using the token lists directly
    # (instead of re-splitting the joined strings) fixes an edge case:
    # ''.split(' ') yields [''], which would wrongly encode an empty
    # text as [<unk>-index] rather than [].
    clean_index_list = [vocab_object.lookup_indices(tokens)
                        for tokens in clean_tokens_lists]

    # Return: vocabulary object, cleaned strings, and their index lists.
    return vocab_object, clean_text_list, clean_index_list