
from nltk import word_tokenize, pos_tag, ne_chunk
import numpy as np

def t1():
    """Tokenize a Chinese sentence at the character level.

    Wraps the character list with sentinel tokens ``BOS``/``EOS`` and
    prints the resulting (single-sentence) batch.

    Returns:
        list[list[str]]: e.g. ``[['BOS', '媽', '咪', ..., 'EOS']]``.
    """
    text = "媽咪，我可以去游泳嗎？"
    # Chinese has no whitespace word boundaries, so split per character
    # and add begin/end-of-sequence markers.
    en = [["BOS"] + list(text) + ["EOS"]]
    print(en)
    return en

def t2():
    """Build a token-to-index vocabulary from a list of tokens.

    Indices start at 2 (0 is reserved for ``UNK``; 1 is presumably left
    for a padding token — TODO confirm against the consuming model).

    Returns:
        dict[str, int]: mapping of each token to its integer index,
        plus ``'UNK' -> 0``.
    """
    tokens = ['a', 'b', 'c']
    vocab = {tok: idx + 2 for idx, tok in enumerate(tokens)}
    vocab['UNK'] = 0
    print(vocab)
    return vocab

def t3():
    """Print and return ``np.arange(1, 3)`` (i.e. array([1, 2])).

    Returns:
        numpy.ndarray: 1-D integer array — note ``arr`` is an ndarray,
        not a Python list (the old name ``list1`` was misleading).
    """
    arr = np.arange(1, 3)
    print(arr)
    return arr
if __name__ == '__main__':
    # Scratch runner: uncomment the experiment to execute.
    # t1()
    # t2()
    t3()