﻿#conda install torchtext==0.14.1 torch==1.13.1 -c pytorch
import torch
import torch.nn as nn
from torch.nn import functional as F 


from torchtext.datasets import PennTreebank
from torchtext.vocab import build_vocab_from_iterator
from torchtext.data.utils import get_tokenizer


# Hyper-parameters intended for batching the corpus (not used further in this demo).
batch_size, num_steps = 32, 35

# PennTreebank yields one pre-tokenised sentence (a plain string) per iteration.
# NOTE: fixed `valid_item` -> `valid_iter`; the name was a typo and the iterator
# is unused below, so the rename is safe.
train_iter, test_iter, valid_iter = PennTreebank(root='./data', split=('train', 'test', 'valid'))

train_list = list(train_iter)
# Keep a single sentence for the demo; slicing already returns a list,
# so the former redundant `list(...)` wrapper was dropped.
data = train_list[8:9]

print('train_data:\n', data)

# Tokenizer: lower-cases and splits on whitespace/punctuation.
tokenizer = get_tokenizer('basic_english')


def yield_tokens(data_iter):
    """Yield the token list of each line in *data_iter*."""
    yield from map(tokenizer, data_iter)

# Build the vocabulary, reserving special tokens for unknown / padding /
# begin-of-sentence / end-of-sentence markers.
vocab = build_vocab_from_iterator(
    yield_tokens(data),
    specials=['<unk>', '<pad>', '<bos>', '<eos>'],
)
# Out-of-vocabulary tokens map to '<unk>'.
vocab.set_default_index(vocab['<unk>'])

# Dump the vocabulary; entries are ordered by descending frequency,
# ties broken alphabetically (specials first).
for position, word in enumerate(vocab.get_itos()):
    print(f'{position}:{word}')

# Encode every sentence as a sequence of vocabulary indices.
def _encode(line):
    """Map each token of *line* to its index in `vocab`."""
    return [vocab[tok] for tok in tokenizer(line)]

indices_list = [_encode(line) for line in data]

print("索引：", indices_list)

# Dense embedding lookup: each vocabulary id becomes a learned 100-dim vector.
embedding = nn.Embedding(len(vocab), 100)
for sentence_ids in indices_list:
    ids_tensor = torch.tensor(sentence_ids)
    vectors = embedding(ids_tensor)
    print(f'embedded[{sentence_ids}]:\n', vectors)


