# -*- coding: utf-8 -*-
# @Time    : 2020/12/20 下午11:12
# @Author  : lilong

import numpy as np

import torch
from bert_torch.model.embedding.token import TokenEmbedding
from bert_torch.model.embedding.position import PositionalEmbedding


# Build the demo input: ints 1..9, shuffled in place, then wrapped as a tensor.
seq = np.arange(1, 10)
print("seq-1:", seq)
np.random.shuffle(seq)
print("seq-2", seq)
seq = torch.from_numpy(seq)
print("seq-size:", seq.size())

# Reshape [9] -> [1, 9, 1] by inserting singleton dims at the front and back.
seq = seq.unsqueeze(0)
print("seq-unsequeeze-size1:", seq.size())  # leading batch-like dim added
seq = seq.unsqueeze(2)
print("seq-unsequeeze-size2:", seq.size())  # trailing singleton dim added
print("----------------------------")

# Exercise the token embedding: check the output shape and peek at the
# randomly-initialized lookup result.
token_embedding = TokenEmbedding(vocab_size=10, embed_size=6)
token_out = token_embedding(seq)
print("token shape:", token_out.size())
print("生成token的随机化初始矩阵:", token_out)
print("----------------------------")

# Exercise the positional embedding — mainly a sanity check on the output size.
pos_embedding = PositionalEmbedding(6, max_len=512)
pos_out = pos_embedding(seq)
print("pos size:", pos_out.size())
print(pos_out)


# label_ = np.ones(10)
# print(label_)
