import numpy as np
import torch
import torch.nn as nn
from transformers import BertTokenizer
# a=2*torch.ones(2,5,10)
# b=3*torch.ones(2,3,10)
#
#
#
# c=torch.bmm(a,b.transpose(1,2))
# print(c.shape)
# print(c)
# s=nn.Softmax(dim=2)
# c=s(c)
# print(c)
#
# d=torch.bmm(c,b)
#
#
# print(d.shape)

# from transformers import BertTokenizer
#
# tokenizer = BertTokenizer.from_pretrained('hfl/chinese-bert-wwm', do_lower_case=True)
# tgtt = tokenizer.convert_ids_to_tokens(list(tgt))
#         print(tgtt)
#         print(ctxt)

# Round-trip a fixed Chinese sentence through the BERT-wwm tokenizer:
# text -> wordpiece tokens -> vocabulary ids -> tokens again, printing
# both the id sequence and the recovered tokens for inspection.
tgt = '被害人贡某某某系钝性外力致重度颅脑损伤死亡。'

# NOTE: from_pretrained downloads / loads the vocab on first use
# (network + disk side effect).
tokenizer = BertTokenizer.from_pretrained('hfl/chinese-bert-wwm', do_lower_case=True)

# Forward direction: sentence -> wordpiece tokens -> vocabulary ids.
tgt_list = tokenizer.tokenize(tgt)
subtokens = tokenizer.convert_tokens_to_ids(tgt_list)
print(subtokens)

# Reverse direction: ids -> tokens, to confirm the mapping round-trips.
tgtt = tokenizer.convert_ids_to_tokens(subtokens)
print(tgtt)

# [1, 6158, 2154, 782, 6567, 3378, 3378, 3378, 5143, 7162, 2595, 1912, 1213, 5636, 7028, 2428, 7565, 5554, 2938, 839, 3647, 767, 511, 2]
# 被害人贡某某某系钝性外力致重度颅脑损伤死亡。



# [1, 6162, 1444, 786, 676, 3382, 3382, 4310, 5393, 716, 864, 6720, 823, 6848, 3423, 515, 2]
# ['[unused1]', '裂', '呓', '仃', '三', '柔', '柔', '犹', '罵', '丿', '佘', '躺', '伊', '选', '桁', '〈', '[unused2]']
#被告人丁某某犯罪主体身份适格。
