iioSnail committed
Commit 601e637 · 1 Parent(s): b71d0a4

Upload 8 files

Files changed (4)
  1. bert_tokenizer.py +85 -0
  2. config.json +1 -1
  3. modeling_glycebert.py +205 -56
  4. tokenizer_config.json +1 -1
bert_tokenizer.py ADDED
@@ -0,0 +1,85 @@
+import json
+import os
+from pathlib import Path
+from typing import List
+
+import tokenizers
+import torch
+from pypinyin import pinyin, Style
+
+try:
+    from tokenizers import BertWordPieceTokenizer
+except:
+    from tokenizers.implementations import BertWordPieceTokenizer
+
+from transformers import BertTokenizerFast
+
+
+class ChineseBertTokenizer(BertTokenizerFast):
+
+    def __init__(self, **kwargs):
+        super(ChineseBertTokenizer, self).__init__(**kwargs)
+
+        bert_path = Path(os.path.abspath(__file__)).parent
+        print("bert_path", bert_path)
+        vocab_file = os.path.join(bert_path, 'vocab.txt')
+        config_path = os.path.join(bert_path, 'config')
+        self.max_length = 512
+        self.tokenizer = BertWordPieceTokenizer(vocab_file)
+
+        # load pinyin map dict
+        with open(os.path.join(config_path, 'pinyin_map.json'), encoding='utf8') as fin:
+            self.pinyin_dict = json.load(fin)
+        # load char id map tensor
+        with open(os.path.join(config_path, 'id2pinyin.json'), encoding='utf8') as fin:
+            self.id2pinyin = json.load(fin)
+        # load pinyin map tensor
+        with open(os.path.join(config_path, 'pinyin2tensor.json'), encoding='utf8') as fin:
+            self.pinyin2tensor = json.load(fin)
+
+    def tokenize_sentence(self, sentence):
+        # convert sentence to ids
+        tokenizer_output = self.tokenizer.encode(sentence)
+        bert_tokens = tokenizer_output.ids
+        pinyin_tokens = self.convert_sentence_to_pinyin_ids(sentence, tokenizer_output)
+        # assert: token nums should be same as pinyin token nums
+        assert len(bert_tokens) <= self.max_length
+        assert len(bert_tokens) == len(pinyin_tokens)
+        # convert list to tensor
+        input_ids = torch.LongTensor(bert_tokens)
+        pinyin_ids = torch.LongTensor(pinyin_tokens).view(-1)
+        return input_ids, pinyin_ids
+
+    def convert_sentence_to_pinyin_ids(self, sentence: str, tokenizer_output: tokenizers.Encoding) -> List[List[int]]:
+        # get pinyin of a sentence
+        pinyin_list = pinyin(sentence, style=Style.TONE3, heteronym=True, errors=lambda x: [['not chinese'] for _ in x])
+        pinyin_locs = {}
+        # get pinyin of each location
+        for index, item in enumerate(pinyin_list):
+            pinyin_string = item[0]
+            # not a Chinese character, pass
+            if pinyin_string == "not chinese":
+                continue
+            if pinyin_string in self.pinyin2tensor:
+                pinyin_locs[index] = self.pinyin2tensor[pinyin_string]
+            else:
+                ids = [0] * 8
+                for i, p in enumerate(pinyin_string):
+                    if p not in self.pinyin_dict["char2idx"]:
+                        ids = [0] * 8
+                        break
+                    ids[i] = self.pinyin_dict["char2idx"][p]
+                pinyin_locs[index] = ids
+
+        # find chinese character location, and generate pinyin ids
+        pinyin_ids = []
+        for idx, (token, offset) in enumerate(zip(tokenizer_output.tokens, tokenizer_output.offsets)):
+            if offset[1] - offset[0] != 1:
+                pinyin_ids.append([0] * 8)
+                continue
+            if offset[0] in pinyin_locs:
+                pinyin_ids.append(pinyin_locs[offset[0]])
+            else:
+                pinyin_ids.append([0] * 8)
+
+        return pinyin_ids
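
For reference, a minimal usage sketch of the new class (run from inside a local clone of the repo, since __init__ resolves vocab.txt and the config/ pinyin files relative to bert_tokenizer.py; the example sentence is a placeholder):

from bert_tokenizer import ChineseBertTokenizer

tokenizer = ChineseBertTokenizer.from_pretrained(".")
input_ids, pinyin_ids = tokenizer.tokenize_sentence("我喜欢猫")  # placeholder sentence
print(input_ids.shape)   # [seq_len] WordPiece ids, including [CLS]/[SEP]
print(pinyin_ids.shape)  # [seq_len * 8]: eight pinyin-character slots per token, flattened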
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "./chinesebert-base",
+  "_name_or_path": "../ChineseBERT-base",
   "architectures": [
     "GlyceBertForMaskedLM"
   ],
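
Worth noting, since it affects how the model below loads: the FusionBertEmbeddings class added to modeling_glycebert.py resolves its pinyin map and glyph .npy files relative to config.name_or_path, so the path recorded here has to point at a directory that actually contains the config/ folder. A minimal sketch, assuming a local clone at ./ChineseBERT-base:

import os
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./ChineseBERT-base")
# This is the directory FusionBertEmbeddings will read pinyin_map.json and the
# *.npy glyph font arrays from at model construction time.
print(os.path.join(config.name_or_path, "config"))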
modeling_glycebert.py CHANGED
@@ -8,21 +8,26 @@
 @version: 1.0
 @desc  : ChineseBert Model
 """
+import json
+import os
 import warnings
+from typing import List
 
+import numpy as np
 import torch
 from torch import nn
 from torch.nn import CrossEntropyLoss, MSELoss
+from torch.nn import functional as F
+
 try:
     from transformers.modeling_bert import BertEncoder, BertPooler, BertOnlyMLMHead, BertPreTrainedModel, BertModel
 except:
-    from transformers.models.bert.modeling_bert import BertEncoder, BertPooler, BertOnlyMLMHead, BertPreTrainedModel, BertModel
+    from transformers.models.bert.modeling_bert import BertEncoder, BertPooler, BertOnlyMLMHead, BertPreTrainedModel, \
+        BertModel
 
 from transformers.modeling_outputs import BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput, \
     QuestionAnsweringModelOutput, TokenClassifierOutput
 
-from models.fusion_embedding import FusionBertEmbeddings
-from models.classifier import BertMLP
 
 class GlyceBertModel(BertModel):
     r"""
@@ -65,19 +70,19 @@ class GlyceBertModel(BertModel):
         self.init_weights()
 
     def forward(
-        self,
-        input_ids=None,
-        pinyin_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        position_ids=None,
-        head_mask=None,
-        inputs_embeds=None,
-        encoder_hidden_states=None,
-        encoder_attention_mask=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
+            self,
+            input_ids=None,
+            pinyin_ids=None,
+            attention_mask=None,
+            token_type_ids=None,
+            position_ids=None,
+            head_mask=None,
+            inputs_embeds=None,
+            encoder_hidden_states=None,
+            encoder_attention_mask=None,
+            output_attentions=None,
+            output_hidden_states=None,
+            return_dict=None,
     ):
         r"""
         encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
@@ -176,21 +181,21 @@ class GlyceBertForMaskedLM(BertPreTrainedModel):
         return self.cls.predictions.decoder
 
     def forward(
-        self,
-        input_ids=None,
-        pinyin_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        position_ids=None,
-        head_mask=None,
-        inputs_embeds=None,
-        encoder_hidden_states=None,
-        encoder_attention_mask=None,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        **kwargs
+            self,
+            input_ids=None,
+            pinyin_ids=None,
+            attention_mask=None,
+            token_type_ids=None,
+            position_ids=None,
+            head_mask=None,
+            inputs_embeds=None,
+            encoder_hidden_states=None,
+            encoder_attention_mask=None,
+            labels=None,
+            output_attentions=None,
+            output_hidden_states=None,
+            return_dict=None,
+            **kwargs
     ):
         r"""
         labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
@@ -259,18 +264,18 @@ class GlyceBertForSequenceClassification(BertPreTrainedModel):
         self.init_weights()
 
     def forward(
-        self,
-        input_ids=None,
-        pinyin_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        position_ids=None,
-        head_mask=None,
-        inputs_embeds=None,
-        labels=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
+            self,
+            input_ids=None,
+            pinyin_ids=None,
+            attention_mask=None,
+            token_type_ids=None,
+            position_ids=None,
+            head_mask=None,
+            inputs_embeds=None,
+            labels=None,
+            output_attentions=None,
+            output_hidden_states=None,
+            return_dict=None,
    ):
         r"""
         labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
@@ -379,19 +384,19 @@ class GlyceBertForQuestionAnswering(BertPreTrainedModel):
         self.init_weights()
 
     def forward(
-        self,
-        input_ids=None,
-        pinyin_ids=None,
-        attention_mask=None,
-        token_type_ids=None,
-        position_ids=None,
-        head_mask=None,
-        inputs_embeds=None,
-        start_positions=None,
-        end_positions=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
+            self,
+            input_ids=None,
+            pinyin_ids=None,
+            attention_mask=None,
+            token_type_ids=None,
+            position_ids=None,
+            head_mask=None,
+            inputs_embeds=None,
+            start_positions=None,
+            end_positions=None,
+            output_attentions=None,
+            output_hidden_states=None,
+            return_dict=None,
     ):
         r"""
         start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
@@ -454,6 +459,7 @@ class GlyceBertForQuestionAnswering(BertPreTrainedModel):
             attentions=outputs.attentions,
         )
 
+
 class GlyceBertForTokenClassification(BertPreTrainedModel):
     def __init__(self, config, mlp=False):
         super().__init__(config)
@@ -530,3 +536,146 @@ class GlyceBertForTokenClassification(BertPreTrainedModel):
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+class FusionBertEmbeddings(nn.Module):
+    """
+    Construct the embeddings from word, position, glyph, pinyin and token_type embeddings.
+    """
+
+    def __init__(self, config):
+        super(FusionBertEmbeddings, self).__init__()
+        config_path = os.path.join(config.name_or_path, 'config')
+        font_files = []
+        for file in os.listdir(config_path):
+            if file.endswith(".npy"):
+                font_files.append(os.path.join(config_path, file))
+        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
+        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+        self.pinyin_embeddings = PinyinEmbedding(embedding_size=128, pinyin_out_dim=config.hidden_size,
+                                                 config_path=config_path)
+        self.glyph_embeddings = GlyphEmbedding(font_npy_files=font_files)
+
+        # self.LayerNorm is not snake-cased to stick with TensorFlow models variable name and be able to load
+        # any TensorFlow checkpoint file
+        self.glyph_map = nn.Linear(1728, config.hidden_size)
+        self.map_fc = nn.Linear(config.hidden_size * 3, config.hidden_size)
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
+
+    def forward(self, input_ids=None, pinyin_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
+        if input_ids is not None:
+            input_shape = input_ids.size()
+        else:
+            input_shape = inputs_embeds.size()[:-1]
+
+        seq_length = input_shape[1]
+
+        if position_ids is None:
+            position_ids = self.position_ids[:, :seq_length]
+
+        if token_type_ids is None:
+            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+        if inputs_embeds is None:
+            inputs_embeds = self.word_embeddings(input_ids)
+
+        # get char embedding, pinyin embedding and glyph embedding
+        word_embeddings = inputs_embeds  # [bs,l,hidden_size]
+        pinyin_embeddings = self.pinyin_embeddings(pinyin_ids)  # [bs,l,hidden_size]
+        glyph_embeddings = self.glyph_map(self.glyph_embeddings(input_ids))  # [bs,l,hidden_size]
+        # fusion layer
+        concat_embeddings = torch.cat((word_embeddings, pinyin_embeddings, glyph_embeddings), 2)
+        inputs_embeds = self.map_fc(concat_embeddings)
+
+        position_embeddings = self.position_embeddings(position_ids)
+        token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
+        embeddings = self.LayerNorm(embeddings)
+        embeddings = self.dropout(embeddings)
+        return embeddings
+
+
+class PinyinEmbedding(nn.Module):
+    def __init__(self, embedding_size: int, pinyin_out_dim: int, config_path):
+        """
+        Pinyin Embedding Module
+        Args:
+            embedding_size: the size of each embedding vector
+            pinyin_out_dim: kernel number of conv
+        """
+        super(PinyinEmbedding, self).__init__()
+        with open(os.path.join(config_path, 'pinyin_map.json')) as fin:
+            pinyin_dict = json.load(fin)
+        self.pinyin_out_dim = pinyin_out_dim
+        self.embedding = nn.Embedding(len(pinyin_dict['idx2char']), embedding_size)
+        self.conv = nn.Conv1d(in_channels=embedding_size, out_channels=self.pinyin_out_dim, kernel_size=2,
+                              stride=1, padding=0)
+
+    def forward(self, pinyin_ids):
+        """
+        Args:
+            pinyin_ids: (bs*sentence_length*pinyin_locs)
+
+        Returns:
+            pinyin_embed: (bs,sentence_length,pinyin_out_dim)
+        """
+        # input pinyin ids for 1-D conv
+        embed = self.embedding(pinyin_ids)  # [bs,sentence_length,pinyin_locs,embed_size]
+        bs, sentence_length, pinyin_locs, embed_size = embed.shape
+        view_embed = embed.view(-1, pinyin_locs, embed_size)  # [(bs*sentence_length),pinyin_locs,embed_size]
+        input_embed = view_embed.permute(0, 2, 1)  # [(bs*sentence_length), embed_size, pinyin_locs]
+        # conv + max_pooling
+        pinyin_conv = self.conv(input_embed)  # [(bs*sentence_length),pinyin_out_dim,H]
+        pinyin_embed = F.max_pool1d(pinyin_conv, pinyin_conv.shape[-1])  # [(bs*sentence_length),pinyin_out_dim,1]
+        return pinyin_embed.view(bs, sentence_length, self.pinyin_out_dim)  # [bs,sentence_length,pinyin_out_dim]
+
+
+class BertMLP(nn.Module):
+    def __init__(self, config, ):
+        super().__init__()
+        self.dense_layer = nn.Linear(config.hidden_size, config.hidden_size)
+        self.dense_to_labels_layer = nn.Linear(config.hidden_size, config.num_labels)
+        self.activation = nn.Tanh()
+
+    def forward(self, sequence_hidden_states):
+        sequence_output = self.dense_layer(sequence_hidden_states)
+        sequence_output = self.activation(sequence_output)
+        sequence_output = self.dense_to_labels_layer(sequence_output)
+        return sequence_output
+
+
+class GlyphEmbedding(nn.Module):
+    """Glyph2Image Embedding"""
+
+    def __init__(self, font_npy_files: List[str]):
+        super(GlyphEmbedding, self).__init__()
+        font_arrays = [
+            np.load(np_file).astype(np.float32) for np_file in font_npy_files
+        ]
+        self.vocab_size = font_arrays[0].shape[0]
+        self.font_num = len(font_arrays)
+        self.font_size = font_arrays[0].shape[-1]
+        # N, C, H, W
+        font_array = np.stack(font_arrays, axis=1)
+        self.embedding = nn.Embedding(
+            num_embeddings=self.vocab_size,
+            embedding_dim=self.font_size ** 2 * self.font_num,
+            _weight=torch.from_numpy(font_array.reshape([self.vocab_size, -1]))
+        )
+
+    def forward(self, input_ids):
+        """
+        get glyph images for batch inputs
+        Args:
+            input_ids: [batch, sentence_length]
+        Returns:
+            images: [batch, sentence_length, self.font_num*self.font_size*self.font_size]
+        """
+        # return self.embedding(input_ids).view([-1, self.font_num, self.font_size, self.font_size])
+        return self.embedding(input_ids)
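
Putting the tokenizer and model together, a hedged end-to-end sketch (the paths and the pinyin_ids reshape are assumptions; PinyinEmbedding expects ids shaped [batch, seq_len, 8], so the flat tensor from tokenize_sentence is reshaped before the call):

import torch
from bert_tokenizer import ChineseBertTokenizer
from modeling_glycebert import GlyceBertForMaskedLM

# Run from a local clone so the __file__- and name_or_path-relative lookups resolve.
tokenizer = ChineseBertTokenizer.from_pretrained(".")
model = GlyceBertForMaskedLM.from_pretrained(".")

input_ids, pinyin_ids = tokenizer.tokenize_sentence("我喜欢猫")  # placeholder sentence
with torch.no_grad():
    outputs = model(
        input_ids=input_ids.unsqueeze(0),      # [1, seq_len]
        pinyin_ids=pinyin_ids.view(1, -1, 8),  # [1, seq_len, 8], assumed layout
    )
print(outputs.logits.shape)  # [1, seq_len, vocab_size]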
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "auto_map": {
     "AutoTokenizer": [
-      "tokenizer.ChineseBertTokenizer",
+      "bert_tokenizer.ChineseBertTokenizer",
       null
     ]
   },
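
With auto_map now pointing at bert_tokenizer.ChineseBertTokenizer instead of the old tokenizer module, remote-code loading should resolve the new class. A hedged sketch (the repo id is assumed, and it relies on the class finding vocab.txt and the config/ files next to bert_tokenizer.py at import time, per the __file__-relative lookups above):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("iioSnail/ChineseBERT-base", trust_remote_code=True)
print(type(tok).__name__)  # expected: ChineseBertTokenizer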