MZhaovo committed
Commit ddb4de0
1 Parent(s): 1b69971

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. Data/TalkFlower_CNzh/config.json +96 -0
  2. README.md +2 -8
  3. __pycache__/attentions.cpython-39.pyc +0 -0
  4. __pycache__/commons.cpython-39.pyc +0 -0
  5. __pycache__/config.cpython-39.pyc +0 -0
  6. __pycache__/infer.cpython-39.pyc +0 -0
  7. __pycache__/models.cpython-39.pyc +0 -0
  8. __pycache__/modules.cpython-39.pyc +0 -0
  9. __pycache__/re_matching.cpython-39.pyc +0 -0
  10. __pycache__/transforms.cpython-39.pyc +0 -0
  11. __pycache__/utils.cpython-39.pyc +0 -0
  12. __pycache__/webui.cpython-39.pyc +0 -0
  13. app.py +107 -0
  14. assets/flower-2x.webp +0 -0
  15. assets/flower-glow-2x.webp +0 -0
  16. assets/hero-header-layer-1-large-up-2x.jpg +0 -0
  17. attentions.py +464 -0
  18. bert/bert-base-japanese-v3/.gitattributes +34 -0
  19. bert/bert-base-japanese-v3/README.md +53 -0
  20. bert/bert-base-japanese-v3/config.json +19 -0
  21. bert/bert-base-japanese-v3/pytorch_model.bin +3 -0
  22. bert/bert-base-japanese-v3/tokenizer_config.json +10 -0
  23. bert/bert-base-japanese-v3/vocab.txt +0 -0
  24. bert/bert-large-japanese-v2/.gitattributes +34 -0
  25. bert/bert-large-japanese-v2/README.md +53 -0
  26. bert/bert-large-japanese-v2/config.json +19 -0
  27. bert/bert-large-japanese-v2/pytorch_model.bin +3 -0
  28. bert/bert-large-japanese-v2/tokenizer_config.json +10 -0
  29. bert/bert-large-japanese-v2/vocab.txt +0 -0
  30. bert/bert_models.json +14 -0
  31. bert/chinese-roberta-wwm-ext-large/.gitattributes +9 -0
  32. bert/chinese-roberta-wwm-ext-large/README.md +57 -0
  33. bert/chinese-roberta-wwm-ext-large/added_tokens.json +1 -0
  34. bert/chinese-roberta-wwm-ext-large/config.json +28 -0
  35. bert/chinese-roberta-wwm-ext-large/pytorch_model.bin +3 -0
  36. bert/chinese-roberta-wwm-ext-large/special_tokens_map.json +1 -0
  37. bert/chinese-roberta-wwm-ext-large/tokenizer.json +0 -0
  38. bert/chinese-roberta-wwm-ext-large/tokenizer_config.json +1 -0
  39. bert/chinese-roberta-wwm-ext-large/vocab.txt +0 -0
  40. bert/deberta-v2-large-japanese/.gitattributes +34 -0
  41. bert/deberta-v2-large-japanese/README.md +111 -0
  42. bert/deberta-v2-large-japanese/config.json +38 -0
  43. bert/deberta-v2-large-japanese/pytorch_model.bin +3 -0
  44. bert/deberta-v2-large-japanese/special_tokens_map.json +9 -0
  45. bert/deberta-v2-large-japanese/spm.model +3 -0
  46. bert/deberta-v2-large-japanese/tokenizer.json +0 -0
  47. bert/deberta-v2-large-japanese/tokenizer_config.json +15 -0
  48. bert/deberta-v3-large/.gitattributes +27 -0
  49. bert/deberta-v3-large/README.md +93 -0
  50. bert/deberta-v3-large/config.json +22 -0
Data/TalkFlower_CNzh/config.json ADDED
@@ -0,0 +1,96 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 42,
+     "epochs": 1000,
+     "learning_rate": 0.0002,
+     "betas": [
+       0.8,
+       0.99
+     ],
+     "eps": 1e-09,
+     "batch_size": 12,
+     "fp16_run": false,
+     "lr_decay": 0.99995,
+     "segment_size": 16384,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "skip_optimizer": true
+   },
+   "data": {
+     "training_files": "filelists/train.list",
+     "validation_files": "filelists/val.list",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 700,
+     "cleaned_text": true,
+     "spk2id": {
+       "TalkFlower_CNzh": 0
+     }
+   },
+   "model": {
+     "use_spk_conditioned_encoder": true,
+     "use_noise_scaled_mas": true,
+     "use_mel_posterior_encoder": false,
+     "use_duration_discriminator": true,
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [
+       3,
+       7,
+       11
+     ],
+     "resblock_dilation_sizes": [
+       [
+         1,
+         3,
+         5
+       ],
+       [
+         1,
+         3,
+         5
+       ],
+       [
+         1,
+         3,
+         5
+       ]
+     ],
+     "upsample_rates": [
+       8,
+       8,
+       2,
+       2,
+       2
+     ],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [
+       16,
+       16,
+       8,
+       2,
+       2
+     ],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 256
+   },
+   "version": "2.0"
+ }
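For context, this hyperparameter file is the one app.py (added later in this commit) loads through utils.get_hparams_from_file. A minimal sketch of reading it the same way, assuming this repository's utils module is importable:

```python
# Minimal sketch, assuming this repo's utils module (mirrors the usage in app.py below).
import utils

hps = utils.get_hparams_from_file("Data/TalkFlower_CNzh/config.json")
print(hps.data.sampling_rate)  # 44100; app.py uses this to size the silence between pieces
print(hps.data.spk2id)         # {"TalkFlower_CNzh": 0}
```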
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
- title: AI TalkingFlower
- emoji: 👀
- colorFrom: yellow
- colorTo: green
+ title: AI_TalkingFlower
+ app_file: app.py
  sdk: gradio
  sdk_version: 4.5.0
- app_file: app.py
- pinned: false
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/attentions.cpython-39.pyc ADDED
Binary file (11.1 kB).
 
__pycache__/commons.cpython-39.pyc ADDED
Binary file (5.91 kB).
 
__pycache__/config.cpython-39.pyc ADDED
Binary file (7.13 kB).
 
__pycache__/infer.cpython-39.pyc ADDED
Binary file (3.98 kB).
 
__pycache__/models.cpython-39.pyc ADDED
Binary file (20.9 kB).
 
__pycache__/modules.cpython-39.pyc ADDED
Binary file (13 kB).
 
__pycache__/re_matching.cpython-39.pyc ADDED
Binary file (2.55 kB).
 
__pycache__/transforms.cpython-39.pyc ADDED
Binary file (3.92 kB).
 
__pycache__/utils.cpython-39.pyc ADDED
Binary file (11.9 kB).
 
__pycache__/webui.cpython-39.pyc ADDED
Binary file (2.4 kB).
 
app.py ADDED
@@ -0,0 +1,107 @@
+ import gradio as gr
+ import numpy as np
+ import torch
+ import os
+ import re_matching
+ from tools.sentence import split_by_language, sentence_split
+ import utils
+ from infer import infer, latest_version, get_net_g
+ import gradio as gr
+ import webbrowser
+ from config import config
+ from tools.translate import translate
+
+ from webui import reload_javascript
+
+ device = config.webui_config.device
+ if device == "mps":
+     os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+
+ def generate_audio(
+     slices,
+     sdp_ratio,
+     noise_scale,
+     noise_scale_w,
+     length_scale,
+     speaker,
+     language,
+ ):
+     audio_list = []
+     silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
+     with torch.no_grad():
+         for piece in slices:
+             audio = infer(
+                 piece,
+                 sdp_ratio=sdp_ratio,
+                 noise_scale=noise_scale,
+                 noise_scale_w=noise_scale_w,
+                 length_scale=length_scale,
+                 sid=speaker,
+                 language=language,
+                 hps=hps,
+                 net_g=net_g,
+                 device=device,
+             )
+             audio16bit = gr.processing_utils.convert_to_16_bit_wav(audio)
+             audio_list.append(audio16bit)
+             audio_list.append(silence)  # append the silence to the list
+     return audio_list
+
+ def speak(
+     text: str,
+     speaker="TalkFlower_CNzh",
+     sdp_ratio=0.2,  # SDP/DP mix ratio
+     noise_scale=0.6,  # emotion
+     noise_scale_w=0.6,  # phoneme length
+     length_scale=0.9,  # speaking rate
+     language="ZH"
+ ):
+     audio_list = []
+     audio_list.extend(
+         generate_audio(
+             text.split("|"),
+             sdp_ratio,
+             noise_scale,
+             noise_scale_w,
+             length_scale,
+             speaker,
+             language,
+         )
+     )
+
+     audio_concat = np.concatenate(audio_list)
+     return (hps.data.sampling_rate, audio_concat)
+
+
+ with open("./css/style.css", "r", encoding="utf-8") as f:
+     customCSS = f.read()
+
+ with gr.Blocks(css=customCSS) as demo:
+     # talkingFlowerModel = gr.HTML("""<div id="talking_flower_model">123</div>""")
+     talkingFlowerPic = gr.HTML("""<img src="file=assets/flower-2x.webp" alt="TalkingFlowerPic">""", elem_id="talking_flower_pic")
+     input_text = gr.Textbox(lines=1, label="Talking Flower will say:", elem_classes="wonder-card", elem_id="input_text")
+     speak_button = gr.Button("Speak!", elem_id="comfirm_button", elem_classes="button wonder-card")
+     audio_output = gr.Audio(label="Output audio", show_label=False, autoplay=True, elem_id="audio_output", elem_classes="wonder-card")
+
+     speak_button.click(
+         speak,
+         inputs=[input_text],
+         outputs=[audio_output]
+     )
+
+
+ if __name__ == "__main__":
+     hps = utils.get_hparams_from_file(config.webui_config.config_path)
+     version = hps.version if hasattr(hps, "version") else latest_version
+     net_g = get_net_g(
+         model_path=config.webui_config.model, version=version, device=device, hps=hps
+     )
+     reload_javascript()
+     demo.queue().launch(
+         allowed_paths=["./assets"],
+         show_api=False,
+         # server_name=server_name,
+         # server_port=server_port,
+         share=True,
+         inbrowser=True,  # do not enable inbrowser when running inside Docker
+     )
assets/flower-2x.webp ADDED
assets/flower-glow-2x.webp ADDED
assets/hero-header-layer-1-large-up-2x.jpg ADDED
attentions.py ADDED
@@ -0,0 +1,464 @@
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ import commons
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+
+ class LayerNorm(nn.Module):
+     def __init__(self, channels, eps=1e-5):
+         super().__init__()
+         self.channels = channels
+         self.eps = eps
+
+         self.gamma = nn.Parameter(torch.ones(channels))
+         self.beta = nn.Parameter(torch.zeros(channels))
+
+     def forward(self, x):
+         x = x.transpose(1, -1)
+         x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+         return x.transpose(1, -1)
+
+
+ @torch.jit.script
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+     n_channels_int = n_channels[0]
+     in_act = input_a + input_b
+     t_act = torch.tanh(in_act[:, :n_channels_int, :])
+     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+     acts = t_act * s_act
+     return acts
+
+
+ class Encoder(nn.Module):
+     def __init__(
+         self,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size=1,
+         p_dropout=0.0,
+         window_size=4,
+         isflow=True,
+         **kwargs
+     ):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+         # if isflow:
+         #     cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)
+         #     self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)
+         #     self.cond_layer = weight_norm(cond_layer, name='weight')
+         #     self.gin_channels = 256
+         self.cond_layer_idx = self.n_layers
+         if "gin_channels" in kwargs:
+             self.gin_channels = kwargs["gin_channels"]
+             if self.gin_channels != 0:
+                 self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
+                 # vits2 says 3rd block, so idx is 2 by default
+                 self.cond_layer_idx = (
+                     kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
+                 )
+                 logging.debug(self.gin_channels, self.cond_layer_idx)
+                 assert (
+                     self.cond_layer_idx < self.n_layers
+                 ), "cond_layer_idx should be less than n_layers"
+         self.drop = nn.Dropout(p_dropout)
+         self.attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for i in range(self.n_layers):
+             self.attn_layers.append(
+                 MultiHeadAttention(
+                     hidden_channels,
+                     hidden_channels,
+                     n_heads,
+                     p_dropout=p_dropout,
+                     window_size=window_size,
+                 )
+             )
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(
+                 FFN(
+                     hidden_channels,
+                     hidden_channels,
+                     filter_channels,
+                     kernel_size,
+                     p_dropout=p_dropout,
+                 )
+             )
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+     def forward(self, x, x_mask, g=None):
+         attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             if i == self.cond_layer_idx and g is not None:
+                 g = self.spk_emb_linear(g.transpose(1, 2))
+                 g = g.transpose(1, 2)
+                 x = x + g
+                 x = x * x_mask
+             y = self.attn_layers[i](x, x, attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+
+
+ class Decoder(nn.Module):
+     def __init__(
+         self,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size=1,
+         p_dropout=0.0,
+         proximal_bias=False,
+         proximal_init=True,
+         **kwargs
+     ):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+
+         self.drop = nn.Dropout(p_dropout)
+         self.self_attn_layers = nn.ModuleList()
+         self.norm_layers_0 = nn.ModuleList()
+         self.encdec_attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for i in range(self.n_layers):
+             self.self_attn_layers.append(
+                 MultiHeadAttention(
+                     hidden_channels,
+                     hidden_channels,
+                     n_heads,
+                     p_dropout=p_dropout,
+                     proximal_bias=proximal_bias,
+                     proximal_init=proximal_init,
+                 )
+             )
+             self.norm_layers_0.append(LayerNorm(hidden_channels))
+             self.encdec_attn_layers.append(
+                 MultiHeadAttention(
+                     hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
+                 )
+             )
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(
+                 FFN(
+                     hidden_channels,
+                     hidden_channels,
+                     filter_channels,
+                     kernel_size,
+                     p_dropout=p_dropout,
+                     causal=True,
+                 )
+             )
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+     def forward(self, x, x_mask, h, h_mask):
+         """
+         x: decoder input
+         h: encoder output
+         """
+         self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
+             device=x.device, dtype=x.dtype
+         )
+         encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             y = self.self_attn_layers[i](x, x, self_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_0[i](x + y)
+
+             y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(
+         self,
+         channels,
+         out_channels,
+         n_heads,
+         p_dropout=0.0,
+         window_size=None,
+         heads_share=True,
+         block_length=None,
+         proximal_bias=False,
+         proximal_init=False,
+     ):
+         super().__init__()
+         assert channels % n_heads == 0
+
+         self.channels = channels
+         self.out_channels = out_channels
+         self.n_heads = n_heads
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+         self.heads_share = heads_share
+         self.block_length = block_length
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+         self.attn = None
+
+         self.k_channels = channels // n_heads
+         self.conv_q = nn.Conv1d(channels, channels, 1)
+         self.conv_k = nn.Conv1d(channels, channels, 1)
+         self.conv_v = nn.Conv1d(channels, channels, 1)
+         self.conv_o = nn.Conv1d(channels, out_channels, 1)
+         self.drop = nn.Dropout(p_dropout)
+
+         if window_size is not None:
+             n_heads_rel = 1 if heads_share else n_heads
+             rel_stddev = self.k_channels**-0.5
+             self.emb_rel_k = nn.Parameter(
+                 torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                 * rel_stddev
+             )
+             self.emb_rel_v = nn.Parameter(
+                 torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                 * rel_stddev
+             )
+
+         nn.init.xavier_uniform_(self.conv_q.weight)
+         nn.init.xavier_uniform_(self.conv_k.weight)
+         nn.init.xavier_uniform_(self.conv_v.weight)
+         if proximal_init:
+             with torch.no_grad():
+                 self.conv_k.weight.copy_(self.conv_q.weight)
+                 self.conv_k.bias.copy_(self.conv_q.bias)
+
+     def forward(self, x, c, attn_mask=None):
+         q = self.conv_q(x)
+         k = self.conv_k(c)
+         v = self.conv_v(c)
+
+         x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+         x = self.conv_o(x)
+         return x
+
+     def attention(self, query, key, value, mask=None):
+         # reshape [b, d, t] -> [b, n_h, t, d_k]
+         b, d, t_s, t_t = (*key.size(), query.size(2))
+         query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+         key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+         value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+         if self.window_size is not None:
+             assert (
+                 t_s == t_t
+             ), "Relative attention is only available for self-attention."
+             key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+             rel_logits = self._matmul_with_relative_keys(
+                 query / math.sqrt(self.k_channels), key_relative_embeddings
+             )
+             scores_local = self._relative_position_to_absolute_position(rel_logits)
+             scores = scores + scores_local
+         if self.proximal_bias:
+             assert t_s == t_t, "Proximal bias is only available for self-attention."
+             scores = scores + self._attention_bias_proximal(t_s).to(
+                 device=scores.device, dtype=scores.dtype
+             )
+         if mask is not None:
+             scores = scores.masked_fill(mask == 0, -1e4)
+             if self.block_length is not None:
+                 assert (
+                     t_s == t_t
+                 ), "Local attention is only available for self-attention."
+                 block_mask = (
+                     torch.ones_like(scores)
+                     .triu(-self.block_length)
+                     .tril(self.block_length)
+                 )
+                 scores = scores.masked_fill(block_mask == 0, -1e4)
+         p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
+         p_attn = self.drop(p_attn)
+         output = torch.matmul(p_attn, value)
+         if self.window_size is not None:
+             relative_weights = self._absolute_position_to_relative_position(p_attn)
+             value_relative_embeddings = self._get_relative_embeddings(
+                 self.emb_rel_v, t_s
+             )
+             output = output + self._matmul_with_relative_values(
+                 relative_weights, value_relative_embeddings
+             )
+         output = (
+             output.transpose(2, 3).contiguous().view(b, d, t_t)
+         )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
+         return output, p_attn
+
+     def _matmul_with_relative_values(self, x, y):
+         """
+         x: [b, h, l, m]
+         y: [h or 1, m, d]
+         ret: [b, h, l, d]
+         """
+         ret = torch.matmul(x, y.unsqueeze(0))
+         return ret
+
+     def _matmul_with_relative_keys(self, x, y):
+         """
+         x: [b, h, l, d]
+         y: [h or 1, m, d]
+         ret: [b, h, l, m]
+         """
+         ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+         return ret
+
+     def _get_relative_embeddings(self, relative_embeddings, length):
+         2 * self.window_size + 1
+         # Pad first before slice to avoid using cond ops.
+         pad_length = max(length - (self.window_size + 1), 0)
+         slice_start_position = max((self.window_size + 1) - length, 0)
+         slice_end_position = slice_start_position + 2 * length - 1
+         if pad_length > 0:
+             padded_relative_embeddings = F.pad(
+                 relative_embeddings,
+                 commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
+             )
+         else:
+             padded_relative_embeddings = relative_embeddings
+         used_relative_embeddings = padded_relative_embeddings[
+             :, slice_start_position:slice_end_position
+         ]
+         return used_relative_embeddings
+
+     def _relative_position_to_absolute_position(self, x):
+         """
+         x: [b, h, l, 2*l-1]
+         ret: [b, h, l, l]
+         """
+         batch, heads, length, _ = x.size()
+         # Concat columns of pad to shift from relative to absolute indexing.
+         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+
+         # Concat extra elements so to add up to shape (len+1, 2*len-1).
+         x_flat = x.view([batch, heads, length * 2 * length])
+         x_flat = F.pad(
+             x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
+         )
+
+         # Reshape and slice out the padded elements.
+         x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
+             :, :, :length, length - 1 :
+         ]
+         return x_final
+
+     def _absolute_position_to_relative_position(self, x):
+         """
+         x: [b, h, l, l]
+         ret: [b, h, l, 2*l-1]
+         """
+         batch, heads, length, _ = x.size()
+         # pad along column
+         x = F.pad(
+             x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
+         )
+         x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+         # add 0's in the beginning that will skew the elements after reshape
+         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+         x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+         return x_final
+
+     def _attention_bias_proximal(self, length):
+         """Bias for self-attention to encourage attention to close positions.
+         Args:
+             length: an integer scalar.
+         Returns:
+             a Tensor with shape [1, 1, length, length]
+         """
+         r = torch.arange(length, dtype=torch.float32)
+         diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+         return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+ class FFN(nn.Module):
+     def __init__(
+         self,
+         in_channels,
+         out_channels,
+         filter_channels,
+         kernel_size,
+         p_dropout=0.0,
+         activation=None,
+         causal=False,
+     ):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.activation = activation
+         self.causal = causal
+
+         if causal:
+             self.padding = self._causal_padding
+         else:
+             self.padding = self._same_padding
+
+         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+         self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+         self.drop = nn.Dropout(p_dropout)
+
+     def forward(self, x, x_mask):
+         x = self.conv_1(self.padding(x * x_mask))
+         if self.activation == "gelu":
+             x = x * torch.sigmoid(1.702 * x)
+         else:
+             x = torch.relu(x)
+         x = self.drop(x)
+         x = self.conv_2(self.padding(x * x_mask))
+         return x * x_mask
+
+     def _causal_padding(self, x):
+         if self.kernel_size == 1:
+             return x
+         pad_l = self.kernel_size - 1
+         pad_r = 0
+         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+         x = F.pad(x, commons.convert_pad_shape(padding))
+         return x
+
+     def _same_padding(self, x):
+         if self.kernel_size == 1:
+             return x
+         pad_l = (self.kernel_size - 1) // 2
+         pad_r = self.kernel_size // 2
+         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+         x = F.pad(x, commons.convert_pad_shape(padding))
+         return x
bert/bert-base-japanese-v3/.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/bert-base-japanese-v3/README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: apache-2.0
+ datasets:
+ - cc100
+ - wikipedia
+ language:
+ - ja
+ widget:
+ - text: 東北大学で[MASK]の研究をしています。
+ ---
+
+ # BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)
+
+ This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.
+
+ This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by the WordPiece subword tokenization.
+ Additionally, the model is trained with the whole word masking enabled for the masked language modeling (MLM) objective.
+
+ The codes for the pretraining are available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).
+
+ ## Model architecture
+
+ The model architecture is the same as the original BERT base model; 12 layers, 768 dimensions of hidden states, and 12 attention heads.
+
+ ## Training Data
+
+ The model is trained on the Japanese portion of [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
+ For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
+ The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.
+
+ For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).
+
+ ## Tokenization
+
+ The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
+ The vocabulary size is 32768.
+
+ We used [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.
+
+ ## Training
+
+ We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
+ For training of the MLM (masked language modeling) objective, we introduced whole word masking in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once.
+
+ For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).
+
+ ## Licenses
+
+ The pretrained models are distributed under the Apache License 2.0.
+
+ ## Acknowledgments
+
+ This model is trained with Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/) program.
bert/bert-base-japanese-v3/config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 32768
+ }
bert/bert-base-japanese-v3/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e172862e0674054d65e0ba40d67df2a4687982f589db44aa27091c386e5450a4
+ size 447406217
bert/bert-base-japanese-v3/tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "tokenizer_class": "BertJapaneseTokenizer",
+   "model_max_length": 512,
+   "do_lower_case": false,
+   "word_tokenizer_type": "mecab",
+   "subword_tokenizer_type": "wordpiece",
+   "mecab_kwargs": {
+     "mecab_dic": "unidic_lite"
+   }
+ }
bert/bert-base-japanese-v3/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert/bert-large-japanese-v2/.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/bert-large-japanese-v2/README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: apache-2.0
+ datasets:
+ - cc100
+ - wikipedia
+ language:
+ - ja
+ widget:
+ - text: 東北大学で[MASK]の研究をしています。
+ ---
+
+ # BERT large Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)
+
+ This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.
+
+ This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by the WordPiece subword tokenization.
+ Additionally, the model is trained with the whole word masking enabled for the masked language modeling (MLM) objective.
+
+ The codes for the pretraining are available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).
+
+ ## Model architecture
+
+ The model architecture is the same as the original BERT large model; 24 layers, 1024 dimensions of hidden states, and 16 attention heads.
+
+ ## Training Data
+
+ The model is trained on the Japanese portion of [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
+ For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
+ The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.
+
+ For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).
+
+ ## Tokenization
+
+ The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
+ The vocabulary size is 32768.
+
+ We used [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.
+
+ ## Training
+
+ We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
+ For training of the MLM (masked language modeling) objective, we introduced whole word masking in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once.
+
+ For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).
+
+ ## Licenses
+
+ The pretrained models are distributed under the Apache License 2.0.
+
+ ## Acknowledgments
+
+ This model is trained with Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/) program.
bert/bert-large-japanese-v2/config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 32768
+ }
bert/bert-large-japanese-v2/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50212d714f79af45d3e47205faa356d0e5030e1c9a37138eadda544180f9e7c9
+ size 1354248201
bert/bert-large-japanese-v2/tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "tokenizer_class": "BertJapaneseTokenizer",
+   "model_max_length": 512,
+   "do_lower_case": false,
+   "word_tokenizer_type": "mecab",
+   "subword_tokenizer_type": "wordpiece",
+   "mecab_kwargs": {
+     "mecab_dic": "unidic_lite"
+   }
+ }
bert/bert-large-japanese-v2/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert/bert_models.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "deberta-v2-large-japanese": {
+     "repo_id": "ku-nlp/deberta-v2-large-japanese",
+     "files": ["pytorch_model.bin"]
+   },
+   "chinese-roberta-wwm-ext-large": {
+     "repo_id": "hfl/chinese-roberta-wwm-ext-large",
+     "files": ["pytorch_model.bin"]
+   },
+   "deberta-v3-large": {
+     "repo_id": "microsoft/deberta-v3-large",
+     "files": ["spm.model", "pytorch_model.bin"]
+   }
+ }
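bert_models.json maps each local BERT folder to the Hub repository its weights come from. A hedged sketch of fetching those files with huggingface_hub (the library named in the commit message); the local folder layout is this repository's convention, not something the manifest itself enforces:

```python
# Sketch: download the weight files listed in bert_models.json via huggingface_hub.
import json
from huggingface_hub import hf_hub_download

with open("bert/bert_models.json", encoding="utf-8") as f:
    manifest = json.load(f)

for folder, spec in manifest.items():
    for filename in spec["files"]:
        cached_path = hf_hub_download(repo_id=spec["repo_id"], filename=filename)
        print(f"{folder}: {filename} -> {cached_path}")
```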
bert/chinese-roberta-wwm-ext-large/.gitattributes ADDED
@@ -0,0 +1,9 @@
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
bert/chinese-roberta-wwm-ext-large/README.md ADDED
@@ -0,0 +1,57 @@
+ ---
+ language:
+ - zh
+ tags:
+ - bert
+ license: "apache-2.0"
+ ---
+
+ # Please use 'Bert' related functions to load this model!
+
+ ## Chinese BERT with Whole Word Masking
+ For further accelerating Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.
+
+ **[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
+ Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu
+
+ This repository is developed based on: https://github.com/google-research/bert
+
+ You may also be interested in:
+ - Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
+ - Chinese MacBERT: https://github.com/ymcui/MacBERT
+ - Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
+ - Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
+ - Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
+
+ More resources by HFL: https://github.com/ymcui/HFL-Anthology
+
+ ## Citation
+ If you find the technical report or resources useful, please cite the following technical report in your paper.
+ - Primary: https://arxiv.org/abs/2004.13922
+ ```
+ @inproceedings{cui-etal-2020-revisiting,
+   title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
+   author = "Cui, Yiming and
+     Che, Wanxiang and
+     Liu, Ting and
+     Qin, Bing and
+     Wang, Shijin and
+     Hu, Guoping",
+   booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
+   month = nov,
+   year = "2020",
+   address = "Online",
+   publisher = "Association for Computational Linguistics",
+   url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
+   pages = "657--668",
+ }
+ ```
+ - Secondary: https://arxiv.org/abs/1906.08101
+ ```
+ @article{chinese-bert-wwm,
+   title={Pre-Training with Whole Word Masking for Chinese BERT},
+   author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
+   journal={arXiv preprint arXiv:1906.08101},
+   year={2019}
+ }
+ ```
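The note at the top of this model card ("use 'Bert' related functions") boils down to loading the checkpoint with the BERT classes rather than the RoBERTa ones. A minimal sketch with the standard transformers API; pointing it at the local bert/chinese-roberta-wwm-ext-large folder instead of the Hub id should work the same way:

```python
# Sketch: load chinese-roberta-wwm-ext-large with BERT classes, per the model card's note.
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
model = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
```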
bert/chinese-roberta-wwm-ext-large/added_tokens.json ADDED
@@ -0,0 +1 @@
+ {}
bert/chinese-roberta-wwm-ext-large/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "directionality": "bidi",
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "type_vocab_size": 2,
+   "vocab_size": 21128
+ }
bert/chinese-roberta-wwm-ext-large/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ac62d49144d770c5ca9a5d1d3039c4995665a080febe63198189857c6bd11cd
+ size 1306484351
bert/chinese-roberta-wwm-ext-large/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
bert/chinese-roberta-wwm-ext-large/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
bert/chinese-roberta-wwm-ext-large/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"init_inputs": []}
bert/chinese-roberta-wwm-ext-large/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert/deberta-v2-large-japanese/.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/deberta-v2-large-japanese/README.md ADDED
@@ -0,0 +1,111 @@
+ ---
+ language: ja
+ license: cc-by-sa-4.0
+ library_name: transformers
+ tags:
+ - deberta
+ - deberta-v2
+ - fill-mask
+ datasets:
+ - wikipedia
+ - cc100
+ - oscar
+ metrics:
+ - accuracy
+ mask_token: "[MASK]"
+ widget:
+ - text: "京都 大学 で 自然 言語 処理 を [MASK] する 。"
+ ---
+
+ # Model Card for Japanese DeBERTa V2 large
+
+ ## Model description
+
+ This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the
+ Japanese portion of OSCAR.
+
+ ## How to use
+
+ You can use this model for masked language modeling as follows:
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
+
+ tokenizer = AutoTokenizer.from_pretrained('ku-nlp/deberta-v2-large-japanese')
+ model = AutoModelForMaskedLM.from_pretrained('ku-nlp/deberta-v2-large-japanese')
+
+ sentence = '京都 大学 で 自然 言語 処理 を [MASK] する 。'  # input should be segmented into words by Juman++ in advance
+ encoding = tokenizer(sentence, return_tensors='pt')
+ ...
+ ```
+
+ You can also fine-tune this model on downstream tasks.
+
+ ## Tokenization
+
+ The input text should be segmented into words by [Juman++](https://github.com/ku-nlp/jumanpp) in
+ advance. [Juman++ 2.0.0-rc3](https://github.com/ku-nlp/jumanpp/releases/tag/v2.0.0-rc3) was used for pre-training. Each
+ word is tokenized into subwords by [sentencepiece](https://github.com/google/sentencepiece).
+
+ ## Training data
+
+ We used the following corpora for pre-training:
+
+ - Japanese Wikipedia (as of 20221020, 3.2GB, 27M sentences, 1.3M documents)
+ - Japanese portion of CC-100 (85GB, 619M sentences, 66M documents)
+ - Japanese portion of OSCAR (54GB, 326M sentences, 25M documents)
+
+ Note that we filtered out documents annotated with "header", "footer", or "noisy" tags in OSCAR.
+ Also note that Japanese Wikipedia was duplicated 10 times to make the total size of the corpus comparable to that of
+ CC-100 and OSCAR. As a result, the total size of the training data is 171GB.
+
+ ## Training procedure
+
+ We first segmented texts in the corpora into words using [Juman++](https://github.com/ku-nlp/jumanpp).
+ Then, we built a sentencepiece model with 32000 tokens including words ([JumanDIC](https://github.com/ku-nlp/JumanDIC))
+ and subwords induced by the unigram language model of [sentencepiece](https://github.com/google/sentencepiece).
+
+ We tokenized the segmented corpora into subwords using the sentencepiece model and trained the Japanese DeBERTa model
+ using [transformers](https://github.com/huggingface/transformers) library.
+ The training took 36 days using 8 NVIDIA A100-SXM4-40GB GPUs.
+
+ The following hyperparameters were used during pre-training:
+
+ - learning_rate: 1e-4
+ - per_device_train_batch_size: 18
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 2,304
+ - max_seq_length: 512
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
+ - lr_scheduler_type: linear schedule with warmup
+ - training_steps: 300,000
+ - warmup_steps: 10,000
+
+ The accuracy of the trained model on the masked language modeling task was 0.799.
+ The evaluation set consists of 5,000 randomly sampled documents from each of the training corpora.
+
+ ## Fine-tuning on NLU tasks
+
+ We fine-tuned the following models and evaluated them on the dev set of JGLUE.
+ We tuned learning rate and training epochs for each model and task
+ following [the JGLUE paper](https://www.jstage.jst.go.jp/article/jnlp/30/1/30_63/_pdf/-char/ja).
+
+ | Model | MARC-ja/acc | JSTS/pearson | JSTS/spearman | JNLI/acc | JSQuAD/EM | JSQuAD/F1 | JComQA/acc |
+ |-------------------------------|-------------|--------------|---------------|----------|-----------|-----------|------------|
+ | Waseda RoBERTa base | 0.965 | 0.913 | 0.876 | 0.905 | 0.853 | 0.916 | 0.853 |
+ | Waseda RoBERTa large (seq512) | 0.969 | 0.925 | 0.890 | 0.928 | 0.910 | 0.955 | 0.900 |
+ | LUKE Japanese base* | 0.965 | 0.916 | 0.877 | 0.912 | - | - | 0.842 |
+ | LUKE Japanese large* | 0.965 | 0.932 | 0.902 | 0.927 | - | - | 0.893 |
+ | DeBERTaV2 base | 0.970 | 0.922 | 0.886 | 0.922 | 0.899 | 0.951 | 0.873 |
+ | DeBERTaV2 large | 0.968 | 0.925 | 0.892 | 0.924 | 0.912 | 0.959 | 0.890 |
+
+ *The scores of LUKE are from [the official repository](https://github.com/studio-ousia/luke).
+
+ ## Acknowledgments
+
+ This work was supported by Joint Usage/Research Center for Interdisciplinary Large-scale Information Infrastructures (
+ JHPCN) through General Collaboration Project no. jh221004, "Developing a Platform for Constructing and Sharing of
+ Large-Scale Japanese Language Models".
+ For training models, we used the mdx: a platform for the data-driven future.
bert/deberta-v2-large-japanese/config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "_name_or_path": "configs/deberta_v2_large.json",
+   "architectures": [
+     "DebertaV2ForMaskedLM"
+   ],
+   "attention_head_size": 64,
+   "attention_probs_dropout_prob": 0.1,
+   "conv_act": "gelu",
+   "conv_kernel_size": 3,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 1024,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.23.1",
+   "type_vocab_size": 0,
+   "vocab_size": 32000
+ }
bert/deberta-v2-large-japanese/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6c15feac0dea77ab8835c70e1befa4cf4c2137862c6fb2443b1553f70840047
+ size 1490693213
bert/deberta-v2-large-japanese/special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
bert/deberta-v2-large-japanese/spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c111c16e2e52366dcac46b886e40650bb843fe2938a65f5970271fc5697a127
+ size 805061
bert/deberta-v2-large-japanese/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
bert/deberta-v2-large-japanese/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "keep_accents": true,
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": null,
+   "split_by_punct": false,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "unk_token": "[UNK]"
+ }
bert/deberta-v3-large/.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/deberta-v3-large/README.md ADDED
@@ -0,0 +1,93 @@
+ ---
+ language: en
+ tags:
+ - deberta
+ - deberta-v3
+ - fill-mask
+ thumbnail: https://huggingface.co/front/thumbnails/microsoft.png
+ license: mit
+ ---
+
+ ## DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing
+
+ [DeBERTa](https://arxiv.org/abs/2006.03654) improves the BERT and RoBERTa models using disentangled attention and enhanced mask decoder. With those two improvements, DeBERTa outperforms RoBERTa on a majority of NLU tasks with 80GB training data.
+
+ In [DeBERTa V3](https://arxiv.org/abs/2111.09543), we further improved the efficiency of DeBERTa using ELECTRA-Style pre-training with Gradient Disentangled Embedding Sharing. Compared to DeBERTa, our V3 version significantly improves the model performance on downstream tasks. You can find more technique details about the new model from our [paper](https://arxiv.org/abs/2111.09543).
+
+ Please check the [official repository](https://github.com/microsoft/DeBERTa) for more implementation details and updates.
+
+ The DeBERTa V3 large model comes with 24 layers and a hidden size of 1024. It has 304M backbone parameters with a vocabulary containing 128K tokens which introduces 131M parameters in the Embedding layer. This model was trained using the 160GB data as DeBERTa V2.
+
+
+ #### Fine-tuning on NLU tasks
+
+ We present the dev results on SQuAD 2.0 and MNLI tasks.
+
+ | Model |Vocabulary(K)|Backbone #Params(M)| SQuAD 2.0(F1/EM) | MNLI-m/mm(ACC)|
+ |-------------------|----------|-------------------|-----------|----------|
+ | RoBERTa-large |50 |304 | 89.4/86.5 | 90.2 |
+ | XLNet-large |32 |- | 90.6/87.9 | 90.8 |
+ | DeBERTa-large |50 |- | 90.7/88.0 | 91.3 |
+ | **DeBERTa-v3-large**|128|304 | **91.5/89.0**| **91.8/91.9**|
+
+
+ #### Fine-tuning with HF transformers
+
+ ```bash
+ #!/bin/bash
+
+ cd transformers/examples/pytorch/text-classification/
+
+ pip install datasets
+ export TASK_NAME=mnli
+
+ output_dir="ds_results"
+
+ num_gpus=8
+
+ batch_size=8
+
+ python -m torch.distributed.launch --nproc_per_node=${num_gpus} \
+   run_glue.py \
+   --model_name_or_path microsoft/deberta-v3-large \
+   --task_name $TASK_NAME \
+   --do_train \
+   --do_eval \
+   --evaluation_strategy steps \
+   --max_seq_length 256 \
+   --warmup_steps 50 \
+   --per_device_train_batch_size ${batch_size} \
+   --learning_rate 6e-6 \
+   --num_train_epochs 2 \
+   --output_dir $output_dir \
+   --overwrite_output_dir \
+   --logging_steps 1000 \
+   --logging_dir $output_dir
+
+ ```
+
+ ### Citation
+
+ If you find DeBERTa useful for your work, please cite the following papers:
+
+ ``` latex
+ @misc{he2021debertav3,
+   title={DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing},
+   author={Pengcheng He and Jianfeng Gao and Weizhu Chen},
+   year={2021},
+   eprint={2111.09543},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL}
+ }
+ ```
+
+ ``` latex
+ @inproceedings{
+ he2021deberta,
+ title={DEBERTA: DECODING-ENHANCED BERT WITH DISENTANGLED ATTENTION},
+ author={Pengcheng He and Xiaodong Liu and Jianfeng Gao and Weizhu Chen},
+ booktitle={International Conference on Learning Representations},
+ year={2021},
+ url={https://openreview.net/forum?id=XPZIaotutsD}
+ }
+ ```
bert/deberta-v3-large/config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "model_type": "deberta-v2",
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "max_position_embeddings": 512,
+   "relative_attention": true,
+   "position_buckets": 256,
+   "norm_rel_ebd": "layer_norm",
+   "share_att_key": true,
+   "pos_att_type": "p2c|c2p",
+   "layer_norm_eps": 1e-7,
+   "max_relative_positions": -1,
+   "position_biased_input": false,
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "type_vocab_size": 0,
+   "vocab_size": 128100
+ }