"""Multiple choice model."""

import torch

import megatron.model.language_model
import megatron.model.utils
from megatron import get_args, print_rank_last
from megatron.model.bert_model import bert_extended_attention_mask, bert_position_ids
from megatron.model.enums import AttnMaskType
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal

from .module import MegatronModule


class MultipleChoice(MegatronModule):

    def __init__(self,
                 num_tokentypes=2,
                 pre_process=True,
                 post_process=True,
                 model_type=None):
        super(MultipleChoice, self).__init__(share_word_embeddings=False)

        args = get_args()
        assert model_type is not None, 'MultipleChoice requires an explicit model_type'

        init_method = init_method_normal(args.init_method_std)
        self.pre_process = pre_process
        self.post_process = post_process

        self.language_model, self._language_model_key = \
            megatron.model.language_model.get_language_model(
                num_tokentypes=num_tokentypes,
                add_pooler=True,
                encoder_attn_mask_type=AttnMaskType.padding,
                init_method=init_method,
                scaled_init_method=scaled_init_method_normal(
                    args.init_method_std, args.num_layers),
                pre_process=self.pre_process,
                post_process=self.post_process,
                args=args,
                model_type=model_type)

        # Multi-choice head: maps the pooled encoder output to a single score
        # per choice. Only the post-process (last pipeline) stage owns it.
        if self.post_process:
            self.multichoice_dropout = torch.nn.Dropout(args.hidden_dropout)
            self.multichoice_head = megatron.model.utils.get_linear_layer(
                args.hidden_size, 1, init_method, args.perform_initialization)
            self._multichoice_head_key = 'multichoice_head'

    def set_input_tensor(self, input_tensor):
        """See megatron.model.transformer.set_input_tensor()"""
        self.language_model.set_input_tensor(input_tensor)

    def forward(self, model_input, attention_mask, tokentype_ids=None):

        # [batch, choices, sequence] --> [batch * choices, sequence] -->
        #    transformer --> [batch, choices].

        # Ensure the shape is [batch-size, choices, sequence].
        assert len(attention_mask.shape) == 3
        num_choices = attention_mask.shape[1]

        # Reshape and treat the choice dimension the same as batch.
        attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        extended_attention_mask = bert_extended_attention_mask(attention_mask)

        input_ids = model_input
        # Do the same for input_ids and tokentype_ids.
        assert len(input_ids.shape) == 3
        assert len(tokentype_ids.shape) == 3
        input_ids = input_ids.view(-1, input_ids.size(-1))
        tokentype_ids = tokentype_ids.view(-1, tokentype_ids.size(-1))
        position_ids = bert_position_ids(input_ids)

        lm_output = self.language_model(
            input_ids,
            position_ids,
            extended_attention_mask,
            tokentype_ids=tokentype_ids
        )
        if self.post_process:
            _, pooled_output = lm_output
            multichoice_output = self.multichoice_dropout(pooled_output)
            multichoice_logits = self.multichoice_head(multichoice_output)

            # Reshape back to separate choices.
            multichoice_logits = multichoice_logits.view(-1, num_choices)

            return multichoice_logits
        # On non-final pipeline stages, return the raw language-model output.
        return lm_output
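
    # A minimal sketch (not part of the original code) of how the returned
    # logits are typically consumed during fine-tuning; `labels` is an
    # illustrative [batch]-shaped tensor holding the correct choice index:
    #
    #     logits = model(input_ids, attention_mask, tokentype_ids=tokentype_ids)
    #     loss = torch.nn.functional.cross_entropy(logits, labels)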

    def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
        """For easy load when model is combined with other heads,
        add an extra key."""

        state_dict_ = {}
        state_dict_[self._language_model_key] \
            = self.language_model.state_dict_for_save_checkpoint(
                prefix=prefix, keep_vars=keep_vars)
        if self.post_process:
            state_dict_[self._multichoice_head_key] \
                = self.multichoice_head.state_dict(prefix=prefix,
                                                   keep_vars=keep_vars)
        return state_dict_

    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""

        self.language_model.load_state_dict(
            state_dict[self._language_model_key], strict=strict)
        if self.post_process:
            if self._multichoice_head_key in state_dict:
                self.multichoice_head.load_state_dict(
                    state_dict[self._multichoice_head_key], strict=strict)
            else:
                print_rank_last('***WARNING*** could not find {} in the checkpoint, '
                                'initializing to random'.format(
                                    self._multichoice_head_key))
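

# A minimal construction sketch, assuming Megatron's global arguments have
# already been initialized (e.g. via megatron.initialize.initialize_megatron)
# and that the caller supplies a `model_type` value appropriate for its
# Megatron version; tensor names below are illustrative:
#
#     model = MultipleChoice(num_tokentypes=2,
#                            pre_process=True,
#                            post_process=True,
#                            model_type=model_type)
#     # input_ids, attention_mask, tokentype_ids: [batch, num_choices, seq_len]
#     logits = model(input_ids, attention_mask, tokentype_ids=tokentype_ids)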