import torch

# Build the discriminator and report its parameter count.
def init_electra(init_checkpoint, args=None):
    """Load an ELECTRA discriminator from a local checkpoint directory.

    Parameters
    ----------
    init_checkpoint : str
        Directory containing ``config.json`` and ``pytorch_model.bin``.
    args : optional
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    ElectraForPreTraining
        Model with ``output_hidden_states`` enabled and the checkpoint
        weights loaded non-strictly.
    """
    import os
    from transformers import ElectraConfig, ElectraForPreTraining

    # os.path.join works whether or not init_checkpoint ends with a slash;
    # the original string concatenation silently broke without one.
    model_config = ElectraConfig.from_json_file(
        os.path.join(init_checkpoint, 'config.json'))
    model_config.output_hidden_states = True

    electra_model = ElectraForPreTraining(model_config)
    state_dict_t = torch.load(
        os.path.join(init_checkpoint, 'pytorch_model.bin'),
        map_location='cpu')
    # strict=False tolerates head/naming mismatches; report them instead of
    # discarding the returned key lists silently.
    missing_keys, unexpected_keys = electra_model.load_state_dict(
        state_dict_t, strict=False)
    if missing_keys:
        print('missing keys:', missing_keys)
    if unexpected_keys:
        print('unexpected keys:', unexpected_keys)
    print('# discriminator parameters:',
          sum(param.numel() for param in electra_model.parameters()))
    return electra_model

# Path to the pretrained Chinese ELECTRA-base discriminator checkpoint.
# Renamed from `dir`, which shadowed the `dir()` builtin.
model_dir = './electra_res/chinese_electra_base_discriminator_pytorch/'
model = init_electra(model_dir)

from transformers import ElectraTokenizer, ElectraForPreTraining

tokenizer = ElectraTokenizer.from_pretrained(model_dir)
# Demo sentence arrives space-separated; strip the spaces before encoding
# so the tokenizer sees the raw Chinese characters.
input_ids = torch.tensor(
    tokenizer.encode("还 是 物 块 钱 就 是 旁 边 儿 比 如 说".replace(' ', ''),
                     add_special_tokens=True)).unsqueeze(0)  # Batch size 1
# ElectraForPreTraining outputs per-token replaced-token-detection logits.
logits = model(input_ids)[0]
print(logits)