
from transformers import GPT2LMHeadModel, GPT2Config
from lstm_crf import LSTM_CRF

# Punctuation label inventory for the tagger; index = predicted tag id.
# ' b' presumably marks blank/no-punctuation and ' _' a start/end label — TODO confirm against training data.
punct_list  = [' b', ' ,', '。', ' ?', ' !', ' _'] # add start and end label
# Chinese sentence punctuation ("biaodian") symbol prediction
class BIAODIAN(torch.nn.Module):
    """Chinese punctuation ("biaodian") predictor.

    A (typically frozen) GPT-2 language model produces per-character hidden
    states, and an LSTM-CRF head tags each character with one of 7 labels
    (the 6 symbols in ``punct_list`` plus one extra — presumably padding;
    TODO confirm against LSTM_CRF's label mapping).

    Args:
        lm_model: a GPT2LMHeadModel instance (must expose hidden states in
            its outputs — i.e. configured with output_hidden_states; TODO
            confirm against the config file used by the caller).
        frozen_lm: if True, freeze all LM parameters so only the LSTM-CRF
            head is trainable.
        batch_first: accepted for interface compatibility; not used by this
            class itself — NOTE(review): verify whether LSTM_CRF should
            receive it.
    """

    def __init__(self, lm_model, frozen_lm=True, batch_first=True):
        super(BIAODIAN, self).__init__()
        self.lm_pretrained = lm_model
        # rnn_input_dim=320 must equal the LM's embedding/hidden size
        # (the model path mentions embed320) — TODO confirm.
        self.blstm_crf     = LSTM_CRF(rnn_input_dim=320, rnn_type='lstm', rnn_units=100, num_rnn_layers=1, bi_flag=False,
                             dropout_rate=0.25, average_batch=True, use_crf=True, target_size=7, use_cuda=True)
        if frozen_lm:
            # Freeze the LM; gradients flow only through the LSTM-CRF head.
            for param in self.lm_pretrained.parameters():
                param.requires_grad = False

        # All submodules were already initialized at construction time
        # (LSTM init is uniform with stdv = 1/sqrt(out_units)).

    def forward(self, x, states=None, past_kv=None):
        """One forward step.

        Args:
            x: input token ids for the LM (shape per GPT-2 convention,
               e.g. (batch, seq)).
            states: recurrent state of the LSTM-CRF head, or None to reset.
            past_kv: GPT-2 cached key/values from the previous step, or None.

        Returns:
            (char_logp, logp, states, past_kv):
            char_logp — log-softmax over the vocabulary, (batch, seq, vocab);
            logp — log-softmax over punctuation tags, (batch, seq, target_size);
            states — updated LSTM state to feed back in;
            past_kv — GPT-2 present key/values (outputs[1]) to feed back in.
        """
        # BUG FIX: the original gated `past=past_kv` on `states is not None`,
        # but `states` belongs to the LSTM head while `past_kv` is the GPT-2
        # cache — it tested the wrong variable. Since passing past=None is
        # identical to omitting it, one unconditional call is both correct
        # and simpler.
        outputs = self.lm_pretrained.forward(input_ids=x, past=past_kv)

        char_logp = outputs[0].log_softmax(dim=-1)
        # Last transformer layer's hidden states; requires the LM to return
        # hidden states at outputs[2] — TODO confirm model configuration.
        top_hidden = outputs[2][-1]
        # Decode hidden states of all time steps with the LSTM-CRF head.
        y_out, states = self.blstm_crf.forward(top_hidden, states, sentlen=None)
        logp = y_out.log_softmax(dim=-1)

        return char_logp, logp, states, outputs[1]

    def predict(self, emission, mask=None):
        """Viterbi-decode CRF emissions; returns the best tag sequence(s)."""
        return self.blstm_crf.loss_function.decode(emission, mask)

# Main func
def main(args):
    """Load the pretrained GPT-2 + LSTM-CRF model and run step-by-step
    punctuation prediction over an input tensor of character ids.

    NOTE(review): `args` is currently unused and `x` (the input id tensor)
    is never defined in this function — it must come from args or an
    upstream data loader. TODO: wire the input source in.
    """
    # Load config and model checkpoint
    config_file = '/data/xuyongdang/model_exp/transfomer_trained_model/max81_embed320_head16_layer8/punct/gpt2-config.json'
    model_file  = '/data/xuyongdang/model_exp/transfomer_trained_model/max81_embed320_head16_layer8/punct/gpt2.model.1'
    config = GPT2Config.from_json_file(config_file)
    lm_model = GPT2LMHeadModel(config)
    model    = BIAODIAN(lm_model, frozen_lm=True, batch_first=True)
    # torch_load_epoch is defined elsewhere in the project — presumably it
    # loads the checkpoint weights into `model`; verify.
    torch_load_epoch(model_file, model)

    batch_size, sent_len = x.shape
    rnn_states = None  # set to None to reset the LSTM state
    past_kv    = None  # GPT-2 key/value cache; None on the first step
    y_pred     = []
    for ii in range(sent_len):
        # BUG FIX: model.forward returns FOUR values
        # (char_logp, logp, states, past_kv); the original unpacked only
        # three, which raises ValueError on the first iteration. Also thread
        # past_kv back in so the GPT-2 cache is actually used — the original
        # never passed it, recomputing attention from scratch each step.
        y_char_logp, logp, rnn_states, past_kv = model.forward(
            x[:, ii].view(batch_size, 1), states=rnn_states, past_kv=past_kv)

        # You can accumulate logp over several characters before decoding.
        y_pred = model.predict(logp)

    # idx < 0 presumably marks padding/invalid tags — TODO confirm
    # against LSTM_CRF.decode's output convention.
    logging.info(''.join([punct_list[idx] for idx in y_pred if idx >= 0]))
