if '__main__' == __name__:
    # Quick inference demo: run a fine-tuned extractive question-answering
    # checkpoint (DistilBERT fine-tuned on SQuAD, per the path below) against a
    # single hard-coded question/context pair and print the predicted answer.

    from PyCmpltrtok.common import sep

    # Local fine-tuned checkpoint directory — adjust to your own training output.
    model_path = "/home/peiyp2004/jupyter_notebook.large.d/distilbert-base-uncased_QA_on_squad_2024_01_01_19_15_44_613229_temp0/checkpoint-2188"

    question = "How many programming languages does BLOOM support?"
    context = "BLOOM has 176 billion parameters and can generate text in 46 languages natural languages and 13 programming languages."

    sep('Tokenizer')
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    print(tokenizer)

    sep('inputs')
    # Encode the (question, context) pair as one sequence; the QA model will
    # predict an answer span inside this combined sequence.
    inputs = tokenizer(question, context, return_tensors="pt")
    print(inputs)
    print(tokenizer.decode(inputs['input_ids'][0]))

    sep('model')
    import torch
    from transformers import AutoModelForQuestionAnswering
    # Fall back to CPU when CUDA is unavailable so the demo still runs on
    # CPU-only hosts (the original hard-coded 'cuda:0' and crashed there).
    dev = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = AutoModelForQuestionAnswering.from_pretrained(model_path)
    model = model.to(dev)
    print(model)

    sep('infer')
    # Move every input tensor onto the same device as the model.
    inputs = {k: v.to(dev) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)

    # Most likely start/end token positions of the answer span; the end index
    # is inclusive, hence the +1 in the slice below.
    answer_start_index = outputs.start_logits.argmax(axis=1)
    answer_end_index = outputs.end_logits.argmax(axis=1)
    predict_answer_tokens = inputs['input_ids'][0, answer_start_index[0] : answer_end_index[0] + 1]
    answer = tokenizer.decode(predict_answer_tokens)
    print('answer:', answer)