import argparse
import torch
from model_bert_rnn import BERTGRUClassificate
from transformers import BertTokenizer, BertModel

# Classify a single sentence and return its positive-class probability.
def single_sms_classificate(model, tokenizer, sentence: str):
    """Classify a single sentence with a binary BERT-based classifier.

    Args:
        model: classifier whose forward pass maps a (1, seq_len) tensor of
            token ids to a single raw logit.
        tokenizer: BERT-style tokenizer exposing ``tokenize()``,
            ``convert_tokens_to_ids()``, ``cls_token_id``, ``sep_token_id``
            and ``model_max_length``.
        sentence: raw input text.

    Returns:
        float: sigmoid of the model's logit — the positive-class
        probability in [0, 1].
    """
    # Run on whatever device the model lives on.  (BUG FIX: the original
    # referenced module-level `device`, `max_input_length`, `init_token_idx`
    # and `eos_token_idx`, none of which were defined anywhere — every call
    # raised NameError.)
    device = next(model.parameters()).device
    tokens = tokenizer.tokenize(sentence)
    # Reserve two positions for the [CLS] and [SEP] special tokens.
    tokens = tokens[: tokenizer.model_max_length - 2]
    indexed = ([tokenizer.cls_token_id]
               + tokenizer.convert_tokens_to_ids(tokens)
               + [tokenizer.sep_token_id])
    tensor = torch.LongTensor(indexed).unsqueeze(0).to(device)  # (1, seq_len)
    # Inference only — skip building the autograd graph.
    with torch.no_grad():
        prediction = torch.sigmoid(model(tensor))
    return prediction.item()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--i", help="input txt path")
    parser.add_argument("--o", help="output txt path")
    parser.add_argument("--model", help="saved model path")
    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    # Model arguments — must match the values used at training time, or
    # load_state_dict() below will fail on shape mismatches.
    HIDDEN_DIM = 256
    OUTPUT_DIM = 1
    N_LAYERS = 2
    BIDIRECTIONAL = True
    DROPOUT = 0.25

    # Build the classifier on top of a pretrained BERT encoder.
    bert = BertModel.from_pretrained('bert-base-uncased')
    model = BERTGRUClassificate(bert,
                                HIDDEN_DIM,
                                OUTPUT_DIM,
                                N_LAYERS,
                                BIDIRECTIONAL,
                                DROPOUT)
    model = model.to(device)
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine.
    model.load_state_dict(torch.load(args.model, map_location=device))
    model.eval()

    # Classify each input line and write one 0/1 label per line.
    labels = []
    with open(args.i, 'r', encoding='utf-8') as rf:
        for line in rf:
            # BUG FIX: the original called `sms_classificate`, an undefined
            # name — the function is `single_sms_classificate`.
            score = single_sms_classificate(model, tokenizer, line)
            labels.append('1' if score > 0.5 else '0')
    with open(args.o, 'w', encoding='utf-8') as wf:
        # join() reproduces the original output exactly (newline-separated,
        # no trailing newline) without quadratic string concatenation.
        wf.write('\n'.join(labels))


