import paddle
from Model import PromptModel
from BertPrompt import tokenizer
from predict import collate_fn
from paddle.io import Dataset, DataLoader

from tqdm import tqdm

# Pin all subsequent paddle tensors/ops to the second GPU (index 1).
paddle.device.set_device('gpu:1')


class PredictDataSet(Dataset):
    """Dataset that wraps each test sentence in an MLM prompt template.

    Reads one sentence per line from ``data_path`` and prepends the prompt
    prefix (containing two ``[MASK]`` slots for the label tokens) to every
    sentence. Defaults reproduce the original hard-coded behavior.
    """

    def __init__(self, data_path: str = "paddle_data/test.txt",
                 prompt_prefix: str = "这句话与[MASK][MASK]有关："):
        super(PredictDataSet, self).__init__()
        with open(data_path, encoding='utf-8') as f:
            data_list = f.read().splitlines()
        # Prompted text; the model later predicts the two [MASK] tokens.
        # (The original assigned self.result = [] first, which was dead code.)
        self.result = [prompt_prefix + item for item in data_list]

    def __getitem__(self, item):
        return self.result[item]

    def __len__(self):
        return len(self.result)


@paddle.no_grad()
def predict(model: PromptModel, test_data_loader: DataLoader,
            output_path: str = "new_result.txt",
            mask_positions=(5, 6)):
    """Run prompt-based MLM inference and write predicted labels to a file.

    For every batch the model produces per-token vocabulary logits; the
    tokens at ``mask_positions`` (the two ``[MASK]`` slots of the prompt)
    are decoded via argmax and joined into one label string per sample.

    Args:
        model: Trained prompt model; switched to eval mode here.
        test_data_loader: Loader yielding dicts with keys 'input_ids',
            'attention' and 'token_type_ids'.
            NOTE(review): key is 'attention', not 'attention_mask' —
            presumably matches collate_fn's output; verify against predict.py.
        output_path: Destination file, one predicted label per line.
        mask_positions: Sequence positions of the [MASK] tokens; the
            default (5, 6) matches the prompt "这句话与[MASK][MASK]有关：".
    """
    results = []
    model.eval()
    # Hoisted out of the loop: the index tensor is loop-invariant.
    mask_index = paddle.to_tensor(list(mask_positions))
    for batch in tqdm(test_data_loader):
        input_ids = batch['input_ids']
        attention_mask = batch['attention']
        token_type_ids = batch['token_type_ids']
        # logits: batch * max_sequence * vocab_size
        logits = model(input_ids=input_ids, token_type_ids=token_type_ids,
                       attention_mask=attention_mask)
        # Keep only the [MASK] slots: batch * len(mask_positions) * vocab_size
        d = paddle.tensor.index_select(logits, index=mask_index, axis=1)
        model_result = paddle.argmax(d, axis=2)  # batch * len(mask_positions)
        for row in model_result:
            temp_list = tokenizer.convert_ids_to_tokens(paddle.tolist(row))
            results.append(''.join(temp_list))
    with open(output_path, mode='w', encoding="utf-8") as f:
        f.write('\n'.join(results))


if __name__ == '__main__':
    model = PromptModel()
    # Load the model weights from the saved checkpoint file.
    # (Only model parameters are restored here — no optimizer state is loaded.)
    layer_state_dict = paddle.load("save_model/model.pdparams")
    # Attach the loaded parameters to the model instance.
    model.set_state_dict(layer_state_dict)
    # Model restored; ready for inference.

    # shuffle=False keeps the output lines aligned with the input file order.
    data_loader = DataLoader(PredictDataSet(), shuffle=False, batch_size=256, drop_last=False, collate_fn=collate_fn)
    predict(model, data_loader)
