import pandas as pd
import torch
from pandas import read_json
from tqdm import tqdm
from transformers import BertForSequenceClassification, BertTokenizerFast

# Load the fine-tuned checkpoint (36-way patent classification) onto the GPU.
model = BertForSequenceClassification.from_pretrained('./best_ckpt/', num_labels=36)
model = model.to('cuda')
# Inference only: switch off dropout / put norm layers in eval mode.
# Without this the predictions are stochastic and systematically worse.
model.eval()
tokenizer = BertTokenizerFast.from_pretrained('Langboat/mengzi-bert-base')

# Test set: each record is expected to carry `title`, `assignee` and
# `abstract` fields — TODO confirm against data/testA.json. They are fused
# into one Chinese prompt string that the classifier consumes.
df_test = pd.DataFrame.from_records(read_json('data/testA.json'))
df_test['input_string'] = df_test.apply(lambda x: f"这份专利的标题为：《{x.title}》，由“{x.assignee}”公司申请，详细说明如下：{x.abstract}",
                                        axis=1)
BATCH_SIZE = 6
# Ceiling division: the old `int(len/BATCH) + 1` created a spurious empty
# trailing batch whenever len(df_test) was an exact multiple of BATCH_SIZE.
total_batch_num = (len(df_test) + BATCH_SIZE - 1) // BATCH_SIZE
outputs_list = []
for batch_num in tqdm(range(total_batch_num)):
    batch_input = df_test[batch_num * BATCH_SIZE:(batch_num + 1) * BATCH_SIZE]
    if len(batch_input) == 0:
        break
    tokens = tokenizer(batch_input['input_string'].tolist(),
                       return_tensors="pt",
                       max_length=512,
                       truncation=True,
                       padding=True).to('cuda')
    # Inference only: disable autograd so no computation graph is built
    # and GPU memory is not wasted holding activations for backprop.
    with torch.no_grad():
        outputs = model(input_ids=tokens.input_ids,
                        attention_mask=tokens.attention_mask,
                        token_type_ids=tokens.token_type_ids)
    # Predicted class per row = argmax over the 36 logits.
    outputs_list.extend(outputs['logits'].argmax(1).tolist())
    # Free the batch tensors (including the logits) before the next batch.
    del tokens, outputs
# Sanity check: exactly one prediction per test row. Raise explicitly
# instead of `assert`, which is silently stripped under `python -O`.
if len(df_test) != len(outputs_list):
    raise RuntimeError(
        f"prediction count mismatch: {len(outputs_list)} labels "
        f"for {len(df_test)} rows")
df_test['label'] = outputs_list
# Labels come from .tolist() and should already be ints; cast defensively.
df_test['label'] = df_test['label'].apply(int)
df_test[['id', 'label']].to_csv('submit.csv', index=False)
