import os.path

from tools import *
from bdtime import tt
from bdtime import show_json, show_ls

# Toggle for the quick sanity-check prints scattered through this script.
debug = 1


# Sanity check: decode a few token-id sequences from one batch to eyeball
# what the tokenizer/batch generator actually produces.
if debug: print([tokenizer.decode(i) for i in tokenizer.get_batch_data(prefix=False)[1]][:10])


# Classification model and the path where the trained model is persisted.
# NOTE(review): assumes `output_dir` (from tools) already exists — confirm.
model_cls = ModelCLS()
output_path = os.path.join(output_dir, 'cls.model')


if __name__ == '__main__':
    # One more look at the decoded batch (prefix variant) before training.
    if debug: show_ls([tokenizer.decode(i) for i in tokenizer.get_batch_data(prefix=True)[1]][:5])

    optimizer = torch.optim.AdamW(params=model_cls.parameters(), lr=1e-4)
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(500):
        # Draw a fresh batch each step; get_batch_data returns parallel
        # lists of labels, token ids, and attention masks.
        label, input_ids, attention_mask = tokenizer.get_batch_data(prefix=False)
        # torch.as_tensor builds the tensor directly on `device`, avoiding
        # the extra CPU allocation of the legacy torch.LongTensor(...).to(device).
        label = torch.as_tensor(label, dtype=torch.long, device=device)
        input_ids = torch.as_tensor(input_ids, dtype=torch.long, device=device)
        attention_mask = torch.as_tensor(attention_mask, dtype=torch.long, device=device)

        logits = model_cls(input_ids=input_ids, attention_mask=attention_mask)

        loss = criterion(logits, label)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if epoch % 50 == 0:
            # Accuracy on the current training batch (no held-out eval set
            # here). `preds` keeps the raw `logits` name from being shadowed.
            preds = logits.argmax(1)
            acc = (preds == label).sum().item() / len(label)
            print(f'========== epoch: {epoch}, acc: {round(acc, 3)} ====== now:', tt.now())

            # Show a couple of decoded inputs with their predicted class ids.
            for i in range(2):
                print('--- tests:', tokenizer.decode(input_ids[i].tolist()), '--- type:', preds[i].item())

    # NOTE(review): this pickles the whole module (class by reference);
    # torch.save(model_cls.state_dict(), ...) is the more portable form,
    # but kept as-is so existing torch.load(output_path) callers still work.
    model_cls.to('cpu')
    torch.save(model_cls, output_path)


