import os.path

from tools import *
from bdtime import tt
from bdtime import show_json, show_ls

# Toggle for verbose debug output (prints decoded sample batches).
debug = 1


# Sanity check at import time: decode a handful of token sequences from one
# batch to verify the tokenizer round-trips correctly before training.
if debug: print([tokenizer.decode(i) for i in tokenizer.get_batch_data(prefix=False)[1]][:10])


# Generator model instance; assumed to be placed on `device` by its own
# constructor or by the training code — TODO confirm in tools.
model_gen = ModelGEN()
# Destination path for the trained generator checkpoint.
output_path = os.path.join(output_dir, 'gen.model')


if __name__ == '__main__':
    # Preview a few decoded prefixed batches before training starts.
    if debug: show_ls([tokenizer.decode(i) for i in tokenizer.get_batch_data(prefix=True)[1]][:5])

    optimizer = torch.optim.AdamW(model_gen.parameters(), lr=1e-4)
    # Ignore the padding token 'P' when computing the LM loss so padded
    # positions contribute no gradient.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=tokenizer.encoder['P'])

    print('======= 开始训练')
    from tqdm import tqdm
    max_epoch = 15000
    # Context manager guarantees the progress bar is closed even if training
    # raises, avoiding a dangling / garbled terminal bar.
    with tqdm(total=max_epoch) as tq_i:
        for epoch in range(max_epoch):
            # Fresh batch each step; prefix=False yields plain LM batches.
            _, input_ids, attention_mask = tokenizer.get_batch_data(prefix=False)
            input_ids = torch.LongTensor(input_ids).to(device)
            attention_mask = torch.LongTensor(attention_mask).to(device)

            logits = model_gen(input_ids=input_ids, attention_mask=attention_mask)

            # Next-token prediction: the logit at position t is scored
            # against the token at position t+1 (shift-by-one alignment).
            loss = criterion(logits[:, :-1].flatten(end_dim=1),
                             input_ids[:, 1:].flatten())

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if epoch % 100 == 0:
                print('\n-------------- epoch: ', epoch)
                # Qualitative progress check: generate continuations from the
                # first two sequences' 9-token prefixes and print them.
                for i in generate(model_gen, input_ids[:2, :9]):
                    print(tokenizer.decode(i.tolist()))
                tt.sleep(0.5)
            tq_i.update(1)

    # Move to CPU before saving so the checkpoint can be loaded without a GPU.
    model_gen.to('cpu')
    # NOTE(review): torch.save on the whole module pickles the class itself;
    # saving model_gen.state_dict() would be more portable, but the whole-model
    # format is kept as-is so existing loading code keeps working.
    torch.save(model_gen, output_path)

