import d2l.torch
import numpy as np
import torch
from tools import with_timer
from bdtime import tt
from bdtime import show_ls
import os
from utils.my_tools import device, tokenizer, output_dir
from utils.my_tools import generate
from utils.my_tools import ModelGEN, test_model_gen
from utils.my_tools import output_path__gen_model


# --- Module-level data & model setup (these names are used by main()) ---

# Hyperparameters taken from the project tokenizer's configuration.
data_size, batch_size = tokenizer.data_size, tokenizer.batch_size
max_seq_length, num_steps = tokenizer.max_seq_length, tokenizer.num_steps

# Reuse the vocabulary already built by the tokenizer, then rebuild the
# training iterator with it so batches share the same vocab mapping.
# NOTE(review): the tokenizer's default data_iter is read and discarded here;
# presumably get_data_iter() returns a fresh training iterator — confirm.
_, texts_vocab = tokenizer.data_iter, tokenizer.texts_vocab
data_iter, texts_vocab = tokenizer.get_data_iter(
    data_size=data_size,
    batch_size=batch_size,
    is_train=True,
    texts_vocab=texts_vocab,
    prefix=None
)


# Freshly initialized generator model; trained weights are saved to output_path.
model_gen = ModelGEN()
output_path = output_path__gen_model


def main():
    """Train ``model_gen`` with next-token cross-entropy and save it.

    Uses the module-level ``model_gen``, ``data_iter``, ``tokenizer``,
    ``my_questions`` and ``test_gen``. Training can be interrupted with the
    [alt + t] hotkey; the model is moved to CPU and saved to
    ``output_path`` either way.
    """
    from tqdm import tqdm

    optimizer = torch.optim.AdamW(model_gen.parameters(), lr=1e-4)
    # Padding positions are excluded from the language-model loss.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=tokenizer.pad_token_id)

    print('======= 开始训练')
    max_epoch = 1500
    tq_i = tqdm(total=max_epoch)
    tq_i.desc = "快捷键[alt + t]结束训练并保存模型"

    # Kept from the most recent batch for the periodic sampling below;
    # guards against a NameError if data_iter yields no batches.
    input_ids = None
    for epoch in range(max_epoch):
        # Let the user stop early and still save the model.
        if tt.stop_alt('t'):
            print('***** break by user! current epoch:', epoch, '******')
            break

        # NOTE(review): zip() caps the number of batches per epoch at
        # max_epoch — presumably an intentional safety limit; confirm.
        for batch, _epoch in zip(data_iter, range(max_epoch)):
            input_ids, attention_mask, _ = [i.to(device) for i in batch]

            logits = model_gen(input_ids=input_ids, attention_mask=attention_mask)

            # Next-token prediction: logits at step t are scored against the
            # token at step t + 1 (targets shifted left by one).
            loss = criterion(logits[:, :-1].flatten(end_dim=1), input_ids[:, 1:].flatten())

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        # Periodically show a few generated samples to monitor progress.
        if epoch % 15 == 0 and input_ids is not None:
            print(f'\n-------------- epoch: {epoch} / {max_epoch}')

            show_times = 2
            test_ids = generate(model_gen, input_ids[:show_times, :tokenizer.max_seq_length])
            show_ls([''.join(ls) for ls in tokenizer.decode(test_ids)])

            print('------ test_gen ------')
            test_gen(model_gen=model_gen, test_times=2, my_questions=my_questions)

            tt.sleep(0.5)

        tq_i.update(1)

    tq_i.close()  # fix: release the progress bar instead of leaking it
    model_gen.to('cpu')

    if os.path.exists(output_path__gen_model):
        tt.tqdm_sleep(f'********** Warning: model_gen[{output_path__gen_model}]已存在! 将进行覆盖操作!', T=10)
    torch.save(model_gen, output_path)


from utils.my_tools import test_gen


# Prompts used for periodic qualitative evaluation during training.
# Each prefix before ':' (none / slash / dot / abbr) presumably selects the
# target date output format — TODO confirm against test_gen / the tokenizer.
my_questions = [
    '请问: 1991年01月11日=',
    'none: 1999年09月19日=',
    'slash: 1999年09月29日=',
    'dot: 2222年02月22日=',
    # 'bar: 2011年03月13日=',
    'abbr: 1981年05月21日=',
]


if __name__ == '__main__':
    # Quick sanity check of the generator on the sample prompts before
    # starting a training run.
    test_gen(test_times=2, my_questions=my_questions)

    # Warn (with a 10 s countdown) before overwriting an existing model file.
    if os.path.exists(output_path__gen_model):
        tt.tqdm_sleep(f'Warning: model_gen[{output_path__gen_model}]已存在! 将进行覆盖操作!', T=10)
    main()






