import numpy as np
import torch
from tools import with_timer
from bdtime import tt
import os
from utils.my_tools import device, tokenizer, output_dir
# from bdtime import show_ls
# import torch.nn.functional as F


# --- Training hyper-parameters ---
max_epoch = 50

test_data_size = 100
show_interval = 5   # emit debug/progress info every N epochs
show_times = 5      # print the first n samples on each report
perfect_acc_score = 0.95   # accuracy threshold counted as a "perfect" score
perfect_acc_times = 2      # stop training after this many consecutive perfect scores

prefix = None
seed = 11

# Pull sizing parameters from the shared tokenizer configuration.
data_size = tokenizer.data_size
batch_size = tokenizer.batch_size
max_seq_length = tokenizer.max_seq_length
num_steps = tokenizer.num_steps

# Keep the tokenizer.data_iter attribute read (parity with the original tuple
# unpack) but only retain the vocabulary; a fresh training iterator is built below.
_, texts_vocab = tokenizer.data_iter, tokenizer.texts_vocab
data_iter, _ = tokenizer.get_data_iter(
    data_size=data_size,
    batch_size=batch_size,
    is_train=True,
    texts_vocab=texts_vocab,
    prefix=prefix,
    seed=seed,
)


# region # --- TODO: implement `save` and `load` methods for the vocab when time permits
# ss = json.dumps(texts_vocab.idx_to_token)
# path_of_vocab = 'tempdir/text_vocab.json'
# with open(path_of_vocab, 'w+', encoding='utf-8') as f:
#     f.write(ss)
#
# with open(path_of_vocab, 'r', encoding='utf-8') as f:
#     content_of_vocab = json.load(f)

# texts_vocab.idx_to_token = content_of_vocab
# texts_vocab.to_tokens([0, 1, 2, 3])

# print(json.dumps(texts_vocab.__dict__))
# dumps = json.dumps(texts_vocab.__dict__)
# with open(json_path, 'r') as f:
#     params = json.load(f)
#     self.__dict__.update(params)

# texts_vocab.__dict__.update(json.loads(dumps))
# endregion


from utils.my_tools import ModelCLS, test_model_cls


# Build the classifier and prepare the on-disk destination for the trained model.
model_cls = ModelCLS(
    labels=tokenizer.total_types,
    vocab_size=len(tokenizer.decoder),
)
os.makedirs(output_dir, exist_ok=True)
output_path = os.path.join(output_dir, 'cls.model')


# AdamW optimizer + cross-entropy loss for multi-class classification.
optimizer = torch.optim.AdamW(model_cls.parameters(), lr=1e-4)
criterion = torch.nn.CrossEntropyLoss()


# with with_timer(f"Speed test for one batch (batch_size=[{tokenizer.batch_size}])", tt) as wt:
#     for batch in data_iter:
#         input_ids, attention_mask, label = [i.to(device) for i in batch]
#         break
#
#     logits = model_cls.forward(input_ids, attention_mask)
#
#     wt.show(f'logits.shape: [{logits.shape}]')


# Held-out evaluation iterator: one batch covering the whole test split
# (batch_size == data_size), built from the same vocab/prefix/seed as training.
_eval_kwargs = dict(
    batch_size=test_data_size,
    data_size=test_data_size,
    is_train=False,
    texts_vocab=texts_vocab,
    prefix=prefix,
    seed=seed,
)
test_data_iter, _ = tokenizer.get_data_iter(**_eval_kwargs)


# Training loop: run up to `max_epoch` epochs, report train/test accuracy every
# `show_interval` epochs, and stop early once the test accuracy has been
# "perfect" (>= perfect_acc_score) for `perfect_acc_times` consecutive reports.
accuracies = []
with with_timer(f"模型训练, total_types: {tokenizer.total_types}", tt) as wt:
    for epoch in range(max_epoch):
        # NOTE(review): zip(..., range(max_epoch)) caps each epoch at at most
        # `max_epoch` batches and `_epoch` is unused — confirm this throttle is
        # intentional and not an accidental reuse of the epoch constant.
        for batch, _epoch in zip(data_iter, range(max_epoch)):
            input_ids, attention_mask, label = [i.to(device) for i in batch]
            logits = model_cls(input_ids=input_ids, attention_mask=attention_mask)

            loss = criterion(logits, label)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        # Periodic progress report. NOTE(review): `logits`/`label`/`loss` are
        # leftovers from the inner loop, so train_acc reflects only the LAST
        # batch of the epoch; if data_iter yields nothing this raises NameError.
        if epoch % show_interval == 0:
            logits = logits.argmax(1)
            train_acc = (logits == label).sum().item() / len(label)

            test_acc = test_model_cls(model_cls, test_data_iter, show_times=show_times)

            wt.show(f'~~~~~~~~~ epoch: {epoch} / {max_epoch}, train_loss: {round(loss.cpu().item(), 4)}'
                    f', train_acc: {round(train_acc, 3)}, test_acc: {test_acc}', reset_cost=True)
            print()

            # Early stop: the sum of boolean hits over the last
            # `perfect_acc_times` accuracies equals the window size only when
            # every one of them cleared the threshold.
            accuracies.append(test_acc)
            if sum(np.array(accuracies[-perfect_acc_times:]) >= perfect_acc_score) == perfect_acc_times:
                print(f'\n*** 累计连续达标[{perfect_acc_times}]次, 自动退出! perfect_acc_score: {perfect_acc_score}, current_epoch: {epoch}')
                break


# Persist the whole model object (moved to CPU first so it loads on
# CPU-only machines).
print(f'****** end ****** model\'s output_path: [{output_path}]')
model_cls.to('cpu')
torch.save(model_cls, output_path)

