import importlib
import time

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from dataset import BaseDataset
from evaluate import evaluate
from exp.model import MyModel
from utils import time_since, EarlyStopping

# Select the compute device and resolve the model's config class by name
# (e.g. MyModel -> config.MyModelConfig).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_name = "MyModel"
_config_module = importlib.import_module('config')
config = getattr(_config_module, f"{model_name}Config")

print("using device is ", device)
print("training model is ", model_name)

# Load pretrained embeddings and build the model variant selected by config.
def _load_pretrained(path):
    """Load a .npy embedding matrix from *path* as a float32 torch tensor."""
    return torch.from_numpy(np.load(path)).float()


if config.use_abstract_entity and config.use_title_entity:
    # NOTE(review): this branch passes no word embedding to MyModel — confirm
    # the model really needs none when both entity inputs are enabled.
    pretrained_entity_embedding = _load_pretrained('data/train/pretrained_entity_embedding.npy')
    model = MyModel(config=config, pretrained_entity_embedding=pretrained_entity_embedding).to(device)
elif config.use_abstract_entity:
    pretrained_word_embedding = _load_pretrained('data/train/pretrained_word_embedding.npy')
    pretrained_entity_embedding = _load_pretrained('data/train/pretrained_entity_embedding.npy')
    model = MyModel(config=config, pretrained_word_embedding=pretrained_word_embedding,
                    pretrained_entity_embedding=pretrained_entity_embedding).to(device)
else:
    pretrained_word_embedding = _load_pretrained('data/train/pretrained_word_embedding.npy')
    model = MyModel(config=config, pretrained_word_embedding=pretrained_word_embedding).to(device)

# Training data: parsed behaviors plus parsed news articles.
dataset = BaseDataset(behaviors_path='data/train/behaviors_parsed.tsv',
                      news_path='data/train/news_parsed.tsv')

_loader_kwargs = dict(batch_size=config.batch_size,
                      shuffle=True,
                      num_workers=config.num_workers,
                      drop_last=True,
                      pin_memory=True)
dataloader = iter(DataLoader(dataset, **_loader_kwargs))

criterion = nn.NLLLoss()  # expects log-probabilities from the model
optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)

# Training-loop bookkeeping.
start_time = time.time()
early_stopping = EarlyStopping()

# One TensorBoard run directory per model name.
writer = SummaryWriter(log_dir=f"runs/{model_name}/")

exhaustion_count = 0  # how many times the dataset has been fully consumed (≈ epochs)
loss_full = []        # per-batch losses, used for running averages in the log output
step = 0              # global step for TensorBoard scalars

#  steps_per_epoch = data_num // batch_size
# Main training loop: runs num_epochs worth of mini-batches; the DataLoader is
# rebuilt each time it is exhausted (i.e. once per epoch).
for i in tqdm(range(1, config.num_epochs * len(dataset) // config.batch_size + 1), desc="Training", position=0,
              leave=True):
    try:
        minibatch = next(dataloader)
    except StopIteration:
        # Dataset exhausted — one epoch finished; rebuild the loader and go on.
        exhaustion_count += 1
        tqdm.write(
            f"Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset."
        )
        dataloader = iter(
            DataLoader(dataset,
                       batch_size=config.batch_size,
                       shuffle=True,
                       num_workers=config.num_workers,
                       drop_last=True,
                       pin_memory=True))
        minibatch = next(dataloader)

    step += 1
    # Forward pass: score the candidate news against the user's click history.
    y_pred = model(minibatch["user"],
                   minibatch["candidate_news"],
                   minibatch["clicked_news"])

    # Per the data layout, the positive (clicked) sample sits at index 0 of each
    # candidate list, so every NLLLoss target is class 0.
    y = torch.zeros(len(y_pred)).long().to(device)
    # inputs: (batch_size, classes_num); target: positive sample index.
    loss = criterion(y_pred, y)
    loss_full.append(loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if i % 10 == 0:
        writer.add_scalar('Train/Loss', loss.item(), step)

    # Periodically print current / average / recent-average training loss.
    if i % config.num_batches_show_loss == 0:
        tqdm.write(
            f"Time {time_since(start_time)}, batches {i}, current loss {loss.item():.4f}, average loss: {np.mean(loss_full):.4f}, latest average loss: {np.mean(loss_full[-256:]):.4f}"
        )

    # Periodic evaluation on the validation set.
    if i % config.num_batches_validate == 0:
        model.eval()
        val_auc, val_mrr, val_ndcg5, val_ndcg10 = evaluate(model=model, directory='data/val',
                                                           num_workers=config.num_workers,
                                                           max_count=200000)
        model.train()  # back to training mode

        writer.add_scalar('Validation/AUC', val_auc, step)
        writer.add_scalar('Validation/MRR', val_mrr, step)
        writer.add_scalar('Validation/nDCG@5', val_ndcg5, step)
        writer.add_scalar('Validation/nDCG@10', val_ndcg10, step)
        tqdm.write(
            f"Time {time_since(start_time)}, batches {i}, validation AUC: {val_auc:.4f}, validation MRR: {val_mrr:.4f}, validation nDCG@5: {val_ndcg5:.4f}, validation nDCG@10: {val_ndcg10:.4f}, "
        )

        # Early stopping tracks -AUC in place of a validation loss
        # (lower is better, so higher AUC means improvement).
        early_stop, get_better = early_stopping(-val_auc)
        if early_stop:
            tqdm.write('Early stop.')
            break
        # NOTE(review): `get_better` is currently unused — checkpoint saving on
        # improvement was disabled (see the commented-out torch.save below);
        # re-enable it if the best model should be persisted.

        # try:
        #     torch.save(
        #         {
        #             'model_state_dict': model.state_dict(),
        #             'optimizer_state_dict': optimizer.state_dict(),
        #             'step': step,
        #             'early_stop_value': -val_auc
        #         }, f"checkpoint/{model_name}/ckpt-{step}.pth")
        # except OSError as error:
        #     print(f"OS error: {error}")

# Flush and close the TensorBoard writer so all pending events reach disk,
# whether the loop finished normally or via early stopping.
writer.close()