import torch
import torch.nn as nn
import torch.utils.data as Data
import bert_model
import dataset_load
import numpy as np
import optimization
import os
import logging
import traceback
import nvae_model

# Config logging module.
# INFO-level messages are appended to log.txt; stdout prints are kept
# separately throughout the script for interactive runs.
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler("log.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

# Restrict this process to the first GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = "0"

# Accelerate with CuDNN.
# benchmark=True lets cuDNN autotune kernels; appropriate here since the
# input shape (batch_size x seq_len) is fixed.
torch.backends.cudnn.benchmark = True

# Model/data configuration passed to bert_model.Transformer.
hyper_params = {
    'total_items': 3533,
    'total_users': 6034,
    'seq_len': 200,          # maximum interaction-sequence length
    'dataset_path': 'ml-1m-hr',
    'd_model': 128,          # transformer hidden size
    'hold_out_prop': 0.125,
    'test_prop': 0.2,
    'batch_size': 128,
    'single_mask_rate': 0.1,
    'mask_rate': 0.2,        # presumably BERT-style masking prob — confirm in bert_model
    'device': 'cuda',
    'dropout_rate': 0.2,
    'time_span': 256         # NOTE(review): meaning unclear from here — confirm in bert_model
}

# Hyper Parameters
LR = 1.3e-3          # peak learning rate, reached after warmup
L2_REGULAR = 1e-2    # AdamW weight decay
BETA1 = 0.9          # defined but not passed to AdamW below (defaults used)
BETA2 = 0.999        # defined but not passed to AdamW below (defaults used)
BATCH_SIZE = 128
TOTAL_STEP = 400000  # total optimizer steps; also the LR-decay horizon
BIG_EPOCH = 1000
SMALL_EPOCH = 1
LR_DECAY_RATE = 0.9
WARMUP_STEP = 100    # linear LR warmup over the first 100 steps
# One "epoch" of BertDataset covers the full step budget.
DATASET_LEN = TOTAL_STEP * BATCH_SIZE
KL_WEIGHT = 0.0      # KL term disabled; train() uses the likelihood loss only

MAX_POS = dataset_load.MAX_POS
USER_CNT = dataset_load.USER_CNT
ITEM_CNT = dataset_load.ITEM_CNT
# D_K/D_V/P/LAMBDA/K appear to belong to the commented-out VTransformer
# variant in build_net(); unused by the active model.
D_K = 64
D_V = 64
P = 1.0
LAMBDA = 0.0
DEVICE = 'cuda'
K = 64

CHECK_FREQ = 1000    # evaluate + checkpoint every 1000 global steps

# Negatives sampled per user during evaluation (check() currently
# hard-codes the 101-candidate list rather than reading this constant).
NEGATIVE_SAMPLE_CNT = 100


# Global variables.
# Populated by generate_test_data() and build_net() before train() runs.
test_dataloader = None
net = None
user_dataloader = None
optimizer = None

# Load the dataset into dataset_load's module state at import time.
dataset_load.load_data()


def generate_test_data():
    """Build the evaluation set and publish it as the global test_dataloader."""
    global test_dataloader

    eval_set = dataset_load.generate_test_data()
    test_dataloader = Data.DataLoader(
        eval_set, batch_size=BATCH_SIZE, shuffle=True)


def build_net():
    """Construct the transformer model and its optimizer.

    Publishes the model as the global ``net`` (moved to DEVICE) and an
    AdamW optimizer as the global ``optimizer``. PyTorch's default
    initialization is used for the parameters.
    """
    global net, optimizer

    print('Building net...')
    logger.info('Building net...')

    net = bert_model.Transformer(hyper_params).to(DEVICE)
    print(net)

    print('Net build finished.')
    logger.info('Net build finished.')

    # Warmup/decay in train() overrides this lr each step; weight decay
    # is the decoupled AdamW form.
    optimizer = torch.optim.AdamW(
        net.parameters(), lr=LR, weight_decay=L2_REGULAR)

# print('Loading user datasets...')
# user_dataset = None
# try:
#     user_dataset = torch.load('./model_dat/user_dataset.pkl')
# except Exception as err:
#     print(err)
#     user_dataset = dataset_load.generate_train_dataset()
#     torch.save(user_dataset,'./model_dat/user_dataset.pkl')


# Dataset
# BertDataset is presumably a lazily-generated dataset of DATASET_LEN
# (= TOTAL_STEP * BATCH_SIZE) samples, so a single pass covers the whole
# training step budget — confirm in dataset_load.
user_dataset = dataset_load.BertDataset(DATASET_LEN)
# user_dataset = dataset_load.generate_train_dataset()
user_dataloader = Data.DataLoader(
    user_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
print('User datasets loaded and saved.')
logger.info('User datasets loaded and saved.')

# Global best hit-rate trackers, updated by check().
high_hr1 = 0.0
high_hr5 = 0.0
high_hr10 = 0.0


# Loss function
def multi_log_likelihood(pred: torch.Tensor, label: torch.Tensor, padding: torch.Tensor, mask: torch.Tensor):
    """Masked mean log-likelihood of the target items.

    Args:
        pred: (batch, seq, n_items) predicted probability distribution
            over items at each position.
        label: (batch, seq) integer target item ids.
        padding: (batch, seq) bool tensor, True at padded positions
            (excluded from the loss).
        mask: (batch, seq) numeric tensor selecting which positions
            contribute to the loss.

    Returns:
        Scalar tensor: batch mean of the per-sequence average
        log-probability over the selected positions.
    """
    # Gather the target-class probability first, THEN take the log. The
    # previous formulation summed one_hot(label) * log(pred), which
    # evaluates log() over *all* classes: an exact zero in pred at any
    # non-target class produced 0 * (-inf) = NaN and poisoned the whole
    # batch (the training loop had to detect and skip such batches).
    target_prob = pred.gather(2, label.unsqueeze(2)).squeeze(2)
    log_sum = torch.log(target_prob) * mask * (~padding)

    # Per-sequence average over the selected positions; the epsilon
    # guards against fully-masked/padded rows (division by zero).
    log_sum = torch.sum(log_sum, 1) / \
        (torch.sum(mask * (~padding), dim=1) + 1e-5)
    avg_log_sum = torch.mean(log_sum)
    return avg_log_sum


def KL_loss(mu: torch.Tensor, sigma: torch.Tensor):
    """Mean elementwise KL divergence KL(N(mu, sigma^2) || N(0, 1))."""
    per_element = 0.5 * (sigma.pow(2) + mu.pow(2) - 1.0) - torch.log(sigma)
    return torch.mean(per_element)


def loss_func(pred, label, padding, mask, mu_g, sigma_g, mu_l, sigma_l, l: float):
    """Combine the negative log-likelihood with a weighted KL term.

    Returns (multi, kl, nan_flag); nan_flag is True when either term is
    NaN so the caller can skip the optimizer step for this batch.
    """
    multi = -multi_log_likelihood(pred, label, padding, mask)
    kl = l * (KL_loss(mu_g, sigma_g) + KL_loss(mu_l, sigma_l))

    nan_flag = bool(torch.isnan(multi).item()) or bool(torch.isnan(kl).item())
    return multi, kl, nan_flag


# Check
def check():
    global high_hr1, high_hr5, high_hr10
    print('Start checking...')
    logger.info('Start checking...')
    normalized_item_popular = dataset_load.item_popular / \
        torch.sum(dataset_load.item_popular)
    # normalized_item_popular = normalized_item_popular.detach().numpy()

    net.eval()
    with torch.no_grad():
        # Parameters
        top1hit, top5hit, top10hit = 0, 0, 0

        for _, (batchx, batchy, padding, mask, user_id) in enumerate(test_dataloader):
            batchx = batchx.to(DEVICE)
            batchy = batchy.to(DEVICE)
            padding = padding.to(DEVICE)
            mask = mask.to(DEVICE)
            user_id = user_id.to(DEVICE)

            # Forward.
            pred = net(batchx, padding)
            # multi_loss = -multi_log_likelihood(pred, batchy, padding, mask)

            # # If nan_flag, then continue.
            # if torch.isnan(multi_loss).item() > 0:
            #     continue

            # loss = multi_loss
            # print(f'check loss:{loss}')
            # logger.info(f'check loss:{loss}')

            for i in range(batchy.shape[0]):
                # Sample 100 negetive items with itself.
                rated = set(batchx[i].cpu().numpy())
                item_idx = [torch.sum(batchy[i] * mask[i]).item()]

                while len(item_idx) < 101:
                    sample_ids = torch.multinomial(
                        normalized_item_popular, num_samples=101).numpy()
                    sample_ids = [
                        x for x in sample_ids if x not in rated and x not in item_idx]
                    item_idx.extend(sample_ids[:])
                item_idx = item_idx[:101]

                check_mask = torch.Tensor(item_idx).long()
                check_mask = nn.functional.one_hot(
                    check_mask, num_classes=pred[i, 0].shape[0])
                check_mask = torch.sum(check_mask, dim=0).to(DEVICE)

                cur_idx = torch.max(mask[i], dim=0)[1]
                cur_label = batchy[i, cur_idx]
                cur_pred = pred[i, cur_idx] * check_mask
                sample1 = torch.topk(cur_pred, 1)[1]
                sample5 = torch.topk(cur_pred, 5)[1]
                sample10 = torch.topk(cur_pred, 10)[1]

                top1hit += int(cur_label in sample1)
                top5hit += int(cur_label in sample5)
                top10hit += int(cur_label in sample10)

        print(
            f'HR@1:{top1hit/USER_CNT}\nHR@5:{top5hit/USER_CNT}\nHR@10:{top10hit/USER_CNT}\n')
        logger.info(f'--------------------EVAL RESULT--------------------')
        logger.info(
            f'HR@1:{top1hit/USER_CNT}\nHR@5:{top5hit/USER_CNT}\nHR@10:{top10hit/USER_CNT}\n')

        high_hr1 = max(high_hr1, top1hit / USER_CNT)
        high_hr5 = max(high_hr5, top5hit / USER_CNT)
        high_hr10 = max(high_hr10, top10hit / USER_CNT)

        logger.info(
            f'high_hr1:{high_hr1}\nhigh_hr5:{high_hr5}\nhigh_hr10:{high_hr10}\n')


# Train
def train():
    print('Start training...')
    logger.info('Start training...')
    try:
        net.load_state_dict(torch.load('./model_dat/net.pkl'))
        optimizer.load_state_dict(torch.load('./model_dat/optim.pkl'))
        print('Loaded previous net.')
    except Exception as err:
        print(err)
        print('Previous net load failed.')

    global_step = 0
    for epoch in range(BIG_EPOCH):
        net.train()
        for step, (batchx, batchy, padding, mask, user_id) in enumerate(user_dataloader):
            batchx = batchx.to(DEVICE)
            batchy = batchy.to(DEVICE)
            padding = padding.to(DEVICE)
            mask = mask.to(DEVICE)
            user_id = user_id.to(DEVICE)

            global_step += 1
            # Adjust the decay rate
            if global_step < WARMUP_STEP:
                for param_group in optimizer.param_groups:
                    param_group['lr'] = LR * global_step / WARMUP_STEP
            else:
                for param_group in optimizer.param_groups:
                    param_group['lr'] = LR * \
                        max((1.0 - global_step / TOTAL_STEP), 0.05)

            # Forward.
            pred = net(batchx, padding)
            multi_loss = -multi_log_likelihood(pred, batchy, padding, mask)

            # If nan_flag, then continue.
            if torch.isnan(multi_loss).item() > 0:
                continue

            loss = multi_loss  # + kl_loss
            # Optimize.
            optimizer.zero_grad()
            loss.backward()
            # nn.utils.clip_grad_norm_(
            #     net.parameters(), max_norm=5.0, norm_type=2)

            optimizer.step()

            # Show Loss
            if step % 50 == 0:
                parameters = list(
                    filter(lambda p: p.grad is not None, net.parameters()))
                total_norm = torch.norm(torch.stack(
                    [torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
                print(
                    f'EPOCH:({epoch}/{BIG_EPOCH}),STEP:{global_step}/{TOTAL_STEP},Loss:{loss.item()},L2:{total_norm.item()}')
                logger.info(
                    f'EPOCH:({epoch}/{BIG_EPOCH}),STEP:{global_step}/{TOTAL_STEP},Loss:{loss.item()},L2:{total_norm.item()}')
                torch.cuda.empty_cache()

            # Check
            if global_step % CHECK_FREQ == 0:
                torch.cuda.empty_cache()
                check()
                torch.save(net.state_dict(), './model_dat/net.pkl')
                torch.save(optimizer.state_dict(), './model_dat/optim.pkl')
                print('Model checkpoint saved!')
                torch.cuda.empty_cache()
                net.train()

        if global_step >= TOTAL_STEP:
            break


# Main
# Main entry point: build the eval set and the model, then train.
# Any uncaught exception is logged with its full traceback to log.txt.
if __name__ == '__main__':
    generate_test_data()
    build_net()
    try:
        train()
    except Exception:
        err_info = traceback.format_exc()
        print(err_info)
        logger.info(err_info)
        logger.info('Error.')
    else:
        logger.info('Finished.')
