import torch
import torch.nn as nn
import torch.utils.data as Data
import torch.nn.functional as F
import bert_model
import dataset_load
import dataset_load_fold
import numpy as np
import optimization
import os
import logging
import traceback
import random
import time
import argparse
import sys


# Hyper Parameters
# Restrict PyTorch to the GPU with physical index 1; inside this process it
# is then addressed simply as 'cuda' (see DEVICE below).
os.environ['CUDA_VISIBLE_DEVICES'] = "1"

hyper_params = {
    'total_items': 3533,         # item vocabulary size (dataset-specific)
    'total_users': 6034,         # number of users (dataset-specific)
    'seq_len': 200,              # max interaction-sequence length fed to the model
    'dataset_path': 'ml-1m-hr',  # dataset directory; presumably MovieLens-1M — confirm
    'd_model': 128,              # transformer hidden size
    'hold_out_prop': 0.125,      # validation hold-out proportion — TODO confirm in dataset_load
    'test_prop': 0.2,            # test split proportion — TODO confirm in dataset_load
    'batch_size': 64,
    'single_mask_rate': 0.1,     # per-position mask probability — TODO confirm in dataset_load
    'mask_rate': 0.2             # overall masking rate — TODO confirm in dataset_load
}

USE_FOLD = False      # switch between the dataset_load_fold and dataset_load pipelines
LR_PRIMAL = 1e-3      # AdamW learning rate for the primal objective
L2_REGULAR = 1e-2     # AdamW weight_decay
Ks = [1, 5, 10, 20, 50, 100]  # cutoffs for HR@K / NDCG@K / MRR@K

USE_DISCRIMINATOR = False  # never read in this file — experiment toggle leftover

BETA1 = 0.9           # NOTE(review): not passed to the optimizer below (defaults used)
BETA2 = 0.999         # NOTE(review): not passed to the optimizer below (defaults used)
TOTAL_STEP = 200000   # hard cap on optimizer steps; training stops once reached
BIG_EPOCH = 10000     # max outer epochs (normally terminated earlier via TOTAL_STEP)
SMALL_EPOCH = 1       # never read in this file
LR_DECAY_RATE = 0.9   # never read here — possibly consumed by the `optimization` module
WARMUP_STEP = 100     # never read here — possibly consumed by the `optimization` module
DATASET_LEN = TOTAL_STEP * hyper_params['batch_size']

MAX_POS = hyper_params['seq_len']
USER_CNT = hyper_params['total_users']
ITEM_CNT = hyper_params['total_items']
D_K = 64              # attention key dim — never read in this file
D_V = 64              # attention value dim — never read in this file
P = 1.0               # never read in this file
LAMBDA = 0.0          # never read in this file
DEVICE = 'cuda'
K = 64                # never read in this file

CHECK_FREQ = 10       # run validation every CHECK_FREQ epochs

NEGATIVE_SAMPLE_CNT = 100  # NOTE(review): check() hard-codes 101 instead of using this

# Parser
# -m/--msg tags the run; the message is embedded in the log file name below.
parser = argparse.ArgumentParser(prog='train')
parser.add_argument("-m", "--msg", default="no description")
args = parser.parse_args()
train_msg = args.msg

# Setup Seed.
def setup_seed(seed):
    """Seed every RNG source used by the script for reproducible runs.

    Covers Python's `random`, NumPy, and torch (CPU and all CUDA devices),
    and forces cuDNN into deterministic-kernel mode.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True


setup_seed(1111)


# Config logging module.
# Logs go to model_log/bert_log_<timestamp>_<run-message>.txt; the model_log
# directory must already exist or FileHandler raises at startup.
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
local_time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
handler = logging.FileHandler(
    "model_log/bert_log_" + local_time_str + '_' + train_msg.replace(' ', '_') + ".txt")

handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

logger.info(train_msg)
logger.info(hyper_params['dataset_path'])
logger.info('Using CUDA:' + os.environ['CUDA_VISIBLE_DEVICES'])

# Accelerate with CuDNN.
# torch.backends.cudnn.benchmark = True

# Load data at first.
# The fold pipeline takes hyper_params explicitly; the plain one reads its own config.
if USE_FOLD:
    dataset_load_fold.load_data(hyper_params)
else:
    dataset_load.load_data()

# Dataset
if USE_FOLD:
    user_dataset = dataset_load_fold.BertDataset(hyper_params)
else:
    # dataset_len = 10 * users — presumably 10 masked samples per user per
    # epoch; confirm against dataset_load.BertDataset.
    user_dataset = dataset_load.BertDataset(
        dataset_len=10 * hyper_params['total_users'])
user_dataloader = Data.DataLoader(
    user_dataset, batch_size=hyper_params['batch_size'], shuffle=True)

# Generate test dataset.
if USE_FOLD:
    test_dataset = dataset_load_fold.generate_test_data(hyper_params)
else:
    test_dataset = dataset_load.generate_test_data()
test_dataloader = Data.DataLoader(
    test_dataset, batch_size=hyper_params['batch_size'], shuffle=True)

# Generate validate dataset.
if USE_FOLD:
    val_dataset = dataset_load_fold.generate_validate_data(hyper_params)
else:
    # NOTE(review): the non-fold branch reuses generate_test_data() here, so
    # validation and test sets coincide — confirm dataset_load really has no
    # generate_validate_data(); otherwise model selection leaks the test set.
    val_dataset = dataset_load.generate_test_data()
val_dataloader = Data.DataLoader(
    val_dataset, batch_size=hyper_params['batch_size'], shuffle=True)

# Build the model.
print('Building net...')
logger.info('Building net...')

net = bert_model.Transformer(hyper_params).to(DEVICE)

print(net)
print('Net build finished.')
logger.info('Net build finished.')

# Single AdamW parameter group. NOTE(review): BETA1/BETA2 defined above are
# NOT passed here, so the optimizer runs on PyTorch's default betas
# (numerically the same values, but the constants are decorative).
optimizer_primal = torch.optim.AdamW([{
    'params': net.parameters(),
    'lr': LR_PRIMAL,
    'weight_decay': L2_REGULAR}
])

print('User datasets loaded and saved.')
logger.info('User datasets loaded and saved.')

# Global High.
# Best-so-far validation metrics, indexed parallel to Ks; updated in check().
high_hr = [0.0 for _ in range(len(Ks))]
high_ndcg = [0.0 for _ in range(len(Ks))]
high_mrr = [0.0 for _ in range(len(Ks))]


def generate_result_str(hr, ndcg, mrr):
    """Render metric lists as a three-line report string.

    Args:
        hr, ndcg, mrr: metric values aligned index-for-index with the
            module-level ``Ks`` cutoffs.

    Returns:
        str: an 'HR@k:v ' line, newline, NDCG line, newline, MRR line —
        each entry ends with a space; no trailing newline.
    """
    # ''.join over f-strings replaces the original quadratic += string
    # concatenation; the produced text is byte-identical.
    lines = [
        ''.join(f'{name}@{k}:{v} ' for k, v in zip(Ks, values))
        for name, values in (('HR', hr), ('NDCG', ndcg), ('MRR', mrr))
    ]
    return '\n'.join(lines)


# Check
def check(dataloader, validate=True, evaluate_users=None):
    """Evaluate ranking quality (HR/NDCG/MRR at each cutoff in Ks).

    Args:
        dataloader: yields (batchx, batchy, padding, user_id, cur_cnt,
            cur_masked) batches — assumed shaped (batch, seq_len); confirm
            against dataset_load.
        validate: True -> log as a validation pass and fold results into the
            global high_hr/high_ndcg/high_mrr bests; False -> test pass
            (only the log wording differs).
        evaluate_users: optional cap on the number of users scored.

    Results are printed and logged; nothing is returned.
    """
    if validate:
        print('Start validating...')
        logger.info('Start validating...')
    else:
        print('Start checking...')
        logger.info('Start checking...')
    item_popular = dataset_load.item_popular if not USE_FOLD else dataset_load_fold.item_popular
    # NOTE(review): the popularity counts are immediately replaced with ones,
    # which makes the negative sampling below uniform rather than
    # popularity-weighted — confirm this is intentional.
    item_popular = torch.ones_like(item_popular)
    normalized_item_popular = item_popular / torch.sum(item_popular)
    net.eval()
    with torch.no_grad():
        # Parameters
        checked_users = 0
        cur_hr = [0.0 for _ in range(len(Ks))]
        cur_ndcg = [0.0 for _ in range(len(Ks))]
        cur_mrr = [0.0 for _ in range(len(Ks))]

        for _, (batchx, batchy, padding, user_id, cur_cnt, cur_masked) in enumerate(dataloader):
            batchx = batchx.to(DEVICE)
            batchy = batchy.to(DEVICE)
            padding = padding.to(DEVICE)
            user_id = user_id.to(DEVICE)
            cur_cnt = cur_cnt.to(DEVICE)

            # Forward.
            pred = net(batchx, padding)

            flag = False  # set when the evaluate_users cap is hit mid-batch
            for i in range(batchy.shape[0]):
                if not USE_FOLD:
                    # Sampled evaluation: rank the true next item against 100
                    # sampled negatives the user has not interacted with.
                    rated = set(batchx[i].cpu().numpy())
                    # item_idx[0] is the ground-truth item at the last valid position.
                    item_idx = [batchy[i, cur_cnt[i] - 1].item()]

                    while len(item_idx) < 101:
                        sample_ids = torch.multinomial(
                            normalized_item_popular, num_samples=101).numpy()
                        sample_ids = [
                            x for x in sample_ids if x not in rated and x not in item_idx]
                        item_idx.extend(sample_ids[:])
                    item_idx = item_idx[: 101]

                    item_idx = torch.Tensor(item_idx).long()
                    cur_label = cur_cnt[i].item() - 1
                    cur_pred = pred[i, cur_label, item_idx]

                    # Double argsort yields each candidate's rank; index 0 is
                    # the true item, and +1 makes the rank 1-based.
                    cur_rank = torch.argsort(
                        cur_pred, descending=True).argsort()[0].item() + 1

                    for j in range(len(Ks)):
                        if cur_rank <= Ks[j]:
                            cur_hr[j] += 1
                            cur_ndcg[j] += 1 / np.log2(cur_rank + 1)
                            cur_mrr[j] += 1 / cur_rank
                else:
                    # Full-catalogue ranking, evaluated once per cutoff k.
                    for j, k in enumerate(Ks):
                        best, now_at, dcg, hits = 0.0, 0.0, 0.0, 0.0
                        # Suppress already-seen items (and padding id 0) with a
                        # large negative score. NOTE(review): scatter_ mutates
                        # this slice of `pred` in place; it is idempotent across
                        # the j-loop since the same value is written each time.
                        last_pred = pred[i, cur_cnt[i] - 1].scatter_(
                            dim=0, index=batchx[i], value=-10000000.0)
                        last_pred[0] = -10000000.0

                        rec_list = list(torch.argsort(
                            last_pred, descending=True)[:k].cpu().numpy())

                        first_correct = sys.maxsize
                        for movie in batchy[i]:
                            movie = movie.item()
                            if movie == 0:
                                # id 0 marks padding: end of this user's targets.
                                break
                            now_at += 1.0
                            # Ideal DCG: as if every target were ranked at the top.
                            if now_at <= k:
                                best += 1.0 / float(np.log2(now_at + 1))

                            if movie not in rec_list:
                                continue

                            hits += 1.0
                            dcg += 1.0 / \
                                float(np.log2(float(rec_list.index(movie) + 2)))
                            first_correct = min(
                                first_correct, rec_list.index(movie))

                        # NOTE(review): divides by zero if a user has no targets
                        # (now_at == 0) — presumably the datasets exclude that case.
                        cur_hr[j] += float(hits) / float(now_at)
                        cur_ndcg[j] += float(dcg) / float(best)
                        # With no hits first_correct stays sys.maxsize (adds ~0).
                        cur_mrr[j] += 1.0 / (first_correct + 1)

                checked_users += 1
                if evaluate_users is not None and checked_users >= evaluate_users:
                    flag = True
                    break

            if flag:
                break

        # Average over users; update global validation bests when validating.
        for i in range(len(Ks)):
            cur_hr[i] /= checked_users
            cur_ndcg[i] /= checked_users
            cur_mrr[i] /= checked_users
            if validate:
                high_hr[i] = max(high_hr[i], cur_hr[i])
                high_ndcg[i] = max(high_ndcg[i], cur_ndcg[i])
                high_mrr[i] = max(high_mrr[i], cur_mrr[i])

        cur_str = generate_result_str(cur_hr, cur_ndcg, cur_mrr)

        print(cur_str)
        logger.info(f'--------------------EVAL RESULT--------------------')
        logger.info(cur_str)

        if validate:
            high_str = generate_result_str(high_hr, high_ndcg, high_mrr)
            print(high_str)
            logger.info(high_str)


def multi_log_likelihood(pred: torch.Tensor, label: torch.Tensor, padding: torch.Tensor, mask: torch.Tensor):
    """Average per-user log-likelihood of the target items.

    Only positions where ``mask`` is on and ``padding`` is off contribute.
    Each user's log-probabilities are averaged over their valid positions
    (1e-5 in the denominator guards against users with none), then the
    result is averaged over the batch.

    Args:
        pred: (batch, seq, n_items) per-item probabilities — log is taken
            directly, so entries are assumed positive (presumably a softmax
            output; confirm in bert_model).
        label: (batch, seq) integer target item ids.
        padding: (batch, seq) bool tensor, True at padded positions.
        mask: (batch, seq) weights selecting the masked training positions.

    Returns:
        Scalar tensor: batch-mean of the per-user average log-likelihood.
    """
    valid = mask * (~padding)
    target_one_hot = F.one_hot(label, num_classes=pred.shape[2])
    # Log-probability of the labeled item at each position, zeroed where invalid.
    position_ll = torch.sum(target_one_hot * torch.log(pred), 2) * valid
    user_ll = torch.sum(position_ll, 1) / (torch.sum(valid, dim=1) + 1e-5)
    return torch.mean(user_ll)


# Train
def train():
    """Run the training loop.

    Optimizes `net` with AdamW on the negative masked log-likelihood,
    validates every CHECK_FREQ epochs, runs a test evaluation once
    TOTAL_STEP optimizer steps are reached, then a final test pass capped
    at 10000 users.
    """
    print('Start training...')
    logger.info('Start training...')

    global_step = 0
    total_loss_primal = 0.0  # running loss since the last log line
    total_loss_step = 0      # batches contributing to total_loss_primal

    for epoch in range(BIG_EPOCH):
        net.train()

        # NOTE(review): update_count / total_anneal_steps are never read —
        # they look like leftovers from a KL-annealing (VAE-style) variant.
        update_count = 0
        total_anneal_steps = 200000
        for step, (batchx, batchy, padding, user_id, cur_cnt, cur_masked) in enumerate(user_dataloader):
            batchx = batchx.to(DEVICE)
            batchy = batchy.to(DEVICE)
            padding = padding.to(DEVICE)
            user_id = user_id.to(DEVICE)
            cur_cnt = cur_cnt.to(DEVICE)
            cur_masked = cur_masked.to(DEVICE)

            # Forward.
            pred = net(batchx, padding)

            # --------------------------PRIMAL---------------------------
            # Negative mean log-likelihood at masked, non-padded positions.
            loss_primal = - \
                multi_log_likelihood(pred, batchy, padding, cur_masked)

            # Skip batches whose loss is NaN (e.g. log(0) in the likelihood)
            # instead of poisoning the weights with a NaN gradient.
            if torch.isnan(loss_primal).item() > 0:
                continue

            total_loss_primal += loss_primal.item()
            total_loss_step += 1

            # Optimize.
            optimizer_primal.zero_grad()
            loss_primal.backward()
            optimizer_primal.step()

            # Show Loss
            if step % 100 == 0:
                # Global L2 norm over all gradients, for divergence monitoring.
                parameters = list(
                    filter(lambda p: p.grad is not None, net.parameters()))
                total_norm = torch.norm(torch.stack(
                    [torch.norm(p.grad.detach(), 2) for p in parameters]), 2)

                print(
                    f'EPOCH:({epoch}/{BIG_EPOCH}),STEP:{global_step}/{TOTAL_STEP},loss:{total_loss_primal/total_loss_step},L2:{total_norm.item()}')
                logger.info(
                    f'EPOCH:({epoch}/{BIG_EPOCH}),STEP:{global_step}/{TOTAL_STEP},loss:{total_loss_primal/total_loss_step},L2:{total_norm.item()}')
                total_loss_primal = 0
                total_loss_step = 0

            global_step += 1

        # Check
        if epoch % CHECK_FREQ == 0:
            check(dataloader=val_dataloader, validate=True)
            # check(dataloader=test_dataloader, validate=False)
            net.train()  # check() leaves the net in eval mode

        if global_step >= TOTAL_STEP:
            # Check the result.
            check(dataloader=test_dataloader, validate=False)
            break

    check(test_dataloader, validate=False, evaluate_users=10000)


# Main
if __name__ == '__main__':
    # Train the model.
    # Top-level boundary: any uncaught exception is printed and written to
    # the log file so crashed runs can be diagnosed from the log alone.
    try:
        train()
        logger.info('Finished.')
    except Exception as err:
        err_info = traceback.format_exc()
        print(err_info)
        logger.info(err_info)
        logger.info('Error.')
