import torch
import torch.nn as nn
import torch.utils.data as Data
import torch.nn.functional as F
import performer_model
import dataload
import evaluator
import numpy as np
import optimization
import os
import logging
import traceback
import random
import time
import argparse
import sys
from tensorboardX import SummaryWriter


# Hyper Parameters
# NOTE(review): pins this process to physical GPU 2; must be set before
# torch initialises CUDA.
os.environ['CUDA_VISIBLE_DEVICES'] = "2"

# Shared configuration consumed by performer_model, dataload and evaluator.
hyper_params = {
    'total_items': 3533,            # item vocabulary size
    'total_users': 6034,            # user count
    'seq_len': 200,                 # max interaction-sequence length
    'dataset_path': 'ml-1m',        # MovieLens-1M directory
    'item_embed_size': 64,          # item embedding dimension
    'hidden_size': 50,
    'num_heads': 4,                 # attention heads (also in run/path name)
    'depth': 16,                    # per-head depth (also in run/path name)
    'hold_out_prop': 0.125,         # validation split proportion -- confirm in dataload
    'test_prop': 0.2,               # test split proportion -- confirm in dataload
    'batch_size': 64,
    'single_mask_rate': 0.1,
    'mask_rate': 0.2,
    'device': 'cuda',               # all tensors are moved here before use
    'dropout_rate': 0.2,
    'num_blocks': 2,                # stacked encoder blocks
    'time_span': 256,
    'split_type': 'loo',            # presumably leave-one-out -- verify in dataload
    'dataset_type': 'time_interval',
    'splitter': '::',               # field separator of the raw ratings file
    'min_rate': 3.5,                # rating threshold -- semantics live in dataload
    'min_items': 5,                 # min interactions per user -- verify in dataload
    'Ks': [1, 5, 10],               # top-K cutoffs for ranking metrics
    'evaluate_users': 10000,        # cap on users sampled during evaluation
    'use_fold': False,
    'output_file': False
}

# Optimiser / schedule constants.
LR_PRIMAL = 1e-3            # AdamW learning rate
L2_REGULAR = 1e-2           # AdamW weight decay
TOTAL_STEP = 200000         # hard cap on total optimisation steps
BIG_EPOCH = 10000           # epoch upper bound (loop exits at TOTAL_STEP)
CHECK_FREQ = 5              # run validation every CHECK_FREQ epochs
NEGATIVE_SAMPLE_CNT = 100   # NOTE(review): unused in this file

# Path STR
# Run identifier; used as the TensorBoard log directory name.
path_str = f'performer_heads_{hyper_params["num_heads"]}_depth_{hyper_params["depth"]}_seq_{hyper_params["seq_len"]}'

# Parser
# -m/--msg tags the run; the message is embedded in the log-file name.
parser = argparse.ArgumentParser(prog='train')
parser.add_argument("-m", "--msg", default="no description")
args = parser.parse_args()
train_msg = args.msg


# Setup Seed.
def setup_seed(seed):
    """Seed every RNG in use (stdlib, NumPy, torch CPU/CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # cuDNN may otherwise autotune to non-deterministic kernels.
    torch.backends.cudnn.deterministic = True


# Fix all RNG seeds before any model/data initialisation.
setup_seed(1111)


# Config logging module.
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
local_time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
# BUG FIX: FileHandler does not create missing directories; ensure the
# log directory exists so startup doesn't crash on a fresh checkout.
os.makedirs('model_log', exist_ok=True)
handler = logging.FileHandler(
    "model_log/performer_log_" + local_time_str + '_' + train_msg.replace(' ', '_') + ".txt")

handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

# Record the run description and key environment facts up front.
logger.info(train_msg)
logger.info(hyper_params['dataset_path'])
logger.info('Using CUDA:' + os.environ['CUDA_VISIBLE_DEVICES'])


# Build the model.
print('Building net...')
logger.info('Building net...')

net = performer_model.Model(hyper_params).to(hyper_params['device'])
print(net)
print('Net build finished.')
logger.info('Net build finished.')

# Single AdamW parameter group with decoupled weight decay.
optimizer_primal = torch.optim.AdamW([{
    'params': net.parameters(),
    'lr': LR_PRIMAL,
    'weight_decay': L2_REGULAR}
])


# Datasets
loader = dataload.DatasetLoader(hyper_params)
train_dataloader = torch.utils.data.DataLoader(
    loader.generate_train_data(), batch_size=hyper_params['batch_size'], shuffle=True)
test_dataloader = torch.utils.data.DataLoader(
    loader.generate_test_data(), batch_size=hyper_params['batch_size'], shuffle=True)
val_dataloader = torch.utils.data.DataLoader(
    loader.generate_val_data(), batch_size=hyper_params['batch_size'], shuffle=True)

# BUG FIX: this message was previously emitted before the datasets were
# actually built; report success only after the loaders exist.
print('User datasets loaded and saved.')
logger.info('User datasets loaded and saved.')


# Evaluator
def evaluator_func(model, batchx, batchy, batcht, padding, cur_cnt, user_id):
    """Scoring hook handed to evaluator.Evaluator.

    Returns the model's scores at the final sequence position, each row's
    interaction history (with padding id 0 included), and the last label
    of each row as the evaluation target. batcht / cur_cnt / user_id are
    accepted for interface compatibility but unused here.
    """
    scores = model(batchx, padding)[:, -1, :]

    # Items each user has already interacted with, plus the padding id 0.
    history = [list(set(seq.detach().cpu().numpy()) | {0}) for seq in batchx]

    # The final element of every label sequence is the held-out target.
    target = [[seq[-1].item()] for seq in batchy]

    return scores, history, target


# Validation and test harnesses; each drives its dataloader through
# evaluator_func. (Note: 'evalaluator' spelling is kept as-is — it is the
# name the rest of the file uses.)
val_evalaluator = evaluator.Evaluator(
    val_dataloader, evaluator_func, logger, hyper_params)
test_evalaluator = evaluator.Evaluator(
    test_dataloader, evaluator_func, logger, hyper_params)


def multi_log_likelihood(pred: torch.Tensor, label: torch.Tensor, padding: torch.Tensor):
    """Batch-mean of the per-row average log-likelihood of the labels.

    pred    : (batch, seq, items) — taken as probabilities (log() is applied directly)
    label   : (batch, seq) item indices
    padding : (batch, seq) bool mask, True at padded positions
    """
    valid = ~padding
    one_hot = nn.functional.one_hot(label, num_classes=pred.shape[2])
    # The one_hot * log(pred) product picks the labelled item's
    # log-probability at every position; padded positions are zeroed.
    per_position = torch.sum(one_hot * torch.log(pred), 2) * valid
    # Average over real positions; epsilon guards fully-padded rows.
    per_row = torch.sum(per_position, 1) / (torch.sum(valid, dim=1) + 1e-5)
    return torch.mean(per_row)


# Train
def train():
    """Optimise `net` on train_dataloader until TOTAL_STEP steps or BIG_EPOCH epochs.

    Logs per-epoch mean loss to stdout, the file logger and TensorBoard;
    runs validation every CHECK_FREQ epochs and a final test evaluation.
    Uses module-level state: net, optimizer_primal, dataloaders, evaluators.
    """
    print('Start training...')
    logger.info('Start training...')
    writer = SummaryWriter(f'./runs/{path_str}', flush_secs=5)

    global_step = 0
    total_loss_primal = 0.0
    total_loss_step = 0

    for epoch in range(BIG_EPOCH):
        net.train()

        for step, (batchx, batchy, batcht, padding, cur_cnt, user_id) in enumerate(train_dataloader):
            batchx = batchx.to(hyper_params['device'])
            batchy = batchy.to(hyper_params['device'])
            batcht = batcht.to(hyper_params['device'])
            padding = padding.to(hyper_params['device'])
            user_id = user_id.to(hyper_params['device'])
            cur_cnt = cur_cnt.to(hyper_params['device'])

            # Forward.
            pred = net(batchx, padding)

            # --------------------------PRIMAL---------------------------
            loss_primal = -multi_log_likelihood(pred, batchy, padding)

            # Skip batches whose loss went non-finite (e.g. log(0) -> nan)
            # rather than poisoning the parameters with a nan gradient.
            if torch.isnan(loss_primal).item():
                continue

            total_loss_primal += loss_primal.item()
            total_loss_step += 1

            # Optimize.
            optimizer_primal.zero_grad()
            loss_primal.backward()
            optimizer_primal.step()

            global_step += 1

        # Show Loss. max(..., 1) guards against an epoch where every
        # batch was skipped (division by zero).
        avg_loss = total_loss_primal / max(total_loss_step, 1)
        print(
            f'EPOCH:({epoch}/{BIG_EPOCH}),STEP:{global_step}/{TOTAL_STEP},loss:{avg_loss}')
        logger.info(
            f'EPOCH:({epoch}/{BIG_EPOCH}),STEP:{global_step}/{TOTAL_STEP},loss:{avg_loss}')

        writer.add_scalar('loss', avg_loss, global_step=epoch)
        total_loss_primal = 0
        total_loss_step = 0

        # Check
        if epoch % CHECK_FREQ == 0:
            val_evalaluator.evaluate(net)
            # Only HR@10 is tracked in TensorBoard; cur_hr is (HR@1, HR@5, HR@10).
            _, _, cur_hr10 = val_evalaluator.cur_hr
            writer.add_scalar('hr10', cur_hr10, global_step=epoch)
            net.train()

        if global_step >= TOTAL_STEP:
            break

    writer.close()
    test_evalaluator.evaluate(net)


# Main
# Main
if __name__ == '__main__':
    # Train the model; record any uncaught failure in the log before exiting.
    try:
        train()
        logger.info('Finished.')
    except Exception:
        # format_exc() captures the active traceback, so binding the
        # exception to a name (previously unused) is unnecessary.
        err_info = traceback.format_exc()
        print(err_info)
        logger.info(err_info)
        logger.info('Error.')
