import torch
import torch.nn as nn
import torch.utils.data as Data
import torch.nn.functional as F
import tcn_model
import dataload
import numpy as np
import optimization
import os
import logging
import traceback
import loss_func
import random
import time
import argparse
import sys


# Hyper Parameters
# Select the physical GPU before torch initializes CUDA; it is then visible as cuda:0.
os.environ['CUDA_VISIBLE_DEVICES'] = "3"

hyper_params = {
    'seq_len': 200,            # max length of each user's interaction sequence
    'dataset_path': 'ml-10m',  # dataset name/path consumed by dataload.DatasetLoader
    'kl_weight': 0.1,          # weight of the adversarial KL regularizer
    'contrast_weight': 0.05,   # weight of the contrastive adversary term
    'item_embed_size': 128,
    'rnn_size': 100,
    'hidden_size': 100,
    'latent_size': 64,
    'timesteps': 5,
    'hold_out_prop': 0.125,
    'test_prop': 0.2,
    'batch_size': 64,
    'split_type': 'ratio',
    'dataset_type': 'tcn',
    'train_ratio': 0.8,
    'splitter': '::',          # presumably the field separator of the raw ratings file — verify in dataload
    'min_rate': 3.5,
    'min_items': 5,
    'Ks': [1, 5, 10, 20],
    'evaluate_users': 10000,
    'use_fold': True,
    'device': 'cuda',
    'output_file': True
}

# NOTE(review): USE_FOLD, DEVICE and Ks below duplicate / diverge from the
# corresponding hyper_params entries ('use_fold', 'device', 'Ks'); the module-level
# names are the ones this script actually reads — keep them in sync when editing.
USE_FOLD = True
LR_PRIMAL = 1e-3     # AdamW lr for the primal (model) optimizer
LR_DUAL = 3e-4       # SGD lr for encoder + contrastive adversary
LR_PRIOR = 5e-4      # SGD lr for the prior adversary
L2_REGULAR = 1e-2    # weight decay on the primal optimizer
L2_ADVER = 1e-1      # weight decay on both adversary optimizers
DISCRIM_WEIGHT = 0.01
NCE_WEIGHT = hyper_params['seq_len'] * 0.0   # multiplied by 0.0 — effectively disabled
Ks = [1, 5, 10, 20, 50, 100]                 # evaluation cutoffs used by check()

USE_DISCRIMINATOR = False

BETA1 = 0.9
BETA2 = 0.999
TOTAL_STEP = 200000   # hard cap on optimizer steps across all epochs
BIG_EPOCH = 250       # max number of epochs
SMALL_EPOCH = 1
LR_DECAY_RATE = 0.9
WARMUP_STEP = 100
DATASET_LEN = TOTAL_STEP * hyper_params['batch_size']

MAX_POS = hyper_params['seq_len']
DEVICE = 'cuda'

CHECK_FREQ = 2        # run validation every CHECK_FREQ epochs

NEGATIVE_SAMPLE_CNT = 100

# One-line run summary written to the log header.
info_str = 'dataset:' + hyper_params['dataset_path'] + ' lr1:' + str(LR_PRIMAL) + ' lr2:' + str(
    LR_DUAL) + ' kl:' + str(hyper_params['kl_weight']) + ' contrast:' + str(hyper_params['contrast_weight']) + ' batch:' + str(hyper_params['batch_size']) + ' model: tcn'


# Parser: "-m/--msg" is a free-text run description embedded in the log filename.
parser = argparse.ArgumentParser(prog='train')
parser.add_argument("-m", "--msg", default="no description")
args = parser.parse_args()
train_msg = args.msg


# Setup Seed.
def setup_seed(seed):
    """Seed every RNG source (python, numpy, torch CPU and CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels: reproducibility at some speed cost.
    torch.backends.cudnn.deterministic = True


setup_seed(1111)


# Config logging module: one file per run, named by timestamp and run message.
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
local_time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
# FileHandler does not create intermediate directories; without this a fresh
# checkout crashes with FileNotFoundError before training starts.
os.makedirs('model_log', exist_ok=True)
handler = logging.FileHandler(
    "model_log/log_" + local_time_str + '_tcn_' + train_msg.replace(' ', '_') + ".txt")

handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

logger.info(train_msg)
logger.info(info_str)
logger.info('Using CUDA:' + os.environ['CUDA_VISIBLE_DEVICES'])

# Accelerate with CuDNN.
# torch.backends.cudnn.benchmark = True
# Build train/test DataLoaders from the project dataset helper.
dataloader = dataload.DatasetLoader(hyper_params)
train_dataloader = Data.DataLoader(
    dataset=dataloader.generate_train_data(),
    batch_size=hyper_params['batch_size'],
    shuffle=True
)
test_dataloader = Data.DataLoader(
    dataset=dataloader.generate_test_data(),
    batch_size=hyper_params['batch_size'],
    shuffle=True
)

# Build the model.
print('Building net...')
logger.info('Building net...')

# Primal model plus the two adversaries used by the KL and contrastive
# regularizers during training.
net = tcn_model.Model(hyper_params).to(DEVICE)
adversary = tcn_model.Adversary(hyper_params).to(DEVICE)
contrast_adversary = tcn_model.GRUAdversary(hyper_params).to(DEVICE)

print(net)
print('Net build finished.')
logger.info('Net build finished.')

# ---------------------------------------Optimizer-----------------------------------------
# Primal: AdamW over the whole model.  Dual: SGD over encoder + contrastive
# adversary (stepped together with the primal loss).  Prior: SGD over the
# KL adversary, stepped on its own detached loss.
optimizer_primal = torch.optim.AdamW([{
    'params': net.parameters(),
    'lr': LR_PRIMAL,
    'weight_decay': L2_REGULAR}
])
optimizer_dual = torch.optim.SGD([{
    'params': net.encoder.parameters(),
    'lr': LR_DUAL,
    'weight_decay': L2_ADVER
}, {
    'params': contrast_adversary.parameters(),
    'lr': LR_DUAL,
    'weight_decay': L2_ADVER
}
])
optimizer_prior = torch.optim.SGD([{
    'params': adversary.parameters(),
    'lr': LR_PRIOR,
    'weight_decay': L2_ADVER}
])


print('User datasets loaded and saved.')
logger.info('User datasets loaded and saved.')

# Global High: best validation metrics seen so far, one slot per cutoff in Ks.
high_hr = [0.0 for _ in range(len(Ks))]
high_ndcg = [0.0 for _ in range(len(Ks))]
high_mrr = [0.0 for _ in range(len(Ks))]


def generate_result_str(hr, ndcg, mrr):
    s = ''
    for i in range(len(Ks)):
        s += 'HR@' + str(Ks[i]) + ':' + str(hr[i]) + ' '
    s += '\n'

    for i in range(len(Ks)):
        s += 'NDCG@' + str(Ks[i]) + ':' + str(ndcg[i]) + ' '
    s += '\n'

    for i in range(len(Ks)):
        s += 'MRR@' + str(Ks[i]) + ':' + str(mrr[i]) + ' '
    return s


# ---------------------------------------------Check-------------------------------------
def check(dataloader, validate=True, evaluate_users=None):
    """Evaluate the model on `dataloader`, logging HR/NDCG/MRR at each cutoff in `Ks`.

    Args:
        dataloader: yields (batchx, batchy, batcht, padding, cur_cnt, user_id) batches.
        validate: True logs a VALIDATE header and updates the global best metrics
            (high_hr/high_ndcg/high_mrr); False logs a TEST header only.
        evaluate_users: optional cap on the number of users scored.

    Side effects: puts net/adversary into eval mode and leaves them there —
    the caller is responsible for restoring train mode.
    """
    if validate:
        print('Start validating...')
        logger.info('Start validating...')
    else:
        print('Start checking...')
        logger.info('Start checking...')

    net.eval()
    adversary.eval()
    with torch.no_grad():
        # Parameters: per-cutoff metric accumulators, averaged at the end.
        checked_users = 0
        cur_hr = [0.0 for _ in range(len(Ks))]
        cur_ndcg = [0.0 for _ in range(len(Ks))]
        cur_mrr = [0.0 for _ in range(len(Ks))]

        for _, (batchx, batchy, batcht, padding, cur_cnt, user_id) in enumerate(dataloader):
            batchx = batchx.to(DEVICE)
            batchy = batchy.to(DEVICE)
            padding = padding.to(DEVICE)
            user_id = user_id.to(DEVICE)
            cur_cnt = cur_cnt.to(DEVICE)

            # Forward.
            pred, x_real, z_inferred, embed_out = net(batchx)

            flag = False
            for i in range(batchy.shape[0]):
                if not USE_FOLD:
                    # Sample 100 negative items with itself.
                    # NOTE(review): normalized_item_popular is not defined anywhere
                    # in this file — this branch would raise NameError; confirm it
                    # is provided elsewhere before running with USE_FOLD = False.
                    rated = set(batchx[i].cpu().numpy())
                    item_idx = [batchy[i, cur_cnt[i] - 1].item()]

                    # Keep sampling until 100 negatives (plus the target) remain
                    # after filtering out already-rated items.
                    while len(item_idx) < 101:
                        sample_ids = torch.multinomial(
                            normalized_item_popular, num_samples=101).numpy()
                        sample_ids = [
                            x for x in sample_ids if x not in rated and x not in item_idx]
                        item_idx.extend(sample_ids[:])
                    item_idx = item_idx[: 101]

                    item_idx = torch.Tensor(item_idx).long()
                    cur_label = cur_cnt[i].item() - 1
                    cur_pred = pred[i, cur_label, item_idx]

                    # 1-based rank of the true item among the 101 candidates.
                    cur_rank = torch.argsort(
                        cur_pred, descending=True).argsort()[0].item() + 1

                    for j in range(len(Ks)):
                        if cur_rank <= Ks[j]:
                            cur_hr[j] += 1
                            cur_ndcg[j] += 1 / np.log2(cur_rank + 1)
                    # NOTE(review): only cur_mrr[0] accumulates here, unlike the
                    # fold branch which fills every cutoff slot — confirm intended.
                    cur_mrr[0] += 1 / cur_rank
                else:
                    for j, k in enumerate(Ks):
                        best, now_at, dcg, hits = 0.0, 0.0, 0.0, 0.0
                        # Mask items already in the input sequence (and padding id 0)
                        # so they cannot be recommended.  scatter_ mutates pred in
                        # place; repeating it per k rewrites the same values.
                        last_pred = pred[i, -1].scatter_(
                            dim=0, index=batchx[i], value=-10000000.0)
                        last_pred[0] = -10000000.0

                        # Top-k recommendation list for this user.
                        rec_list = list(torch.argsort(
                            last_pred, descending=True)[:k].cpu().numpy())

                        first_correct = sys.maxsize
                        for movie in batchy[i]:
                            movie = movie.item()
                            if movie == 0:
                                # Targets are 0-padded; stop at the first pad.
                                break
                            now_at += 1.0
                            if now_at <= k:
                                # Ideal DCG: as if the first min(k, |targets|)
                                # recommendations were all correct.
                                best += 1.0 / float(np.log2(now_at + 1))

                            if movie not in rec_list:
                                continue

                            hits += 1.0
                            dcg += 1.0 / \
                                float(np.log2(float(rec_list.index(movie) + 2)))
                            first_correct = min(
                                first_correct, rec_list.index(movie))

                        # NOTE(review): if a user's first target is padding,
                        # now_at/best stay 0.0 and these divisions raise
                        # ZeroDivisionError — presumably dataload guarantees at
                        # least one target per user; confirm.
                        cur_hr[j] += float(hits) / float(now_at)
                        cur_ndcg[j] += float(dcg) / float(best)
                        # With no hit, first_correct == sys.maxsize and the
                        # contribution is effectively zero.
                        cur_mrr[j] += 1.0 / (first_correct + 1)

                checked_users += 1
                if evaluate_users is not None and checked_users >= evaluate_users:
                    flag = True
                    break

            if flag:
                break

        # Average over evaluated users; track global bests when validating.
        for i in range(len(Ks)):
            cur_hr[i] /= checked_users
            cur_ndcg[i] /= checked_users
            cur_mrr[i] /= checked_users
            if validate:
                high_hr[i] = max(high_hr[i], cur_hr[i])
                high_ndcg[i] = max(high_ndcg[i], cur_ndcg[i])
                high_mrr[i] = max(high_mrr[i], cur_mrr[i])

        cur_str = generate_result_str(cur_hr, cur_ndcg, cur_mrr)

        print(cur_str)
        if validate:
            logger.info(
                f'--------------------VALIDATE RESULT--------------------')
        else:
            logger.info(
                f'----------------------TEST RESULT----------------------')
        logger.info(cur_str)

        if validate:
            high_str = generate_result_str(high_hr, high_ndcg, high_mrr)
            print(high_str)
            logger.info(high_str)


# -----------------------------------------------Train------------------------------------
def train():
    """Run the adversarial training loop.

    Each step performs two updates:
      1. PRIMAL: VAE loss + KL term + contrastive adversary loss, backpropagated
         once; both optimizer_primal (whole model) and optimizer_dual
         (encoder + contrast_adversary) step on these gradients.
      2. DUAL/PRIOR: the KL adversary is trained on detached activations via
         optimizer_prior.

    Validates every CHECK_FREQ epochs and stops after TOTAL_STEP steps or
    BIG_EPOCH epochs, whichever comes first, then runs a final test check.
    """
    print('Start training...')
    logger.info('Start training...')

    global_step = 0
    # Running averages of the loss components for periodic console/log output.
    mebank = loss_func.MetricBank()

    for epoch in range(BIG_EPOCH):
        net.train()

        for step, (batchx, batchy, batcht, padding, cur_cnt, user_id) in enumerate(train_dataloader):
            batchx = batchx.to(DEVICE)
            batchy = batchy.to(DEVICE)
            padding = padding.to(DEVICE)
            user_id = user_id.to(DEVICE)
            cur_cnt = cur_cnt.to(DEVICE)

            # Forward.
            optimizer_primal.zero_grad()
            optimizer_dual.zero_grad()
            pred, x_real, z_inferred, out_embed = net(batchx)

            # --------------------------PRIMAL---------------------------
            multi_loss = loss_func.vae_loss(
                pred, batchy, cur_cnt, padding, hyper_params)
            # anneal = global_step / TOTAL_STEP * 0.1

            kl_loss = loss_func.kl_loss(
                adversary, x_real, z_inferred, padding, KL_WEIGHT=hyper_params['kl_weight'])
            # shuffled_loss = loss_func.shuffled_loss(
            #     adversary, x_real, z_inferred, padding, SHUFFLED_WEIGHT=SHUFFLED_WEIGHT)

            adver_loss = loss_func.adversary_loss(
                contrast_adversary, x_real, z_inferred, padding, CONTRAST_WEIGHT=hyper_params['contrast_weight'])

            # Single backward pass; both primal and dual optimizers consume it.
            loss = multi_loss + adver_loss + kl_loss
            loss.backward()
            optimizer_primal.step()
            optimizer_dual.step()

            # --------------------------DUAL------------------------------
            # Train the KL adversary on detached tensors so its update cannot
            # backpropagate into the model.
            optimizer_prior.zero_grad()
            adver_kl_loss = loss_func.adversary_kl_loss(
                adversary, x_real.detach(), z_inferred.detach(), padding)
            adver_kl_loss.backward()
            optimizer_prior.step()

            mebank.store({'vae': multi_loss.item(), 'kl': kl_loss.item(), 'adver': adver_loss.item(
            ), 'prior': adver_kl_loss.item()})

            # Show Loss every 50 steps, then reset the running averages.
            if step % 50 == 0:
                print(
                    f'EPOCH:({epoch}/{BIG_EPOCH}),STEP:{global_step}/{TOTAL_STEP},vae:{mebank.get("vae")},kl:{mebank.get("kl")},contrast:{mebank.get("adver")},prior:{mebank.get("prior")}')
                logger.info(
                    f'EPOCH:({epoch}/{BIG_EPOCH}),STEP:{global_step}/{TOTAL_STEP},vae:{mebank.get("vae")},kl:{mebank.get("kl")},contrast:{mebank.get("adver")},prior:{mebank.get("prior")}')

                mebank.clear()

            global_step += 1

        # Check: periodic validation; check() leaves modules in eval mode, so
        # restore train mode afterwards.  NOTE(review): contrast_adversary's
        # train/eval mode is never toggled anywhere — confirm intended.
        if epoch % CHECK_FREQ == 0:
            check(dataloader=test_dataloader,
                  validate=True, evaluate_users=10000)
            # check(dataloader=test_dataloader, validate=False)
            net.train()
            adversary.train()

        if global_step >= TOTAL_STEP:
            # Check the result.
            # NOTE(review): when this early-stop triggers, the full test check
            # here is followed by the capped final check below — the test pass
            # runs twice.  Confirm whether that duplication is intended.
            check(dataloader=test_dataloader, validate=False)
            break

    check(dataloader=test_dataloader, validate=False, evaluate_users=10000)


# Main
if __name__ == '__main__':
    # Train the model. Any uncaught exception is printed and written to the
    # log file with its full traceback so failed runs leave a record.
    try:
        train()
        logger.info('Finished.')
    except Exception:
        err_info = traceback.format_exc()
        print(err_info)
        # Log at ERROR severity (the handler's format omits the level name,
        # so the emitted text is unchanged, but filtering/monitoring works).
        logger.error(err_info)
        logger.error('Error.')
