'''
Created on Mar 1, 2020
Pytorch Implementation of LightGCN in
Xiangnan He et al. LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation

@author: Jianbai Ye (gusye@mail.ustc.edu.cn)
'''

import os
from os.path import join
from result_log import Logger
import numpy as np
import torch
from enum import Enum
from parse import args
import multiprocessing
from datetime import datetime
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'


# --- Project directory layout (relative to the code/ working directory) ---
ROOT_PATH = "../"
CODE_PATH = join(ROOT_PATH, 'code')
DATA_PATH = join(ROOT_PATH,'data')
BOARD_PATH = join(CODE_PATH, 'runs')        # tensorboard event files
FILE_PATH = join(CODE_PATH, 'checkpoints')  # saved model weights
LOG_PATH = join(CODE_PATH,'logs')           # text logs written via Logger
import sys
sys.path.append(join(CODE_PATH, 'sources'))  # make code/sources importable
early_stop = args.early_stop  # early-stopping patience/flag taken from the CLI

# Ensure the checkpoint directory exists before any model save.
if not os.path.exists(FILE_PATH):
    os.makedirs(FILE_PATH, exist_ok=True)
path = os.path.join(DATA_PATH,args.dataset)
# Item ids considered "long tail" for the chosen dataset, loaded once at import.
# NOTE(review): assumes data/<dataset>/long_tail_ids.npy exists — import fails otherwise.
long_tail_doc = set(np.load(os.path.join(path,'long_tail_ids.npy')))

# Number of documents/items; left unset here and filled in elsewhere at runtime.
n_doc = None

# Module-level accumulators appended to during training/evaluation
# (curve data and per-evaluation metric values).
loss_x, loss_y = [], []
recall_y, precision, ndcg_y = [], [], []
test_x = []
tail, tail_coverage, tail_recall = [], [], []
hild, coverage = [], []
# Whether to display plots/output, mirrored from the CLI arguments.
is_show = args.is_show

# Supported datasets and model names (kept for reference / optional validation).
all_dataset = ['lastfm', 'gowalla', 'yelp2018', 'amazon-book','doctor','order_data','ask_data']
all_models  = ['mf', 'lgn']

# Hyper-parameter configuration assembled from the parsed CLI arguments.
config = {
    'vaerate': args.vaerate,
    'bpr_batch_size': args.bpr_batch,
    'latent_dim_rec': args.recdim,
    'latent_dim_vae': args.vaedim,
    'lightGCN_n_layers': args.layer,
    'dropout': args.dropout,
    'keep_prob': args.keepprob,
    'A_n_fold': args.a_fold,
    'test_u_batch_size': args.testbatch,
    'multicore': args.multicore,
    'lr': args.lr,
    'decay': args.decay,
    'cl_rate': args.cl_rate,
    'pretrain': args.pretrain,
    'A_split': False,
    'bigdata': False,
    't': args.t,
}

# --- Runtime / experiment settings mirrored from the CLI ---
GPU = torch.cuda.is_available()
device = torch.device('cuda' if GPU else "cpu")
CORES = multiprocessing.cpu_count() // 2  # worker count for multiprocessing evaluation
seed = args.seed

dataset = args.dataset
model_name = args.model
# if dataset not in all_dataset:
#     raise NotImplementedError(f"Haven't supported {dataset} yet!, try {all_dataset}")
# if model_name not in all_models:
#     raise NotImplementedError(f"Haven't supported {model_name} yet!, try {all_models}")
TRAIN_epochs = args.epochs
LOAD = args.load
PATH = args.path
# SECURITY NOTE(review): eval() executes arbitrary Python from the CLI string.
# If --topks is always a literal list such as "[20]", ast.literal_eval is a
# drop-in, safe replacement — confirm no caller relies on expression syntax.
topks = eval(args.topks)
tensorboard = args.tensorboard
comment = args.comment
# let pandas shut up
from warnings import simplefilter
simplefilter(action="ignore", category=FutureWarning)

# One log file per run, named <timestamp>_<dataset>_<model>, plus a shared
# "result" log; the full config is recorded at startup for reproducibility.
task_name = "%s_%s_%s" % (datetime.now().strftime('%Y-%m-%d-%H-%M-%S'), args.dataset,args.model)
pro_logger = Logger(filename=task_name,path=LOG_PATH)
pro_logger.logging(str(config))
result_logger = Logger(filename="result",path=LOG_PATH)

def cprint(words: str):
    """Print *words* highlighted with ANSI colors (black text, yellow background)."""
    highlighted = "\033[0;30;43m" + words + "\033[0m"
    print(highlighted)

# ASCII-art startup banner ("LGN"); printing is currently disabled below.
logo = r"""
██╗      ██████╗ ███╗   ██╗
██║     ██╔════╝ ████╗  ██║
██║     ██║  ███╗██╔██╗ ██║
██║     ██║   ██║██║╚██╗██║
███████╗╚██████╔╝██║ ╚████║
╚══════╝ ╚═════╝ ╚═╝  ╚═══╝
"""
# font: ANSI Shadow
# refer to http://patorjk.com/software/taag/#p=display&f=ANSI%20Shadow&t=Sampling
# print(logo)
