import os
import time
import torch
import argparse
from utils.util import *
from data.dataset import *
from torch.utils.data import DataLoader
from models.baseline import SASRec
from tqdm import tqdm
import multiprocessing as mp

def train(args):
    """Train SASRec with BCE loss over positive/negative items, validating each epoch.

    Loads the train/valid splits described by ``args``, trains for
    ``args.num_epochs`` epochs with AdamW + a warmup-cosine schedule, and
    saves the model state dict to ``args.save_path`` whenever validation
    HR@10 improves.

    Args:
        args: parsed CLI namespace; must provide at least ``batch_size``,
            ``lr``, ``num_epochs``, ``device``, ``feat`` and ``save_path``.
    """
    # 1. Load datasets.
    dataset=MyDataset(args)
    user_num=dataset.user_num # e.g. 6400 users
    item_num=dataset.item_num # e.g. 3952 ids (only 3883 actual movie records)
    train_data=TrainSubset(args)
    valid_data=ValidSubset(args)
    train_loader=DataLoader(dataset=train_data, shuffle=True, batch_size=args.batch_size, collate_fn=collate_fn)#, num_workers=4, pin_memory=True, persistent_workers=True
    valid_loader=DataLoader(dataset=valid_data, shuffle=False, batch_size=args.batch_size, collate_fn=collate_fn)#, num_workers=4, pin_memory=True, persistent_workers=True

    # 2. Build and initialise the model.
    model=SASRec(user_num, item_num, args)
    model=model_init(model)
    model.to(args.device)

    # 3. Training configuration.
    optimizer=torch.optim.AdamW(model.parameters(), lr=args.lr, betas=(0.9, 0.98))
    bce_criterion = torch.nn.BCEWithLogitsLoss()

    # 4. Training loop.
    global_step=1
    best_hr=-1
    best_epoch=-1
    for epoch in range(1, args.num_epochs+1):
        model.train()
        print(f"Epoch {epoch}:")
        for seq,pos,neg,user_feat,item_feat in tqdm(train_loader):
            item_feat=None
            user_feat=None if args.feat==0 else user_feat
            # Per-step LR schedule: linear warmup for half an epoch, then cosine decay.
            warmup_cosine(optimizer=optimizer, current_epoch=global_step, max_epoch=len(train_loader)*args.num_epochs, lr_min=args.lr/10,lr_max=args.lr,warmup_epoch=len(train_loader)//2)
            seq = seq.to(args.device, non_blocking=True)
            pos = pos.to(args.device, non_blocking=True)
            neg = neg.to(args.device, non_blocking=True)
            pos_logits, neg_logits = model(seq, pos, neg, user_feat, item_feat)
            pos_labels, neg_labels = torch.ones(pos_logits.shape, device=args.device), torch.zeros(neg_logits.shape, device=args.device)
            optimizer.zero_grad()
            # Mask out padding positions (item id 0) from the loss.
            indices = torch.where(pos != 0)
            loss = bce_criterion(pos_logits[indices], pos_labels[indices])
            loss += bce_criterion(neg_logits[indices], neg_labels[indices])
            loss.backward()
            # BUGFIX: clip gradients BEFORE the optimizer step — the original
            # clipped after step(), which had no effect on the update.
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()
            global_step+=1
        print("Learning rate is ", optimizer.param_groups[0]['lr'])

        # 5. Validation.
        print("Begin evaling...")
        model.eval()
        valid_loss=0.0
        hr=0
        # no_grad: evaluation needs no autograd graph; also accumulate
        # valid_loss as a Python float so graphs are not retained across batches.
        with torch.no_grad():
            for seq,pos,neg,user_feat,item_feat in tqdm(valid_loader):
                # 5.1 BCE loss on the validation split.
                item_feat=None
                user_feat=None if args.feat==0 else user_feat
                seq, pos, neg = drop_zero(seq), drop_zero(pos), drop_zero(neg)
                seq = seq.to(args.device, non_blocking=True)
                pos = pos.to(args.device, non_blocking=True)
                neg = neg.to(args.device, non_blocking=True)
                pos_logits, neg_logits = model(seq, pos, neg, user_feat, item_feat)
                pos_labels, neg_labels = torch.ones(pos_logits.shape, device=args.device), torch.zeros(neg_logits.shape, device=args.device)
                indices = torch.where(pos != 0)
                valid_loss += bce_criterion(pos_logits[indices], pos_labels[indices]).item()
                valid_loss += bce_criterion(neg_logits[indices], neg_labels[indices]).item()
                # 5.2 HR@10.
                hr += cal_hr(model, seq, pos, item_num=item_num, user_feat=user_feat, item_feat=item_feat, k=10)
        print("Valid loss is:",valid_loss/len(valid_loader))
        valid_hr=hr/len(valid_loader)
        print("Valid HR is:", valid_hr)
        # Checkpoint on improvement.
        if valid_hr>best_hr:
            best_hr=valid_hr
            best_epoch=epoch
            torch.save(model.state_dict(),args.save_path)

def test(args):
    """Evaluate a saved checkpoint on the test split and print HR@10.

    Args:
        args: parsed CLI namespace; must provide ``batch_size``, ``device``,
            ``feat`` and ``save_path`` (path of the checkpoint to load).
    """
    test_data=TestSubset(args)
    test_loader=DataLoader(dataset=test_data, shuffle=False, batch_size=args.batch_size, collate_fn=collate_fn)
    user_num=test_data.user_num # e.g. 6400 users
    item_num=test_data.item_num
    model=SASRec(user_num, item_num, args)
    print(args.save_path)
    # BUGFIX: map_location lets a GPU-saved checkpoint load on any device
    # (the original crashed on CPU-only machines).
    model.load_state_dict(torch.load(args.save_path, map_location=args.device))
    model.to(args.device)
    model.eval()

    hr=0
    # no_grad: inference only — no autograd graph needed.
    with torch.no_grad():
        for seq,pos,neg,user_feat,item_feat in tqdm(test_loader):
            item_feat=None
            user_feat=None if args.feat==0 else user_feat
            seq, pos, neg = drop_zero(seq), drop_zero(pos), drop_zero(neg)
            seq = seq.to(args.device, non_blocking=True)
            pos = pos.to(args.device, non_blocking=True)
            neg = neg.to(args.device, non_blocking=True)
            hr += cal_hr(model, seq, pos, item_num=item_num, user_feat=user_feat, item_feat=item_feat, k=10)

    print("Test HR is ", hr/len(test_loader))


if __name__=="__main__":
    args=get_args()
    # DataLoader worker processes must use 'spawn' when CUDA is involved.
    mp.set_start_method('spawn')
    # args.type may request both phases, e.g. "train_test".
    if 'train' in args.type:
        train(args)
    if 'test' in args.type:
        test(args)

