import os
import time
import torch
import argparse
import pickle
import torch.nn.functional as F
from model import *
from dataset import *
from utils import *
from tqdm import tqdm
from mylog import *
import pdb
import gc

# ---- Command-line / YAML configuration ---------------------------------
# A single --config argument points at a YAML file; every key/value in that
# file is copied onto the argparse namespace so the rest of the script can
# read args.<key> uniformly.
parser = argparse.ArgumentParser()
parser.add_argument('--config', default="./task_yamls/distill.yaml", type=str)
args = parser.parse_args()
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
# args.device='cpu'
ml = MyLog("info.log")
config = ml.read_yaml(args.config)
for key, value in config.items():
    setattr(args, key, value)
# ml.write_disable()
ml.init(os.path.abspath(__file__))

if not os.path.isdir(args.train_dir):
    os.makedirs(args.train_dir)
# Dump the fully-resolved arguments (sorted by key) for reproducibility.
# Fixed: removed the redundant f.close() that followed this `with` block —
# the context manager already closes the file.
with open(os.path.join(args.train_dir, 'args.txt'), 'w') as f:
    f.write('\n'.join([str(k) + ',' + str(v)
            for k, v in sorted(vars(args).items(), key=lambda x: x[0])]))
u2i_index, i2u_index = build_index(args.dataset)

# Global dataset split: (user_train, user_valid, user_test, usernum, itemnum).
dataset = data_partition(args.dataset)
user_train, user_valid, user_test, usernum, itemnum = dataset

# Ceiling division so the trailing partial batch is still counted.
num_batch = (len(user_train) - 1) // args.batch_size + 1

# Quick sanity check: report the mean training-sequence length.
cc = 0.0 + sum(len(seq) for seq in user_train.values())
print('average sequence length: %.2f' % (cc / len(user_train)))

# Resuming appends to the epoch log; a fresh run truncates it and writes a header.
# NOTE(review): `f` is deliberately kept open for the whole training run (used
# inside the epoch loop) and closed at the end of the script, hence no `with`.
if args.state_dict_path is not None:
    f = open(os.path.join(args.train_dir, 'log.txt'), 'a')
else:
    f = open(os.path.join(args.train_dir, 'log.txt'), 'w')
    f.write('epoch (val_ndcg, val_hr) (test_ndcg, test_hr)\n')

sampler = WarpSampler(user_train, usernum, itemnum,
                        batch_size=args.batch_size, maxlen=args.maxlen, n_workers=3)
# no ReLU activation in original SASRec implementation?
model = SASRec(usernum, itemnum, args).to(args.device)

# Xavier-initialise every weight matrix of the student model.
# Fixed: narrowed the bare `except:` (which also swallowed KeyboardInterrupt /
# SystemExit) to ValueError — xavier_normal_ raises ValueError for parameters
# with fewer than 2 dimensions (biases, LayerNorm weights), which are
# intentionally left at their default initialisation.
for name, param in model.named_parameters():
    try:
        torch.nn.init.xavier_normal_(param.data)
    except ValueError:
        pass  # 1-D parameters (biases etc.) keep their default init

# Index 0 is the padding id for both positions and items — zero its embedding.
model.pos_emb.weight.data[0, :] = 0
model.item_emb.weight.data[0, :] = 0
#1.加载教师模型
base_model = args.base_model
data = args.data
model_path=args.model_path
maxlen = args.maxlen
item_embed_hidden_units = args.item_embed_hidden_units
use_pretrained=args.use_pretrained
lora_path=args.lora_path
dataset2 = SeqDataset("./data/"+data,maxlen=maxlen)
dataset3 = SASDataset("./data/"+data,item_max=dataset2.item_max,maxlen=maxlen)
print(dataset2.item_max)
print(dataset3.train_data[-1])
train_dataloader=DataLoader(dataset3.train_data,batch_size=args.batch_size,shuffle=True,collate_fn=collate_fn4)
item_embed = pickle.load(open(args.item_embed_path, 'rb'))
# pdb.set_trace()
# item_embed = torch.load("./embedding/item_embs.pkl", map_location=torch.device('cpu'))
# item_embed=None
teacher_model = RecSys2(output_dim=dataset2.item_max,
            input_dim=item_embed_hidden_units,
            base_model=base_model,
            item_embed=item_embed,
            model_path=model_path,
            use_pretrained=use_pretrained,
            lora_path=lora_path,
            cache_dir=model_path,
            device=args.device)
if use_pretrained:
    model_path = os.path.join(lora_path, "adapter.pth")
    teacher_model.load_from_checkpoint(model_path)

llm_config = AutoConfig.from_pretrained(base_model, output_hidden_states=True)
llm_hidden_size=llm_config.hidden_size
print("llm_hidden_size",llm_hidden_size)

# this fails embedding init 'Embedding' object has no attribute 'dim'
# model.apply(torch.nn.init.xavier_uniform_)

model.train()  # enable model training
teacher_model.eval()  # teacher is frozen; only the student is optimised
epoch_start_idx = 1
if args.state_dict_path is not None:
    try:
        model.load_state_dict(torch.load(args.state_dict_path, map_location=torch.device(args.device)))
        # Fixed: removed stray debug print("1111").
        # Resume from the epoch encoded in the filename,
        # e.g. "SASRec.epoch=42.lr=...": take the digits between
        # 'epoch=' and the next '.'.
        tail = args.state_dict_path[args.state_dict_path.find(
            'epoch=') + 6:]
        epoch_start_idx = int(tail[:tail.find('.')]) + 1
    except Exception as e:  # best effort: fall back to training from scratch
        print('failed loading state_dicts, pls check file path: ')
        print(e)
        print(
            'pdb enabled for your quick check, pls type exit() if you do not need it')
        # import pdb; pdb.set_trace()

bce_criterion = torch.nn.BCEWithLogitsLoss()  # torch.nn.BCELoss()
adam_optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.98))

# ---- Distillation bookkeeping ------------------------------------------
best_val_ndcg, best_val_hr = 0.0, 0.0
best_test_ndcg, best_test_hr = 0.0, 0.0
T = 0.0  # accumulated wall-clock training time in seconds
t0 = time.time()
kl_f=nn.KLDivLoss(reduction='batchmean')
temp = 0.2# distillation temperature
alpha = 0.5# distillation scaling factor (weight of the soft loss)
teacher_logit_dict={} # teacher's last hidden state (next-item feature vector); populated nowhere in this file
pred_logit_dict={} # teacher's final classification-layer output; populated nowhere in this file
# Per-user cache of teacher prediction logits, indexed by user id.
# NOTE(review): list multiplication makes every row alias ONE shared list;
# this is only safe because rows are always replaced wholesale in the loop
# below, never mutated in place — confirm before refactoring.
pred_logit_list=[[0.0]*dataset2.item_max]*(usernum+1) # (original note: 106591 is the last id in the training set)
flag=False  # set True once the teacher has been deleted and the cache frozen
all_loss=0  # running loss sum between evaluations
# progress_bar = tqdm(range(epoch_start_idx, args.num_epochs + 1),position=0)
# progress_bar.set_description(f'loss: {0:>7f}')
# ------------------------------------------------------------------------
# Main distillation loop.
#   hard loss : BCE on positive/negative next-item logits (standard SASRec)
#   soft loss : KL divergence between temperature-scaled student and
#               teacher distributions over all items
# For the first 4 epochs after epoch_start_idx the teacher runs live and its
# per-user logits are cached; afterwards the teacher is deleted and the
# cached logits are served instead.
# ------------------------------------------------------------------------
for epoch in (range(epoch_start_idx, args.num_epochs + 1)):
    # import pdb;pdb.set_trace();
    if args.inference_only:
        break  # just to decrease identition
    # tqdm(range(num_batch), total=num_batch, ncols=70, leave=False, unit='b'):
    print("Epoch:",epoch)
    progress_bar2 = tqdm(range(num_batch),position=0)
    progress_bar2.set_description(f'loss: {0:>7f}')
    for step in range(num_batch):
        u, seq, pos, neg = sampler.next_batch()  # tuples to ndarray
        progress_bar2.update(1)
        u, seq, pos, neg = np.array(u), np.array(seq), np.array(pos), np.array(neg)
        # Positions where pos == 0 are padding; they are excluded from the loss.
        indices = np.where(pos != 0)
        # pos=np.abs(pos)
        pos_logits, neg_logits, student_score = model(u, seq, pos, neg)
        pos_logits, neg_logits = pos_logits.to(args.device),neg_logits.to(args.device)
        pos_labels, neg_labels = torch.ones(pos_logits.shape, device=args.device), torch.zeros(neg_logits.shape, device=args.device)
        adam_optimizer.zero_grad()

        # Hard targets: BCE on observed (pos) vs. negatively-sampled (neg) items.
        hard_loss = bce_criterion(pos_logits[indices], pos_labels[indices])# only the predictions at non-padding positions are used
        hard_loss += bce_criterion(neg_logits[indices], neg_labels[indices])
        # Add the soft targets (distillation loss):
        # 1. obtain the teacher model's output
        if (epoch-epoch_start_idx)>=4:
            # Warm-up over: drop the teacher and serve cached logits from now on.
            if not flag:
                pred_logit_list=np.array(pred_logit_list)
                del teacher_model
                torch.cuda.empty_cache()  # free cached GPU memory
                gc.collect()  # force garbage collection
                flag=True
            # NOTE(review): users never sampled during the warm-up epochs keep
            # all-zero cached logits — confirm this is intended.
            pred_logit=pred_logit_list[u]
            pred_logit=torch.tensor(pred_logit).to(args.device)
        else:
            with torch.no_grad():
                try:
                    inp=torch.tensor(seq,dtype=torch.int).to(args.device)
                    msk=(inp!=0).to(args.device)
                    teacher_logit, pred_logit=teacher_model(inp,msk)# teacher_logit is the last hidden layer's output; it has not passed through the final linear scoring layer
                    # teacher_logit_dict.update({
                    #         int(id_): logits.tolist() 
                    #         for id_, logits in zip(u, teacher_logit)
                    #     })
                    # Cache this batch's teacher logits per user id for reuse
                    # once the teacher is dropped.
                    for id_, logits in zip(u, pred_logit):
                        pred_logit_list[int(id_)] = logits.tolist()
                except Exception as e:
                    print(e)
                    pdb.set_trace()

        # 2. KL between temperature-scaled student log-probs and teacher probs.
        # NOTE(review): the conventional temp**2 gradient-scaling factor is
        # absent here — confirm this is intentional.
        student_softmax = F.log_softmax(student_score / temp, dim=1)
        teacher_softmax = F.softmax(pred_logit / temp, dim=1)
        soft_loss = kl_f(student_softmax,teacher_softmax)# no scorer module

        loss=soft_loss*alpha + hard_loss
        # print(soft_loss*alpha, hard_loss)
        # L2 regularisation on the item-embedding table only.
        for param in model.item_emb.parameters():
            loss += args.l2_emb * torch.norm(param)
        loss.backward()
        adam_optimizer.step()
        all_loss+=loss.item()
        progress_bar2.set_description(f'loss: {loss.item():>7f}')
    if epoch % args.eval_num == 0:
        model.eval()
        t1 = time.time() - t0
        T += t1
        # NOTE(review): divisor 100 looks like a hard-coded batch count; the
        # log line written to `f` below divides by num_batch * eval_num
        # instead — confirm which is the intended average.
        ml.info_write(f"loss:{all_loss/100}")
        print('Evaluating', end='')
        t_test = sasrec_evaluate_all_easy(model, dataset, args)
        # t_valid = evaluate_valid(model, dataset, args)
        t_valid = [1.0,1.0]  # validation disabled; placeholder values
        ml.info_write('epoch:%d, time: %f(s), valid (NDCG@10: %.4f, HR@10: %.4f), test (NDCG@10: %.4f, HR@10: %.4f)'
                % (epoch, T, t_valid[0], t_valid[1], t_test[0], t_test[1]))

        if t_valid[0] > best_val_ndcg or t_valid[1] > best_val_hr or t_test[0] > best_test_ndcg or t_test[1] > best_test_hr :# save a checkpoint whenever any metric improves
            best_val_ndcg = max(t_valid[0], best_val_ndcg)
            best_val_hr = max(t_valid[1], best_val_hr)
            best_test_ndcg = max(t_test[0], best_test_ndcg)
            best_test_hr = max(t_test[1], best_test_hr)
            folder = args.train_dir
            fname = 'SASRec.epoch={}.lr={}.layer={}.head={}.hidden={}.maxlen={}.pth'
            fname2 = 'Classify.epoch={}.lr={}.layer={}.head={}.hidden={}.maxlen={}.pth'
            fname = fname.format(
                epoch, args.lr, args.num_blocks, args.num_heads, args.hidden_units, args.maxlen)
            fname2 = fname2.format(
                epoch, args.lr, args.num_blocks, args.num_heads, args.hidden_units, args.maxlen)
            # NOTE(review): fname2 is formatted but never saved — dead code?
            torch.save(model.state_dict(), os.path.join(folder, fname))

        f.write(str(epoch) + ' ' + str(t_valid) + ' ' + str(t_test) +'\tloss='+str(all_loss/num_batch/args.eval_num) +'\n')
        f.flush()
        all_loss=0
        t0 = time.time()
        model.train()

    # Always save a final checkpoint after the last epoch.
    if epoch == args.num_epochs:
        folder = args.train_dir
        fname = 'SASRec.epoch={}.lr={}.layer={}.head={}.hidden={}.maxlen={}.pth'
        fname = fname.format(args.num_epochs, args.lr, args.num_blocks,
                                args.num_heads, args.hidden_units, args.maxlen)
        torch.save(model.state_dict(), os.path.join(folder, fname))

# Tear down: stop the sampler worker processes, close the epoch log, and
# record completion. (The two closes are independent of each other.)
sampler.close()
f.close()
ml.info_write("Done")
