import time

import torch
from tqdm import tqdm

from meantime.models import model_factory
from meantime.dataloaders import dataloader_factory
from meantime.trainers import trainer_factory
from meantime.utils import *
from meantime.config import *
from meantime.trainers.utils import recall, ndcg

from meantime.options import parse_args
from dotmap import DotMap
import os
import sys
from typing import List
import yaml

# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Make the project root importable so `meantime.*` resolves regardless of
# the working directory this script is launched from.
project_path = os.path.dirname(__file__)
sys.path.append(project_path)

print("项目路径：", project_path)


# Load the experiment configuration template.
template_name = "train_mmeantime2"
# Use a context manager so the file handle is closed deterministically
# (the original left it open), and let os.path.join supply the separator
# instead of concatenating '/templates' onto the path by hand.
with open(os.path.join(project_path, 'templates', f'{template_name}.yaml'), encoding='utf-8') as config_file:
    args = yaml.safe_load(config_file)

# sys_argv = sys.argv[1:]
# Wrap the YAML dict in a DotMap for attribute-style access; `_dynamic=False`
# makes typo'd attribute reads raise instead of silently creating keys.
args = DotMap(args, _dynamic=False)

# Override template defaults for an evaluation-only run.
args.mode = "val"
for batch_attr in ("val_batch_size", "test_batch_size", "train_batch_size"):
    setattr(args, batch_attr, 64)

print(args)

# Build the train/val/test dataloaders from the configuration.
train_loader, val_loader, test_loader = dataloader_factory(args)


model = model_factory(args)

# Build the checkpoint path with os.path.join so it resolves on any OS
# (the original hard-coded Windows backslashes, which breaks on POSIX).
args.pretrained_weights = os.path.join(
    "experiments", "test", "mmeantime2", "models", "best_model.pth"
)
# NOTE(review): the guard is always True here since the path was just
# assigned above; kept for parity with the template-driven flow.
if args.pretrained_weights is not None:
    model.load(args.pretrained_weights)
    print("已成功加载训练好的模型")

# Move to the selected device and switch to inference mode (disables
# dropout and uses running statistics in normalization layers).
model = model.to(device)
model.eval()
# print(model)


# def generate_timestamp_matrix(rows, cols):
#     timestamp_matrix = []
#     current_time = int(time.time())  # 获取当前时间戳
#     for _ in range(rows):
#         row = []
#         for _ in range(cols):
#             random_offset = random.randint(0, 3600 * 24 * 365)  # 生成随机时间偏移量（最多一年的秒数）
#             timestamp = current_time - random_offset
#             row.append(timestamp)
#         timestamp_matrix.append(row)
#     return timestamp_matrix
# Index of the example within the batch to inspect in detail.
index = 1

# Running count of evaluated instances (accumulated per batch below).
num_instance = 0

# Evaluate on the test split without tracking gradients.
with torch.no_grad():
    tqdm_dataloader = tqdm(test_loader)
    for batch_idx, batch in enumerate(tqdm_dataloader):
        # Move every tensor in the batch dict to the active device.
        batch = {k: v.to(device) for k, v in batch.items()}

        # All tensors in the batch share the leading batch dimension, so
        # any value works for reading the batch size.
        batch_size = next(iter(batch.values())).size(0)
        num_instance += batch_size

        res = model(batch)

        labels = batch["labels"]
        print(labels.shape)
        scores = res["scores"]
        # The original label said "recal@5" while k=10 was passed to
        # recall(); report the metric with the k actually used.
        print("recall@10", recall(scores, labels, 10))

        # Rank candidate items by descending score and show the top-10
        # candidate ids for the inspected example.
        rank = (-scores).argsort(dim=1)
        print(rank[index])
        print(batch["candidates"][index][rank[index][:10]])

        # Inspect only the first batch.
        break
#
# import os
# import sys
# import torch
# import pickle
#
# import pandas as pd
# import yaml
# from dotmap import DotMap
#
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#
#
# def load_args():
#     project_path = os.path.dirname(__file__)
#
#     print("项目路径：", project_path)
#
#     template_name = "train_meantime"
#     args = yaml.safe_load(open(os.path.join(project_path + '/templates', f'{template_name}.yaml')))
#
#     args = DotMap(args, _dynamic=False)
#     args.mode = "val"
#     # args.absolute_kernel_types = "p-g"
#     # args.add_genre = True
#     return args
#
#
# # def personalization_dataset()
#
# def load_dataset(path):
#     with open(path, 'rb') as fp:
#         dataset = pickle.load(fp)
#     return dataset
#
# if __name__ == '__main__':
#     dataset = load_dataset('Data/preprocessed/ml-1m_min_rating0-min_uc5-min_sc5-splitleave_one_out/dataset.pkl')
#     index2sid = {index: sid for sid, index in dataset["smap"].items()}
#     # print(index2sid)
#     context_path = "Data/ml-1m/movies.dat"
#     context_df = pd.read_csv(context_path, sep='::', header=None, encoding="ISO-8859-1", engine='python')
#     context_df.columns = ['sid', 'title', 'genres']
#     sid = context_df["sid"]
#     titles = context_df["title"]
#     sid2item = {sid.iloc[i]: titles.iloc[i] for i in range(len(context_df))}
    # print(sid2item)



