"""
SASRec模型的训练和评估主程序
实现了模型的训练、验证和测试流程
"""

import os
import time
import torch
import argparse
import numpy as np
from datetime import datetime
from model import SASRec
from utils import *


def str2bool(s):
    """
    Convert a command-line string to a boolean.

    Accepts "true"/"false" in any letter case (the original accepted
    lowercase only; case-folding is a backward-compatible convenience
    for argparse users typing "True"/"False").

    Args:
        s: input string, e.g. "true", "False".

    Returns:
        bool: the parsed boolean value.

    Raises:
        ValueError: if *s* is not a recognized boolean string.
    """
    normalized = s.lower()
    if normalized not in {"false", "true"}:
        raise ValueError("不是一个有效的布尔字符串")
    return normalized == "true"


# Configure command-line arguments.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default="ml-1m", type=str)  # dataset name
parser.add_argument("--train_dir", default="output", type=str)  # training output directory
parser.add_argument("--batch_size", default=128, type=int)  # batch size
parser.add_argument("--lr", default=0.001, type=float)  # learning rate
parser.add_argument("--maxlen", default=200, type=int)  # maximum sequence length
parser.add_argument("--hidden_units", default=50, type=int)  # hidden units
parser.add_argument("--num_blocks", default=2, type=int)  # number of Transformer blocks
parser.add_argument("--num_epochs", default=100, type=int)  # number of training epochs
parser.add_argument("--num_heads", default=1, type=int)  # number of attention heads
parser.add_argument("--dropout_rate", default=0.2, type=float)  # dropout rate
parser.add_argument("--l2_emb", default=0.0, type=float)  # L2 regularization weight
parser.add_argument("--device", default="cpu", type=str)  # compute device
parser.add_argument("--inference_only", default=False, type=str2bool)  # run inference only
parser.add_argument("--state_dict_path", default=None, type=str)  # model checkpoint path

args = parser.parse_args()

# Create the per-run loss/eval output directory.
# Fix: previously hard-coded to "ml-1m_output" even though --dataset is
# configurable; derive it from args.dataset so runs on other datasets do
# not all write into the ml-1m directory.
output_dir = f"{args.dataset}_output"
os.makedirs(output_dir, exist_ok=True)

# Timestamped file collecting per-step losses and evaluation results.
current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
output_file = os.path.join(output_dir, f"routingTransformer_{current_time}.txt")

# Create the training directory and persist the full argument set, one
# "key,value" pair per line, sorted by argument name.
train_dir = args.dataset + "_" + args.train_dir
os.makedirs(train_dir, exist_ok=True)
with open(os.path.join(train_dir, "args.txt"), "w") as f:
    f.write(
        "\n".join(
            str(k) + "," + str(v)
            for k, v in sorted(vars(args).items(), key=lambda x: x[0])
        )
    )

if __name__ == "__main__":
    # 构建用户-物品和物品-用户的索引
    u2i_index, i2u_index = build_index(args.dataset)

    # 加载并划分数据集
    dataset = data_partition(args.dataset)
    [user_train, user_valid, user_test, usernum, itemnum] = dataset

    # 计算批次数量
    num_batch = (len(user_train) - 1) // args.batch_size + 1

    # 计算平均序列长度
    cc = 0.0
    for u in user_train:
        cc += len(user_train[u])
    print("average sequence length: %.2f" % (cc / len(user_train)))

    # 创建日志文件
    f = open(os.path.join(args.dataset + "_" + args.train_dir, "log.txt"), "w")
    f.write("epoch (val_ndcg, val_hr) (test_ndcg, test_hr)\n")
    f.close()

    # 初始化数据采样器
    sampler = WarpSampler(
        user_train,
        usernum,
        itemnum,
        batch_size=args.batch_size,
        maxlen=args.maxlen,
        n_workers=3,
    )

    # 初始化模型
    model = SASRec(usernum, itemnum, args).to(args.device)

    # 初始化模型参数
    for name, param in model.named_parameters():
        try:
            torch.nn.init.xavier_normal_(param.data)
        except:
            pass  # 忽略初始化失败的层

    # 将padding位置的嵌入向量设为0
    model.pos_emb.weight.data[0, :] = 0
    model.item_emb.weight.data[0, :] = 0

    model.train()  # 设置为训练模式

    # 如果提供了预训练模型权重，则加载
    epoch_start_idx = 1
    if args.state_dict_path is not None:
        try:
            model.load_state_dict(
                torch.load(args.state_dict_path, map_location=torch.device(args.device))
            )
            tail = args.state_dict_path[args.state_dict_path.find("epoch=") + 6 :]
            epoch_start_idx = int(tail[: tail.find(".")]) + 1
        except:
            print("加载state_dicts失败，请检查文件路径： ", end="")
            print(args.state_dict_path)
            print("为快速检查启用了PDB，如果不需要，请键入exit()")
            import pdb

            pdb.set_trace()

    # 如果只进行推理，则评估模型并退出
    if args.inference_only:
        model.eval()
        t_test = evaluate(model, dataset, args)
        print("test (NDCG@10: %.4f, HR@10: %.4f)" % (t_test[0], t_test[1]))

    # 定义损失函数和优化器
    bce_criterion = torch.nn.BCEWithLogitsLoss()
    adam_optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.98))

    # 记录最佳验证集和测试集性能
    best_val_ndcg, best_val_hr = 0.0, 0.0
    best_test_ndcg, best_test_hr = 0.0, 0.0
    T = 0.0
    t0 = time.time()

    # 开始训练循环
    for epoch in range(epoch_start_idx, args.num_epochs + 1):
        if args.inference_only:
            break

        # 训练一个epoch
        for step in range(num_batch):
            # 获取一个批次的数据
            u, seq, pos, neg = sampler.next_batch()
            u, seq, pos, neg = np.array(u), np.array(seq), np.array(pos), np.array(neg)

            # 前向传播
            pos_logits, neg_logits = model(u, seq, pos, neg)
            pos_labels, neg_labels = torch.ones(
                pos_logits.shape, device=args.device
            ), torch.zeros(neg_logits.shape, device=args.device)

            # 计算损失并反向传播
            adam_optimizer.zero_grad()
            indices = np.where(pos != 0)
            loss = bce_criterion(pos_logits[indices], pos_labels[indices])
            loss += bce_criterion(neg_logits[indices], neg_labels[indices])

            # 添加L2正则化
            for param in model.item_emb.parameters():
                loss += args.l2_emb * torch.norm(param)

            loss.backward()
            adam_optimizer.step()
            loss_value = loss.item()
            print("loss in epoch {} iteration {}: {}".format(epoch, step, loss_value))

            # 将损失写入文件
            with open(output_file, "a") as f:
                f.write(
                    "loss in epoch {} iteration {}: {}\n".format(
                        epoch, step, loss_value
                    )
                )

        # 每20个epoch评估一次模型
        if epoch % 20 == 0:
            model.eval()
            t1 = time.time() - t0
            T += t1
            print("Evaluating", end="")

            # 在测试集和验证集上评估
            t_test = evaluate(model, dataset, args)
            t_valid = evaluate_valid(model, dataset, args)
            eval_result = (
                "epoch:%d, time: %f(s), valid (NDCG@10: %.4f, HR@10: %.4f), test (NDCG@10: %.4f, HR@10: %.4f)"
                % (epoch, T, t_valid[0], t_valid[1], t_test[0], t_test[1])
            )
            print(eval_result)

            # 将评估结果写入文件
            with open(output_file, "a") as f:
                f.write(eval_result + "\n")

            # 如果性能提升，保存模型
            if (
                t_valid[0] > best_val_ndcg
                or t_valid[1] > best_val_hr
                or t_test[0] > best_test_ndcg
                or t_test[1] > best_test_hr
            ):
                best_val_ndcg = max(t_valid[0], best_val_ndcg)
                best_val_hr = max(t_valid[1], best_val_hr)
                best_test_ndcg = max(t_test[0], best_test_ndcg)
                best_test_hr = max(t_test[1], best_test_hr)

                folder = f"{args.dataset}_{args.train_dir}"
                if not os.path.exists(folder):
                    os.makedirs(folder)

                fname = "model.pth"
                torch.save(model.state_dict(), os.path.join(folder, fname))

            # 记录日志
            with open(
                os.path.join(args.dataset + "_" + args.train_dir, "log.txt"), "a"
            ) as f:
                f.write(str(epoch) + " " + str(t_valid) + " " + str(t_test) + "\n")
            t0 = time.time()
            model.train()

        # 在最后一个epoch保存最终模型
        if epoch == args.num_epochs:
            folder = f"{args.dataset}_{args.train_dir}"
            if not os.path.exists(folder):
                os.makedirs(folder)

            fname = "model_final.pth"
            torch.save(model.state_dict(), os.path.join(folder, fname))

    # 清理资源
    sampler.close()
    print("Done")
