import torch, os, random, numpy as np
from dataprocess import DataProcess
from dataloader  import Construct_DataLoader
from network import NeuMF
from trainer import Trainer

# Reproducibility: seed every RNG this training pipeline draws from
# (Python's `random`, NumPy, and PyTorch CPU RNG).
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
# Bug fix: the CUDA RNG was left unseeded even though the config below
# enables CUDA when available (`use_cuda`), so GPU runs were not
# reproducible. Seed all CUDA devices when present (no-op otherwise).
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(SEED)


# Location of the MovieLens-1M ratings file.
data_path = "../ml-1m/ratings.dat"
dp = DataProcess(data_path)

# Hyper-parameters and runtime options for the NeuMF demo run.
cfg = {
    # NOTE(review): dp._userPool / dp._itemPool appear to be the sets of
    # unique user/item ids built by DataProcess — confirm in dataprocess.py.
    "num_users": len(dp._userPool),
    "num_items": len(dp._itemPool),
    "latent_dim_gmf": 8,
    "latent_dim_mlp": 8,
    # MLP tower widths; the first layer consumes the concatenated
    # user/item embeddings, i.e. latent_dim_mlp * 2 = 16 inputs.
    "layers": [16, 64, 32, 16, 8],
    "model_name": "neumf_demo.pth",
    "optimizer": "adam",
    "adam_lr": 1e-3,
    "l2_regularization": 1e-6,
    "batch_size": 1024,
    "num_epoch": 1,        # one epoch for the demo; raise for a real run
    "num_negative": 4,     # negatives sampled per positive example
    "use_cuda": torch.cuda.is_available(),
    "device_id": 0,
    # pre-training options
    "pretrain": False,
}


# Build the NeuMF model from the config and wrap it in a Trainer.
model = NeuMF(cfg, cfg["latent_dim_gmf"], cfg["latent_dim_mlp"])
trainer = Trainer(model, cfg)

# Train on (user, item, label) samples drawn from the preprocessed ratings.
trainer.train(sampleGenerator=dp.sample_generator)

# Persist the learned weights under cfg["model_name"].
trainer.save()

print("\n== Demo 完成！模型参数已保存到", cfg["model_name"])
