import os, random, math, numpy as np, pandas as pd, torch, matplotlib.pyplot as plt
from collections import defaultdict
from sklearn import metrics

from ClassEnd.NCF.dataprocess import DataProcess
from ClassEnd.ItemCF.itermcf import ItemCF
from ClassEnd.UserCF.usercf import UserCF
from ClassEnd.NCF.network import NeuMF
from ClassEnd.NCF.trainer import Trainer

# Fix every RNG the pipeline touches so runs are reproducible.
SEED = 42
random.seed(SEED); np.random.seed(SEED); torch.manual_seed(SEED)

# ----------------------------------------------------------------------------------
# 0) Data preprocessing
dp = DataProcess("../ml-1m/ratings.dat")
# Per-user pre-sampled negative pool, indexed by userId for O(1) .loc lookups.
neg_df = dp._negatives.set_index("userId")          # pool of 99 negative samples
# userId -> set of itemIds seen in training (used for scoring and filtering).
train_dict = dp.train_ratings.groupby("userId")["itemId"].apply(set).to_dict()

def get_eval_items(u, pos_item, n_neg=99):
    """Build the candidate list for one evaluation case: the held-out positive
    item followed by up to n_neg pre-sampled negatives for user u."""
    pool = neg_df.loc[u, 'negative_samples']
    candidates = [pos_item]
    candidates.extend(pool[:n_neg])
    return candidates

# ----------------------------------------------------------------------------------
# 1) Train ItemCF / UserCF / NeuMF
itemcf = ItemCF(train_dict, similarity="cosine", norm=True); itemcf.train()
usercf = UserCF(train_dict, similarity="cosine"); usercf.train()

# NeuMF hyper-parameters, consumed by both NeuMF and Trainer.
# NOTE(review): exact key semantics live in ClassEnd.NCF — confirm there.
cfg = {
    "num_users": len(dp._userPool),
    "num_items": len(dp._itemPool),

    "latent_dim_gmf": 16,
    "latent_dim_mlp": 32,
    "layers": [64, 32, 16, 8],                  # fewer layers can curb overfitting

    "optimizer": "adam",
    "adam_lr": 1e-3,
    "l2_regularization": 1e-5,                 # regularization against overfitting

    "batch_size": 512,
    "num_epoch": 30,
    "num_negative": 4,                          # 4 sampled negatives per positive

    "model_name": "NeuMF_final.pth",
    "use_cuda": torch.cuda.is_available(),
    "device_id": 0,
    "pretrain": False
}
neumf = NeuMF(cfg, cfg["latent_dim_gmf"], cfg["latent_dim_mlp"])
Trainer(neumf, cfg).train(sampleGenerator = dp.sample_generator)
neumf.eval()  # switch to inference mode for all scoring below

# ----------------------------------------------------------------------------------
# 2) Unified prediction interface: score(u, i) -> float, one function per model
@torch.no_grad()
def neumf_score(u, i):
    """Scalar NeuMF score for a single (user, item) pair."""
    user_t = torch.tensor([u])
    item_t = torch.tensor([i])
    if cfg["use_cuda"]:
        user_t = user_t.cuda()
        item_t = item_t.cuda()
    return float(neumf(user_t, item_t).cpu())

def itemcf_score(u,i):
    """Sum of item-item similarities between candidate i and every item
    user u interacted with in training."""
    total = 0
    sim_matrix = itemcf._itemSimMatrix
    for interacted in train_dict[u]:
        total += sim_matrix.get(interacted, {}).get(i, 0)
    return total

def usercf_score(u,i):
    """Sum of user-user similarities from u to neighbours who have item i
    in their training history."""
    total = 0
    for neighbour, sim in usercf._userSimMatrix.get(u, {}).items():
        if i in train_dict[neighbour]:
            total += sim
    return total

# ----------------------------------------------------------------------------------
# 3) Evaluation functions
def rank_metrics(score_dict, pos_item, K):
    """Compute ranking metrics for one (user, held-out positive) test case.

    Args:
        score_dict: {item_id: predicted score} over 1 positive + N negatives;
                    pos_item must be a key and at least one negative must exist.
        pos_item:   the single relevant item for this case.
        K:          cutoff for the top-K metrics.

    Returns:
        (hit, ndcg, prec, recall, auc) — HR@K, NDCG@K, Precision@K, Recall@K
        and the AUC of the positive against the negatives.
    """
    ranked = sorted(score_dict.items(), key=lambda x: x[1], reverse=True)
    items  = [it for it,_ in ranked]

    # HR / NDCG / Precision / Recall with a single relevant item.
    if pos_item in items[:K]:
        hit = 1
        rank = items.index(pos_item)      # 0-based rank of the positive
        ndcg = 1/math.log2(rank+2)
        prec = 1/K
        recall = 1
    else:
        hit=ndcg=prec=recall=0

    # AUC with exactly one positive reduces to the fraction of negatives scored
    # strictly below it, counting ties as 0.5 — numerically identical to
    # sklearn.metrics.roc_auc_score on these labels/scores, without paying for
    # a full sklearn call on every test row.
    pos_score = score_dict[pos_item]
    neg_scores = [s for it, s in score_dict.items() if it != pos_item]
    wins = sum(1.0 if pos_score > s else 0.5 if pos_score == s else 0.0
               for s in neg_scores)
    auc = wins / len(neg_scores)
    return hit, ndcg, prec, recall, auc

def evaluate(predict_fn, K=10):
    """Average (HR, NDCG, Precision, Recall, AUC) of predict_fn over the full
    test set, ranking each positive against its 99-negative candidate pool.

    predict_fn(user_id, item_id) -> score (higher = more relevant).
    Returns a length-5 numpy array.
    """
    n_rows = len(dp.test_ratings)
    # Guard the empty case explicitly (consistent with evaluate_subset below)
    # instead of letting the final division produce NaNs.
    if n_rows == 0:
        return np.zeros(5)
    sums = np.zeros(5)
    for row in dp.test_ratings.itertuples():
        cand_items = get_eval_items(row.userId, row.itemId)
        scores = {it: predict_fn(row.userId, it) for it in cand_items}
        sums += np.array(rank_metrics(scores, row.itemId, K))
    return sums / n_rows

# ----------------------------------------------------------------------------------
# 4) Baseline evaluation of all three models on the full test set (K=10)
metrics_names = ["HR","NDCG","Precision","Recall","AUC"]
results = {
    "ItemCF": evaluate(itemcf_score, K=10),
    "UserCF": evaluate(usercf_score, K=10),
    "NeuMF" : evaluate(neumf_score , K=10)
}
# Rows = models, columns = metrics.
df_base = pd.DataFrame(results, index=metrics_names).T.round(4)
print("\n=== 基线评测 (K=10) ==="); print(df_base)

# Coverage
def item_coverage(rec_func, K=10):
    """Fraction of the item catalogue recommended to at least one user.

    rec_func(u) returns a mapping of recommended item -> score.
    NOTE(review): K is currently unused — the top-N cutoff is baked into each
    rec_func; confirm before relying on this parameter.
    """
    recommended = set()
    for user in dp._userPool:
        recommended.update(rec_func(user))
    return len(recommended) / len(dp._itemPool)

def itemcf_rec(u): return itemcf.recommend(u, N=10, K=10)
def usercf_rec(u): return usercf.recommend(u, N=10, K=10)
def neumf_rec(u):
    """Top-10 NeuMF recommendations for user u, excluding training items."""
    seen = train_dict[u]
    scored = [(item, neumf_score(u, item)) for item in dp._itemPool if item not in seen]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return dict(scored[:10])

# Item-space coverage of each model's top-10 recommendation lists.
coverage = {
    "ItemCF": item_coverage(itemcf_rec),
    "UserCF": item_coverage(usercf_rec),
    "NeuMF" : item_coverage(neumf_rec)
}
print("\nItemCoverage:", {k: round(v,4) for k,v in coverage.items()})

# ----------------------------------------------------------------------------------
# 5) Cold start & small-sample analysis
# Cold-start users: at most n training interactions
def subset_users(max_interactions):
    """Test rows belonging to users with <= max_interactions training events."""
    interaction_counts = dp.train_ratings.groupby("userId").size()
    eligible = set(interaction_counts.index[interaction_counts <= max_interactions])
    return dp.test_ratings[dp.test_ratings.userId.isin(eligible)]

# Unpopular (long-tail) items
def subset_new_items(top_k_pop=0.8):
    """Test rows whose item lies in the long tail: items beyond the top
    top_k_pop share of cumulative training popularity."""
    cum_share = dp.train_ratings.itemId.value_counts(normalize=True).cumsum()
    long_tail = set(cum_share.index[cum_share > top_k_pop])
    return dp.test_ratings[dp.test_ratings.itemId.isin(long_tail)]

def evaluate_subset(test_subset, predict_fn, K=10):
    """Average (HR, NDCG, Precision, Recall, AUC) of predict_fn over an
    arbitrary slice of the test set; zeros when the slice is empty."""
    n_rows = len(test_subset)
    if n_rows == 0:
        return np.zeros(5)
    totals = np.zeros(5)
    for rec in test_subset.itertuples():
        candidates = get_eval_items(rec.userId, rec.itemId)
        score_map = {item: predict_fn(rec.userId, item) for item in candidates}
        totals += np.array(rank_metrics(score_map, rec.itemId, K))
    return totals / n_rows

# Cold-start evaluation: restrict the test set to users with at most 3
# training interactions and re-run the ranking evaluation per model.
cold_df={}
cold_subset = subset_users(max_interactions=3)
cold_df["ItemCF"] = evaluate_subset(cold_subset,itemcf_score)
cold_df["UserCF"] = evaluate_subset(cold_subset,usercf_score)
cold_df["NeuMF"]  = evaluate_subset(cold_subset,neumf_score)
# Report HR@10 only (index 0 of the metric vector).
print("\n== 冷启动(≤3交互) HR@10 ==",
      {k: round(v[0],4) for k,v in cold_df.items()})

# ----------------------------------------------------------------------------------
# 6) Visualization of the baseline ranking metrics.
# BUG FIX: DataFrame.plot() creates its own figure, so the old
# plt.figure(figsize=(8,4)) just left an empty extra window and its figsize
# was ignored; pass figsize to plot() and style the returned axes instead.
ax = df_base[['HR','NDCG','Precision','Recall']].plot(kind="bar", figsize=(8,4))
ax.set_title("Three Models on HR/NDCG/Precision/Recall (K=10)")
ax.set_ylabel("score")
plt.xticks(rotation=0)
plt.tight_layout(); plt.show()
