# ====================================================================================
# 文件: src/dataloader.py
# 描述: [V19 修复版] 修复 _validate_and_filter_edges 中的索引检查逻辑错误
# ====================================================================================

import os
from collections import defaultdict
from typing import Dict, List, Tuple

import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch
from tqdm import tqdm

from utils import get_sparse_adj


class Loader(object):
    """Loads all graph data for the model and builds normalized sparse graphs.

    Five inputs are read from *path* (file names come from *config*):
      1. train.txt  - "user item item ..." per line -> (U,I) interaction graph
      2. test.txt   - "user item item ..." per line -> evaluation ground truth
      3. all_kg.txt - "h r t" integer triples       -> (I,K) entity graph
      4. user_interest_clustered.txt - "user cluster" pairs -> (U,K) graph
      5. item_semantic_graph.pt - precomputed (I,I) semantic graph

    After construction, ``norm_*_edge_index`` / ``norm_*_edge_weight``
    attributes hold device-resident normalized edge lists.
    """

    def __init__(self, config, path):
        self.config = config
        self.path = path
        # Prefer the configured GPU; fall back to CPU when CUDA is unavailable.
        self.device = torch.device(f"cuda:{config.gpu_id}" if torch.cuda.is_available() else "cpu")
        print(f"[Loader] 使用设备: {self.device}")
        self.n_users = config.data_config.n_users
        self.n_items = config.data_config.n_items
        print(f"[Loader] n_users={self.n_users}, n_items={self.n_items}")

        # --- 1. Load train.txt ---
        print("[Loader] 1/5: 加载 train.txt ...")
        self.train_file = os.path.join(path, config.data_config.train_file)
        train_pairs_list = []
        try:
            with open(self.train_file, 'r') as f:
                for line in tqdm(f, desc="  > 解析 Train 文件"):
                    parts = line.strip().split()
                    if len(parts) < 2: continue
                    try:
                        user_id = int(parts[0])
                        # Silently drop out-of-range ids instead of failing.
                        if not (0 <= user_id < self.n_users): continue
                        for item_str in parts[1:]:
                            if item_str.isdigit():
                                item_id = int(item_str)
                                if 0 <= item_id < self.n_items:
                                    train_pairs_list.append([user_id, item_id])
                    except ValueError:
                        continue

            if not train_pairs_list: raise ValueError("train.txt 解析后为空!")
            self.train_ui_pairs = torch.LongTensor(train_pairs_list)
            self.ui_adj = self._create_sp_adj(self.train_ui_pairs, (self.n_users, self.n_items))
            print(f"  > 成功从 train.txt 加载 {len(train_pairs_list)} 个 (u,i) 交互。")
        except Exception as e:
            print(f"[Loader] 错误: 加载或处理 train.txt 失败: {e}")
            raise

        # --- 2. Load test.txt ---
        print("[Loader] 2/5: 加载 test.txt ...")
        self.test_file = os.path.join(path, config.data_config.test_file)
        test_users_list = []
        test_items_list = []
        try:
            with open(self.test_file, 'r') as f:
                for line in tqdm(f, desc="  > 解析 Test 文件"):
                    parts = line.strip().split()
                    if not parts: continue
                    try:
                        user_id = int(parts[0])
                        if not (0 <= user_id < self.n_users): continue
                        item_ids_str = parts[1:]
                        items = []
                        for num_str in item_ids_str:
                            if num_str.isdigit():
                                num = int(num_str)
                                if 0 <= num < self.n_items: items.append(num)
                        test_users_list.append(user_id)
                        test_items_list.append(items)
                    except ValueError:
                        continue
            self.test_data = pd.DataFrame({'u': test_users_list, 'i_list': test_items_list})
            # Keep only in-range users that have at least one ground-truth item.
            self.test_data = self.test_data[
                (self.test_data['u'] >= 0) & (self.test_data['u'] < self.n_users) &
                (self.test_data['i_list'].apply(len) > 0)
                ].astype({'u': int})
            print(f"  > 成功从 test.txt 加载 {len(self.test_data)} 个有效测试用户。")
        except Exception as e:
            # A broken test file degrades evaluation but must not abort training.
            print(f"[Loader] 严重警告: 解析 test.txt 失败: {e}. 测试集可能为空.")
            self.test_data = pd.DataFrame(columns=['u', 'i_list'])

        # --- 3. Load all_kg.txt ---
        print("[Loader] 3/5: 加载 all_kg.txt ...")
        self.kg_file = os.path.join(path, config.data_config.kg_file)
        kg_triples = []
        try:
            with open(self.kg_file, 'r') as f:
                for line in tqdm(f, desc="  > 解析 KG 文件"):
                    parts = line.strip().split()
                    if len(parts) == 3:
                        try:
                            h, r, t = map(int, parts)
                            if h >= 0 and r >= 0 and t >= 0: kg_triples.append([h, r, t])
                        except ValueError:
                            pass
        except FileNotFoundError:
            raise

        if not kg_triples:
            kg_data = pd.DataFrame(columns=['h', 'r', 't'])
            self.n_entities = self.n_items
            self.n_relations = 0
        else:
            kg_data = pd.DataFrame(kg_triples, columns=['h', 'r', 't'])
            # Entity/relation counts are inferred from the largest ids seen.
            self.n_entities = max(kg_data['h'].max(), kg_data['t'].max()) + 1
            self.n_relations = kg_data['r'].max() + 1
            print(f"  > 成功解析 {len(kg_triples)} 条 KG 三元组。n_entities={self.n_entities}")

        # Items share the entity id space; never let n_entities drop below n_items.
        if self.n_items > self.n_entities:
            self.n_entities = self.n_items

        ik_pairs = torch.LongTensor(kg_data[['h', 't']].values) if not kg_data.empty else torch.LongTensor([])
        self.ik_adj = self._create_sp_adj(ik_pairs, (self.n_entities, self.n_entities))

        # --- 4. Load user_interest_clustered.txt ---
        print("[Loader] 4/5: 加载 user_interest_clustered.txt ...")
        self_enh_file = os.path.join(path, config.preproc_config.user_enhance.output_file)
        try:
            uk_data = pd.read_csv(self_enh_file, sep=' ', header=None, names=['u', 'k'], engine='python',
                                  on_bad_lines='skip')
            uk_data = uk_data.dropna(how='any').astype(int)
            uk_data = uk_data[(uk_data['u'] >= 0) & (uk_data['u'] < self.n_users) & (uk_data['k'] >= 0)]

            if not uk_data.empty:
                # Interest-cluster ids may extend beyond the KG entity range;
                # grow n_entities so every cluster gets a node.
                self.n_kg_entities = uk_data['k'].max() + 1
                if self.n_kg_entities > self.n_entities:
                    print(f"[Loader] 发现新的兴趣簇实体, n_entities 更新为: {self.n_kg_entities}")
                    self.n_entities = self.n_kg_entities
                uk_pairs = torch.LongTensor(uk_data.values)
            else:
                uk_pairs = torch.LongTensor([])
                self.n_kg_entities = self.n_entities
        except FileNotFoundError:
            # The enhancement file is optional; proceed with an empty (U,K) graph.
            print(f"[Loader] 警告: {self_enh_file} 未找到。")
            uk_pairs = torch.LongTensor([])
            self.n_kg_entities = self.n_entities

        self.uk_adj = self._create_sp_adj(uk_pairs, (self.n_users, self.n_entities))

        # --- 5. Load item_semantic_graph.pt ---
        print("[Loader] 5/5: 加载 item_semantic_graph.pt ...")
        self.ii_adj_file = os.path.join(path, config.preproc_config.item_enhance.output_graph_file)
        try:
            loaded_ii_graph = torch.load(self.ii_adj_file, map_location='cpu')
            if isinstance(loaded_ii_graph, (list, tuple)) and len(loaded_ii_graph) == 2:
                self.norm_ii_edge_index = loaded_ii_graph[0].long().to(self.device)
                self.norm_ii_edge_weight = loaded_ii_graph[1].float().to(self.device)
            elif isinstance(loaded_ii_graph, torch.Tensor):
                self.norm_ii_edge_index = loaded_ii_graph._indices().long().to(self.device)
                self.norm_ii_edge_weight = loaded_ii_graph._values().float().to(self.device)
            else:
                # [Fix] Previously an unrecognized payload silently left
                # norm_ii_edge_index/weight unset, causing a confusing
                # AttributeError much later. Fail fast with a clear message.
                raise ValueError(
                    f"Unsupported item semantic graph format: {type(loaded_ii_graph)!r} "
                    f"(expected (edge_index, edge_weight) pair or sparse tensor)")
        except FileNotFoundError:
            raise

        print("[Loader] --- 开始图归一化 ---")

        # (U,I): build the symmetric bipartite block matrix [[0, A], [A^T, 0]].
        if self.ui_adj.nnz > 0:
            N, M = self.n_users, self.n_items
            A_hat = sp.bmat([[sp.csr_matrix((N, N)), self.ui_adj], [self.ui_adj.T, sp.csr_matrix((M, M))]],
                            format='csr')
            self.norm_ui_edge_index, self.norm_ui_edge_weight = get_sparse_adj(A_hat, device=self.device,
                                                                               gmae_norm=False)
        else:
            self.norm_ui_edge_index = torch.empty((2, 0), dtype=torch.long, device=self.device)
            self.norm_ui_edge_weight = torch.empty((0,), dtype=torch.float, device=self.device)

        # (I,K): symmetrize before normalization.
        if self.ik_adj.nnz > 0:
            # Rebuild if n_entities grew after the (I,K) adjacency was created.
            if self.ik_adj.shape[0] < self.n_entities or self.ik_adj.shape[1] < self.n_entities:
                self.ik_adj = self._create_sp_adj(ik_pairs, (self.n_entities, self.n_entities))
            ik_adj_sym = self.ik_adj + self.ik_adj.T
            self.norm_ik_edge_index, self.norm_ik_edge_weight = get_sparse_adj(ik_adj_sym, device=self.device,
                                                                               gmae_norm=False)
        else:
            self.norm_ik_edge_index = torch.empty((2, 0), dtype=torch.long, device=self.device)
            self.norm_ik_edge_weight = torch.empty((0,), dtype=torch.float, device=self.device)

        # (U,K): GMAE-style normalization.
        if self.uk_adj.nnz > 0:
            self.norm_uk_edge_index, self.norm_uk_edge_weight = get_sparse_adj(self.uk_adj, device=self.device,
                                                                               gmae_norm=True)
        else:
            self.norm_uk_edge_index = torch.empty((2, 0), dtype=torch.long, device=self.device)
            self.norm_uk_edge_weight = torch.empty((0,), dtype=torch.float, device=self.device)

    def _create_sp_adj(self, pairs, shape):
        """Build a binary CSR adjacency from an (n, 2) LongTensor of (row, col) pairs.

        Out-of-range pairs are dropped; an empty tensor yields an all-zero matrix.
        """
        if pairs.numel() == 0: return sp.csr_matrix(shape)
        rows = pairs[:, 0].cpu().numpy()
        cols = pairs[:, 1].cpu().numpy()
        valid_mask = (rows >= 0) & (rows < shape[0]) & (cols >= 0) & (cols < shape[1])
        rows, cols = rows[valid_mask], cols[valid_mask]
        data = np.ones_like(rows, dtype=np.float32)
        adj = sp.coo_matrix((data, (rows, cols)), shape=shape)
        return adj.tocsr()

    def get_train_loader(self, batch_size):
        """Return a DataLoader of (user, pos_item, neg_item) BPR triples."""
        # [Idiom] defaultdict(list) instead of manual key initialization,
        # consistent with get_test_loader below.
        user_pos_items = defaultdict(list)
        for u, i in self.train_ui_pairs.cpu().numpy():
            user_pos_items[int(u)].append(int(i))
        valid_users = [u for u in user_pos_items if user_pos_items[u]]
        return torch.utils.data.DataLoader(self.BPRSampler(valid_users, self.n_items, user_pos_items),
                                           batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)

    class BPRSampler(torch.utils.data.Dataset):
        """Yields (user, positive_item, negative_item) triples for BPR training."""

        def __init__(self, users, n_items, user_pos_items):
            self.users = users
            self.n_items = n_items
            self.user_pos_items = user_pos_items
            # [Fix] Pre-build sets once: O(1) membership tests during negative
            # rejection sampling instead of O(len(pos)) list scans per attempt.
            self.user_pos_sets = {u: set(items) for u, items in user_pos_items.items()}

        def __len__(self): return len(self.users)

        def __getitem__(self, idx):
            u = self.users[idx]
            pos_items = self.user_pos_items[u]
            pos_set = self.user_pos_sets[u]
            pos_i = np.random.choice(pos_items)
            # [Fix] A user who interacted with every item made the old rejection
            # loop spin forever; fall back to an arbitrary item in that case.
            if len(pos_set) >= self.n_items:
                return u, pos_i, np.random.randint(0, self.n_items)
            neg_j = np.random.randint(0, self.n_items)
            while neg_j in pos_set: neg_j = np.random.randint(0, self.n_items)
            return u, pos_i, neg_j

    def get_all_graphs_and_features(self):
        """Validate every normalized graph and bundle them with user features.

        Returns a dict of sizes, filtered edge_index/edge_weight pairs for the
        (U,I), (I,K), (I,I), (U,K) and (K,U) graphs, and a dense per-user
        interest-cluster feature matrix of shape (n_users, cluster_num).
        """
        n_clusters = self.config.preproc_config.user_enhance.cluster_num
        if self.uk_adj.nnz > 0:
            uk_adj_dense = self.uk_adj.toarray()
            # Interest clusters are assumed to occupy the LAST n_clusters entity
            # columns; if the layout does not match, fall back to zeros.
            if uk_adj_dense.shape[1] >= self.n_entities:
                start = self.n_entities - n_clusters
                if start >= 0:
                    user_interest_features = uk_adj_dense[:, start:self.n_entities]
                    if user_interest_features.shape[1] < n_clusters:
                        padding = np.zeros((self.n_users, n_clusters - user_interest_features.shape[1]),
                                           dtype=np.float32)
                        user_interest_features = np.hstack((user_interest_features, padding))
                else:
                    user_interest_features = np.zeros((self.n_users, n_clusters), dtype=np.float32)
            else:
                user_interest_features = np.zeros((self.n_users, n_clusters), dtype=np.float32)
        else:
            user_interest_features = np.zeros((self.n_users, n_clusters), dtype=np.float32)

        n_ui = self.n_users + self.n_items
        norm_ui_idx, norm_ui_wt = self._validate_and_filter_edges(self.norm_ui_edge_index, self.norm_ui_edge_weight,
                                                                  n_ui, n_ui, "(U,I)")
        norm_ik_idx, norm_ik_wt = self._validate_and_filter_edges(self.norm_ik_edge_index, self.norm_ik_edge_weight,
                                                                  self.n_entities, self.n_entities, "(I,K)")
        norm_ii_idx, norm_ii_wt = self._validate_and_filter_edges(self.norm_ii_edge_index, self.norm_ii_edge_weight,
                                                                  self.n_items, self.n_items, "(I,I)")

        # (U,K)
        norm_uk_idx, norm_uk_wt = self._validate_and_filter_edges(self.norm_uk_edge_index, self.norm_uk_edge_weight,
                                                                  self.n_users, self.n_entities, "(U,K)-GMAE")

        # (K,U): transpose of (U,K) — swap the row/col of every edge.
        if self.norm_uk_edge_index.numel() > 0:
            ku_idx = torch.stack([self.norm_uk_edge_index[1], self.norm_uk_edge_index[0]], dim=0)
            ku_wt = self.norm_uk_edge_weight
        else:
            ku_idx = torch.empty((2, 0), dtype=torch.long, device=self.device)
            ku_wt = torch.empty((0,), dtype=torch.float, device=self.device)

        norm_ku_idx, norm_ku_wt = self._validate_and_filter_edges(ku_idx, ku_wt, self.n_entities, self.n_users,
                                                                  "(K,U)-GMAE")

        return {
            'n_users': self.n_users, 'n_items': self.n_items, 'n_entities': self.n_entities,
            'n_relations': self.n_relations,
            'norm_ui_edge_index': norm_ui_idx, 'norm_ui_edge_weight': norm_ui_wt,
            'norm_ik_edge_index': norm_ik_idx, 'norm_ik_edge_weight': norm_ik_wt,
            'norm_ii_edge_index': norm_ii_idx, 'norm_ii_edge_weight': norm_ii_wt,
            'norm_uk_edge_index': norm_uk_idx, 'norm_uk_edge_weight': norm_uk_wt,
            'norm_ku_edge_index': norm_ku_idx, 'norm_ku_edge_weight': norm_ku_wt,
            'user_interest_features': torch.tensor(user_interest_features).float().to(self.device)
        }

    def _validate_and_filter_edges(self, edge_index, edge_weight, num_src_nodes, num_target_nodes, graph_name):
        """Drop edges whose endpoints fall outside the valid node id ranges.

        edge_index follows the PyG convention: row 0 = source, row 1 = target.
        Returns the (possibly filtered) edge_index/edge_weight pair; the inputs
        are returned unchanged when every edge is in range.
        """
        if edge_index.numel() == 0: return edge_index, edge_weight

        row, col = edge_index  # row=Src, col=Tgt

        mask = (
                (row >= 0) & (row < num_src_nodes) &
                (col >= 0) & (col < num_target_nodes)
        )

        if not torch.all(mask):
            original = edge_index.shape[1]
            edge_index = edge_index[:, mask]
            edge_weight = edge_weight[mask]
            # NOTE: row/col still reference the PRE-filter tensors, so the
            # reported maxima describe the offending input (intentional).
            print(f"[Loader._validate] 警告: {graph_name} 图过滤掉 {original - edge_index.shape[1]} 条越界边。"
                  f"规则: Src < {num_src_nodes}, Tgt < {num_target_nodes}."
                  f"实际Max: Src={row.max().item()}, Tgt={col.max().item()}")
        return edge_index, edge_weight

    def get_test_loader(self, batch_size):
        """Return a DataLoader of (users, test_item_lists, train_item_lists) batches."""
        test_map = {}
        if not self.test_data.empty:
            for _, row in self.test_data.iterrows():
                if isinstance(row['i_list'], list): test_map[int(row['u'])] = row['i_list']

        # Training items per user, used by evaluation to mask seen items.
        train_map = defaultdict(set)
        for u, i in self.train_ui_pairs.cpu().numpy(): train_map[int(u)].add(int(i))

        valid_users = list(test_map.keys())
        dataset = self.TestSampler(valid_users, test_map, train_map)

        def collate(batch):
            # Keep the variable-length item lists as Python lists; only the
            # user ids are stacked into a tensor.
            users = torch.LongTensor([b[0] for b in batch])
            test_lists = [b[1] for b in batch]
            train_lists = [b[2] for b in batch]
            return users, test_lists, train_lists

        return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4,
                                           pin_memory=True, collate_fn=collate)

    class TestSampler(torch.utils.data.Dataset):
        """Yields (user, ground_truth_items, seen_train_items) per test user."""

        def __init__(self, users, test_map, train_map):
            self.users = users
            self.test_map = test_map
            self.train_map = train_map

        def __len__(self): return len(self.users)

        def __getitem__(self, idx):
            u = self.users[idx]
            return u, self.test_map.get(u, []), list(self.train_map.get(u, set()))

class SDKRData:
    """Flat container bundling the Loader's normalized graphs and features.

    Pulls the graph dict out of *loader* once and exposes each normalized
    edge list/weight pair as a ``graph_<view>_edge_index`` /
    ``graph_<view>_edge_weight`` attribute, plus the user interest features
    moved onto the loader's device.
    """

    def __init__(self, config, loader: Loader):
        graphs = loader.get_all_graphs_and_features()
        self.n_users = graphs['n_users']
        self.n_items = graphs['n_items']
        self.n_entities = graphs['n_entities']
        self.device = loader.device
        # Copy every normalized graph view verbatim onto graph_* attributes.
        for view in ('ui', 'ik', 'ii', 'uk', 'ku'):
            setattr(self, f'graph_{view}_edge_index', graphs[f'norm_{view}_edge_index'])
            setattr(self, f'graph_{view}_edge_weight', graphs[f'norm_{view}_edge_weight'])
        self.user_interest_features = graphs['user_interest_features'].to(self.device)
        self.loader = loader

    def get_train_loader(self, batch_size):
        # Pure delegation to the underlying Loader.
        return self.loader.get_train_loader(batch_size)

    def get_test_loader(self, batch_size):
        # Pure delegation to the underlying Loader.
        return self.loader.get_test_loader(batch_size)


def load_data(config) -> SDKRData:
    """Resolve the dataset directory and build the full SDKRData bundle.

    An absolute ``data_config.path`` is used as-is; a relative one is resolved
    against the repository root (the parent of this file's directory). The
    dataset name is appended in both cases.
    """
    base = config.data_config.path
    if not os.path.isabs(base):
        repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
        base = os.path.join(repo_root, base)
    data_dir = os.path.join(base, config.dataset)
    loader = Loader(config, data_dir)
    return SDKRData(config, loader)