# data_loader/loader.py
import os
import random
import collections
import torch
import numpy as np
import pandas as pd
import scipy.sparse as sp
from torch.utils.data import DataLoader as PyTorchDataLoader
from .dataset import CFDataset  # 导入我们新创建的Dataset


class DataLoader:
    """
    Core data loading and preprocessing class.

    Optimized version:
    - Replaces the hand-rolled batch generator with the standard PyTorch
      Dataset / DataLoader pair.
    - Enables multi-process data loading to remove the CPU bottleneck.
    """

    def __init__(self, args, logging):
        """
        Load interactions and knowledge graphs, build the normalized
        adjacency matrix, and create the training DataLoader.

        Args:
            args: namespace providing at least `data_dir`, `data_name` and
                `cf_batch_size`; may optionally provide `num_workers`.
            logging: logger object used for progress / error reporting.
        """
        self.args = args
        self.logger = logging
        self.data_dir = os.path.join(args.data_dir, args.data_name)

        # --- 1. File paths ---
        self.train_file = os.path.join(self.data_dir, 'train.txt')
        self.test_file = os.path.join(self.data_dir, 'test.txt')
        self.kg_file = os.path.join(self.data_dir, 'kg_final.txt')
        self.cs_kg_file = os.path.join(self.data_dir, 'llm_enhanced_kg.txt')

        # --- 2. Load all data sources ---
        self.logger.info("开始加载用户-物品交互数据...")
        self.cf_train_data, self.train_user_dict = self._load_cf_from_txt(self.train_file)
        _, self.test_user_dict = self._load_cf_from_txt(self.test_file)
        self._statistic_cf()

        self.logger.info("开始加载知识图谱...")
        kg_data = self._load_kg(self.kg_file)
        cs_kg_data = self._load_cs_kg(self.cs_kg_file)

        # --- 3. Build the heterogeneous graph structure ---
        self._construct_graph(kg_data, cs_kg_data)

        # --- 4. Create the PyTorch DataLoader ---
        self.train_dataset = CFDataset(
            users=torch.LongTensor(self.cf_train_data[0]),
            pos_items=torch.LongTensor(self.cf_train_data[1])
        )
        # num_workers > 0 enables multi-process loading; pin_memory=True
        # speeds up CPU -> GPU transfer. The worker count is now tunable
        # through args (falls back to the previous hard-coded value of 10).
        self.train_loader = PyTorchDataLoader(
            self.train_dataset,
            batch_size=args.cf_batch_size,
            shuffle=True,
            num_workers=getattr(args, 'num_workers', 10),
            pin_memory=True
        )

        # --- 5. Print final statistics ---
        self._print_info()

    def _load_cf_from_txt(self, filename):
        """Parse a CF interaction file.

        Each line has the form ``user_id item_id item_id ...``. Lines with
        fewer than two tokens are silently skipped.

        Returns:
            ((users, items), user_dict): two parallel np.ndarrays of flat
            (user, item) pairs, and a dict mapping user_id -> list of items.

        Raises:
            FileNotFoundError: if `filename` does not exist (logged first).
        """
        user_dict = collections.defaultdict(list)
        u_list, i_list = [], []
        try:
            with open(filename, 'r') as f:
                for line in f:
                    parts = line.strip().split()
                    if len(parts) < 2:
                        continue
                    user_id = int(parts[0])
                    item_ids = [int(i) for i in parts[1:]]
                    user_dict[user_id].extend(item_ids)
                    u_list.extend([user_id] * len(item_ids))
                    i_list.extend(item_ids)
        except FileNotFoundError:
            # BUGFIX: the message previously contained no placeholder, so the
            # failing path was never reported.
            self.logger.error(f"交互文件未找到: {filename}")
            raise
        return (np.array(u_list), np.array(i_list)), user_dict

    def _statistic_cf(self):
        """Derive user/item counts and interaction counts.

        Assumes IDs are dense non-negative integers, so counts are
        ``max_id + 1`` over train and test combined.
        """
        all_users = list(self.train_user_dict.keys()) + list(self.test_user_dict.keys())
        all_items = []
        for items in self.train_user_dict.values():
            all_items.extend(items)
        for items in self.test_user_dict.values():
            all_items.extend(items)

        self.n_users = max(all_users) + 1
        self.n_items = max(all_items) + 1
        self.n_cf_train = len(self.cf_train_data[0])
        self.n_cf_test = sum(len(v) for v in self.test_user_dict.values())

    def _load_kg(self, filename):
        """Load the domain KG as a deduplicated (h, r, t) DataFrame.

        Returns an empty frame (and logs a warning) when the file is
        missing or empty, so the rest of the pipeline degrades gracefully.
        """
        try:
            kg_data = pd.read_csv(filename, sep=' ', names=['h', 'r', 't'], engine='python')
            return kg_data.drop_duplicates()
        except (FileNotFoundError, pd.errors.EmptyDataError):
            # BUGFIX: interpolate the actual path into the warning.
            self.logger.warning(f"领域KG文件未找到或为空: {filename}。将不使用领域KG。")
            return pd.DataFrame(columns=['h', 'r', 't'])

    def _load_cs_kg(self, filename):
        """Load the LLM-enhanced commonsense KG (tab-separated, with header).

        A missing ``confidence`` column is filled with 1.0 so every edge
        gets a default weight. Returns an empty frame on missing/empty file.
        """
        try:
            cs_kg_data = pd.read_csv(filename, sep='\t', engine='python')
            if 'confidence' not in cs_kg_data.columns:
                # BUGFIX: interpolate the actual path into the warning.
                self.logger.warning(f"常识KG文件 {filename} 中未找到 'confidence' 列。所有边的置信度将设为默认值1.0。")
                cs_kg_data['confidence'] = 1.0
            return cs_kg_data
        except (FileNotFoundError, pd.errors.EmptyDataError):
            # BUGFIX: interpolate the actual path into the warning.
            self.logger.warning(f"常识KG文件未找到或为空: {filename}。将不使用LLM增强的常识KG。")
            return pd.DataFrame(columns=['h', 'r', 't', 'confidence'])

    def _construct_graph(self, kg_data, cs_kg_data):
        """Build the row-normalized heterogeneous adjacency `self.A_in`.

        The node id space is: entities (items are a prefix of it) first,
        then users offset by `n_entities`. Edges come from CF interactions,
        the domain KG (weight 1.0) and the commonsense KG (confidence
        weights); each edge is added in both directions.
        """
        self.logger.info("构建异构图中...")
        max_h = max(kg_data['h'].max() if not kg_data.empty else 0,
                    cs_kg_data['h'].max() if not cs_kg_data.empty else 0)
        max_t = max(kg_data['t'].max() if not kg_data.empty else 0,
                    cs_kg_data['t'].max() if not cs_kg_data.empty else 0)
        # Entities cover at least all item ids plus any id seen in the KGs.
        self.n_entities = int(max(max_h, max_t, self.n_items - 1)) + 1
        self.n_nodes = self.n_users + self.n_entities

        adj_mat_list = []
        self.logger.info("添加用户-物品交互关系...")
        # Offset user ids past the entity range so they do not collide.
        rows = self.cf_train_data[0] + self.n_entities
        cols = self.cf_train_data[1]
        vals = np.ones_like(rows, dtype=np.float32)
        adj_mat_list.append(sp.coo_matrix((vals, (rows, cols)), shape=(self.n_nodes, self.n_nodes)))
        adj_mat_list.append(sp.coo_matrix((vals, (cols, rows)), shape=(self.n_nodes, self.n_nodes)))

        if not kg_data.empty:
            self.logger.info("添加领域知识图谱关系...")
            vals = np.ones(len(kg_data), dtype=np.float32)
            adj_mat_list.append(sp.coo_matrix((vals, (kg_data['h'], kg_data['t'])), shape=(self.n_nodes, self.n_nodes)))
            adj_mat_list.append(sp.coo_matrix((vals, (kg_data['t'], kg_data['h'])), shape=(self.n_nodes, self.n_nodes)))

        if not cs_kg_data.empty:
            self.logger.info("添加LLM生成的常识知识图谱关系 (带置信度权重)...")
            vals = cs_kg_data['confidence'].astype(np.float32).values
            adj_mat_list.append(
                sp.coo_matrix((vals, (cs_kg_data['h'], cs_kg_data['t'])), shape=(self.n_nodes, self.n_nodes)))
            adj_mat_list.append(
                sp.coo_matrix((vals, (cs_kg_data['t'], cs_kg_data['h'])), shape=(self.n_nodes, self.n_nodes)))

        self.logger.info("聚合所有图并进行归一化...")
        if not adj_mat_list:
            self.logger.error("没有任何图数据可供构建，程序将终止。")
            raise ValueError("No graph data available to build adjacency matrix.")

        final_adj = sum(adj_mat_list)
        rowsum = np.array(final_adj.sum(axis=1)).flatten()
        # Row-normalize: D^-1 * A. Isolated nodes have degree 0; suppress
        # the divide-by-zero warning and zero out the resulting infs.
        with np.errstate(divide='ignore'):
            d_inv = np.power(rowsum, -1.0)
        d_inv[np.isinf(d_inv)] = 0.
        d_mat_inv = sp.diags(d_inv)
        norm_adj = d_mat_inv.dot(final_adj).tocoo()

        indices = torch.from_numpy(np.vstack((norm_adj.row, norm_adj.col))).long()
        values = torch.from_numpy(norm_adj.data).float()
        # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the
        # supported constructor and produces the same COO tensor.
        self.A_in = torch.sparse_coo_tensor(indices, values, size=torch.Size(norm_adj.shape))

    def _print_info(self):
        """Log the final dataset and graph statistics."""
        self.logger.info("=" * 30)
        self.logger.info("    CSE-KGAN 数据统计信息")
        self.logger.info("=" * 30)
        self.logger.info(f"  用户数量 (n_users): {self.n_users}")
        self.logger.info(f"  物品数量 (n_items): {self.n_items}")
        self.logger.info(f"  实体数量 (n_entities): {self.n_entities}")
        self.logger.info(f"  图总节点数 (n_nodes): {self.n_nodes}")
        self.logger.info(f"  训练集交互数 (n_cf_train): {self.n_cf_train}")
        self.logger.info(f"  测试集交互数 (n_cf_test): {self.n_cf_test}")
        if hasattr(self, 'A_in') and self.A_in is not None:
            self.logger.info(f"  归一化邻接矩阵 (A_in) 形状: {self.A_in.shape}")
            self.logger.info(f"  图中总边数 (nnz): {self.A_in._nnz()}")
        self.logger.info("=" * 30)
