import utils as u
import os
import torch
#erase
import time
import tarfile
import itertools
import numpy as np
import pdb
import pandas as pd

import pandas as pd
import torch
from torch.utils.data import Dataset

from sklearn.preprocessing import MinMaxScaler

class TrendingWeibo_Dataset(Dataset):
    """Temporal-graph dataset for trending Weibo topics.

    Loads, from ``./data/trendingweibo/``:
      * per-timestep node features  (new_table5.csv)
      * node labels with timestamps (new_table5.csv)
      * static node embeddings      (embedding2.pt)
      * time-stamped edges          (new_table5_edge.csv)
    """

    def __init__(self, args):
        # Per-timestep dynamic features and the raw topicID column.
        self.nodes, self.nodes_feats = self.load_node_feats()
        self.num_nodes = len(np.unique(self.nodes))
        self.nodes_labels_times = self.load_node_labels()
        self.nodes_static_feats = self.load_node_static_feats()
        # NOTE: load_transactions() also sets self.max_time / self.min_time
        # as a side effect.
        self.edges = self.load_transactions()

        # Map contiguous index -> original node id.
        # NOTE(review): self.nodes is the raw topicID column and may contain
        # duplicated ids, so consecutive indices can map to repeated
        # originals — confirm this is the intended mapping.
        self.contID_to_origID = {idx: orig_id for idx, orig_id in enumerate(self.nodes)}

        # Total per-node feature width = dynamic dims + static embedding dims.
        # NOTE(review): assumes timestamp key 0 is present in nodes_feats and
        # that the static embeddings tensor is 2-D — confirm against the data.
        self.feats_per_node = self.nodes_feats[0].size(1) + self.nodes_static_feats.size(1)

    def load_node_static_feats(self):
        """Load precomputed static node embeddings.

        Returns:
            The "embeddings" tensor stored in embedding2.pt, or None if
            the key is absent.
        """
        data = torch.load("./data/trendingweibo/embedding2.pt")
        return data.get("embeddings", None)

    def load_node_feats(self):
        """Load per-timestep dynamic node features.

        Returns:
            original_ids: raw topicID column as a numpy array (may contain
                duplicates — one row per (timestamp, topic) record).
            nodes_feats: dict mapping timestamp -> FloatTensor of shape
                (num_unique_nodes, feature_dim). Nodes absent at a given
                timestamp are zero-filled.
        """
        data = pd.read_csv("./data/trendingweibo/new_table5.csv")
        selected_columns1 = ['timestamp', 'topicID', 'platform',
                             'future1', 'future2', 'future3', 'future4', 'future5']
        data = data[selected_columns1]

        # Log-transform then min-max normalize each feature column,
        # independently per platform (small epsilons avoid log(0) and /0).
        data.iloc[:, 3:] = data.iloc[:, 3:].astype(float)
        data.iloc[:, 3:] = np.log(data.iloc[:, 3:] + 1e-5)
        for col in data.columns[3:]:
            data[col] = data.groupby('platform')[col].transform(
                lambda x: (x - x.min()) / (x.max() - x.min() + 0.00001)
            )

        original_ids = data.iloc[:, 1].values           # raw topicID column
        features = data.values
        keys = np.unique(features[:, 0])                # unique timestamps
        nodes = np.sort(np.unique(features[:, 1]))      # sorted unique node ids
        # Feature dims = all columns minus timestamp/topicID/platform.
        feature_dim = features.shape[1] - 3

        nodes_feats = {}
        for key in keys:
            # Rows belonging to timestamp `key`.
            key_features = features[features[:, 0] == key]
            key_nodes = key_features[:, 1]
            # First feature row seen for each node at this timestep.
            key_feats_dict = {
                node: key_features[key_nodes == node, 3:][0]
                for node in key_nodes
            }
            # Align to the global node ordering; zero-fill missing nodes.
            all_node_feats = np.array(
                [key_feats_dict.get(node, np.zeros(feature_dim)) for node in nodes],
                dtype=np.float32,
            )
            nodes_feats[key] = torch.tensor(all_node_feats)

        return original_ids, nodes_feats

    def load_node_labels(self):
        """Load (topicID, label, timestamp) triples as an int64 tensor.

        Labels are log-transformed then min-max scaled to [0, 10] per
        platform before the integer cast.
        NOTE(review): the int64 cast truncates the scaled float labels to
        integers — confirm this bucketing is intended.
        """
        labels_df = pd.read_csv("./data/trendingweibo/new_table5.csv")
        labels_df['label'] = labels_df['label'].astype(float)
        labels_df['label'] = np.log(labels_df['label'] + 1e-5)
        labels_df['label'] = labels_df.groupby('platform')['label'].transform(
            lambda x: 10 * (x - x.min()) / (x.max() - x.min())
        )
        filtered_df1 = labels_df[['topicID', 'label', 'timestamp']]
        # Return directly: the original wrapped this in a second
        # torch.tensor(...) call, which makes a redundant copy and emits a
        # UserWarning ("use .clone().detach()").
        return torch.tensor(filtered_df1.values, dtype=torch.int64)

    def load_node_labels2(self):
        """Alternative label loader (appears unused; kept for reference).

        Builds [node_id, label, time] rows from modified_trending_value8.csv,
        overwriting every label with 1 (randint(1, 2) always yields 1).
        """
        labels_df = pd.read_csv("./data/trendingweibo/modified_trending_value8.csv")
        selected_columns1 = ['topicID', 'future2']
        selected_columns2 = ['topicID', 'timestamp']
        # .copy() avoids pandas chained-assignment (SettingWithCopyWarning)
        # on the slice assignment below; resulting values are identical.
        filtered_df1 = labels_df[selected_columns1].copy()
        filtered_df1.iloc[:, 1] = np.random.randint(1, 2, size=len(filtered_df1))
        filtered_df2 = labels_df[selected_columns2]

        labels = torch.tensor(filtered_df1.values, dtype=torch.int64)
        times = torch.tensor(filtered_df2.values, dtype=torch.int64)

        # Column layout of the two tensors.
        lcols = {'nid': 0, 'label': 1}
        tcols = {'nid': 0, 'time': 1}

        # Build [nid, label, time] rows, skipping negative (invalid) labels.
        # NOTE(review): `times[nid, ...]` indexes rows by node id, which is
        # only correct when node ids coincide with row positions — confirm.
        nodes_labels_times = []
        for i in range(len(labels)):
            label = labels[i, lcols['label']].long()
            if label >= 0:
                nid = labels[i, lcols['nid']].long()
                time = times[nid, tcols['time']].long()
                nodes_labels_times.append([nid, label, time])

        # Single tensor construction (original double-wrapped the tensor,
        # causing a redundant copy and a UserWarning).
        return torch.tensor(nodes_labels_times)

    def load_transactions(self):
        """Load time-stamped edges for an undirected graph.

        Side effects:
            Sets self.max_time / self.min_time from the time column.

        Returns:
            dict with 'idx' — int64 tensor of shape (2E, 3) holding forward
            and reversed edges — and 'vals' — a ones vector of length 2E.
        """
        data = pd.read_csv("./data/trendingweibo/new_table5_edge.csv",
                           header=None, skiprows=1)
        # Keep only rows flagged 1 in column 3, then the first three columns.
        data = data[data[3] == 1].iloc[:, :3]
        edges = torch.tensor(data.values, dtype=torch.int64)

        # Reorder columns and append reversed copies so each edge appears in
        # both directions.
        # NOTE(review): the raw file's column semantics are not documented;
        # these permutations are preserved as-is — confirm column order
        # (the code below treats column index 2 of the result as time).
        reversed_edges = edges[:, [1, 2, 0]]
        edges = edges[:, [2, 1, 0]]
        full_edges = torch.cat([edges, reversed_edges])

        self.max_time = full_edges[:, 2].max().item()
        self.min_time = full_edges[:, 2].min().item()

        return {
            'idx': full_edges,
            'vals': torch.ones(full_edges.size(0)),
        }

    def __len__(self):
        return self.num_nodes

    def __getitem__(self, idx):
        # NOTE(review): nodes_feats is a dict keyed by timestamp, not by
        # node index, so self.nodes_feats[idx] only resolves when idx is
        # also a valid timestamp key — confirm intended access pattern.
        return {
            'features': self.nodes_feats[idx],
            'label': self.nodes_labels_times[idx, 1],
            'time': self.nodes_labels_times[idx, 2],
        }
