import os
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
import pdb
class TrendingWeibo_Dataset(Dataset):
    """Temporal graph dataset built from trending-Weibo hot-search tables.

    Loads per-timestep dynamic node features, per-row labels, static text
    embeddings, and two edge lists (similarity- and co-occurrence-based)
    from CSV / .pt files under ./data/trendingweibo/.
    """

    def __init__(self, args):
        # Dynamic (per-timestep) node features and the raw topicID column.
        self.nodes, self.nodes_feats = self.load_node_feats()
        self.num_nodes = len(np.unique(self.nodes))
        self.nodes_labels_times = self.load_node_labels()
        self.edges = self.load_transactions()
        self.edges_r = self.load_transactions_r()

        # Static node features (pretrained text embeddings).
        self.nodes_static_feats, _ = self.load_node_static_feats()

        # Map contiguous index -> original topicID.
        # NOTE(review): self.nodes is the raw (non-unique) topicID column,
        # so these indices follow row order, not the sorted-unique node
        # ordering used for the feature matrices — confirm this is intended.
        self.contID_to_origID = {idx: orig_id for idx, orig_id in enumerate(self.nodes)}

        # Total per-node feature dimension = dynamic + static.
        # Every snapshot tensor has the same width, so take the first one
        # rather than assuming a timestamp key `0` exists in the dict.
        first_snapshot = next(iter(self.nodes_feats.values()))
        self.feats_per_node = first_snapshot.size(1) + self.nodes_static_feats.size(1)

    def load_node_feats(self):
        """Load per-timestep node features from new_table5.csv.

        Returns:
            tuple: (original_ids, nodes_feats) where original_ids is the raw
            topicID column and nodes_feats maps each timestamp to a dense
            (num_nodes, feature_dim) float32 tensor (zeros for absent nodes).
        """
        data = pd.read_csv("./data/trendingweibo/new_table5.csv")
        selected_columns1 = ['timestamp', 'topicID', 'platform',
                             'future1', 'future3', 'future4', 'future5']
        # .copy() so the in-place .iloc assignments below operate on an
        # independent frame (avoids SettingWithCopyWarning).
        data = data[selected_columns1].copy()

        # Log-compress, then min-max normalise each feature within its platform.
        data.iloc[:, 3:] = data.iloc[:, 3:].astype(float)
        data.iloc[:, 3:] = np.log(data.iloc[:, 3:] + 1e-5)
        for col in data.columns[3:]:
            data[col] = data.groupby('platform')[col].transform(
                lambda x: 1 * (x - x.min()) / (x.max() - x.min() + 0.00001)
            )

        original_ids = data.iloc[:, 1].values
        features = data.values
        keys = np.unique(features[:, 0])            # all timestamps
        nodes = np.sort(np.unique(features[:, 1]))  # all topicIDs, sorted
        feature_dim = features.shape[1] - 3
        nodes_feats = {}

        for key in keys:
            key_features = features[features[:, 0] == key]
            # One pass per snapshot; first occurrence wins if a node
            # appears twice (same semantics as the original [...,][0]).
            key_feats_dict = {}
            for row in key_features:
                key_feats_dict.setdefault(row[1], row[3:])
            # Dense (num_nodes, feature_dim) matrix; absent nodes get zeros.
            all_node_feats = np.array([
                key_feats_dict.get(node, np.zeros(feature_dim))
                for node in nodes
            ], dtype=np.float32)
            nodes_feats[key] = torch.tensor(all_node_feats)

        return original_ids, nodes_feats

    def load_node_labels(self):
        """Load labels, normalise them per platform, and print statistics.

        Returns:
            torch.Tensor: int64 tensor of (topicID, label, timestamp) rows.
        """
        labels_df = pd.read_csv("./data/trendingweibo/new_table5.csv")
        labels_df['label'] = labels_df['label'].astype(float)
        original_stats = labels_df['label'].describe(percentiles=[.25, .5, .75])
        original_mean = labels_df['label'].mean()
        original_min = labels_df['label'].min()
        original_max = labels_df['label'].max()

        # Log-compress then scale to [0, 3] within each platform.
        # NOTE(review): unlike load_node_feats, no epsilon in the
        # denominator — a platform whose labels are all equal would divide
        # by zero. Deliberately left as-is because adding an epsilon would
        # shift the maximum label bucket from 3 to 2 after the int64 cast
        # below; confirm constant-label platforms cannot occur.
        labels_df['label'] = np.log(labels_df['label'] + 1e-5)
        labels_df['label'] = labels_df.groupby('platform')['label'].transform(
            lambda x: 3 * (x - x.min()) / (x.max() - x.min())
        )
        normalized_stats = labels_df['label'].describe(percentiles=[.25, .5, .75])
        normalized_mean = labels_df['label'].mean()
        normalized_min = labels_df['label'].min()
        normalized_max = labels_df['label'].max()

        print("Original Statistics (Before Normalization):")
        print(f"Mean: {original_mean}")
        print(f"Min: {original_min}")
        print(f"Max: {original_max}")
        print(f"25th Percentile: {original_stats['25%']}")
        print(f"50th Percentile (Median): {original_stats['50%']}")
        print(f"75th Percentile: {original_stats['75%']}")

        print("\nNormalized Statistics (After Normalization):")
        print(f"Mean: {normalized_mean}")
        print(f"Min: {normalized_min}")
        print(f"Max: {normalized_max}")
        print(f"25th Percentile: {normalized_stats['25%']}")
        print(f"50th Percentile (Median): {normalized_stats['50%']}")
        print(f"75th Percentile: {normalized_stats['75%']}")

        selected_columns1 = ['topicID', 'label', 'timestamp']
        filtered_df1 = labels_df[selected_columns1]
        # The int64 cast truncates normalised labels into buckets {0,1,2,3};
        # presumably intentional binning — TODO confirm.
        # (The original wrapped this tensor in torch.tensor() a second time,
        # which is redundant and emits a copy-construct UserWarning.)
        return torch.tensor(filtered_df1.values, dtype=torch.int64)

    def load_node_static_feats(self):
        """Load static text embeddings per topic.

        Returns:
            tuple: (embeddings, topic_ids) as stored in the .pt file.

        Raises:
            ValueError: if either field is missing from the file.
        """
        data = torch.load("./data/trendingweibo/embedding_qs_41c.pt")
        embeddings = data.get("embeddings", None)
        topic_ids = data.get("topic_ids", None)
        if embeddings is None or topic_ids is None:
            raise ValueError("embedding_qs_41c.pt 中必须包含 'embeddings' 和 'topic_ids' 字段")
        return embeddings, topic_ids

    def load_transactions_r(self):
        """Build two symmetric edge lists from edges_final.csv.

        Returns:
            dict: {'idx': [semantic_edges, cooccurrence_edges],
                   'vals': matching all-ones weight tensors}.
            Each edge tensor holds (node, node, time) rows in both directions.
        """
        data = pd.read_csv("./data/trendingweibo/edges_final.csv")
        # Thresholds for keeping an edge of each kind.
        cosine_condition = 0.60
        jaccard_condition = 26

        result = {'idx': [], 'vals': []}

        # Edges passing the semantic-similarity threshold.
        filtered_cosine = data[data['semantic_similarity'] >= cosine_condition]
        edges_cosine = torch.tensor(
            filtered_cosine[['timestamp', 'topicID-A', 'topicID-B']].values,
            dtype=torch.int64)

        # Edges passing the co-occurrence threshold.
        filtered_jaccard = data[data['co_occurrence_count'] >= jaccard_condition]
        edges_jaccard = torch.tensor(
            filtered_jaccard[['timestamp', 'topicID-A', 'topicID-B']].values,
            dtype=torch.int64)

        # Reorder each list from (time, A, B) to (node, node, time) and
        # append the reverse direction so every edge is symmetric.
        for edges in [edges_cosine, edges_jaccard]:
            reversed_edges = edges[:, [1, 2, 0]]
            edges = edges[:, [2, 1, 0]]
            full_edges = torch.cat([edges, reversed_edges])
            result['idx'].append(full_edges)
        result['vals'] = [torch.ones(len(edge_pair)) for edge_pair in result['idx']]
        return result

    def load_transactions(self):
        """Load the main edge list; also records self.min_time / self.max_time.

        Returns:
            dict: {'idx': (2E, 3) int64 tensor of (node, node, time) rows,
                   'vals': all-ones weights of length 2E}.
        """
        data = pd.read_csv("./data/trendingweibo/new_table6_edge_simi2.csv")
        # Keep only type-1 edges with high Jaccard similarity.
        filtered_data = data[(data.jaccard_similarity >= 0.60) & (data.Type == 1)]
        data = filtered_data.iloc[:, :3]  # timestamp, topicID-A, topicID-B
        edges = torch.tensor(data.values, dtype=torch.int64)

        # Reorder to (node, node, time) and append the reverse direction.
        reversed_edges = edges[:, [1, 2, 0]]
        edges = edges[:, [2, 1, 0]]
        full_edges = torch.cat([edges, reversed_edges])

        self.max_time = full_edges[:, 2].max().item()
        self.min_time = full_edges[:, 2].min().item()

        return {
            'idx': full_edges,
            'vals': torch.ones(full_edges.size(0))
        }

    def __len__(self):
        # Number of unique topic nodes in the dataset.
        return self.num_nodes

    def __getitem__(self, idx):
        # NOTE(review): nodes_feats is keyed by timestamp, not by node
        # index, and nodes_labels_times is row-indexed — indexing both with
        # the same idx only works if those coincide; verify before using
        # this class with a DataLoader.
        return {
            'features': self.nodes_feats[idx],
            'label': self.nodes_labels_times[idx, 1],
            'time': self.nodes_labels_times[idx, 2]
        }
