ZhuofengLi committed
Commit f765459
Parent: 1361cac

Upload folder using huggingface_hub

Reddit/processed/reddit_graph.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8aed3368b44889670caa82468ea6c78944c6eede3a03891c6b70b56d137db70f
+ size 134
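Note: the blob committed here is a Git LFS pointer, so a plain download of the repo file returns the pointer text rather than the pickle itself. A minimal sketch of fetching the resolved file with huggingface_hub (the repo_id below is a hypothetical placeholder; this commit does not name the repository):

    from huggingface_hub import hf_hub_download

    # repo_id is a placeholder -- substitute the actual dataset repository.
    local_path = hf_hub_download(
        repo_id="ZhuofengLi/TEG-Datasets",
        filename="Reddit/processed/reddit_graph.pkl",
        repo_type="dataset",
    )
    print(local_path)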
Reddit/raw/68841_tweets_multiclasses_filtered_0722_part1.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdc595c36f74073feeb9dea9af01a467dd64743ceec15442085d8c3f2f187339
+ size 20623408
Reddit/raw/Reddit-processing.py ADDED
@@ -0,0 +1,118 @@
+ import os
+ import pandas as pd
+ import math
+ import pickle as pkl
+ import torch
+ from torch_geometric.data import Data
+ 
+ # Get the directory of the current script
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ base_dir = os.path.dirname(script_dir)
+ raw_dir = os.path.join(base_dir, 'processed/original')  # source CSV lives under processed/original
+ 
+ # Define the file path
+ reddit_path = os.path.join(raw_dir, 'reddit_1m.csv')
+ 
+ # Read the Reddit data
+ df = pd.read_csv(reddit_path)
+ print(df.shape)
+ 
+ # Select required columns (copy so the in-place rename below does not hit a view of df)
+ df_graph = df[['subreddit_id', 'subreddit', 'name', 'body', 'score', 'author', 'author_flair_text', 'distinguished']].copy()
+ df_graph.rename(columns={'name': 'post_id',
+                          'body': 'post',
+                          'author': 'user',
+                          'author_flair_text': 'user_flair'},
+                 inplace=True, errors='raise')
+ 
+ # Drop duplicates, deleted posts, and rows with NaN post_id
+ df_graph = df_graph.drop_duplicates()
+ df_graph = df_graph[df_graph['post'] != '[deleted]']
+ df_graph = df_graph.dropna(subset=['post_id'])
+ print(df_graph.shape)
+ print(df_graph['post_id'].nunique())
+ 
+ # Encode distinguished and user_flair
+ df_graph['distinguished'] = df_graph['distinguished'].apply(lambda x: 0 if pd.isna(x) else 1)
+ df_graph['user_flair'] = df_graph['user_flair'].apply(lambda x: "" if pd.isna(x) else x)
+ 
+ text_nodes = []
+ 
+ # Create sub_id2idx: subreddit nodes occupy global indices 0..len(sub_nodes)-1
+ sub_id2idx = {}
+ sub_nodes = []
+ for _, row in df_graph.iterrows():
+     sub_id = row['subreddit_id']
+     if sub_id not in sub_id2idx:  # dict lookup instead of an O(n) list scan
+         sub_id2idx[sub_id] = len(sub_nodes)
+         sub_nodes.append(sub_id)
+         text_nodes.append(row['subreddit'])
+ node_labels = [-1] * len(sub_nodes)  # No labels for subreddit nodes
+ 
+ print("Length of sub nodes:", len(sub_nodes))
+ print("Sample sub node labels:", node_labels[:5])
+ print("Sample sub node texts:", text_nodes[:5])
+ 
+ # Create user_n2idx: user nodes follow subreddit nodes in the global index
+ user_n2idx = {}  # Username to index mapping
+ user_nodes = []
+ for _, row in df_graph.iterrows():
+     user_n = row['user']
+     if user_n in user_n2idx:  # Existing user: append new flair and update label
+         if row['user_flair'] not in text_nodes[user_n2idx[user_n]]:
+             text_nodes[user_n2idx[user_n]] += "\n" + row['user_flair']
+         node_labels[user_n2idx[user_n]] = max(row['distinguished'], node_labels[user_n2idx[user_n]])
+     else:  # New user: add the user to user_n2idx
+         user_n2idx[user_n] = len(user_nodes) + len(sub_nodes)
+         user_nodes.append(user_n)
+         text_nodes.append(row['user_flair'])
+         node_labels.append(row['distinguished'])
+ 
+ print("Length of user nodes:", len(user_nodes))
+ print("Sample user node labels:", node_labels[-10:])
+ print("Sample user node texts:", text_nodes[-10:])
+ 
+ # Record edge information
+ edge_index = [[], []]
+ text_edges = []
+ edge_scr_labels = []  # Continuous score
+ edge_spe_labels = []  # Binary special label
+ all_edges = set()
+ 
+ for _, row in df_graph.iterrows():
+     user_idx = user_n2idx[row['user']]
+     sub_idx = sub_id2idx[row['subreddit_id']]
+ 
+     if (user_idx, sub_idx) not in all_edges:  # Only keep one edge between two nodes
+         edge_index[0].append(user_idx)
+         edge_index[1].append(sub_idx)
+ 
+         text_edges.append(row['post'])
+         edge_scr_labels.append(row['score'])
+         edge_spe_labels.append(row['distinguished'])
+ 
+         all_edges.add((user_idx, sub_idx))
+ 
+ print("Length of edges:", len(edge_index[0]))
+ print("Sample edge score labels:", edge_scr_labels[-10:])
+ print("Sample edge special labels:", edge_spe_labels[-10:])
+ print("Sample edge texts:", text_edges[-10:])
+ 
+ edge_scr_labels = [0 if math.isnan(x) else x for x in edge_scr_labels]  # Replace NaN with 0
+ edge_spe_labels = [0 if math.isnan(x) else x for x in edge_spe_labels]
+ 
+ # Save as torch data
+ graph = Data(
+     text_nodes=text_nodes,
+     text_edges=text_edges,
+     node_labels=torch.tensor(node_labels, dtype=torch.long),
+     edge_index=torch.tensor(edge_index, dtype=torch.long),
+     edge_score_labels=torch.tensor(edge_scr_labels, dtype=torch.long),
+     edge_special_labels=torch.tensor(edge_spe_labels, dtype=torch.long),
+ )
+ 
+ output_file = os.path.join(base_dir, 'output/reddit_graph.pkl')
+ os.makedirs(os.path.dirname(output_file), exist_ok=True)  # Ensure the output directory exists
+ with open(output_file, 'wb') as file:
+     pkl.dump(graph, file)
+ print(f"Data processing complete. Processed data saved to: {output_file}")
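For reference, a minimal sketch of reading the pickle this script writes (assumes torch and torch_geometric are importable, since unpickling the Data object needs them; the path mirrors output_file above):

    import os
    import pickle as pkl

    # Same layout assumption as Reddit-processing.py: <base>/output/reddit_graph.pkl
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    with open(os.path.join(base_dir, 'output/reddit_graph.pkl'), 'rb') as f:
        graph = pkl.load(f)

    # Bipartite user-subreddit graph: edge_index[0] holds user indices,
    # edge_index[1] subreddit indices; text_edges carries the post bodies.
    print(graph.edge_index.shape)
    print(len(graph.text_nodes), len(graph.text_edges))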
Reddit/raw/download_data.sh ADDED
@@ -0,0 +1,21 @@
+ #!/bin/bash
+ 
+ # Get the directory of the current script
+ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+ BASE_DIR="$(dirname "$SCRIPT_DIR")"
+ RAW_DIR="$BASE_DIR/raw"
+ 
+ # Create the raw directory
+ mkdir -p "$RAW_DIR"
+ 
+ # Define URLs of the files to be downloaded
+ urls=(
+     "https://github.com/YuweiCao-UIC/KPGNN/raw/main/datasets/Twitter/68841_tweets_multiclasses_filtered_0722_part1.npy"
+ )
+ 
+ # Download each file to the raw directory
+ for url in "${urls[@]}"; do
+     wget -P "$RAW_DIR" "$url"
+ done
+ 
+ echo "Download complete."
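After downloading, a minimal sketch of inspecting the fetched array (assumes numpy; allow_pickle=True is an assumption about how the KPGNN file was serialized, since it may store Python objects):

    import numpy as np

    # allow_pickle=True is assumed, not confirmed by this commit.
    data = np.load('68841_tweets_multiclasses_filtered_0722_part1.npy', allow_pickle=True)
    print(data.shape, data.dtype)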