Upload 2 files
Twitter/processed/Twitter-processing.py
ADDED
@@ -0,0 +1,152 @@
import os

import pandas as pd
import numpy as np
import pickle as pkl
import networkx as nx

from torch_geometric.data import Data
import torch

# Define file paths
p_part1 = 'processed/original/68841_tweets_multiclasses_filtered_0722_part1.npy'
p_part2 = 'processed/original/68841_tweets_multiclasses_filtered_0722_part2.npy'

# Load the numpy data
df_np_part1 = np.load(p_part1, allow_pickle=True)
df_np_part2 = np.load(p_part2, allow_pickle=True)
df_np = np.concatenate((df_np_part1, df_np_part2), axis=0)

# Convert numpy data to pandas DataFrame
df = pd.DataFrame(data=df_np, columns=["event_id", "tweet_id", "text", "user_id", "created_at", "user_loc",
                                       "place_type", "place_full_name", "place_country_code", "hashtags",
                                       "user_mentions", "image_urls", "entities",
                                       "words", "filtered_words", "sampled_words"])
print("Data converted to dataframe.")

# Select relevant columns
df_graph_4 = df[['tweet_id', 'text', 'user_id', 'user_mentions', 'event_id']]


def create_graph_4(df_graph):
    # Create an empty graph
    G = nx.Graph()
    text_nodes = []
    edge_index = [[], []]
    text_edges = []
    node_labels = []
    edge_labels = []

    # Create all user nodes and user_id2idx
    user_id2idx = {}
    user_nodes = []
    for _, row in df_graph.iterrows():
        mentions_list = row['user_mentions']
        user_id = row["user_id"]
        # Only include user nodes where users mention each other
        if len(mentions_list) > 0:
            if user_id not in user_id2idx:
                user_id2idx[user_id] = len(user_nodes)
                user_nodes.append({user_id: user_id2idx[user_id]})
                G.add_node(user_id2idx[user_id])
            for mention in mentions_list:
                if mention not in user_id2idx:
                    user_id2idx[mention] = len(user_nodes)
                    user_nodes.append({mention: user_id2idx[mention]})
                    G.add_node(user_id2idx[mention])

    text_nodes = ["user"] * len(user_nodes)  # All user nodes have text "user" and label -1
    node_labels = [-1] * len(user_nodes)
    print("Length of user nodes:", len(user_nodes))
    print("Sample user nodes:", user_nodes[:5])
    print("Sample user node labels:", node_labels[:5])
    print("Sample user node texts:", text_nodes[:5])

    # Initialize tweet_id2idx
    tweet_id2idx = {}
    tweet_id2node_idx = {}
    tweet_nodes = []

    # Add information related to mentions (user-user edge, tweet node, user-tweet edge)
    for _, row in df_graph.iterrows():
        mentions_list = row['user_mentions']
        if len(mentions_list) > 0:
            user_idx = user_id2idx[row['user_id']]

            for mention in mentions_list:
                mention_idx = user_id2idx[mention]
                if not G.has_edge(user_idx, mention_idx):  # Only include one edge between two users
                    # Add edge u1-u2
                    G.add_edge(user_idx, mention_idx)
                    edge_index[0].append(user_idx)
                    edge_index[1].append(mention_idx)
                    text_edges.append(row['text'])
                    edge_labels.append(row["event_id"])

                    # Add node t1, create tweet_id2idx
                    tweet_id = row['tweet_id']
                    tweet_id2idx[tweet_id] = len(tweet_nodes)
                    tweet_id2node_idx[tweet_id] = len(user_nodes) + tweet_id2idx[tweet_id]
                    tweet_nodes.append({tweet_id: len(user_nodes) + tweet_id2idx[tweet_id]})
                    G.add_node(tweet_id2node_idx[tweet_id])
                    text_nodes.append(row["text"])
                    node_labels.append(row['event_id'])

                    # Add edge u1-t1
                    tweet_node_idx = tweet_id2node_idx[tweet_id]
                    edge_index[0].append(user_idx)
                    edge_index[1].append(tweet_node_idx)
                    text_edges.append("")
                    edge_labels.append(-1)
                    G.add_edge(user_idx, tweet_node_idx)

                    # Add edge t1-u2
                    edge_index[0].append(mention_idx)
                    edge_index[1].append(tweet_node_idx)
                    text_edges.append("")
                    edge_labels.append(-1)
                    G.add_edge(mention_idx, tweet_node_idx)

    # Add information not related to mentions (user-tweet edge, tweet node)
    for _, row in df_graph.iterrows():
        tweet_id = row['tweet_id']
        user_id = row["user_id"]
        if user_id in user_id2idx.keys() and tweet_id not in tweet_id2idx.keys():
            # Create tweet_id2node_idx and add node t1
            tweet_id2idx[tweet_id] = len(tweet_nodes)
            tweet_id2node_idx[tweet_id] = len(user_nodes) + tweet_id2idx[tweet_id]
            tweet_nodes.append({tweet_id: len(user_nodes) + tweet_id2idx[tweet_id]})
            G.add_node(tweet_id2node_idx[tweet_id])
            text_nodes.append(row["text"])
            node_labels.append(row['event_id'])

            # Add edge u1-t1
            user_idx = user_id2idx[user_id]
            tweet_node_idx = tweet_id2node_idx[tweet_id]
            edge_index[0].append(user_idx)
            edge_index[1].append(tweet_node_idx)
            text_edges.append("")
            edge_labels.append(-1)
            G.add_edge(user_idx, tweet_node_idx)

    print("Length of tweet nodes:", len(tweet_nodes))
    print("Sample tweet nodes:", tweet_nodes[:5])
    print("Sample tweet node labels:", node_labels[-5:])
    print("Sample tweet node texts:", text_nodes[-5:])

    return text_nodes, text_edges, node_labels, edge_labels, edge_index


# Create the graph
text_nodes, text_edges, node_labels, edge_labels, edge_index = create_graph_4(df_graph_4)

# Create Data object
graph = Data(
    text_nodes=text_nodes,
    text_edges=text_edges,
    node_labels=torch.tensor(node_labels, dtype=torch.long),
    edge_labels=torch.tensor(edge_labels, dtype=torch.long),
    edge_index=torch.tensor(edge_index, dtype=torch.long)
)

# Save the processed graph data (base_dir is assumed to be the working
# directory, matching the relative input paths above)
base_dir = '.'
output_file = os.path.join(base_dir, 'output/twitter_graph.pkl')
os.makedirs(os.path.dirname(output_file), exist_ok=True)  # Ensure output/ exists
with open(output_file, 'wb') as file:
    pkl.dump(graph, file)

print(f"Data processing complete. Processed data saved to: {output_file}")
Twitter/processed/download_data.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash

# Get the directory of the current script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
BASE_DIR="$(dirname "$SCRIPT_DIR")"
RAW_DIR="$BASE_DIR/processed/original"

# Create the raw directory
mkdir -p "$RAW_DIR"

# Define URLs of the files to be downloaded
urls=(
    "https://github.com/YuweiCao-UIC/KPGNN/raw/main/datasets/Twitter/68841_tweets_multiclasses_filtered_0722_part1.npy"
    "https://github.com/YuweiCao-UIC/KPGNN/raw/main/datasets/Twitter/68841_tweets_multiclasses_filtered_0722_part2.npy"
)

# Download each file to the raw directory
for url in "${urls[@]}"; do
    wget -P "$RAW_DIR" "$url"
done

echo "Download complete."
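
Note: because RAW_DIR resolves relative to the script's own location, download_data.sh places the two .npy parts under Twitter/processed/original/, which is exactly where Twitter-processing.py expects them; run the processing script from the Twitter/ directory so its relative paths resolve.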