mo-mittal committed on
Commit
8807690
1 Parent(s): fb44eb6

Upload political_subs.py

Files changed (1)
  1. political_subs.py +101 -0
political_subs.py ADDED
@@ -0,0 +1,101 @@
+ import pandas as pd
+ import datasets
+ from datasets import Features, Value, ClassLabel, Image, DownloadManager
+ import logging
+ import requests
+ from PIL import Image as PIL_Image  # import the PIL.Image module (not the Image class) so PIL_Image.open() works
+ import io
+
+ _URLS = {
+     "my_data": "https://drive.google.com/uc?export=download&id=1t7qllYbonoCgNzzh7w9NhmnMZ4pmqERo",
+ }
+
+ class RedditDataset(datasets.GeneratorBasedBuilder):
+     """A Dataset builder for a DataFrame with Reddit data."""
+
+     VERSION = datasets.Version('1.0.0')
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description="This dataset contains Reddit posts with various attributes.",
+             features=Features({
+                 "author": Value("string"),
+                 "created_utc": Value("string"),
+                 "domain": Value("string"),
+                 "title": Value("string"),
+                 "selftext": Value("string"),
+                 "subreddit": Value("string"),
+                 "score": Value("int32"),
+                 "num_comments": Value("int32"),
+                 "ups": Value("float32"),
+                 "downs": Value("float32"),
+                 "permalink": Value("string"),
+                 "is_self": Value("bool"),
+                 "url": Value("string"),
+                 "subreddit_subscribers": Value("float32"),
+                 "upvote_ratio": Value("float32"),
+                 "is_original_content": Value("string"),
+                 "media": Value("string"),
+                 "selftext_html": Value("string"),
+                 "author_flair_text": Value("string"),
+                 "link_flair_text": Value("string"),
+                 "image": Image(),
+                 "image_text": Value("string"),
+             }),
+             supervised_keys=None,
+             homepage='https://www.reddit.com/',
+             citation="",
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+         # Download the CSV from Google Drive; download_and_extract returns the local cached path.
+         downloaded_file = dl_manager.download_and_extract(_URLS["my_data"])
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"dataframe_path": downloaded_file},  # light config
+             ),
+         ]
+
+     def _generate_examples(self, dataframe_path):  # light config
+         """Yields examples."""
+         df = pd.read_csv(dataframe_path)
+
+         for idx, row in df.iterrows():
+             image_data = None
+             if pd.notna(row['url']):
+                 try:
+                     # Fetch the linked image and re-encode it as JPEG bytes for the Image() feature.
+                     response = requests.get(row['url'])
+                     response.raise_for_status()
+                     image = PIL_Image.open(io.BytesIO(response.content))
+                     image = image.convert('RGB')
+                     img_byte_arr = io.BytesIO()
+                     image.save(img_byte_arr, format='JPEG')
+                     image_data = img_byte_arr.getvalue()
+                 except Exception as e:
+                     logging.exception(f"Could not download or process image from {row['url']}: {e}")
+                     image_data = None
+
+             yield idx, {
+                 "author": row["author"],
+                 "created_utc": row["created_utc"],
+                 "domain": row["domain"] if pd.notna(row["domain"]) else "",
+                 "title": row["title"],
+                 "selftext": row["selftext"] if pd.notna(row["selftext"]) else "",
+                 "subreddit": row["subreddit"],
+                 "score": row["score"],
+                 "num_comments": row["num_comments"],
+                 "ups": row["ups"] if pd.notna(row["ups"]) else 0,
+                 "downs": row["downs"] if pd.notna(row["downs"]) else 0,
+                 "permalink": row["permalink"],
+                 "is_self": row["is_self"],
+                 "url": row["url"] if pd.notna(row["url"]) else "",
+                 "subreddit_subscribers": row["subreddit_subscribers"] if pd.notna(row["subreddit_subscribers"]) else 0.0,
+                 "upvote_ratio": row["upvote_ratio"] if pd.notna(row["upvote_ratio"]) else 0.0,
+                 "is_original_content": str(row["is_original_content"]) if pd.notna(row["is_original_content"]) else "",
+                 "media": row["media"] if pd.notna(row["media"]) else "",
+                 "selftext_html": row["selftext_html"] if pd.notna(row["selftext_html"]) else "",
+                 "author_flair_text": row["author_flair_text"] if pd.notna(row["author_flair_text"]) else "",
+                 "link_flair_text": row["link_flair_text"] if pd.notna(row["link_flair_text"]) else "",
+                 "image": image_data,
+                 "image_text": row['image_text'] if pd.notna(row['image_text']) else "",
+             }
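
For reference, a minimal usage sketch of the builder above (not part of this commit): it assumes the script is saved locally as political_subs.py and that the Google Drive CSV and image URLs are reachable. Calling load_dataset on the script path runs _split_generators() and then _generate_examples(), so images are downloaded at generation time; the file name and printed fields here are illustrative only.

    # hypothetical usage example, assuming a local copy of political_subs.py
    from datasets import load_dataset

    # Newer versions of the datasets library may additionally require trust_remote_code=True
    # when loading from a dataset script.
    ds = load_dataset("political_subs.py", split="train")

    print(ds)                          # column names and number of rows
    example = ds[0]
    print(example["title"], example["subreddit"])
    img = example["image"]             # the Image() feature decodes the stored JPEG bytes to a PIL image (or None)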