p1atdev committed
Commit 97152f0
1 Parent(s): be3b817

Upload dump.py

Files changed (1)
  1. dump.py +167 -0
dump.py ADDED
@@ -0,0 +1,167 @@
+ import requests
+ from bs4 import BeautifulSoup
+ import json
+ import time
+ from concurrent.futures import ThreadPoolExecutor
+ import argparse
+
+ BASE = "https://cosppi.net"
+ USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:108.0) Gecko/20100101 Firefox/108.0"
+ RETRY_MAX = 3
+ TIMEOUT = 20
+
+ SORTS = ["all-rank", "china-rank", "new-add", "follower-rank"]
+
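+ # Convert protocol-relative ("//...") and site-relative ("/...") hrefs into absolute URLs.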
+ def process_url(url):
+     if url.startswith("//"):
+         return f"https:{url}"
+     elif url.startswith("/"):
+         return f"{BASE}{url}"
+     else:
+         return url
+
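+ # Fetch a single listing page for the given sort order and collect the profile links on it.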
+ def get_user_urls(debug, sort, page):
+     found_urls = []
+     url = f"{BASE}/sort/{sort}/page/{page}"
+     print(f"Fetching {url}...")
+     response = None
+     for i in range(RETRY_MAX):
+         try:
+             response = requests.get(
+                 url,
+                 headers={"User-Agent": USER_AGENT},
+                 timeout=TIMEOUT,
+             )
+             response.raise_for_status()
+             break
+         except requests.exceptions.RequestException as e:
+             print(f"Error: {e}")
+             if i < RETRY_MAX - 1:
+                 print("Retrying after 3 seconds...")
+                 time.sleep(3)
+             else:
+                 print("Max retries reached.")
+
+     if response is None or response.status_code == 404 or (debug and page >= 2):
+         print(f"Page {page} not found!")
+         return None
+     else:
+         print(f"Fetched page {page}")
+     soup = BeautifulSoup(response.text, "html.parser")
+     main_section = soup.find("main")
+     users = main_section.find_all("a", {"class": "sort_prof_link"})
+     if not users:
+         return None
+     for user in users:
+         raw_url = user.get("href")
+         processed_url = process_url(raw_url)
+         found_urls.append(processed_url)
+         print(f"User URL found: {processed_url}")
+     return found_urls
+
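+ # Scrape listing pages 1-49 in parallel and flatten the per-page URL lists.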
+ def get_all_user_urls(debug, sort, thread_count):
+     pages = range(1, 50)
+
+     with ThreadPoolExecutor(max_workers=thread_count) as executor:
+         user_urls = executor.map(get_user_urls, [debug] * len(pages), [sort] * len(pages), pages)
+
+     # remove None and flatten list
+     user_urls = [url for sublist in user_urls if sublist for url in sublist]
+
+     print(f"\nTotal number of user pages: {len(user_urls)}\n")
+     return user_urls
+
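+ # Walk a user's gallery pages until a 404, collecting each image's URL, likes, and retweets.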
+ def get_user_images(user_url):
+     page = 1
+
+     images = []
+
+     while True:
+         response = requests.get(
+             f"{user_url}/page/{page}",
+             headers={"User-Agent": USER_AGENT},
+             timeout=TIMEOUT,
+         )
+         if response.status_code == 404:
+             print(f"User {user_url} not found!")
+             break
+         else:
+             print(f"Fetching user images {user_url}...")
+
+         soup = BeautifulSoup(response.text, "html.parser")
+         img_wrapper_outer = soup.find("div", {"class": "img_wrapper_outer"})
+         image_wrappers = img_wrapper_outer.find_all("div", {"class": "img_wrapper"})
+
+         if not image_wrappers:
+             print(f"User {user_url} has no images!")
+             break
+
+         print(f"Found {len(image_wrappers)} images on page {page}...")
+
+         for image_wrapper in image_wrappers:
+             url = image_wrapper.find("img").get("data-src")
+             # remove last ":small" or ":large"
+             url = url.rsplit(":", 1)[0]
+             span_tags = image_wrapper.find("div", {"class": "img_num"}).find_all("span")
+             [likes, retweets] = [int(span_tag.text) for span_tag in span_tags]
+
+             image_item = {
+                 "url": url,
+                 "likes": likes,
+                 "retweets": retweets,
+             }
+             images.append(image_item)
+
+         page += 1
+
+     print(f"Images found in article {user_url}: {len(images)}")
+
+     username = user_url.split("/")[-1]
+
+     return {
+         "username": username,
+         "images": images,
+     }
+
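+ # Fetch every user's image list concurrently.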
+ def get_image_urls(user_urls, thread_count):
+
+     with ThreadPoolExecutor(max_workers=thread_count) as executor:
+         results = executor.map(get_user_images, user_urls)
+
+     return [result for result in results]
+
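+ # Orchestrate the scrape: collect user URLs, fetch their images, print a preview, and write JSON.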
+ def main(debug, thread_count, output_path, sort):
+     user_urls = get_all_user_urls(debug, sort, thread_count)
+
+     print(f"\nFetching images from {len(user_urls)} user pages...")
+     print(user_urls)
+
+     users_and_images = get_image_urls(user_urls, thread_count)
+
+     print(f"\nTotal number of users: {len(users_and_images)}")
+     num_images = 0
+     for user in users_and_images:
+         num_images += len(user["images"])
+     print(f"Total number of images: {num_images}")
+
+     # preview
+     print("\nPreview:")
+     # 1~3
+     for user in users_and_images[:3]:
+         print(f"Username: {user['username']}")
+         print(f"Number of images: {len(user['images'])}")
+         if user["images"]:
+             print(f"First image: {user['images'][0]['url']}")
+             print(f"Last image: {user['images'][-1]['url']}")
+         print()
+
+     print(f"\nWriting to {output_path}...")
+     with open(output_path, "w", encoding="utf-8") as output_file:
+         json.dump(users_and_images, output_file, indent=4)
+     print("Done!")
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--debug", action="store_true", help="Enable debug mode (only fetches the first listing page)")
+     parser.add_argument("--threads", type=int, default=5, help="Number of threads to use for parallel processing")
+     parser.add_argument("--output", type=str, default="output.json", help="Output file name for JSON data")
+     parser.add_argument("--sort", type=str, default="all-rank", choices=SORTS, help="Sort type")
+     args = parser.parse_args()
+
+     main(args.debug, args.threads, args.output, args.sort)
+
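The script is run as, for example, python dump.py --sort all-rank --threads 5 --output output.json (add --debug to stop after the first listing page). Below is a minimal sketch, not part of the commit, of consuming the resulting dump; it assumes the default output.json path and the record shape assembled in get_user_images (a username plus a list of url/likes/retweets entries):

import json

# Load the dump written by dump.py (the path mirrors the --output default).
with open("output.json", encoding="utf-8") as f:
    users = json.load(f)

# Each record: {"username": str, "images": [{"url": str, "likes": int, "retweets": int}, ...]}
total_images = sum(len(user["images"]) for user in users)
print(f"{len(users)} users, {total_images} images")

# Example: the most-liked image across all users, if any images were found.
best = max(
    (img for user in users for img in user["images"]),
    key=lambda img: img["likes"],
    default=None,
)
if best is not None:
    print(f"Top image: {best['url']} ({best['likes']} likes, {best['retweets']} retweets)")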