# Spotify-unsorted-300k / Scraper.py
import latest_user_agents
import threading
import requests
import pathlib
import tarfile
import urllib.parse as urlparse
import random
import pickle
import json
import time
import bs4
import PIL.Image as Image
import io
import re
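# Tunables: worker thread counts, the resume-state pickle, and the output tar archive.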
c_imaging_threads = 30
c_paging_threads = 12
c_state_file = "state.pickle"
c_output_file = "./data.tar"
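# A persisted GraphQL operation scraped from the web-player bundle: its operation
# name, the path segment it is issued against, and the sha256 hash Spotify's
# "persistedQuery" extension expects.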
class Query:
    name = None
    mode = None
    query_hash = None

    def __init__(self, name, mode, query_hash):
        self.name = name
        self.mode = mode
        self.query_hash = query_hash

    def __repr__(self):
        return f"Query<{self.name}, {self.mode}, {self.query_hash}>"

    def _build_extension(self):
        return json.dumps({
            "persistedQuery": {
                "version": 1,
                "sha256Hash": self.query_hash
            }
        })

    def build(self, url, variables):
        return f"{urlparse.urljoin(url, self.mode)}?operationName={self.name}&variables={json.dumps(variables)}&extensions={self._build_extension()}"
class Client:
    session = requests.session()
    client_version = None
    client_token = None
    client_auth = None
    client_id = None
    queries = {}
    _client_token_expiration = 0
    _client_token_renewal = 0
    _client_auth_expiration = 0

    def _setup_session(self):
        self.session.headers = {
            "User-Agent": latest_user_agents.get_random_user_agent(),
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "no-cors",
            "Sec-Fetch-Site": "same-origin",
            "Referer": "https://open.spotify.com/",
            "Origin": "https://open.spotify.com",
            "Priority": "u=4",
            "Sec-GPC": "1",
            "DNT": "1"
        }
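    # Scrape the web-player HTML for the bundle script, regex out clientVersion,
    # clientID and every (name, mode, hash) persisted-query triple, then fetch the
    # anonymous access token from /get_access_token.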
    def _acquire_client(self):
        def get_key(source, name):
            result = re.search(name + r":\s*['\"](.+?)['\"]", source, re.UNICODE)
            if result:
                return result.group(1)
            else:
                return None

        def get_queries(source):
            queries = {}
            for section in re.finditer(r"\(\s*?['\"](\w+?)['\"],\s*?['\"](\w+?)['\"],\s*?['\"]([a-z\d]+?)['\"]", source, re.UNICODE):
                query = Query(*section.groups())
                queries[query.name] = query
            return queries

        document = bs4.BeautifulSoup(self.session.get("https://open.spotify.com/").text, "html.parser")
        client_version = None
        client_id = None
        for script in document.find_all("script"):
            source_link = script.get("src")
            if source_link and "web-player" in source_link:
                source = self.session.get(source_link).text
                client_version = get_key(source, "clientVersion")
                client_id = get_key(source, "clientID")
                self.queries.update(get_queries(source))
                if client_version and client_id:
                    break
        assert client_version and client_id, "Couldn't find keys"
        assert len(client_id) + len(client_version) < 1024, "Keys are too big (regex failure?)"
        self.session.headers.update({
            "Spotify-App-Version": client_version,
            "App-Platform": "WebPlayer"
        })
        result = self.session.get("https://open.spotify.com/get_access_token?reason=transport&productType=web-player", headers={ "Accept": "application/json" })
        assert result.status_code == 200, "Status code for get_access_token isn't 200"
        parsed = result.json()
        assert parsed["clientId"] == client_id, "client_id mismatch"
        self.client_auth = parsed["accessToken"]
        self._client_auth_expiration = float(parsed["accessTokenExpirationTimestampMs"]) / 1000
        self.client_version = client_version
        self.client_id = client_id
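    # Perform the clienttoken.spotify.com handshake: a CORS preflight, then a POST
    # with the scraped client version/ID and randomized device data, expecting a
    # RESPONSE_GRANTED_TOKEN_RESPONSE carrying the token and its expiry/renewal times.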
    def _get_client_token(self):
        result = self.session.options("https://clienttoken.spotify.com/v1/clienttoken", headers = {
            "Access-Control-Request-Method": "POST",
            "Access-Control-Request-Headers": "content-type"
        })
        assert result.status_code == 200, "Failed to configure v1/clienttoken for request"
        result = self.session.post(
            "https://clienttoken.spotify.com/v1/clienttoken",
            headers = {
                "Accept": "application/json",
                "TE": "trailers"
            },
            json = {
                "client_data": {
                    "client_version": self.client_version,
                    "client_id": self.client_id,
                    "js_sdk_data": {
                        "device_brand": "unknown",
                        "device_model": "unknown",
                        "os": "windows",
                        "os_version": "NT 10.0",
                        "device_id": random.randbytes(16).hex(),
                        "device_type": "computer"
                    }
                }
            }
        )
        assert result.status_code == 200, "Failed to acquire authorization from v1/clienttoken"
        parsed = result.json()
        response_type = "response_type" in parsed and parsed["response_type"]
        assert response_type == "RESPONSE_GRANTED_TOKEN_RESPONSE", f"Expected RESPONSE_GRANTED_TOKEN_RESPONSE got {response_type}"
        granted_token = parsed["granted_token"]
        self._client_token_expiration = time.time() + int(granted_token["expires_after_seconds"])
        self._client_token_renewal = time.time() + int(granted_token["refresh_after_seconds"])
        self.client_token = granted_token["token"]
    def is_authenticated(self, slack=10):
        return self._client_token_renewal > time.time() + slack and self._client_auth_expiration > time.time() + slack

    def authenticate(self):
        if not self.is_authenticated():
            self._setup_session()
            self._acquire_client()
            self._get_client_token()

    def request(self, name, variables):
        assert name in self.queries, f"Operation {name} not found"
        self.authenticate()
        return self.session.get(
            self.queries[name].build("https://api-partner.spotify.com/pathfinder/v1/", variables),
            headers = {
                "authorization": f"Bearer {self.client_auth}",
                "client-token": self.client_token,
                "Accept": "application/json"
            }
        )
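# Thread-safe writer that appends downloaded images to a single tar archive,
# numbering entries with a monotonically increasing index.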
class ImageWriter:
    def __init__(self, output_file="data.tar"):
        self.current_file_index = 0
        self.current_file = tarfile.open(output_file, mode="a")
        self._lock = threading.Lock()

    def write_file(self, name, value):
        with self._lock:
            value.seek(0)
            info = tarfile.TarInfo(name.format(index = self.current_file_index))
            info.size = value.getbuffer().nbytes
            self.current_file.addfile(info, value)
            self.current_file_index += 1

    def close(self):
        with self._lock:
            self.current_file.close()
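# True for objects that look traversable (iterator protocol or item access);
# used to decide whether recursive_scraping should descend into a value.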
def can_iterate(obj):
    return ("__iter__" in dir(obj) and "__next__" in dir(obj)) or "__getitem__" in dir(obj)
def traverse(obj, path):
    current = obj
    for name in path.split('.'):
        current = current.get(name)
        if not current:
            break
    return current
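# Recursively walk a GraphQL response, collecting Spotify URIs (pages to expand)
# into uri_list and plain URLs (image sources) into url_list. Dicts that carry
# width/height metadata are treated as image entries and only kept above 500x500.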
def recursive_scraping(parent, uri_list, url_list):
    if isinstance(parent, list):
        for child in parent:
            if can_iterate(child):
                recursive_scraping(child, uri_list, url_list)
    elif isinstance(parent, dict):
        if "width" in parent and "height" in parent:
            if int(parent["width"] or 0) > 500 and int(parent["height"] or 0) > 500:
                url_list.add(parent["url"])
            return
        for key, child in parent.items():
            if key == "uri" or key == "_uri":
                uri_list.add(child)
            elif key == "url":
                url_list.add(child)
            elif can_iterate(child):
                recursive_scraping(child, uri_list, url_list)
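# Map a spotify:<type>:<id> URI to the GraphQL operation used to expand it, its
# variables (including pagination offset/limit), and the JSON path of the item
# total used to page through the results. Types that hold nothing to expand
# return no query.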
def parse_uri(uri, offset=0, limit=50):
    if not uri or uri.count(':') != 2:
        return None, None, None
    [_spotify, section, _index] = uri.split(':')
    total_count = None
    variables = None
    query = None
    match section:
        case "episode" | "chapter":
            query = "getEpisodeOrChapter"
            variables = { "uri": uri }
        case "show":
            query = "queryPodcastEpisodes"
            variables = { "uri": uri, "offset": offset, "limit": limit }
            total_count = "data.podcastUnionV2.episodesV2.totalCount"
        case "album":
            query = "getAlbum"
            variables = { "uri": uri, "offset": offset, "limit": limit, "locale": "intl-pt" }
            total_count = "data.albumUnion.tracks.totalCount"
        case "playlist":
            query = "fetchPlaylist"
            variables = { "uri": uri, "offset": offset, "limit": limit }
            total_count = "data.playlistV2.content.totalCount"
        case "artist":
            query = "queryArtistOverview"
            variables = { "uri": uri, "includePrerelease": True, "locale": "intl-pt" }
        case "track" | "section" | "concert" | "page" | "user" | "merch" | "prerelease":
            query = None
            variables = None
        case _:
            raise RuntimeError(f"Unknown section type \"{section}\" found in {uri}")
    return query, variables, total_count
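# Expand a single URI: issue its query page by page (tolerating up to 10 failed
# requests), feed every response through recursive_scraping, and return the child
# URIs and image URLs that were discovered.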
def evaluate_uri(client, uri):
    current = 0
    limit = 50
    uri_list = set()
    url_list = set()
    error_count = 0
    while error_count < 10:
        [query, variables, total_count_path] = parse_uri(uri, current, limit)
        if not query:
            break
        result = None
        parsed = None
        try:
            result = client.request(query, variables)
            parsed = result.json()
        except (requests.exceptions.ConnectionError, requests.exceptions.JSONDecodeError) as error:
            if result:
                print(f"Failed to query uri {uri} with code {result.status_code}, error {error}")
            else:
                print(f"Failed to query uri {uri} (no result), error {error}")
            error_count += 1
            continue
        recursive_scraping(parsed, uri_list, url_list)
        total = 0
        if total_count_path:
            total = traverse(parsed, total_count_path)
        if not total or current + limit > total:
            break
        current += limit
    return uri_list, url_list
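# Shared scraping state: URIs still to expand / already expanded ("pages") and
# image URLs still to download / already handled ("sources"), plus a run flag and
# the condition variable the workers sleep on.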
class _Queue:
    pages_done = set()
    pages = set()
    sources_done = set()
    sources = set()

    def copy(self):
        val = type(self)()
        val.pages = self.pages.copy()
        val.pages_done = self.pages_done.copy()
        val.sources = self.sources.copy()
        val.sources_done = self.sources_done.copy()
        return val
class _State:
    update_event = threading.Condition()
    running = True
g_image_writer = ImageWriter(c_output_file)
g_queue = _Queue()
g_state = _State()
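# Pop one item from a shared set, sleeping on the condition variable while it is
# empty; returns None once g_state.running is cleared during shutdown.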
def atomic_wait_item(lock, iterable):
    target = None
    while not target and g_state.running:
        try:
            target = iterable.pop()
        except KeyError:
            with lock:
                lock.wait()
    return target
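# Paging worker: owns its own Client, repeatedly takes a URI from the "pages" set,
# expands it via evaluate_uri, and pushes newly discovered URIs and image URLs back
# onto the shared queues, waking every waiting worker afterwards.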
def paging_worker_logic(thread_id):
    global g_queue, g_state
    client = Client()
    update_event = g_state.update_event
    pages_done = g_queue.pages_done
    pages = g_queue.pages
    sources_done = g_queue.sources_done
    sources = g_queue.sources
    print(f"W[{thread_id}] Started")
    try:
        while g_state.running:
            #print(f"[{thread_id}] Waiting...")
            target = atomic_wait_item(update_event, pages)
            if not target:
                continue
            pages_done.add(target)
            if not client.is_authenticated():
                print(f"W[{thread_id}] Token renewal...")
                try:
                    client.authenticate()
                    print(f"W[{thread_id}] Acquired new tokens")
                except (RuntimeError, AssertionError) as error:
                    print(error)
            #print(f"[{thread_id}] Eval...")
            [uri_list, url_list] = evaluate_uri(client, target)
            pages.update(uri_list.difference(pages_done))
            sources.update(url_list.difference(sources_done))
            #print(f"[{thread_id}] Fire...")
            with update_event:
                update_event.notify_all()
    finally:
        print(f"W[{thread_id}] Halted")
def imaging_worker_logic(thread_id):
    global g_queue, g_state
    update_event = g_state.update_event
    sources_done = g_queue.sources_done
    sources = g_queue.sources
    session = requests.session()
    session.headers["User-Agent"] = latest_user_agents.get_random_user_agent()
    print(f"I[{thread_id}] Started")
    try:
        while g_state.running:
            target = atomic_wait_item(update_event, sources)
            if not target:
                continue
            sources_done.add(target)
            match = re.fullmatch(r"https?:\/\/(.*\.scdn\.co|image-cdn-ak.spotifycdn.com)\/image\/([a-z\d]+)", target)
            if not match:
                continue
            (_domain, name) = match.groups()
            data = None
            for _ in range(3):
                try:
                    data = io.BytesIO(session.get(target).content)
                    break
                except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError):
                    pass
            if not data:
                print(f"I[{thread_id}] Failed to download image")
                continue
            image = None
            try:
                image = Image.open(data)
            except Image.UnidentifiedImageError:
                pass
            if not image:
                print(f"I[{thread_id}] Failed to identify image")
                continue
            (x, y) = image.size
            if x < 500 or y < 500:
                continue
            g_image_writer.write_file(f"{name}-{{index}}.{image.format.lower()}", data)
    finally:
        print(f"I[{thread_id}] Halted")
def save_state(path, queue, image_writer):
    with open(path, "wb") as file:
        pickle.dump({
            "sources_done": queue.sources_done,
            "sources": queue.sources,
            "pages_done": queue.pages_done,
            "pages": queue.pages,
            "current_file_index": image_writer.current_file_index,
        }, file)

def load_state(path, queue, image_writer):
    global g_queue, g_state
    with open(path, "rb") as file:
        loaded = pickle.load(file)
    queue.sources_done = loaded["sources_done"]
    queue.sources = loaded["sources"]
    queue.pages_done = loaded["pages_done"]
    queue.pages = loaded["pages"]
    image_writer.current_file_index = loaded["current_file_index"]
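# Entry point: resume from the state pickle if present, otherwise bootstrap the
# queues from the "home" query, then spawn the paging and imaging workers, print
# progress on every wake-up, back the state up every 30 minutes, and on Ctrl+C
# stop the workers and save everything.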
def main():
    global g_queue, g_state
    if pathlib.Path(c_state_file).is_file():
        load_state(c_state_file, g_queue, g_image_writer)
    else:
        client = Client()
        client.authenticate()
        print("Client:")
        print("\tVersion:", client.client_version)
        print("\tID:", client.client_id)
        print("\tToken:", client.client_token)
        print("\tAuth:", client.client_auth)
        result = client.request("home", {
            "timeZone": "UTC",
            "sp_t": random.randbytes(16).hex(),
            "country": "US",
            "facet": None,
            "sectionItemsLimit": 15
        })
        recursive_scraping(result.json(), g_queue.pages, g_queue.sources)
    threads = []
    for thread_id in range(c_paging_threads):
        thread = threading.Thread(target=paging_worker_logic, args=(thread_id,))
        thread.start()
        threads.append(thread)
    for thread_id in range(c_imaging_threads):
        thread = threading.Thread(target=imaging_worker_logic, args=(thread_id,))
        thread.start()
        threads.append(thread)
    try:
        start = time.time()
        while g_state.running:
            with g_state.update_event:
                g_state.update_event.wait()
            print(f"URIs: {len(g_queue.pages)}/{len(g_queue.pages_done)} images: {len(g_queue.sources)}/{len(g_queue.sources_done)} written: {g_image_writer.current_file_index}")
            if time.time() > start + 60 * 30:
                start = time.time()
                save_state(c_state_file + ".bak", g_queue.copy(), g_image_writer)
    except KeyboardInterrupt:
        print("Halting tasks...")
        g_state.running = False
    for thread in threads:
        with g_state.update_event:
            g_state.update_event.notify_all()
        if thread.is_alive():
            thread.join()
    print("Saving state...")
    g_image_writer.close()
    save_state(c_state_file, g_queue, g_image_writer)

if __name__ == "__main__":
    main()