init
Browse files- .gitignore +2 -0
- download_audio.py +8 -0
- main.sh +419 -0
- requirements.txt +1 -0
- util.py +47 -0
.gitignore
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
build
|
2 |
+
preprocess
|
download_audio.py
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from util import wget

# Seamless Align metadata: speech-to-speech (enA-jaA) and speech-to-text (enA-jpn).
url_metadata_s2s = (
    "https://dl.fbaipublicfiles.com/seamless/data/"
    "seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz"
)
url_metadata_s2t = (
    "https://dl.fbaipublicfiles.com/seamless/data/"
    "seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
)
cache_dir = "./download"

# Download (and, via wget's default, decompress) both metadata files.
for metadata_url in (url_metadata_s2s, url_metadata_s2t):
    wget(metadata_url, cache_dir=cache_dir)
|
main.sh
ADDED
@@ -0,0 +1,419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include "xxhash.h"
|
2 |
+
|
3 |
+
#include "warcstream.hh"
|
4 |
+
#include "util/file_stream.hh"
|
5 |
+
#include "util/file_piece.hh"
|
6 |
+
#include "util/murmur_hash.hh"
|
7 |
+
#include "util/pool.hh"
|
8 |
+
#include "util/tokenize_piece.hh"
|
9 |
+
|
10 |
+
#include <charconv>
|
11 |
+
#include <exception>
|
12 |
+
#include <iostream>
|
13 |
+
#include <unordered_map>
|
14 |
+
#include <vector>
|
15 |
+
|
16 |
+
#include <curl/curl.h>
|
17 |
+
#include <string.h>
|
18 |
+
|
19 |
+
// Thrown for errors finding a match that can mostly r
|
20 |
+
// Exception for match failures that are usually recoverable per-document.
class MatchException : public util::Exception {
  public:
    // Hook called by the UTIL_THROW_IF machinery.  Prepends "file:line func
    // condition " to the message already accumulated in what_, so the source
    // context reads first.
    void SetLocation(const char *file, unsigned int line, const char *func, const char * /*child_name*/, const char *condition) {
      std::string old_text;
      // NOTE(review): assumes what_ (inherited from util::Exception) supports
      // swap with std::string and stream-style << — confirm against util/exception.hh.
      what_.swap(old_text);
      what_ << file << ':' << line << ' ';
      if (func) what_ << func;
      what_ << ' ';
      if (condition) what_ << condition;
      what_ << ' ';
      what_ << old_text;
    }
};
|
33 |
+
|
34 |
+
// One paragraph wanted from a WET document: identified by its position in the
// document and a content hash, plus the metadata line to echo back on output.
struct Extract {
  // Raw line number in the WET
  uint64_t paragraph_number;
  // XXH3_64bits_withSeed of line in the WET
  uint64_t paragraph_digest;

  // Full metadata line this extract came from, used verbatim when printing
  // results.  Points into a util::Pool owned by ProcessMetadata.
  util::StringPiece original_line;
};
|
42 |
+
|
43 |
+
// A WARC block digest as it appears in the header: 32 base-32 characters,
// stored raw (not NUL-terminated) and compared bytewise.
struct SHA1 {
  char base32[32];
  bool operator==(const SHA1 &other) const {
    return memcmp(base32, other.base32, sizeof(base32)) == 0;
  }
};
|
49 |
+
|
50 |
+
// TODO if we decode the base32 to a number this could just be the prefix.
|
51 |
+
// Make SHA1 usable as an unordered_map key by hashing its raw bytes.
namespace std {
template<> struct hash<SHA1> {
  size_t operator()(const SHA1 &in) const {
    return util::MurmurHashNative(in.base32, sizeof(in.base32));
  }
};
} // namespace std
|
58 |
+
|
59 |
+
class Retrieve {
|
60 |
+
public:
|
61 |
+
typedef std::unordered_map<SHA1, std::vector<Extract> >::iterator Iterator;
|
62 |
+
|
63 |
+
void Clear() {
|
64 |
+
map_.clear();
|
65 |
+
}
|
66 |
+
|
67 |
+
void Add(util::StringPiece sha1, const Extract &extract) {
|
68 |
+
UTIL_THROW_IF(sha1.size() != 32, MatchException, "Expected 32-character hash but got '" << sha1 << "' with size " << sha1.size());
|
69 |
+
const SHA1 &key = *reinterpret_cast<const SHA1*>(sha1.data());
|
70 |
+
std::vector<Extract> &extracts = map_[key];
|
71 |
+
UTIL_THROW_IF2(!extracts.empty() && extracts.back().paragraph_number > extract.paragraph_number, "Metadata should be sorted by paragraph number in each document");
|
72 |
+
extracts.push_back(extract);
|
73 |
+
}
|
74 |
+
|
75 |
+
Iterator Lookup(util::StringPiece sha1) {
|
76 |
+
UTIL_THROW_IF(sha1.size() != 32, MatchException, "Expected 32-character hash but got '" << sha1 << "' with size " << sha1.size());
|
77 |
+
const SHA1 &key = *reinterpret_cast<const SHA1*>(sha1.data());
|
78 |
+
return map_.find(key);
|
79 |
+
}
|
80 |
+
|
81 |
+
void Erase(Iterator it) {
|
82 |
+
map_.erase(it);
|
83 |
+
}
|
84 |
+
|
85 |
+
bool Empty() const { return map_.empty(); }
|
86 |
+
|
87 |
+
Iterator begin() { return map_.begin(); }
|
88 |
+
Iterator end() { return map_.end(); }
|
89 |
+
|
90 |
+
private:
|
91 |
+
const std::vector<Extract> empty_;
|
92 |
+
std::unordered_map<SHA1, std::vector<Extract> > map_;
|
93 |
+
};
|
94 |
+
|
95 |
+
|
96 |
+
// Result sinks: matched lines go to file descriptor 1 (stdout) and failures
// to file descriptor 2 (stderr), each as tab-separated records.
class Output {
  public:
    Output() : success_(1), failure_(2) {}

    // Emit: original metadata line, tab, the matched paragraph text.
    void Success(util::StringPiece original_line, util::StringPiece paragraph) {
      success_ << original_line << '\t' << paragraph << '\n';
    }
    // Emit: original metadata line, tab, a human-readable error description.
    void Failure(util::StringPiece original_line, util::StringPiece what) {
      failure_ << original_line << '\t' << what << '\n';
    }

    void Flush() {
      success_.flush();
      failure_.flush();
    }

  private:
    util::FileStream success_, failure_;
};
|
115 |
+
|
116 |
+
// Copy `in` to `out` with the project's normalization applied:
// '|' goes to '_', '\t' goes to ' ', and '\r' to empty string.
void Normalize(const util::StringPiece in, std::string &out) {
  out.clear();
  for (char c : in) {
    if (c == '|') {
      out.push_back('_');
    } else if (c == '\t') {
      out.push_back(' ');
    } else if (c != '\r') {
      out.push_back(c);
    }
    // '\r' is dropped entirely.
  }
}
|
134 |
+
|
135 |
+
// Try to match one extract against the stripped WET line it should be on.
// Returns true (and emits a success record) if either the raw line or its
// Normalize()d form hashes to the expected paragraph digest; false otherwise.
bool ProcessExtract(const Extract &extract, util::StringPiece line, Output &out) {
  // First try with just the line as-is.
  XXH64_hash_t hash = XXH3_64bits_withSeed(line.data(), line.size(), 0);
  if (hash == extract.paragraph_digest) {
    out.Success(extract.original_line, line);
    return true;
  }
  // Then try normalizing the string.
  std::string normalized;
  Normalize(line, normalized);
  XXH64_hash_t norm_hash = XXH3_64bits_withSeed(normalized.data(), normalized.size(), 0);
  if (norm_hash == extract.paragraph_digest) {
    out.Success(extract.original_line, normalized);
    return true;
  }
  // Didn't match, let's fall back to matching regardless of line number.
  return false;
}
|
153 |
+
|
154 |
+
// Remove leading and trailing whitespace (characters flagged in util::kSpaces)
// without copying; mirrors python str.strip() for the line-counting logic.
util::StringPiece Strip(const util::StringPiece &in) {
  util::StringPiece str(in);
  while (!str.empty() && util::kSpaces[(unsigned char)str[0]]) {
    str.remove_prefix(1);
  }
  while (!str.empty() && util::kSpaces[(unsigned char)str[str.size() - 1]]) {
    str.remove_suffix(1);
  }
  return str;
}
|
164 |
+
|
165 |
+
// Last-resort matcher: ignore paragraph numbers and hash-join the remaining
// extracts [extract, extract_end) against every remaining line of the
// document.  Consumes `line`; anything still unmatched is reported as failure.
void FallbackHashTable(util::TokenIter<util::SingleCharacter, false> &line, std::vector<Extract>::const_iterator extract, std::vector<Extract>::const_iterator extract_end, Output &out) {
  // Did not use a unordered_multimap due to the need to preserve order for error messages.
  std::unordered_map<uint64_t, std::vector<const Extract*> > lookup;
  for (; extract != extract_end; ++extract) {
    lookup[extract->paragraph_digest].push_back(&*extract);
  }
  std::string normalized;
  for (; line; ++line) {
    // Fun fact: python text mode considers a lone '\r' without "\r\n" as a line separator, presumably for Mac OS 9 compabilility.
    for (util::TokenIter<util::SingleCharacter, true> carriage(*line, '\r'); carriage; ++carriage) {
      Normalize(*carriage, normalized);
      XXH64_hash_t norm_hash = XXH3_64bits_withSeed(normalized.data(), normalized.size(), 0);
      auto found = lookup.find(norm_hash);
      if (found == lookup.end()) continue;
      // Every extract sharing this digest is satisfied by this line.
      for (const Extract *ext : found->second) {
        out.Success(ext->original_line, normalized);
      }
      lookup.erase(found);
    }
    // Everything matched: stop scanning the rest of the document early.
    if (lookup.empty()) return;
  }
  // Failed to match the lines in lookup.
  util::StringStream message;
  for (std::pair<const uint64_t, std::vector<const Extract*> > &entry : lookup) {
    for (const Extract *ext : entry.second) {
      // NOTE(review): assumes util::StringStream::clear() resets the buffer
      // between messages — confirm against util/string_stream.hh.
      message.clear();
      message << "Hash " << ext->paragraph_digest << " did not match any line in the WET";
      out.Failure(ext->original_line, message.str());
    }
  }
}
|
196 |
+
|
197 |
+
// Walk the document body, counting non-empty stripped lines, and match each
// extract at its recorded paragraph_number.  On any hash mismatch — or if
// paragraph numbers run past the end of the document — fall back to a
// position-independent hash join over the whole body (rescanned from the top).
void MatchLines(util::TokenIter<util::SingleCharacter, false> &line, const std::vector<Extract> &extracts, Output &out) {
  // Copy of the iterator at the start of the body so the fallback can rescan.
  util::TokenIter<util::SingleCharacter, false> line_start(line);
  assert(!extracts.empty());
  uint64_t line_counter = 0;
  std::vector<Extract>::const_iterator extract = extracts.begin();
  for (; line; ++line) {
    // Upstream does python strip() then skips empty lines without counting them.
    util::StringPiece stripped = Strip(*line);
    if (stripped.empty()) continue;
    // `while`, not `if`: several extracts may share the same paragraph number.
    while (line_counter == extract->paragraph_number) {
      if (!ProcessExtract(*extract, stripped, out)) {
        // A line failed to match the expected hash. Fall back to a hash join of all lines.
        FallbackHashTable(line_start, extract, extracts.end(), out);
        return;
      }
      if (++extract == extracts.end()) {
        return;
      }
    }
    ++line_counter;
  }
  // Paragraph number exceeds number of lines.
  FallbackHashTable(line_start, extract, extracts.end(), out);
}
|
221 |
+
|
222 |
+
// Extract SHA1 from header, leave at line
|
223 |
+
// Scan WARC header lines for "WARC-Block-Digest: sha1:..." and return the
// digest with the prefix and trailing '\r' removed.  Leaves `line` positioned
// on the digest header line; throws MatchException if the header ends first.
util::StringPiece FindSHA1(util::TokenIter<util::SingleCharacter, false> &line) {
  const util::StringPiece kBlockDigest("WARC-Block-Digest: sha1:");
  // Header through SHA1
  for (; ; ++line) {
    UTIL_THROW_IF(!line, MatchException, "Missing end of header");
    if (line->starts_with(kBlockDigest)) {
      UTIL_THROW_IF((*line)[line->size() - 1] != '\r', MatchException, "Expected carriage return in WARC.");
      util::StringPiece ret(line->substr(kBlockDigest.size()));
      ret.remove_suffix(1);  // drop the '\r'
      return ret;
    }
    // An empty line would mean the header ended without a digest.
    UTIL_THROW_IF(line->empty(), MatchException, "No digest");
  }
}
|
237 |
+
|
238 |
+
// The WARC reader calls this for every document in the WARC.
|
239 |
+
// The WARC reader calls this for every document in the WARC: validates the
// header, looks the document up by its block digest, and matches its lines.
class DocumentCallback {
  public:
    DocumentCallback(Retrieve &retrieve, Output &out) : retrieve_(retrieve), out_(out) {}

    // Return true if there's more documents to get from the same WARC.
    bool operator()(const std::string &document) {
      util::TokenIter<util::SingleCharacter, false> line(document, '\n');
      UTIL_THROW_IF(!line, MatchException, "Blank document");
      UTIL_THROW_IF(*line != "WARC/1.0\r", MatchException, "Expected WARC/1.0 header but got `" << *line << '\'');
      UTIL_THROW_IF(!++line, MatchException, "Nothing after WARC/1.0 header");
      // The warcinfo record describes the file itself; skip it.
      if (*line == "WARC-Type: warcinfo\r") {
        return true;
      }
      util::StringPiece sha1 = FindSHA1(line);
      Retrieve::Iterator it = retrieve_.Lookup(sha1);
      // Not a document we want anything from.
      if (it == retrieve_.end()) return true;
      const std::vector<Extract> &extracts = it->second;
      assert(!extracts.empty());
      // Consume rest of the header.
      for (++line; ; ++line) {
        UTIL_THROW_IF(!line, MatchException, "Missing end of header");
        if (line->size() == 1 && (*line)[0] == '\r') break;
      }
      ++line; // Skip blank.
      MatchLines(line, extracts, out_);
      // Done with this document (matched or reported); dropping it lets
      // Empty() signal the download to hang up early.
      retrieve_.Erase(it);
      return !retrieve_.Empty();
    }

  private:
    Retrieve &retrieve_;
    Output &out_;
};
|
272 |
+
|
273 |
+
// Adapts curl's write callback to the WARC stream parser, capturing any C++
// exception so it can be rethrown outside curl's C call stack.
class CurlCallback {
  public:
    CurlCallback(Retrieve &retrieve, Output &out) : document_(retrieve, out), want_more_(true) {}

    // Feed `length` downloaded bytes to the WARC parser.  Returning anything
    // other than `length` makes curl abort the transfer.
    size_t operator()(void *buffer, size_t length) {
      // As a C library, curl can't handle exceptions thrown by the callback.
      try {
        if (!warc_.GiveBytes(static_cast<const char*>(buffer), length, document_)) {
          // Hang up early if all sentences from the WARC are complete.
          want_more_ = false;
          return 0;
        }
      } catch (...) {
        exception_ = std::current_exception();
        return 0;
      }
      return length;
    }

    // Rethrow any exception captured during the transfer.  Otherwise returns
    // whether the transfer was still wanted (false = deliberate early hangup,
    // so the caller should ignore curl's abort status).
    bool CheckStatus() {
      if (exception_) {
        // Move out first so the exception slot is clear before rethrowing.
        std::exception_ptr moved(std::move(exception_));
        std::rethrow_exception(moved);
      }
      return want_more_;
    }
  private:
    preprocess::WARCStream warc_;
    DocumentCallback document_;
    std::exception_ptr exception_;
    bool want_more_;
};
|
305 |
+
|
306 |
+
// RAII wrapper around a CURL easy handle configured for streaming WET
// downloads (redirects followed, 60 s timeout, abort below 1 MiB/s for 5 s).
class CurlWrap {
  public:
    CurlWrap() : curl_(curl_easy_init()) {
      // Bug fix: curl only writes the error buffer when an error actually
      // occurs, so messages that print error_buffer_ before any transfer
      // would read uninitialized memory.  Start it empty.
      error_buffer_[0] = '\0';
      UTIL_THROW_IF(!curl_, MatchException, "Failed to initialize CURL");
      UTIL_THROW_IF(CURLE_OK != curl_easy_setopt(curl_, CURLOPT_ERRORBUFFER, error_buffer_), MatchException, "CURL Setting error buffer failed");
      UTIL_THROW_IF(CURLE_OK != curl_easy_setopt(curl_, CURLOPT_FOLLOWLOCATION, 1L), MatchException, "CURL Setting follow location failed " << error_buffer_);
      UTIL_THROW_IF(CURLE_OK != curl_easy_setopt(curl_, CURLOPT_WRITEFUNCTION, Incoming), MatchException, "CURL Setting function failed " << error_buffer_);
      UTIL_THROW_IF(CURLE_OK != curl_easy_setopt(curl_, CURLOPT_USERAGENT, "wet lines extraction"), MatchException, "CURL User Agent setting failed " << error_buffer_);
      // TODO make timeouts configurable
      UTIL_THROW_IF(CURLE_OK != curl_easy_setopt(curl_, CURLOPT_TIMEOUT, 60L), MatchException, "CURL timeout setting failed " << error_buffer_);
      UTIL_THROW_IF(CURLE_OK != curl_easy_setopt(curl_, CURLOPT_LOW_SPEED_LIMIT, 1048576L), MatchException, "CURL low setting low speed failed " << error_buffer_);
      UTIL_THROW_IF(CURLE_OK != curl_easy_setopt(curl_, CURLOPT_LOW_SPEED_TIME, 5L), MatchException, "CURL low setting low speed time failed " << error_buffer_);
    }

    ~CurlWrap() {
      curl_easy_cleanup(curl_);
    }

    // Download `url`, streaming bytes into `callback`.  An early hangup
    // requested by the callback is not an error; exceptions captured inside
    // the callback are rethrown here via CheckStatus().
    void Download(const char *url, CurlCallback &callback) {
      UTIL_THROW_IF(CURLE_OK != curl_easy_setopt(curl_, CURLOPT_URL, url), MatchException, "CURL Could not set URL " << error_buffer_);
      UTIL_THROW_IF(CURLE_OK != curl_easy_setopt(curl_, CURLOPT_WRITEDATA, &callback), MatchException, "CURL Could not set callback " << error_buffer_);
      CURLcode performed = curl_easy_perform(curl_);
      // Throw any exceptions gathered during execution.
      if (!callback.CheckStatus()) {
        // If the code got everything it wanted then hung up, don't worry about CURL status.
        return;
      }
      UTIL_THROW_IF(CURLE_OK != performed, MatchException, "CURL perform failed " << error_buffer_);
    }

  private:
    // Trampoline matching curl's C write-callback signature; `ptr` is the
    // CurlCallback registered via CURLOPT_WRITEDATA.
    static size_t Incoming(void *buffer, size_t /* one */, size_t nmemb, void *ptr) {
      return (*static_cast<CurlCallback*>(ptr))(buffer, nmemb);
    }

    CURL *curl_;

    char error_buffer_[CURL_ERROR_SIZE];
};
|
345 |
+
|
346 |
+
// Parse one space-separated metadata line of the form:
//   <wet_path> sha1:<digest> <url> <paragraph_number> <paragraph_digest> ...
// Fills wet_path, sha1 (with the "sha1:" prefix removed) and the numeric
// fields of extract.  extract.original_line is left untouched; the URL field
// is skipped.  Throws on any malformed field.
void ParseLine(util::StringPiece line, util::StringPiece &wet_path, util::StringPiece &sha1, Extract &extract) {
  util::TokenIter<util::SingleCharacter, false> spaces(line, ' ');
  UTIL_THROW_IF2(!spaces, "Metadata missing");
  wet_path = *spaces;
  UTIL_THROW_IF2(!++spaces, "Metadata missing sha1");
  UTIL_THROW_IF2(!spaces->starts_with("sha1:"), "Expected hash starting with sha1");
  sha1 = *spaces;
  sha1.remove_prefix(5);  // drop "sha1:"
  UTIL_THROW_IF2(!++spaces, "Metadata missing URL");
  UTIL_THROW_IF2(!++spaces, "Metadata missing line");
  // Paragraph number: must consume the entire token, base 10.
  std::from_chars_result r = std::from_chars(spaces->data(), spaces->data() + spaces->size(), extract.paragraph_number, 10);
  UTIL_THROW_IF2(r.ec != std::errc(), "Error in number " << *spaces);
  UTIL_THROW_IF2(r.ptr != spaces->end(), "Did not consume full number " << *spaces);
  UTIL_THROW_IF2(!++spaces, "Metadata missing paragraph digest");
  r = std::from_chars(spaces->data(), spaces->end(), extract.paragraph_digest);
  UTIL_THROW_IF2(r.ec != std::errc(), "Error in number " << *spaces);
  UTIL_THROW_IF2(r.ptr != spaces->end(), "Did not consume full number " << *spaces);
}
|
364 |
+
|
365 |
+
void RunWARC(const char *url, CurlWrap &curl, Retrieve &retrieve, Output &out) {
|
366 |
+
try {
|
367 |
+
CurlCallback callback(retrieve, out);
|
368 |
+
curl.Download(url, callback);
|
369 |
+
} catch (const util::Exception &e) {
|
370 |
+
for (Retrieve::Iterator i = retrieve.begin(); i != retrieve.end(); ++i) {
|
371 |
+
for (const Extract &extract : i->second) {
|
372 |
+
out.Failure(extract.original_line, e.what());
|
373 |
+
}
|
374 |
+
}
|
375 |
+
return;
|
376 |
+
}
|
377 |
+
for (Retrieve::Iterator i = retrieve.begin(); i != retrieve.end(); ++i) {
|
378 |
+
for (const Extract &extract : i->second) {
|
379 |
+
out.Failure(extract.original_line, "No error but unmatched");
|
380 |
+
}
|
381 |
+
}
|
382 |
+
out.Flush();
|
383 |
+
}
|
384 |
+
|
385 |
+
// Drive the whole job: read metadata lines (grouped by WET path) from `in`,
// and for each group download download_prefix + wet_path and emit
// matched/failed records through `out`.
void ProcessMetadata(const util::StringPiece download_prefix, util::FilePiece &in, Output &out) {
  Retrieve retrieve;
  // Owns copies of the metadata lines; freed in bulk after each WARC.
  util::Pool string_pool;
  CurlWrap curl;
  util::StringPiece previous_wet_path;
  // Reused URL buffer: the prefix stays, the wet-path suffix is replaced.
  std::string download_path(download_prefix.data(), download_prefix.size());
  for (util::StringPiece line : in) {
    util::StringPiece wet_path, sha1;
    Extract extract;
    ParseLine(line, wet_path, sha1, extract);
    // Metadata is grouped by WET path: a new path means the previous group is complete.
    if (wet_path != previous_wet_path) {
      // Flush existing data.
      if (!previous_wet_path.empty()) {
        download_path.replace(download_prefix.size(), download_path.size() - download_prefix.size(), previous_wet_path.data(), previous_wet_path.size());
        RunWARC(download_path.c_str(), curl, retrieve, out);
      }
      retrieve.Clear();
      string_pool.FreeAll();
    }
    // Store the full string line for use in printing later.
    extract.original_line = util::StringPiece(static_cast<const char*>(memcpy(string_pool.Allocate(line.size()), line.data(), line.size())), line.size());
    // Re-point previous_wet_path into the pooled copy (same offset as within
    // `line`) — presumably because FilePiece may invalidate `line` on the
    // next iteration; confirm against util/file_piece.hh.
    previous_wet_path = util::StringPiece(extract.original_line.data() + (wet_path.data() - line.data()), wet_path.size());
    retrieve.Add(sha1, extract);
  }
  // Final group has no following path change to trigger it; flush explicitly.
  if (!previous_wet_path.empty()) {
    download_path.replace(download_prefix.size(), download_path.size() - download_prefix.size(), previous_wet_path.data(), previous_wet_path.size());
    RunWARC(download_path.c_str(), curl, retrieve, out);
  }
}
|
414 |
+
|
415 |
+
// Reads seamless metadata from stdin; matched lines go to stdout and
// failures to stderr (see Output).
int main() {
  util::FilePiece in(0);  // fd 0 = stdin
  Output out;
  ProcessMetadata("http://data.commoncrawl.org/", in, out);
}
|
requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
requests
|
util.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from os.path import join as p_join
|
3 |
+
from typing import Optional
|
4 |
+
import tarfile
|
5 |
+
import zipfile
|
6 |
+
import gzip
|
7 |
+
import requests
|
8 |
+
|
9 |
+
|
10 |
+
# Bug fix: __all__ must be a sequence of names.  As the bare string 'wget',
# `from util import *` iterates it character by character and tries to import
# 'w', 'g', 'e', 't', which fails.
__all__ = ['wget']
|
11 |
+
|
12 |
+
|
13 |
+
def wget(url: str, cache_dir: str, filename: Optional[str] = None, uncompress_file: bool = True):
    """Download *url* into *cache_dir*, optionally decompressing/extracting it.

    Args:
        url: HTTP(S) URL to fetch.
        cache_dir: directory to store (and extract) into; created if missing.
        filename: override for the stored file name; defaults to the URL basename.
        uncompress_file: when True, .tar/.tar.gz/.tgz and .zip archives are
            extracted into cache_dir and .gz files are decompressed, and the
            compressed file is deleted afterwards.

    Returns:
        Path of the downloaded file, with the archive suffix removed when the
        file was decompressed/extracted.

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    os.makedirs(cache_dir, exist_ok=True)
    filename = os.path.basename(url) if not filename else filename
    output_file = p_join(cache_dir, filename)
    # Bug fixes vs. original: check the HTTP status (otherwise an error page
    # is silently saved as the file), stream to disk instead of buffering the
    # whole body in memory, and set a timeout so a dead server can't hang us.
    with requests.get(url, stream=True, timeout=60) as r:
        r.raise_for_status()
        with open(output_file, "wb") as f:
            for chunk in r.iter_content(chunk_size=1 << 20):
                f.write(chunk)
    if not uncompress_file:
        return output_file

    if output_file.endswith(('.tar.gz', '.tgz', '.tar')):
        mode = "r:" if output_file.endswith('.tar') else "r:gz"
        # Context manager closes the archive even if extraction fails
        # (the original leaked the handle on error).
        with tarfile.open(output_file, mode) as tar:
            tar.extractall(cache_dir)
        os.remove(output_file)
        # Strip the suffix only (str.replace would also mangle a matching
        # substring in the middle of the name).
        for suffix in ('.tar.gz', '.tgz', '.tar'):
            if output_file.endswith(suffix):
                return output_file[:-len(suffix)]
    elif output_file.endswith('.gz'):
        uncompressed = output_file[:-len('.gz')]
        # Decompress in chunks so large files don't have to fit in memory.
        with gzip.open(output_file, 'rb') as f_in:
            with open(uncompressed, 'wb') as f_out:
                while True:
                    chunk = f_in.read(1 << 20)
                    if not chunk:
                        break
                    f_out.write(chunk)
        os.remove(output_file)
        return uncompressed
    elif output_file.endswith('.zip'):
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall(cache_dir)
        os.remove(output_file)
        return output_file[:-len('.zip')]
    return output_file
|
44 |
+
|
45 |
+
|
46 |
+
if __name__ == '__main__':
    # Bug fix: the original called wget() with no arguments, which always
    # raised TypeError (url and cache_dir are required).  Provide a minimal CLI.
    import sys

    if len(sys.argv) < 3:
        raise SystemExit("usage: python util.py URL CACHE_DIR [FILENAME]")
    wget(sys.argv[1], sys.argv[2], filename=sys.argv[3] if len(sys.argv) > 3 else None)
|