Datasets:

Languages:
English
Tags:
Not-For-All-Audiences
License:
Gaeros committed on
Commit
e2c0539
1 Parent(s): 48e7ab6

import: text format exports

Browse files
Files changed (1) hide show
  1. e6db/importdb.py +42 -3
e6db/importdb.py CHANGED
@@ -1,5 +1,7 @@
1
  import re
2
  import datetime
 
 
3
  import logging
4
  from pathlib import Path
5
 
@@ -17,27 +19,64 @@ def convert_db_export_to_parquet(
17
  paths = get_csv_paths(dumps_path)
18
  out_path = dumps_path if out_path is None else Path(out_path)
19
 
20
- logging.info("Reading tag CSVs...")
21
  tags, aliases, impls = read_tags_csvs(paths)
22
  post_parquet_paths, tag_freqs = read_posts_csv(paths["posts"], out_path)
23
 
24
- logging.info("Normalizing tags...")
25
  tags, tag2index, impl_mapped, rejtag_impls_csq_mapped = normalize_tag_list(
26
  tag_freqs, tags, aliases, impls, min_freq=min_freq
27
  )
28
 
 
29
  tags.with_columns(col("tag").cast(pl.String)).write_parquet(
30
  out_path / "tags.parquet", compression="zstd"
31
  )
 
 
 
32
  tag2index.with_columns(col("tag").cast(pl.String)).write_parquet(
33
  out_path / "tag2idx.parquet", compression="zstd"
34
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
 
36
  all_posts = post_process_posts(
37
  post_parquet_paths, tag2index, rejtag_impls_csq_mapped, impl_mapped
38
  )
39
 
40
- logging.info("Writing posts.parquet...")
41
  all_posts.write_parquet(out_path / "posts.parquet", compression="zstd")
42
 
43
  return tags, all_posts
 
1
  import re
2
  import datetime
3
+ import json
4
+ import gzip
5
  import logging
6
  from pathlib import Path
7
 
 
19
  paths = get_csv_paths(dumps_path)
20
  out_path = dumps_path if out_path is None else Path(out_path)
21
 
22
+ logging.info("Reading tag CSVs")
23
  tags, aliases, impls = read_tags_csvs(paths)
24
  post_parquet_paths, tag_freqs = read_posts_csv(paths["posts"], out_path)
25
 
26
+ logging.info("Normalizing tags")
27
  tags, tag2index, impl_mapped, rejtag_impls_csq_mapped = normalize_tag_list(
28
  tag_freqs, tags, aliases, impls, min_freq=min_freq
29
  )
30
 
31
+ logging.info("Writing tags indexes")
32
  tags.with_columns(col("tag").cast(pl.String)).write_parquet(
33
  out_path / "tags.parquet", compression="zstd"
34
  )
35
+ with gzip.open(out_path / "tags.txt.gz", "wt") as fd:
36
+ fd.writelines(f"{t}\n" for t in tags["tag"])
37
+
38
  tag2index.with_columns(col("tag").cast(pl.String)).write_parquet(
39
  out_path / "tag2idx.parquet", compression="zstd"
40
  )
41
+ with gzip.open(out_path / "tag2idx.json.gz", "wt") as fd:
42
+ json.dump(
43
+ {
44
+ t: i
45
+ for t, i in tag2index.sort(col("tag").cast(pl.String))[
46
+ ["tag", "index"]
47
+ ].iter_rows()
48
+ },
49
+ fd,
50
+ )
51
+
52
+ with gzip.open(out_path / "implications.json.gz", "wt") as fd:
53
+ json.dump(
54
+ {
55
+ t: i
56
+ for t, i in impl_mapped.group_by("antecedent")
57
+ .agg("consequent")
58
+ .iter_rows()
59
+ },
60
+ fd,
61
+ )
62
+
63
+ with gzip.open(out_path / "implications_rej.json.gz", "wt") as fd:
64
+ json.dump(
65
+ {
66
+ t: i
67
+ for t, i in rejtag_impls_csq_mapped.group_by("antecedent_name")
68
+ .agg("consequent")
69
+ .iter_rows()
70
+ },
71
+ fd,
72
+ )
73
 
74
+ logging.info("Post-processing posts")
75
  all_posts = post_process_posts(
76
  post_parquet_paths, tag2index, rejtag_impls_csq_mapped, impl_mapped
77
  )
78
 
79
+ logging.info("Writing posts.parquet")
80
  all_posts.write_parquet(out_path / "posts.parquet", compression="zstd")
81
 
82
  return tags, all_posts