MRiabov committed cffe6c7
Parent: 4a08cf1

refactor: reorganize repository filtering into separate clean/ module with metadata and content filters

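The new layout implies a staged pipeline. A minimal sketch of the assumed ordering follows, pieced together from the configs in this commit; only fetch_gh_meta.py documents its own `uv run` invocation, so the other entry points and their order are assumptions.

# Assumed pipeline order after this refactor (inferred from the configs; not documented as such).
import subprocess

steps = [
    "data_collection_utils/top_1000_repos.py",   # scrape repo links -> top-1000-repos.parquet
    "data_collection_utils/fetch_gh_meta.py",    # links -> output/repometa.parquet (GraphQL metadata)
    "clean/clean_meta.py",                       # metadata predicates -> filtered repometa + links parquet
    "data_collection_utils/scrape_gh_docs.py",   # filtered links -> raw docs + texts.parquet
    "clean/clean_docs_using_content.py",         # content-level dedup of the per-file texts
]
for step in steps:
    subprocess.run(["uv", "run", step], check=True)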
filter.py → clean/clean_docs_using_content.py RENAMED
@@ -31,13 +31,13 @@ if __name__ == "__main__":
         ),
         -- Step 2: Rank by mtime within each (owner, repo, filename) group to identify latest versions
         ranked_mtime AS (
-            SELECT *, ROW_NUMBER() OVER (PARTITION BY owner, repo, filename ORDER BY mtime DESC) as rn_mtime
+            SELECT *, ROW_NUMBER() OVER (PARTITION BY owner, repo, file_rel_repo ORDER BY mtime DESC) as rn_mtime
             FROM filtered
         ),
         -- Step 3: Keep only the latest version for duplicates, but preserve all README.md files
         filtered_dup AS (
             SELECT * FROM ranked_mtime
-            WHERE rn_mtime = 1 OR filename LIKE 'README.md'
+            WHERE rn_mtime = 1 OR file_rel_repo LIKE 'README.md'
         ),
         -- Step 4: Rank by content to remove duplicate content, keeping the first occurrence
         ranked AS (
@@ -45,7 +45,7 @@ if __name__ == "__main__":
             FROM filtered_dup
         )
         -- Final selection: unique content after filename deduplication
-        SELECT owner, repo, repo_dir, file_rel_repo, file_rel_outdir, size, mtime, lang, content, filename
+        SELECT owner, repo, repo_dir, file_rel_repo, file_rel_outdir, size, mtime, lang, content
         FROM ranked
         WHERE rn = 1
     """
clean/clean_meta.py ADDED
@@ -0,0 +1,184 @@
+#!/usr/bin/env python3
+"""
+clean_meta.py
+
+Filter repository-level metadata (repometa.parquet) using declarative predicates
+so you can decide which repos to pass to the docs scraper.
+
+Configuration is read from clean_meta.yaml next to this script.
+Outputs a filtered metadata parquet and a links parquet with a single 'link' column.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Dict, Any, List, Optional
+import sys
+import logging
+
+import pandas as pd
+import yaml
+from tqdm import tqdm
+
+
+class TqdmLoggingHandler(logging.Handler):
+    def emit(self, record):
+        try:
+            msg = self.format(record)
+            tqdm.write(msg)
+        except Exception:
+            sys.stderr.write(str(record.getMessage()) + "\n")
+
+
+logger = logging.getLogger("clean_meta")
+
+
+def setup_logging():
+    logger.setLevel(logging.INFO)
+    logger.propagate = False
+    logger.handlers.clear()
+    handler = TqdmLoggingHandler()
+    handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
+    handler.setLevel(logging.INFO)
+    logger.addHandler(handler)
+
+
+def main():
+    setup_logging()
+
+    cfg_path = Path(__file__).with_name("clean_meta.yaml")
+    assert cfg_path.exists(), f"Missing config: {cfg_path}"
+    cfg: Dict[str, Any] = yaml.safe_load(cfg_path.read_text(encoding="utf-8")) or {}
+
+    def _resolve(p: Optional[str]) -> str:
+        if p is None:
+            return ""
+        q = Path(p)
+        if not q.is_absolute():
+            q = (cfg_path.parent / q).resolve()
+        return str(q)
+
+    in_meta = _resolve(cfg.get("repometa_parquet", "../output/repometa.parquet"))
+    out_meta = _resolve(
+        cfg.get("out_repometa_parquet", "../output/repometa.filtered.parquet")
+    )
+    out_links = _resolve(
+        cfg.get("out_links_parquet", "../output/links.filtered.parquet")
+    )
+    out_filtered_reasons_csv = _resolve(
+        cfg.get("out_filtered_reasons_csv", "../output/repometa.filtered_out.csv")
+    )
+
+    include_languages: List[str] = cfg.get("include_languages", [])
+    exclude_languages: List[str] = cfg.get("exclude_languages", [])
+    include_topic_substrings: List[str] = cfg.get("include_topic_substrings", [])
+    exclude_topic_substrings: List[str] = cfg.get("exclude_topic_substrings", [])
+    min_stars: int = int(cfg.get("min_stars", 0))
+    exclude_forks: bool = bool(cfg.get("exclude_forks", True))
+    updated_after: Optional[str] = cfg.get("updated_after")  # ISO date string
+    include_owners: List[str] = cfg.get("include_owners", [])
+    exclude_owners: List[str] = cfg.get("exclude_owners", [])
+
+    df0 = pd.read_parquet(in_meta)
+    total = len(df0)
+    df = df0.copy()
+    # Track filtered-out reasons per repo_name
+    # repo_name := "owner/repo"
+    filtered_reasons: Dict[str, List[str]] = {}
+    reason_counts: Dict[str, int] = {}
+
+    def _note_excluded(mask: pd.Series, reason: str):
+        nonlocal filtered_reasons, reason_counts
+        if mask.any():
+            excluded = df[mask]
+            for _, row in excluded.iterrows():
+                repo_name = f"{row['owner']}/{row['repo']}"
+                lst = filtered_reasons.get(repo_name)
+                if lst is None:
+                    filtered_reasons[repo_name] = [reason]
+                else:
+                    lst.append(reason)
+            reason_counts[reason] = reason_counts.get(reason, 0) + int(mask.sum())
+
+    # Normalize columns we expect from fetch_gh_meta.py
+    assert {"owner", "repo", "link"}.issubset(df.columns)
+
+    if include_languages:
+        mask_keep = df["language"].isin(include_languages)
+        _note_excluded(~mask_keep, "include_languages")
+        df = df[mask_keep]
+    if exclude_languages:
+        mask_excl = df["language"].isin(exclude_languages)
+        _note_excluded(mask_excl, "exclude_languages")
+        df = df[~mask_excl]
+    if min_stars > 0 and "stars" in df.columns:
+        mask_keep = df["stars"] >= min_stars
+        _note_excluded(~mask_keep, "min_stars")
+        df = df[mask_keep]
+    if exclude_forks and "is_fork" in df.columns:
+        mask_excl = df["is_fork"].fillna(False)
+        _note_excluded(mask_excl, "exclude_forks")
+        df = df[~mask_excl]
+    if updated_after and "last_commit_date" in df.columns:
+        mask_keep = df["last_commit_date"] >= updated_after
+        _note_excluded(~mask_keep, "updated_after")
+        df = df[mask_keep]
+    if include_owners:
+        mask_keep = df["owner"].isin(include_owners)
+        _note_excluded(~mask_keep, "include_owners")
+        df = df[mask_keep]
+    if exclude_owners:
+        mask_excl = df["owner"].isin(exclude_owners)
+        _note_excluded(mask_excl, "exclude_owners")
+        df = df[~mask_excl]
+
+    # Topic filters. Topics are stored as comma-joined string in fetch_gh_meta.
+    if include_topic_substrings:
+        mask_any = False
+        for sub in include_topic_substrings:
+            mask_any = mask_any | df["topics"].fillna("").str.contains(sub, case=False)
+        _note_excluded(~mask_any, "include_topic_substrings")
+        df = df[mask_any]
+    if exclude_topic_substrings:
+        mask_excl = False
+        for sub in exclude_topic_substrings:
+            mask_excl = mask_excl | df["topics"].fillna("").str.contains(
+                sub, case=False
+            )
+        _note_excluded(mask_excl, "exclude_topic_substrings")
+        df = df[~mask_excl]
+
+    # Summary logging
+    kept = len(df)
+    filtered_total = total - kept
+    logger.info(f"Kept {kept} from total {total} (filtered out {filtered_total})")
+    if reason_counts:
+        for reason, cnt in reason_counts.items():
+            logger.info(f" - {reason}: {cnt}")
+
+    # Write filtered-out reasons CSV
+    if filtered_reasons:
+        rows = [
+            {"repo_name": rn, "reasons": ",".join(sorted(set(rs)))}
+            for rn, rs in filtered_reasons.items()
+        ]
+        pd.DataFrame(rows, columns=["repo_name", "reasons"]).to_csv(
+            out_filtered_reasons_csv, index=False
+        )
+
+    out_meta_path = Path(out_meta)
+    out_meta_path.parent.mkdir(parents=True, exist_ok=True)
+    df.to_parquet(out_meta_path)
+
+    # Export links parquet for the scraper
+    out_links_path = Path(out_links)
+    out_links_path.parent.mkdir(parents=True, exist_ok=True)
+    df[["link"]].drop_duplicates().to_parquet(out_links_path)
+
+    logger.info(
+        f"Filtered metadata saved to {out_meta_path} (rows={len(df)}); links written to {out_links_path}"
+    )
+
+
+if __name__ == "__main__":
+    main()
clean/clean_meta.yaml ADDED
@@ -0,0 +1,28 @@
+# Configuration for clean_meta.py
+
+# Input metadata parquet produced by fetch_gh_meta.py
+repometa_parquet: ../output/repometa.parquet
+
+# Outputs
+out_repometa_parquet: ../output/repometa.filtered.parquet
+out_links_parquet: ../output/links.filtered.parquet
+out_filtered_reasons_csv: ../output/repometa.filtered_out.csv
+
+# Predicates
+# Keep only repos whose primaryLanguage is in this list (empty means no include filter)
+include_languages: []
+# Exclude repos whose primaryLanguage is in this list
+exclude_languages: [null] # filter empty here.
+# Minimum number of stars
+min_stars: 300
+# Exclude forks
+exclude_forks: true
+# Keep only repos whose last commit date is on/after this ISO8601 date, e.g., "2022-01-01"
+updated_after: null
+# Restrict to specific owners (orgs/users); empty means no include filter
+include_owners: []
+# Exclude these owners
+exclude_owners: []
+# Topic filters (substring match, case-insensitive) over comma-joined topics field
+include_topic_substrings: []
+exclude_topic_substrings: ["interview","interview-prep","learn"]
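A quick self-contained check of the topic predicates configured above: topics are a single comma-joined string, matched case-insensitively by substring, mirroring the loop in clean_meta.py. The rows are made up.

import pandas as pd

df = pd.DataFrame(
    {
        "owner": ["a", "b"],
        "repo": ["algos", "webapp"],
        "topics": ["Interview-Prep,algorithms", "django,api"],
    }
)
mask_excl = False
for sub in ["interview", "interview-prep", "learn"]:
    # case-insensitive substring match over the comma-joined topics string
    mask_excl = mask_excl | df["topics"].fillna("").str.contains(sub, case=False)
print(df[~mask_excl])  # only b/webapp survives the exclude filter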
data_collection_utils/fetch_gh_meta.py ADDED
@@ -0,0 +1,182 @@
+#!/usr/bin/env python3
+"""
+fetch_gh_meta.py
+
+Pipeline step to fetch repository metadata from GitHub using GraphQL and save it as Parquet.
+This script is separate from docs scraping so you can pre-filter repos before downloading docs.
+
+Inputs (configured via YAML file next to this script: fetch_gh_meta_config.yaml):
+- input_parquet: list of parquet files, each with a column 'link' containing GitHub repo URLs
+- out_parquet: where to write the output metadata parquet (default: ../output/repometa.parquet)
+- batch_size: number of repositories to fetch per GraphQL request (default: 20)
+- quiet: reduce logging verbosity
+
+Usage:
+  uv run data_collection_utils/fetch_gh_meta.py
+"""
+
+from __future__ import annotations
+
+import os
+import sys
+from pathlib import Path
+from urllib.parse import urlparse
+from typing import List, Dict, Any, Optional, Tuple
+
+import pandas as pd
+import yaml
+from tqdm import tqdm
+import logging
+
+from github_api_utils import fetch_repos_metadata_graphql
+
+
+class TqdmLoggingHandler(logging.Handler):
+    def emit(self, record):
+        try:
+            msg = self.format(record)
+            tqdm.write(msg)
+        except Exception:
+            sys.stderr.write(str(record.getMessage()) + "\n")
+
+
+logger = logging.getLogger("fetch_gh_meta")
+
+
+def setup_logging(quiet: bool = False):
+    logger.setLevel(logging.DEBUG)
+    logger.propagate = False
+    logger.handlers.clear()
+    handler = TqdmLoggingHandler()
+    handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
+    handler.setLevel(logging.WARNING if quiet else logging.INFO)
+    logger.addHandler(handler)
+
+
+def ensure_github_token() -> str:
+    tok = os.getenv("GITHUB_TOKEN")
+    assert tok is not None and tok.strip() != "", (
+        "GITHUB_TOKEN is required. Export it in your environment or put it in a .env file."
+    )
+    return tok
+
+
+def _parse_owner_repo(s: str) -> Optional[Tuple[str, str]]:
+    s = s.strip()
+    if not s:
+        return None
+    if s.startswith("http://") or s.startswith("https://"):
+        p = urlparse(s)
+        if p.netloc != "github.com":
+            return None
+        parts = [part for part in p.path.split("/") if part]
+        if len(parts) < 2:
+            return None
+        owner, repo = parts[0], parts[1]
+        if repo.endswith(".git"):
+            repo = repo[:-4]
+        return owner, repo
+    if "/" in s:
+        owner, repo = s.split("/", 1)
+        return owner, repo
+    return None
+
+
+def main():
+    # No CLI args; configuration is via YAML next to this script
+
+    # Load YAML config next to this script if present
+    cfg: Dict[str, Any] = {}
+    cfg_path = Path(__file__).with_name("fetch_gh_meta_config.yaml")
+    if cfg_path.exists():
+        cfg = yaml.safe_load(cfg_path.read_text(encoding="utf-8")) or {}
+
+    def _resolve_cfg_path(val: Optional[str]) -> Optional[str]:
+        if val is None:
+            return None
+        p = Path(val)
+        if not p.is_absolute():
+            p = (cfg_path.parent / p).resolve()
+        return str(p)
+
+    def _resolve_cfg_paths(val) -> List[str]:
+        if val is None:
+            return []
+        if isinstance(val, (list, tuple)):
+            return [_resolve_cfg_path(v) for v in val if v is not None]
+        return [_resolve_cfg_path(val)]
+
+    input_parquet_values = _resolve_cfg_paths(cfg.get("input_parquet"))
+    out_parquet_value = _resolve_cfg_path(
+        cfg.get("out_parquet", "../output/repometa.parquet")
+    )
+    batch_size = int(cfg.get("batch_size", 20))
+    quiet = bool(cfg.get("quiet", False))
+
+    setup_logging(quiet=quiet)
+
+    ensure_github_token()
+
+    assert input_parquet_values, (
+        "input_parquet must be configured in fetch_gh_meta_config.yaml"
+    )
+    pairs: List[Tuple[str, str]] = []
+    seen: set[str] = set()
+    for pth in input_parquet_values:
+        df = pd.read_parquet(pth)
+        assert "link" in df.columns, f"Parquet {pth} must contain 'link' column"
+        for u in df["link"].tolist():
+            s = str(u).strip()
+            if not s:
+                continue
+            parsed = _parse_owner_repo(s)
+            if not parsed:
+                continue
+            owner, repo = parsed
+            key = f"{owner}/{repo}"
+            if key in seen:
+                continue
+            seen.add(key)
+            pairs.append((owner, repo))
+
+    logger.info(f"Total unique repos to fetch: {len(pairs)}")
+
+    # Fetch in batches via GraphQL
+    records: List[Dict[str, Any]] = []
+    for i in tqdm(range(0, len(pairs), batch_size), desc="GraphQL batches"):
+        batch = pairs[i : i + batch_size]
+        meta = fetch_repos_metadata_graphql(batch)
+        for owner, repo in batch:
+            key = f"{owner}/{repo}"
+            m = meta.get(key) or {}
+            records.append(
+                {
+                    "owner": owner,
+                    "repo": repo,
+                    "link": f"https://github.com/{owner}/{repo}",
+                    "name": m.get("name"),
+                    "description": m.get("description"),
+                    "stars": m.get("stars"),
+                    "default_branch": m.get("default_branch"),
+                    "last_commit_date": m.get("last_commit_date"),
+                    "language": m.get("language"),
+                    # store topics as a comma-joined string for portability in parquet; can be parsed downstream
+                    "topics": (
+                        ",".join(m.get("topics", []))
+                        if isinstance(m.get("topics"), list)
+                        else None
+                    ),
+                    "is_fork": m.get("is_fork"),
+                    "parent_url": m.get("parent_url"),
+                }
+            )
+
+    df_out = pd.DataFrame(records)
+    out_path = Path(out_parquet_value)
+    out_path.parent.mkdir(parents=True, exist_ok=True)
+    df_out.to_parquet(out_path)
+    logger.info(f"Wrote metadata for {len(df_out)} repos to {out_path}")
+
+
+if __name__ == "__main__":
+    main()
data_collection_utils/fetch_gh_meta_config.yaml ADDED
@@ -0,0 +1,14 @@
+# Configuration for fetch_gh_meta.py
+
+# Inputs: one or more Parquet files with a 'link' column pointing to GitHub repos
+input_parquet:
+  - ./top-1000-repos.parquet
+
+# Output metadata parquet
+out_parquet: ../output/repometa.parquet
+
+# GraphQL batching
+batch_size: 20
+
+# Logging
+quiet: false
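For reference, a minimal sketch of an input parquet that fetch_gh_meta.py accepts: a single 'link' column holding GitHub URLs or owner/repo strings. The file name and rows here are illustrative only.

import pandas as pd

pd.DataFrame(
    {"link": ["https://github.com/pandas-dev/pandas", "duckdb/duckdb"]}
).to_parquet("top-1000-repos.parquet")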
data_collection_utils/github_api_utils.py CHANGED
@@ -375,9 +375,7 @@ def fetch_repos_metadata_graphql(
             " pushedAt\n"
             " isFork\n"
             " parent { url nameWithOwner }\n"
-            " primaryLanguage { name }\n"
-            + topics_fragment +
-            " defaultBranchRef {\n"
+            " primaryLanguage { name }\n" + topics_fragment + " defaultBranchRef {\n"
             " name\n"
             " target {\n"
             " ... on Commit {\n"
@@ -393,7 +391,9 @@ def fetch_repos_metadata_graphql(
         )
         fields.append(header + body)
     # Assemble query
-    var_decls = " ".join([f"$owner{i}: String!, $name{i}: String!" for i in range(len(pairs))])
+    var_decls = " ".join(
+        [f"$owner{i}: String!, $name{i}: String!" for i in range(len(pairs))]
+    )
     query = "query(" + var_decls + ") {\n" + "\n".join(fields) + "\n}"
 
     payload = {"query": query, "variables": vars}
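To make the batching concrete, a hypothetical sketch of the assembled query for two repos: the variable declarations are space-joined exactly as in the hunk above, while the per-repo alias and field selection shown here are assumptions, since the real header/body construction sits outside this hunk.

pairs = [("pandas-dev", "pandas"), ("duckdb", "duckdb")]
var_decls = " ".join(
    [f"$owner{i}: String!, $name{i}: String!" for i in range(len(pairs))]
)
# Hypothetical aliased repository fields; the actual selection set is defined elsewhere.
fields = [
    f"r{i}: repository(owner: $owner{i}, name: $name{i}) {{ nameWithOwner stargazerCount }}"
    for i in range(len(pairs))
]
query = "query(" + var_decls + ") {\n" + "\n".join(fields) + "\n}"
print(query)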
data_collection_utils/parse_gh_docs_config.yaml CHANGED
@@ -7,14 +7,12 @@
 # - data_collection_utils/awesome_final_repos.py -> awesome-repos.parquet
 # - data_collection_utils/top_1000_repos.py -> top-1000-repos.parquet
 input_parquet:
-  - ./top-1000-repos.parquet
+  - ../output/links_filtered.parquet
 
 # Output directories/files
-outdir: ../output
+outdir: ../output/raw_docs
 md_failed: ../md-failed.txt
 texts_parquet: ../output/texts.parquet
-# Optional: path to write repository-level metadata parquet
-repometa_parquet: ../output/repometa.parquet
 
 # Concurrency and behavior
 workers: 1
@@ -24,12 +22,6 @@ quiet: false
 # How often to checkpoint partial outputs (in processed repos)
 checkpoint_every: 50
 
-# GraphQL batching and metadata
-# Max repos per GraphQL request; keep modest to avoid cost limits
-graphql_batch_size: 20
-# Number of topics to fetch per repository via GraphQL
-topics_limit: 20
-
 # Auth
 # Secrets are NOT configured here. Put your GitHub token in a .env file (recommended)
 # or export it in your shell environment. Required env var:
@@ -49,9 +41,7 @@ only_md: true
 # Minimum number of .md files for a repo to be considered useful (otherwise marked low-md-count)
 min_docs_md_count: 10
 
-# Filtering
-# Skip repos younger than this many years
-min_repo_age_years: 0
+# Filtering (note: age filtering moved to metadata stage)
 
 # Language filtering for texts parquet
 lang_filter: en
data_collection_utils/scrape_gh_docs.py CHANGED
@@ -14,8 +14,9 @@ Key features:
 3) Zip fallback (optional): `--prefer-zip` to download a codeload zip (no REST usage) and extract only .md.
 4) Org heuristics and search fallback via GitHub API if direct docs folder not found.
 - Content selection: `--only-md` limits downloads/extractions to Markdown files.
-- Filtering: `--min-repo-age-years` skips repos younger than N years (uses repo metadata, requires token).
 - Central config: reads YAML from `parse_gh_docs_config.yaml` to control inputs/outputs and strategies.
+- Note: Repository metadata fetching and filtering (e.g., by age/language/topics) has been split
+  into a separate pipeline step (see `data_collection_utils/fetch_gh_meta.py` and `clean/clean_meta.py`).
 - Quiet mode: `--quiet` or YAML `quiet: true` switches logging to warnings+ so tqdm progress stays visible.
 - No-fetch mode: `--no-fetch` rebuilds Parquet(s) from existing outdir without any network calls. You can also emit a per-file texts Parquet via `--texts-parquet` or YAML `texts_parquet`.
 
@@ -41,13 +42,11 @@ import concurrent.futures
 import threading
 from typing import Optional
 from typing import Dict, Any, List
-import json
 
 import pandas as pd
 import subprocess
 import yaml
 import duckdb
-from datetime import datetime, timezone
 import logging
 import langid  # https://github.com/saffsd/langid.py
 
@@ -55,18 +54,10 @@ import langid  # https://github.com/saffsd/langid.py
 from github_api_utils import (
     download_file,
     get_repo_info,
-    get_latest_commit_date,
     get_contents,
     get_owner_type,
     get_org_repos,
     search_repos,
-    get_repo_tree_paths,
-    get_repo_tree_md_paths,
-)
-from repo_tree import (
-    build_tree_from_local_dir,
-    build_tree_from_paths,
-    filter_paths_by_directories,
 )
 
 # Note: core Github helpers and repo tree builders are defined in the modules above
@@ -538,7 +529,6 @@ def process_repo_entry(
     prefer_zip: bool = False,
     prefer_sparse: bool = False,
     only_md: bool = False,
-    min_repo_age_years: int = 0,
 ):
     owner_repo = owner_repo.strip()
     if not owner_repo or owner_repo.startswith("#"):
@@ -567,67 +557,6 @@
     got_any = False
     default_branch = None
 
-    # Age filter: ensure repo existed for at least the configured number of years
-    if min_repo_age_years and min_repo_age_years > 0:
-        repo_json_age = get_repo_info(owner, repo)
-        if not repo_json_age:
-            logger.error(f"Cannot fetch repo metadata for age check: {owner}/{repo}")
-            append_line_threadsafe(
-                md_failed_path, f"{owner}/{repo} # metadata-failed\n", lock
-            )
-            return {
-                "owner": owner,
-                "repo": repo,
-                "default_branch": None,
-                "method": None,
-                "docs_found": False,
-                "docs_folder": None,
-                "md_count": None,
-                "status": "metadata-failed",
-                "note": "failed to fetch repo metadata (age)",
-            }
-        created_at = repo_json_age.get("created_at")
-        if not created_at:
-            logger.warning(
-                f"Missing created_at for {owner}/{repo}; cannot evaluate age. Skipping repo."
-            )
-            append_line_threadsafe(
-                md_failed_path, f"{owner}/{repo} # created_at-missing\n", lock
-            )
-            return {
-                "owner": owner,
-                "repo": repo,
-                "default_branch": repo_json_age.get("default_branch"),
-                "method": None,
-                "docs_found": False,
-                "docs_folder": None,
-                "md_count": None,
-                "status": "age-unknown",
-                "note": "created_at missing",
-            }
-        # Normalize ISO8601 (Z -> +00:00)
-        created_dt = datetime.fromisoformat(created_at.replace("Z", "+00:00"))
-        now = datetime.now(timezone.utc)
-        age_years = (now - created_dt).days / 365.25
-        if age_years < min_repo_age_years:
-            logger.info(
-                f"Skipping {owner}/{repo}: age {age_years:.2f}y < {min_repo_age_years}y"
-            )
-            append_line_threadsafe(
-                md_failed_path, f"{owner}/{repo} # too-young\n", lock
-            )
-            return {
-                "owner": owner,
-                "repo": repo,
-                "default_branch": repo_json_age.get("default_branch"),
-                "method": None,
-                "docs_found": False,
-                "docs_folder": None,
-                "md_count": None,
-                "status": "too-young",
-                "note": f"age_years={age_years:.2f}",
-            }
-
     if prefer_sparse:
         # Try to fetch only docs/ folder via git sparse-checkout without REST API
         for branch_guess in ("main", "master"):
@@ -856,22 +785,6 @@ def _init_duckdb(con):
         );
         """
     )
-    con.execute(
-        """
-        CREATE TABLE IF NOT EXISTS repometa (
-            latest_commit_date TEXT,
-            name TEXT,
-            parent_org TEXT,
-            stars BIGINT,
-            link TEXT,
-            language TEXT,
-            topics TEXT,
-            docs_found_in TEXT,
-            docs_repo_structure TEXT,
-            repo_structure_all_files TEXT
-        );
-        """
-    )
 
 
 # === CLI ===
@@ -909,13 +822,11 @@ def main():
     dry_run_value = bool(cfg.get("dry_run", False))
     workers_value = int(cfg.get("workers", 4))
    texts_parquet_value = _resolve_cfg_path(cfg.get("texts_parquet"))
-    repometa_parquet_value = _resolve_cfg_path(cfg.get("repometa_parquet"))
     duckdb_path_value = _resolve_cfg_path(cfg.get("duckdb_path"))
     token_file_value = _resolve_cfg_path(cfg.get("token_file"))
     prefer_zip_value = bool(cfg.get("prefer_zip", False))
     prefer_sparse_value = bool(cfg.get("prefer_sparse", False))
     only_md_value = bool(cfg.get("only_md", False))
-    min_repo_age_years_value = int(cfg.get("min_repo_age_years", 0))
     quiet_value = bool(cfg.get("quiet", False))
     # CLI should override YAML for convenience
     no_fetch_value = bool(args.no_fetch or cfg.get("no_fetch", False))
@@ -995,7 +906,6 @@
                 prefer_zip=prefer_zip_value,
                 prefer_sparse=prefer_sparse_value,
                 only_md=only_md_value,
-                min_repo_age_years=min_repo_age_years_value,
             )
             if res is not None:
                 with results_lock:
@@ -1074,7 +984,21 @@
         for d in outdir.iterdir()
         if d.is_dir() and "__" in d.name and not d.name.startswith("tmp_")
     ]
-    md_rows: List[Dict[str, Any]] = []
+    # Rebuild texts table from filesystem by streaming per-repo inserts into DuckDB
+    cols = [
+        "owner",
+        "repo",
+        "repo_dir",
+        "file_rel_repo",
+        "file_rel_outdir",
+        "size",
+        "mtime",
+        "lang",
+        "content",
+    ]
+    total_inserted = 0
+    with duckdb_lock:
+        con.execute("DELETE FROM texts")
     with tqdm(total=len(repo_dirs), desc="Collecting per-file rows (repos)") as pbar:
         with concurrent.futures.ThreadPoolExecutor(
             max_workers=workers_value
@@ -1096,232 +1020,29 @@
                     logger.error(f"Error while scanning repo dir: {e}")
                     rows = []
                 if rows:
-                    md_rows.extend(rows)
+                    df_chunk = pd.DataFrame(rows, columns=cols)
+                    with duckdb_lock:
+                        con.register("df_txt_chunk", df_chunk)
+                        con.execute("INSERT INTO texts SELECT * FROM df_txt_chunk")
+                        con.unregister("df_txt_chunk")
+                    total_inserted += len(rows)
                 pbar.update(1)
 
-    # Save per-file dataset (texts) into DuckDB and export Parquet
+    # Export texts from DuckDB to Parquet
     texts_parquet_path = (
         Path(texts_parquet_value) if texts_parquet_value else (outdir / "texts.parquet")
     )
     try:
-        cols = [
-            "owner",
-            "repo",
-            "repo_dir",
-            "file_rel_repo",
-            "file_rel_outdir",
-            "size",
-            "mtime",
-            "lang",
-            "content",
-        ]
-        df_txt = pd.DataFrame(md_rows, columns=cols)
-        with duckdb_lock:
-            con.execute("BEGIN")
-            con.execute("DELETE FROM texts")
-            con.register("df_txt_all", df_txt)
-            con.execute("INSERT INTO texts SELECT * FROM df_txt_all")
-            con.unregister("df_txt_all")
-            con.execute("COMMIT")
         texts_parquet_path.parent.mkdir(parents=True, exist_ok=True)
         con.execute(
             "COPY (SELECT * FROM texts) TO ? (FORMAT PARQUET)",
            [str(texts_parquet_path)],
        )
        logger.info(
-            f"Wrote per-file dataset to {texts_parquet_path} (rows={len(md_rows)})"
-        )
-    except Exception as e:
-        logger.error(f"Failed to persist/export texts: {e}")
-
-    # Build and save repo metadata dataset
-    repometa_rows: List[Dict[str, Any]] = []
-    if no_fetch_value:
-        # Derive metadata purely from local folders (no network calls)
-        for d in repo_dirs:
-            try:
-                owner, repo = d.name.split("__", 1)
-            except ValueError:
-                continue
-            link = f"https://github.com/{owner}/{repo}"
-            # Determine docs folder similar to compute_md_failed_for_existing()
-            if (d / "docs").exists():
-                docs_folder = d / "docs"
-            else:
-                found = None
-                for p in d.rglob("docs"):
-                    if p.is_dir():
-                        found = p
-                        break
-                docs_folder = found if found else d
-
-            docs_tree_json = None
-            if docs_folder.exists() and docs_folder.is_dir():
-                docs_tree = build_tree_from_local_dir(docs_folder, only_md=True)
-                docs_tree_json = json.dumps(docs_tree, ensure_ascii=False)
-
-            full_tree = build_tree_from_local_dir(d, only_md=False)
-            full_tree_json = json.dumps(full_tree, ensure_ascii=False)
-
-            repometa_rows.append(
-                {
-                    "latest_commit_date": None,
-                    "name": repo,
-                    "parent_org": owner,
-                    "stars": None,
-                    "link": link,
-                    "language": None,
-                    "topics": None,
-                    "docs_found_in": None,
-                    "docs_repo_structure": docs_tree_json,
-                    "repo_structure_all_files": full_tree_json,
-                }
-            )
-    else:
-        for res in results:
-            owner = res.get("owner")
-            repo = res.get("repo")
-            if not owner or not repo:
-                continue
-            try:
-                repo_json = get_repo_info(owner, repo) or {}
-                default_branch = res.get("default_branch") or repo_json.get(
-                    "default_branch", "main"
-                )
-                latest_commit_date = get_latest_commit_date(
-                    owner, repo, default_branch, repo_json
-                )
-                stars = repo_json.get("stargazers_count")
-                link = f"https://github.com/{owner}/{repo}"
-                docs_found_in = res.get("docs_found_in")
-
-                # Build docs tree
-                method = res.get("method")
-                docs_tree_json = None
-                try:
-                    docs_src_owner = owner
-                    docs_src_repo = repo
-                    docs_src_ref = default_branch
-                    path_filters: List[str] | None = None
-
-                    if method in ("org_docs_repo_zip", "search_repo_zip"):
-                        dfi = res.get("docs_found_in")
-                        if isinstance(dfi, str) and dfi.startswith("http"):
-                            u = urlparse(dfi)
-                            parts = [p for p in u.path.split("/") if p]
-                            if len(parts) >= 2:
-                                docs_src_owner, docs_src_repo = parts[0], parts[1]
-                                info = (
-                                    get_repo_info(docs_src_owner, docs_src_repo) or {}
-                                )
-                                docs_src_ref = info.get("default_branch", "main")
-                        path_filters = None
-                    elif method in ("docs_folder_in_repo", "docs_file_in_repo"):
-                        path_filters = ["docs"]
-                    elif method in ("sparse_docs", "zip_whole_repo"):
-                        path_filters = ["docs", "doc", "documentation"]
-
-                    md_paths_all = get_repo_tree_md_paths(
-                        docs_src_owner, docs_src_repo, docs_src_ref
-                    )
-                    if path_filters is not None:
-                        md_paths = filter_paths_by_directories(
-                            md_paths_all, path_filters
-                        )
-                    else:
-                        md_paths = md_paths_all
-
-                    if md_paths:
-                        root_name = f"{docs_src_owner}__{docs_src_repo}"
-                        docs_tree = build_tree_from_paths(md_paths, root_name=root_name)
-                        docs_tree_json = json.dumps(docs_tree, ensure_ascii=False)
-                except Exception:
-                    pass
-                if docs_tree_json is None:
-                    docs_folder_rel = res.get("docs_folder")
-                    if docs_folder_rel:
-                        docs_dir = outdir / docs_folder_rel
-                        if docs_dir.exists() and docs_dir.is_dir():
-                            docs_tree = build_tree_from_local_dir(
-                                docs_dir, only_md=True
-                            )
-                            docs_tree_json = json.dumps(docs_tree, ensure_ascii=False)
-
-                # Build full tree
-                full_tree_json = None
-                try:
-                    paths = get_repo_tree_paths(owner, repo, default_branch)
-                    if paths:
-                        full_tree = build_tree_from_paths(
-                            paths, root_name=f"{owner}__{repo}"
-                        )
-                        full_tree_json = json.dumps(full_tree, ensure_ascii=False)
-                except Exception:
-                    pass
-                if full_tree_json is None:
-                    saved_root = outdir / safe_name(f"{owner}__{repo}")
-                    if saved_root.exists():
-                        full_tree = build_tree_from_local_dir(saved_root, only_md=False)
-                        full_tree_json = json.dumps(full_tree, ensure_ascii=False)
-
-                repometa_rows.append(
-                    {
-                        "latest_commit_date": latest_commit_date,
-                        "name": repo,
-                        "parent_org": owner,
-                        "stars": stars,
-                        "link": link,
-                        "language": repo_json.get("language"),
-                        "topics": json.dumps(
-                            repo_json.get("topics"), ensure_ascii=False
-                        )
-                        if repo_json.get("topics") is not None
-                        else None,
-                        "docs_found_in": docs_found_in,
-                        "docs_repo_structure": docs_tree_json,
-                        "repo_structure_all_files": full_tree_json,
-                    }
-                )
-            except Exception as e:
-                logger.warning(f"Failed to build repometa for {owner}/{repo}: {e}")
-
-    # Persist repo metadata to DuckDB and export Parquet
-    repometa_parquet_path = (
-        Path(repometa_parquet_value)
-        if repometa_parquet_value
-        else (outdir / "repometa.parquet")
-    )
-    try:
-        cols_meta = [
-            "latest_commit_date",
-            "name",
-            "parent_org",
-            "stars",
-            "link",
-            "language",
-            "topics",
-            "docs_found_in",
-            "docs_repo_structure",
-            "repo_structure_all_files",
-        ]
-        df_meta = pd.DataFrame(repometa_rows, columns=cols_meta)
-        with duckdb_lock:
-            con.execute("BEGIN")
-            con.execute("DELETE FROM repometa")
-            con.register("df_meta_all", df_meta)
-            con.execute("INSERT INTO repometa SELECT * FROM df_meta_all")
-            con.unregister("df_meta_all")
-            con.execute("COMMIT")
-        repometa_parquet_path.parent.mkdir(parents=True, exist_ok=True)
-        con.execute(
-            "COPY (SELECT * FROM repometa) TO ? (FORMAT PARQUET)",
-            [str(repometa_parquet_path)],
-        )
-        logger.info(
-            f"Wrote repo metadata dataset to {repometa_parquet_path} (rows={len(repometa_rows)})"
+            f"Wrote per-file dataset to {texts_parquet_path} (rows={total_inserted})"
         )
     except Exception as e:
-        logger.error(f"Failed to persist/export repo metadata: {e}")
+        logger.error(f"Failed to export texts to Parquet: {e}")
 
     # Close DB connection
     try:
@@ -1332,7 +1053,6 @@
     logger.info("Done. Check output directory and md-failed.txt")
 
 
-# ... (rest of the code remains the same)
 class TqdmLoggingHandler(logging.Handler):
     def emit(self, record):
         try:
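A standalone sketch of the streaming-insert pattern introduced above: each chunk of rows becomes a small DataFrame that is registered, inserted, and unregistered under a lock, instead of accumulating one large md_rows list in memory. The table schema and rows are trimmed for illustration.

import threading

import duckdb
import pandas as pd

con = duckdb.connect()
con.execute("CREATE TABLE texts (owner TEXT, repo TEXT, content TEXT)")
duckdb_lock = threading.Lock()  # single-threaded here, but mirrors the scraper's locking

for chunk in (
    [{"owner": "a", "repo": "x", "content": "# readme"}],
    [{"owner": "b", "repo": "y", "content": "# docs"}],
):
    df_chunk = pd.DataFrame(chunk, columns=["owner", "repo", "content"])
    with duckdb_lock:
        con.register("df_txt_chunk", df_chunk)
        con.execute("INSERT INTO texts SELECT * FROM df_txt_chunk")
        con.unregister("df_txt_chunk")

con.execute("COPY (SELECT * FROM texts) TO 'texts.parquet' (FORMAT PARQUET)")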
data_collection_utils/top-1000-repos.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cd4d9bdf975cf71b2a68fa7e4e525911c9a46b77b95f5e6c85f3306527cb239c
-size 22108
+oid sha256:4fa7b4ed3bbeca1048ed6fbe9e2ccb212043c211785fff53564230b4c5cad876
+size 90891
data_collection_utils/top_1000_repos_config.yaml CHANGED
@@ -9,13 +9,14 @@ out_parquet: ./top-1000-repos.parquet
 # Browser
 headless: true
 
-# Scrolling behavior to load all repo links
-scroll_max_iters: 200
+# Scrolling behavior to load all repo links (more generous)
+scroll_max_iters: 500
 scroll_pause_ms: 300
-stable_threshold: 10
-min_anchors: 1500
+stable_threshold: 25
+min_anchors: 4000
 
 # GraphQL
 graphql_batch_size: 50
 topics_limit: 20
+# Keep forks as separate entries to avoid collapsing many entries to parents
 fork_resolution: true
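The scroll settings above suggest a loop that keeps scrolling until the anchor count stops growing. The browser code itself is not part of this diff, so the Playwright calls, the selector, and the URL below are assumptions, shown only to illustrate how the four parameters interact.

from playwright.sync_api import sync_playwright

cfg = {"scroll_max_iters": 500, "scroll_pause_ms": 300, "stable_threshold": 25, "min_anchors": 4000}

with sync_playwright() as pw:
    page = pw.chromium.launch(headless=True).new_page()
    page.goto("https://github.com/search?q=stars%3A%3E10000&type=repositories")  # hypothetical source page
    stable, last_count = 0, 0
    for _ in range(cfg["scroll_max_iters"]):
        page.mouse.wheel(0, 4000)                      # scroll to trigger loading of more results
        page.wait_for_timeout(cfg["scroll_pause_ms"])  # give anchors time to render
        count = page.locator("a").count()
        stable = stable + 1 if count == last_count else 0
        last_count = count
        # stop once the count has been stable long enough, but only after enough anchors loaded
        if stable >= cfg["stable_threshold"] and count >= cfg["min_anchors"]:
            break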
texts.parquet → filtered_texts.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3a065276d3cd17cbecdf20a62d6e7610ecf7d9be6415b5e3648a2f1ac63fb080
-size 381801625
+oid sha256:b506a8cc9cd928f237b367cf8c975c3280f512a7d85925fdf97ce9feed1d5c7c
+size 50065296
gooddocs.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3cf9e395f5191efec809e9be1c3a647a0bc7a12030dd003b839280c4fbbae5e2
-size 250675101
+oid sha256:016c90924a61528cc8eae7f75ebea250bc9bdc63be22655be6f9a31e7411d5c3
+size 91294381